'''
Created on Apr 06, 2012

@author: Michael Kraus (michael.kraus@ipp.mpg.de)
'''

import argparse
import matplotlib

from vorticity.diagnostics import Diagnostics


class replay(object):
    '''
    Replays a stored vorticity run from an HDF5 file and regenerates its plots.
    '''

    def __init__(self, hdf5_file, nPlot=1, nMax=0, output=False, contours=False):
        '''
        Constructor
        '''

        self.diagnostics = Diagnostics(hdf5_file)

        if nMax > 0 and nMax < self.diagnostics.nt:
            self.nMax = nMax
        else:
            self.nMax = self.diagnostics.nt

        self.nPlot = nPlot
        self.plot = PlotVorticity2D(self.diagnostics, output=output)

    def run(self):
        # for iTime in range(1, self.nMax + 1):
        for iTime in [5, 10, 20, 30, 60]:
            if iTime == 0 or iTime % self.nPlot == 0 or iTime == self.nMax:
                print(iTime)
                self.diagnostics.read_from_hdf5(iTime)
                self.diagnostics.update_invariants(iTime)
                self.plot.update(iTime, final=(iTime == self.nMax))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Vorticity Equation Solver in 2D')

    parser.add_argument('hdf5_file', metavar='<run.hdf5>', type=str,
                        help='Run HDF5 File')
    parser.add_argument('-np', metavar='i', type=int, default=1,
                        help='plot every i\'th frame')
    parser.add_argument('-nt', metavar='i', type=int, default=0,
                        help='plot up to i\'th frame')
    parser.add_argument('-o', action='store_true', required=False,
                        help='save plots to file')
    parser.add_argument('-c', action='store_true', required=False,
                        help='plot contours of streaming function in vorticity')

    args = parser.parse_args()

    print()
    print("Replay run with " + args.hdf5_file)
    print()

    if args.o:
        matplotlib.use('AGG')
        from vorticity.plot.plot_contours import PlotVorticity2D
        pyvp = replay(args.hdf5_file, args.np, args.nt, output=True, contours=args.c)
        pyvp.run()
    else:
        from vorticity.plot.plot_contours import PlotVorticity2D
        pyvp = replay(args.hdf5_file, args.np, args.nt, output=False, contours=args.c)

        print()
        input('Hit any key to start replay.')
        print()

        pyvp.run()

    print()
    print("Replay finished.")
    print()
"""NEWLINESelenium maintains a library of wait conditions under the support module.NEWLINEHowever, custom wait conditions can be created when none of the convenience methodsNEWLINEfit. A custom wait condition can be created using a class with a __call__ method which reutrns FalseNEWLINEwhen the condition does not match.NEWLINE"""NEWLINENEWLINENEWLINEclass LazyLoadElementHasDataCondition(object):NEWLINE """NEWLINE A custom wait condition to check if the lazy loaded element has data.NEWLINENEWLINE Attributes:NEWLINE locator (tuple): locator to find the root element, see `Locators`_ design pattern.NEWLINE data_selector (str): css selector to find the data elements from the :attr:`locator`.NEWLINENEWLINE .. _Locators:NEWLINE http://selenium-python.readthedocs.io/selenium_page-objects.html#locatorsNEWLINE """NEWLINENEWLINE def __init__(self, locator, data_selector):NEWLINE self.locator = locatorNEWLINE self.data_selector = data_selectorNEWLINENEWLINE def __call__(self, driver):NEWLINE lazy_element = driver.find_element(*self.locator)NEWLINE data_elements = lazy_element.find_elements_by_css_selector(self.data_selector)NEWLINE if any([self._check(d) for d in data_elements]):NEWLINE return TrueNEWLINE return FalseNEWLINENEWLINE def _check(self, element):NEWLINE return bool(element.text)NEWLINENEWLINENEWLINEclass LazyLoadInputElementHasDataCondition(LazyLoadElementHasDataCondition):NEWLINE """NEWLINE Similar to :class:`LazyLoadElementHasDataCondition` only checks for value instead of text.NEWLINE """NEWLINE def _check(self, element):NEWLINE value = element.get_attribute('value')NEWLINE return bool(value)NEWLINE
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Unit tests for the DataflowRunner class."""

# pytype: skip-file

from __future__ import absolute_import

import json
import sys
import unittest
from builtins import object
from builtins import range
from datetime import datetime

# patches unittest.TestCase to be python3 compatible
import future.tests.base  # pylint: disable=unused-import
import mock
import pytest

import apache_beam as beam
import apache_beam.transforms as ptransform
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import Pipeline
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import PCollection
from apache_beam.runners import DataflowRunner
from apache_beam.runners import TestDataflowRunner
from apache_beam.runners import create_runner
from apache_beam.runners.dataflow.dataflow_runner import DataflowPipelineResult
from apache_beam.runners.dataflow.dataflow_runner import DataflowRuntimeException
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.extra_assertions import ExtraAssertionsMixin
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.transforms import window
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.core import _GroupByKeyOnly
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.typehints import typehints

# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
  from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
  apiclient = None  # type: ignore
# pylint: enable=wrong-import-order, wrong-import-position


# SpecialParDo and SpecialDoFn are used in test_remote_runner_display_data.
# Due to BEAM-8482, these need to be declared outside of the test method.
# TODO: Should not subclass ParDo. Switch to PTransform as soon as
# composite transforms support display data.
class SpecialParDo(beam.ParDo):
  def __init__(self, fn, now):
    super(SpecialParDo, self).__init__(fn)
    self.fn = fn
    self.now = now

  # Make this a list to be accessible within closure
  def display_data(self):
    return {'asubcomponent': self.fn,
            'a_class': SpecialParDo,
            'a_time': self.now}


class SpecialDoFn(beam.DoFn):
  def display_data(self):
    return {'dofn_value': 42}

  def process(self):
    pass


@unittest.skipIf(apiclient is None, 'GCP dependencies are not installed')
class DataflowRunnerTest(unittest.TestCase, ExtraAssertionsMixin):
  def setUp(self):
    self.default_properties = [
        '--dataflow_endpoint=ignored',
        '--job_name=test-job',
        '--project=test-project',
        '--staging_location=ignored',
        '--temp_location=/dev/null',
        '--no_auth',
        '--dry_run=True']

  @mock.patch('time.sleep', return_value=None)
  def test_wait_until_finish(self, patched_time_sleep):
    values_enum = dataflow_api.Job.CurrentStateValueValuesEnum

    class MockDataflowRunner(object):
      def __init__(self, states):
        self.dataflow_client = mock.MagicMock()
        self.job = mock.MagicMock()
        self.job.currentState = values_enum.JOB_STATE_UNKNOWN
        self._states = states
        self._next_state_index = 0

        def get_job_side_effect(*args, **kwargs):
          self.job.currentState = self._states[self._next_state_index]
          if self._next_state_index < (len(self._states) - 1):
            self._next_state_index += 1
          return mock.DEFAULT

        self.dataflow_client.get_job = mock.MagicMock(
            return_value=self.job, side_effect=get_job_side_effect)
        self.dataflow_client.list_messages = mock.MagicMock(
            return_value=([], None))

    with self.assertRaisesRegex(
        DataflowRuntimeException, 'Dataflow pipeline failed. State: FAILED'):
      failed_runner = MockDataflowRunner([values_enum.JOB_STATE_FAILED])
      failed_result = DataflowPipelineResult(failed_runner.job, failed_runner)
      failed_result.wait_until_finish()

    succeeded_runner = MockDataflowRunner([values_enum.JOB_STATE_DONE])
    succeeded_result = DataflowPipelineResult(
        succeeded_runner.job, succeeded_runner)
    result = succeeded_result.wait_until_finish()
    self.assertEqual(result, PipelineState.DONE)

    # Time array has duplicate items, because some logging implementations also
    # call time.
    with mock.patch('time.time', mock.MagicMock(side_effect=[1, 1, 2, 2, 3])):
      duration_succeeded_runner = MockDataflowRunner(
          [values_enum.JOB_STATE_RUNNING, values_enum.JOB_STATE_DONE])
      duration_succeeded_result = DataflowPipelineResult(
          duration_succeeded_runner.job, duration_succeeded_runner)
      result = duration_succeeded_result.wait_until_finish(5000)
      self.assertEqual(result, PipelineState.DONE)

    with mock.patch('time.time', mock.MagicMock(side_effect=[1, 9, 9, 20, 20])):
      duration_timedout_runner = MockDataflowRunner(
          [values_enum.JOB_STATE_RUNNING])
      duration_timedout_result = DataflowPipelineResult(
          duration_timedout_runner.job, duration_timedout_runner)
      result = duration_timedout_result.wait_until_finish(5000)
      self.assertEqual(result, PipelineState.RUNNING)

    with mock.patch('time.time', mock.MagicMock(side_effect=[1, 1, 2, 2, 3])):
      with self.assertRaisesRegex(
          DataflowRuntimeException,
          'Dataflow pipeline failed. State: CANCELLED'):
        duration_failed_runner = MockDataflowRunner(
            [values_enum.JOB_STATE_CANCELLED])
        duration_failed_result = DataflowPipelineResult(
            duration_failed_runner.job, duration_failed_runner)
        duration_failed_result.wait_until_finish(5000)

  @mock.patch('time.sleep', return_value=None)
  def test_cancel(self, patched_time_sleep):
    values_enum = dataflow_api.Job.CurrentStateValueValuesEnum

    class MockDataflowRunner(object):
      def __init__(self, state, cancel_result):
        self.dataflow_client = mock.MagicMock()
        self.job = mock.MagicMock()
        self.job.currentState = state

        self.dataflow_client.get_job = mock.MagicMock(return_value=self.job)
        self.dataflow_client.modify_job_state = mock.MagicMock(
            return_value=cancel_result)
        self.dataflow_client.list_messages = mock.MagicMock(
            return_value=([], None))

    with self.assertRaisesRegex(
        DataflowRuntimeException, 'Failed to cancel job'):
      failed_runner = MockDataflowRunner(values_enum.JOB_STATE_RUNNING, False)
      failed_result = DataflowPipelineResult(failed_runner.job, failed_runner)
      failed_result.cancel()

    succeeded_runner = MockDataflowRunner(values_enum.JOB_STATE_RUNNING, True)
    succeeded_result = DataflowPipelineResult(
        succeeded_runner.job, succeeded_runner)
    succeeded_result.cancel()

    terminal_runner = MockDataflowRunner(values_enum.JOB_STATE_DONE, False)
    terminal_result = DataflowPipelineResult(
        terminal_runner.job, terminal_runner)
    terminal_result.cancel()

  def test_create_runner(self):
    self.assertTrue(
        isinstance(create_runner('DataflowRunner'),
                   DataflowRunner))
    self.assertTrue(
        isinstance(create_runner('TestDataflowRunner'),
                   TestDataflowRunner))

  def test_environment_override_translation(self):
    self.default_properties.append('--experiments=beam_fn_api')
    self.default_properties.append('--worker_harness_container_image=FOO')
    remote_runner = DataflowRunner()
    with Pipeline(
        remote_runner,
        options=PipelineOptions(self.default_properties)) as p:
      (p | ptransform.Create([1, 2, 3])  # pylint: disable=expression-not-assigned
       | 'Do' >> ptransform.FlatMap(lambda x: [(x, x)])
       | ptransform.GroupByKey())
    self.assertEqual(
        list(remote_runner.proto_pipeline.components.environments.values()),
        [beam_runner_api_pb2.Environment(
            urn=common_urns.environments.DOCKER.urn,
            payload=beam_runner_api_pb2.DockerPayload(
                container_image='FOO').SerializeToString())])

  def test_remote_runner_translation(self):
    remote_runner = DataflowRunner()
    with Pipeline(
        remote_runner,
        options=PipelineOptions(self.default_properties)) as p:

      (p | ptransform.Create([1, 2, 3])  # pylint: disable=expression-not-assigned
       | 'Do' >> ptransform.FlatMap(lambda x: [(x, x)])
       | ptransform.GroupByKey())

  def test_streaming_create_translation(self):
    remote_runner = DataflowRunner()
    self.default_properties.append("--streaming")
    with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
      p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    job_dict = json.loads(str(remote_runner.job))
    self.assertEqual(len(job_dict[u'steps']), 3)

    self.assertEqual(job_dict[u'steps'][0][u'kind'], u'ParallelRead')
    self.assertEqual(
        job_dict[u'steps'][0][u'properties'][u'pubsub_subscription'],
        '_starting_signal/')
    self.assertEqual(job_dict[u'steps'][1][u'kind'], u'ParallelDo')
    self.assertEqual(job_dict[u'steps'][2][u'kind'], u'ParallelDo')

  def test_biqquery_read_streaming_fail(self):
    remote_runner = DataflowRunner()
    self.default_properties.append("--streaming")
    with self.assertRaisesRegex(ValueError,
                                r'source is not currently available'):
      with Pipeline(
          remote_runner,
          PipelineOptions(self.default_properties)) as p:
        _ = p | beam.io.Read(beam.io.BigQuerySource('some.table'))

  # TODO(BEAM-8095): Segfaults in Python 3.7 with xdist.
  @pytest.mark.no_xdist
  def test_remote_runner_display_data(self):
    remote_runner = DataflowRunner()
    p = Pipeline(remote_runner,
                 options=PipelineOptions(self.default_properties))

    now = datetime.now()
    # pylint: disable=expression-not-assigned
    (p | ptransform.Create([1, 2, 3, 4, 5])
     | 'Do' >> SpecialParDo(SpecialDoFn(), now))

    # TODO(BEAM-366) Enable runner API on this test.
    p.run(test_runner_api=False)
    job_dict = json.loads(str(remote_runner.job))
    steps = [step
             for step in job_dict['steps']
             if len(step['properties'].get('display_data', [])) > 0]
    step = steps[1]
    disp_data = step['properties']['display_data']
    nspace = SpecialParDo.__module__ + '.'
    expected_data = [{'type': 'TIMESTAMP', 'namespace': nspace+'SpecialParDo',
                      'value': DisplayDataItem._format_value(now, 'TIMESTAMP'),
                      'key': 'a_time'},
                     {'type': 'STRING', 'namespace': nspace+'SpecialParDo',
                      'value': nspace+'SpecialParDo', 'key': 'a_class',
                      'shortValue': 'SpecialParDo'},
                     {'type': 'INTEGER', 'namespace': nspace+'SpecialDoFn',
                      'value': 42, 'key': 'dofn_value'}]
    self.assertUnhashableCountEqual(disp_data, expected_data)

  def test_no_group_by_key_directly_after_bigquery(self):
    remote_runner = DataflowRunner()
    p = Pipeline(remote_runner,
                 options=PipelineOptions([
                     '--dataflow_endpoint=ignored',
                     '--job_name=test-job',
                     '--project=test-project',
                     '--staging_location=ignored',
                     '--temp_location=/dev/null',
                     '--no_auth'
                 ]))
    rows = p | beam.io.Read(beam.io.BigQuerySource('dataset.faketable'))
    with self.assertRaises(ValueError,
                           msg=('Coder for the GroupByKey operation'
                                '"GroupByKey" is not a key-value coder: '
                                'RowAsDictJsonCoder')):
      unused_invalid = rows | beam.GroupByKey()

  def test_group_by_key_input_visitor_with_valid_inputs(self):
    p = TestPipeline()
    pcoll1 = PCollection(p)
    pcoll2 = PCollection(p)
    pcoll3 = PCollection(p)
    for transform in [_GroupByKeyOnly(), beam.GroupByKey()]:
      pcoll1.element_type = None
      pcoll2.element_type = typehints.Any
      pcoll3.element_type = typehints.KV[typehints.Any, typehints.Any]
      for pcoll in [pcoll1, pcoll2, pcoll3]:
        applied = AppliedPTransform(None, transform, "label", [pcoll])
        applied.outputs[None] = PCollection(None)
        DataflowRunner.group_by_key_input_visitor().visit_transform(
            applied)
        self.assertEqual(pcoll.element_type,
                         typehints.KV[typehints.Any, typehints.Any])

  def test_group_by_key_input_visitor_with_invalid_inputs(self):
    p = TestPipeline()
    pcoll1 = PCollection(p)
    pcoll2 = PCollection(p)
    for transform in [_GroupByKeyOnly(), beam.GroupByKey()]:
      pcoll1.element_type = str
      pcoll2.element_type = typehints.Set
      err_msg = (
          r"Input to 'label' must be compatible with KV\[Any, Any\]. "
          "Found .*")
      for pcoll in [pcoll1, pcoll2]:
        with self.assertRaisesRegex(ValueError, err_msg):
          DataflowRunner.group_by_key_input_visitor().visit_transform(
              AppliedPTransform(None, transform, "label", [pcoll]))

  def test_group_by_key_input_visitor_for_non_gbk_transforms(self):
    p = TestPipeline()
    pcoll = PCollection(p)
    for transform in [beam.Flatten(), beam.Map(lambda x: x)]:
      pcoll.element_type = typehints.Any
      DataflowRunner.group_by_key_input_visitor().visit_transform(
          AppliedPTransform(None, transform, "label", [pcoll]))
      self.assertEqual(pcoll.element_type, typehints.Any)

  def test_flatten_input_with_visitor_with_single_input(self):
    self._test_flatten_input_visitor(typehints.KV[int, int], typehints.Any, 1)

  def test_flatten_input_with_visitor_with_multiple_inputs(self):
    self._test_flatten_input_visitor(
        typehints.KV[int, typehints.Any], typehints.Any, 5)

  def _test_flatten_input_visitor(self, input_type, output_type, num_inputs):
    p = TestPipeline()
    inputs = []
    for _ in range(num_inputs):
      input_pcoll = PCollection(p)
      input_pcoll.element_type = input_type
      inputs.append(input_pcoll)
    output_pcoll = PCollection(p)
    output_pcoll.element_type = output_type

    flatten = AppliedPTransform(None, beam.Flatten(), "label", inputs)
    flatten.add_output(output_pcoll, None)
    DataflowRunner.flatten_input_visitor().visit_transform(flatten)
    for _ in range(num_inputs):
      self.assertEqual(inputs[0].element_type, output_type)

  def test_gbk_then_flatten_input_visitor(self):
    p = TestPipeline(
        runner=DataflowRunner(),
        options=PipelineOptions(self.default_properties))
    none_str_pc = p | 'c1' >> beam.Create({None: 'a'})
    none_int_pc = p | 'c2' >> beam.Create({None: 3})
    flat = (none_str_pc, none_int_pc) | beam.Flatten()
    _ = flat | beam.GroupByKey()

    # This may change if type inference changes, but we assert it here
    # to make sure the check below is not vacuous.
    self.assertNotIsInstance(flat.element_type, typehints.TupleConstraint)

    p.visit(DataflowRunner.group_by_key_input_visitor())
    p.visit(DataflowRunner.flatten_input_visitor())

    # The dataflow runner requires gbk input to be tuples *and* flatten
    # inputs to be equal to their outputs. Assert both hold.
    self.assertIsInstance(flat.element_type, typehints.TupleConstraint)
    self.assertEqual(flat.element_type, none_str_pc.element_type)
    self.assertEqual(flat.element_type, none_int_pc.element_type)

  def test_serialize_windowing_strategy(self):
    # This just tests the basic path; more complete tests
    # are in window_test.py.
    strategy = Windowing(window.FixedWindows(10))
    self.assertEqual(
        strategy,
        DataflowRunner.deserialize_windowing_strategy(
            DataflowRunner.serialize_windowing_strategy(strategy)))

  def test_side_input_visitor(self):
    p = TestPipeline()
    pc = p | beam.Create([])

    transform = beam.Map(
        lambda x, y, z: (x, y, z),
        beam.pvalue.AsSingleton(pc),
        beam.pvalue.AsMultiMap(pc))
    applied_transform = AppliedPTransform(None, transform, "label", [pc])
    DataflowRunner.side_input_visitor().visit_transform(applied_transform)
    self.assertEqual(2, len(applied_transform.side_inputs))
    for side_input in applied_transform.side_inputs:
      self.assertEqual(
          common_urns.side_inputs.MULTIMAP.urn,
          side_input._side_input_data().access_pattern)

  def test_min_cpu_platform_flag_is_propagated_to_experiments(self):
    remote_runner = DataflowRunner()
    self.default_properties.append('--min_cpu_platform=Intel Haswell')

    with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
      p | ptransform.Create([1])  # pylint: disable=expression-not-assigned
    self.assertIn('min_cpu_platform=Intel Haswell',
                  remote_runner.job.options.view_as(DebugOptions).experiments)

  def test_streaming_engine_flag_adds_windmill_experiments(self):
    remote_runner = DataflowRunner()
    self.default_properties.append('--streaming')
    self.default_properties.append('--enable_streaming_engine')
    self.default_properties.append('--experiment=some_other_experiment')

    with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
      p | ptransform.Create([1])  # pylint: disable=expression-not-assigned

    experiments_for_job = (
        remote_runner.job.options.view_as(DebugOptions).experiments)
    self.assertIn('enable_streaming_engine', experiments_for_job)
    self.assertIn('enable_windmill_service', experiments_for_job)
    self.assertIn('some_other_experiment', experiments_for_job)

  def test_dataflow_worker_jar_flag_non_fnapi_noop(self):
    remote_runner = DataflowRunner()
    self.default_properties.append('--experiment=some_other_experiment')
    self.default_properties.append('--dataflow_worker_jar=test.jar')

    with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
      p | ptransform.Create([1])  # pylint: disable=expression-not-assigned

    experiments_for_job = (
        remote_runner.job.options.view_as(DebugOptions).experiments)
    self.assertIn('some_other_experiment', experiments_for_job)
    self.assertNotIn('use_staged_dataflow_worker_jar', experiments_for_job)

  def test_dataflow_worker_jar_flag_adds_use_staged_worker_jar_experiment(self):
    remote_runner = DataflowRunner()
    self.default_properties.append('--experiment=beam_fn_api')
    self.default_properties.append('--dataflow_worker_jar=test.jar')

    with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
      p | ptransform.Create([1])  # pylint: disable=expression-not-assigned

    experiments_for_job = (
        remote_runner.job.options.view_as(DebugOptions).experiments)
    self.assertIn('beam_fn_api', experiments_for_job)
    self.assertIn('use_staged_dataflow_worker_jar', experiments_for_job)

  def test_use_fastavro_experiment_is_added_on_py3_and_onwards(self):
    remote_runner = DataflowRunner()

    with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
      p | ptransform.Create([1])  # pylint: disable=expression-not-assigned

    self.assertEqual(
        sys.version_info[0] > 2,
        remote_runner.job.options.view_as(DebugOptions).lookup_experiment(
            'use_fastavro', False))

  def test_use_fastavro_experiment_is_not_added_when_use_avro_is_present(self):
    remote_runner = DataflowRunner()
    self.default_properties.append('--experiment=use_avro')

    with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
      p | ptransform.Create([1])  # pylint: disable=expression-not-assigned

    debug_options = remote_runner.job.options.view_as(DebugOptions)

    self.assertFalse(debug_options.lookup_experiment('use_fastavro', False))


if __name__ == '__main__':
  unittest.main()
class Position:
    def topLeft(self):
        """
        :rtype: (int, int)
        """
        raise NotImplementedError()

    def width(self):
        """
        :rtype: int | None
        """
        raise NotImplementedError()

    def height(self):
        """
        :rtype: int | None
        """
        raise NotImplementedError()


class PointPosition(Position):
    def __init__(self, x, y):
        self._x = x
        self._y = y

    def topLeft(self):
        return self._x, self._y

    def width(self):
        return None

    def height(self):
        return None


def positionFactory(descriptor, data):
    """
    Factory method for a text position object.

    :param descriptor: Descriptor of the position type
    :param data: List of data to be used for instantiation
    :return: Text position object
    :rtype: Position
    """
    return {
        "point": lambda: PointPosition(*data)
    }.get(descriptor)()
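
# Usage sketch (not part of the original module): building a point position via
# the factory and reading it back through the Position interface. The
# coordinates are arbitrary example values.
if __name__ == "__main__":
    pos = positionFactory("point", [10, 20])
    assert isinstance(pos, Position)
    assert pos.topLeft() == (10, 20)
    # A pure point carries no extent, so both dimensions are None.
    assert pos.width() is None and pos.height() is None
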
import datetime
import json
import logging
import os
from dataclasses import dataclass
from json import JSONDecodeError
from typing import Iterator, Optional

import requests


@dataclass
class se_object:
    """Class to deal with StackExchange data collection and manipulation."""

    search_terms: list
    query_type: str = "and"
    main_uri: str = "https://api.stackexchange.com/2.2/questions"

    def __repr__(self) -> str:
        return f"<Object for site {self.main_uri}>"

    def create_payload(self, search_terms, n) -> dict:
        """Construct the payload based on the verification step before.

        Returns:
            payload [dict]: payload to be sent over to the API
        """
        # note that this needs to be in epoch
        time_now = datetime.datetime.now(datetime.timezone.utc)
        start_time = time_now - datetime.timedelta(hours=24)

        payload = {
            "fromdate": int(start_time.timestamp()),
            "todate": int(time_now.timestamp()),
            "site": "stackoverflow",
            "sort": "votes",
            "order": "desc",
            "tagged": search_terms,
            "client_id": os.environ.get("SE_client_id"),
            "client_secret": os.environ.get("SE_client_secret"),
            "key": os.environ.get("SE_key", None),
            "pagesize": n,
        }

        return payload

    def call_API(self, payload) -> Optional[Iterator[dict]]:
        resp = requests.get(self.main_uri, payload)

        if resp.status_code == 200:
            try:
                new_questions = self.extract_items(resp)
                logging.info("🐍 Collected new questions for the search term")
                return new_questions
            except (JSONDecodeError, KeyError) as e:
                logging.error(f"{e.__class__.__name__}: {e}")
        else:
            error = resp.json()["error_message"]
            logging.error(
                f"Unable to connect to Stack Exchange: status code {resp.status_code} - {error}"
            )

    def run_query(self, n=100) -> Optional[Iterator[dict]]:
        """Validate the query, then construct the payload and call the API.

        Args:
            n (int, optional): Number of questions to collect from the last 24 hours. Defaults to 100.

        Returns:
            Optional[Iterator[dict]]: results of the API call.
        """
        if os.environ.get("SE_key", None) is None:
            logging.info("No StackExchange API key provided, limited use may apply")

        if len(self.search_terms) == 1:
            payload = self.create_payload(self.search_terms, n)
            return self.call_API(payload)

        elif (len(self.search_terms) > 1) and (self.query_type == "and"):
            search_items = ";".join(self.search_terms)
            payload = self.create_payload(search_items, n)
            return self.call_API(payload)

        elif (len(self.search_terms) > 1) and (self.query_type == "or"):
            for term in self.search_terms:
                payload = self.create_payload(term, n)
                new_questions = self.call_API(payload)
            return new_questions

        else:
            logging.error("Only 'and' and 'or' query types are supported.")

    def extract_items(self, response) -> Iterator[dict]:
        """Extract the response items. This returns a generator for simplicity.

        Args:
            response (HTTPResponse): Response from the API call

        Yields:
            Iterator[dict]: dictionary with the response items
        """
        for question in response.json().get("items", []):
            yield {
                "question_id": question["question_id"],
                "title": question["title"],
                "is_answered": question["is_answered"],
                "link": question["link"],
                "owner_reputation": question["owner"].get("reputation", 0),
                "score": question["score"],
                "tags": question["tags"],
            }
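
# Usage sketch (not part of the original module): collecting questions tagged
# with both "python" and "pandas" from the last 24 hours. This issues a live
# HTTP request to the Stack Exchange API and honours the optional SE_*
# environment variables read above; the tags and page size are example values.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    client = se_object(search_terms=["python", "pandas"], query_type="and")
    # run_query() may return None on failure, hence the `or []` fallback.
    for item in client.run_query(n=5) or []:
        print(item["score"], item["title"])
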
from django.contrib import admin

from guardian.admin import GuardedModelAdmin
from userena.utils import get_profile_model

try:
    admin.site.unregister(get_profile_model())
except admin.sites.NotRegistered:
    pass

admin.site.register(get_profile_model(), GuardedModelAdmin)
import pytest

import stk

from ....case_data import CaseData
from .utilities import get_centroid, get_closest_point, get_edges


@pytest.fixture
def tail_1(position, flip, building_block_1):
    point1, point2 = points = (
        position + [-10, 0, 0],
        position + [10, 0, 0],
    )

    def get_centroid_point(building_block):
        return get_closest_point(
            points=points,
            point=get_centroid(building_block),
        )

    vertex = stk.polymer.linear.TailVertex(0, position, flip)
    return CaseData(
        vertex=vertex,
        edges=(tuple(get_edges(vertex))[0], ),
        building_block=building_block_1,
        position=position,
        alignment_tests={get_centroid_point: point2},
        functional_group_edges={0: 0},
    )
"""NEWLINESimple recurrent model - either with LSTM or GRU cells.NEWLINE"""NEWLINEfrom copy import copyNEWLINEfrom typing import Dict, List, Tuple, UnionNEWLINENEWLINEimport numpy as npNEWLINEimport torchNEWLINEimport torch.nn as nnNEWLINENEWLINEfrom pytorch_forecasting.data.encoders import MultiNormalizer, NaNLabelEncoderNEWLINEfrom pytorch_forecasting.data.timeseries import TimeSeriesDataSetNEWLINEfrom pytorch_forecasting.metrics import MAE, MAPE, MASE, RMSE, SMAPE, MultiHorizonMetric, MultiLoss, QuantileLossNEWLINEfrom pytorch_forecasting.models.base_model import AutoRegressiveBaseModelWithCovariatesNEWLINEfrom pytorch_forecasting.models.nn import HiddenState, MultiEmbedding, get_rnnNEWLINEfrom pytorch_forecasting.utils import apply_to_list, to_listNEWLINENEWLINENEWLINEclass RecurrentNetwork(AutoRegressiveBaseModelWithCovariates):NEWLINE def __init__(NEWLINE self,NEWLINE cell_type: str = "LSTM",NEWLINE hidden_size: int = 10,NEWLINE rnn_layers: int = 2,NEWLINE dropout: float = 0.1,NEWLINE static_categoricals: List[str] = [],NEWLINE static_reals: List[str] = [],NEWLINE time_varying_categoricals_encoder: List[str] = [],NEWLINE time_varying_categoricals_decoder: List[str] = [],NEWLINE categorical_groups: Dict[str, List[str]] = {},NEWLINE time_varying_reals_encoder: List[str] = [],NEWLINE time_varying_reals_decoder: List[str] = [],NEWLINE embedding_sizes: Dict[str, Tuple[int, int]] = {},NEWLINE embedding_paddings: List[str] = [],NEWLINE embedding_labels: Dict[str, np.ndarray] = {},NEWLINE x_reals: List[str] = [],NEWLINE x_categoricals: List[str] = [],NEWLINE output_size: Union[int, List[int]] = 1,NEWLINE target: Union[str, List[str]] = None,NEWLINE target_lags: Dict[str, List[int]] = {},NEWLINE loss: MultiHorizonMetric = None,NEWLINE logging_metrics: nn.ModuleList = None,NEWLINE **kwargs,NEWLINE ):NEWLINE """NEWLINE Recurrent Network.NEWLINENEWLINE Simple LSTM or GRU layer followed by output layerNEWLINENEWLINE Args:NEWLINE cell_type (str, optional): Recurrent cell type ["LSTM", "GRU"]. Defaults to "LSTM".NEWLINE hidden_size (int, optional): hidden recurrent size - the most important hyperparameter along withNEWLINE ``rnn_layers``. Defaults to 10.NEWLINE rnn_layers (int, optional): Number of RNN layers - important hyperparameter. Defaults to 2.NEWLINE dropout (float, optional): Dropout in RNN layers. 
Defaults to 0.1.NEWLINE static_categoricals: integer of positions of static categorical variablesNEWLINE static_reals: integer of positions of static continuous variablesNEWLINE time_varying_categoricals_encoder: integer of positions of categorical variables for encoderNEWLINE time_varying_categoricals_decoder: integer of positions of categorical variables for decoderNEWLINE time_varying_reals_encoder: integer of positions of continuous variables for encoderNEWLINE time_varying_reals_decoder: integer of positions of continuous variables for decoderNEWLINE categorical_groups: dictionary where valuesNEWLINE are list of categorical variables that are forming together a new categoricalNEWLINE variable which is the key in the dictionaryNEWLINE x_reals: order of continuous variables in tensor passed to forward functionNEWLINE x_categoricals: order of categorical variables in tensor passed to forward functionNEWLINE embedding_sizes: dictionary mapping (string) indices to tuple of number of categorical classes andNEWLINE embedding sizeNEWLINE embedding_paddings: list of indices for embeddings which transform the zero's embedding to a zero vectorNEWLINE embedding_labels: dictionary mapping (string) indices to list of categorical labelsNEWLINE output_size (Union[int, List[int]], optional): number of outputs (e.g. number of quantiles forNEWLINE QuantileLoss and one target or list of output sizes).NEWLINE target (str, optional): Target variable or list of target variables. Defaults to None.NEWLINE target_lags (Dict[str, Dict[str, int]]): dictionary of target names mapped to list of time steps byNEWLINE which the variable should be lagged.NEWLINE Lags can be useful to indicate seasonality to the models. If you know the seasonalit(ies) of your data,NEWLINE add at least the target variables with the corresponding lags to improve performance.NEWLINE Defaults to no lags, i.e. 
an empty dictionary.NEWLINE loss (MultiHorizonMetric, optional): loss: loss function taking prediction and targets.NEWLINE logging_metrics (nn.ModuleList, optional): Metrics to log during training.NEWLINE Defaults to nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE(), MASE()]).NEWLINE """NEWLINE if loss is None:NEWLINE loss = MAE()NEWLINE if logging_metrics is None:NEWLINE logging_metrics = nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE(), MASE()])NEWLINE self.save_hyperparameters()NEWLINE # store loss function separately as it is a moduleNEWLINE super().__init__(loss=loss, logging_metrics=logging_metrics, **kwargs)NEWLINENEWLINE self.embeddings = MultiEmbedding(NEWLINE embedding_sizes=embedding_sizes,NEWLINE embedding_paddings=embedding_paddings,NEWLINE categorical_groups=categorical_groups,NEWLINE x_categoricals=x_categoricals,NEWLINE )NEWLINENEWLINE lagged_target_names = [l for lags in target_lags.values() for l in lags]NEWLINE assert set(self.encoder_variables) - set(to_list(target)) - set(lagged_target_names) == set(NEWLINE self.decoder_variablesNEWLINE ), "Encoder and decoder variables have to be the same apart from target variable"NEWLINE for targeti in to_list(target):NEWLINE assert (NEWLINE targeti in time_varying_reals_encoderNEWLINE ), f"target {targeti} has to be real" # todo: remove this restrictionNEWLINE assert (isinstance(target, str) and isinstance(loss, MultiHorizonMetric)) or (NEWLINE isinstance(target, (list, tuple)) and isinstance(loss, MultiLoss) and len(loss) == len(target)NEWLINE ), "number of targets should be equivalent to number of loss metrics"NEWLINENEWLINE rnn_class = get_rnn(cell_type)NEWLINE cont_size = len(self.reals)NEWLINE cat_size = sum([size[1] for size in self.hparams.embedding_sizes.values()])NEWLINE input_size = cont_size + cat_sizeNEWLINE self.rnn = rnn_class(NEWLINE input_size=input_size,NEWLINE hidden_size=self.hparams.hidden_size,NEWLINE num_layers=self.hparams.rnn_layers,NEWLINE dropout=self.hparams.dropout if self.hparams.rnn_layers > 1 else 0,NEWLINE batch_first=True,NEWLINE )NEWLINENEWLINE # add linear layers for argument projectsNEWLINE if isinstance(target, str): # single targetNEWLINE self.output_projector = nn.Linear(self.hparams.hidden_size, self.hparams.output_size)NEWLINE assert not isinstance(self.loss, QuantileLoss), "QuantileLoss does not work with recurrent network"NEWLINE else: # multi targetNEWLINE self.output_projector = nn.ModuleList(NEWLINE [nn.Linear(self.hparams.hidden_size, size) for size in self.hparams.output_size]NEWLINE )NEWLINE for l in self.loss:NEWLINE assert not isinstance(l, QuantileLoss), "QuantileLoss does not work with recurrent network"NEWLINENEWLINE @classmethodNEWLINE def from_dataset(NEWLINE cls,NEWLINE dataset: TimeSeriesDataSet,NEWLINE allowed_encoder_known_variable_names: List[str] = None,NEWLINE **kwargs,NEWLINE ):NEWLINE """NEWLINE Create model from dataset.NEWLINENEWLINE Args:NEWLINE dataset: timeseries datasetNEWLINE allowed_encoder_known_variable_names: List of known variables that are allowed in encoder, defaults to allNEWLINE **kwargs: additional arguments such as hyperparameters for model (see ``__init__()``)NEWLINENEWLINE Returns:NEWLINE Recurrent networkNEWLINE """NEWLINE new_kwargs = copy(kwargs)NEWLINE new_kwargs.update(cls.deduce_default_output_parameters(dataset=dataset, kwargs=kwargs, default_loss=MAE()))NEWLINE assert not isinstance(dataset.target_normalizer, NaNLabelEncoder) and (NEWLINE not isinstance(dataset.target_normalizer, MultiNormalizer)NEWLINE or all([not isinstance(normalizer, 
NaNLabelEncoder) for normalizer in dataset.target_normalizer])NEWLINE ), "target(s) should be continuous - categorical targets are not supported" # todo: remove this restrictionNEWLINE return super().from_dataset(NEWLINE dataset, allowed_encoder_known_variable_names=allowed_encoder_known_variable_names, **new_kwargsNEWLINE )NEWLINENEWLINE def construct_input_vector(NEWLINE self, x_cat: torch.Tensor, x_cont: torch.Tensor, one_off_target: torch.Tensor = NoneNEWLINE ) -> torch.Tensor:NEWLINE """NEWLINE Create input vector into RNN networkNEWLINENEWLINE Args:NEWLINE one_off_target: tensor to insert into first position of target. If None (default), remove first time step.NEWLINE """NEWLINE # create input vectorNEWLINE if len(self.categoricals) > 0:NEWLINE embeddings = self.embeddings(x_cat)NEWLINE flat_embeddings = torch.cat([emb for emb in embeddings.values()], dim=-1)NEWLINE input_vector = flat_embeddingsNEWLINENEWLINE if len(self.reals) > 0:NEWLINE input_vector = x_contNEWLINENEWLINE if len(self.reals) > 0 and len(self.categoricals) > 0:NEWLINE input_vector = torch.cat([x_cont, flat_embeddings], dim=-1)NEWLINENEWLINE # shift target by oneNEWLINE input_vector[..., self.target_positions] = torch.roll(NEWLINE input_vector[..., self.target_positions], shifts=1, dims=1NEWLINE )NEWLINENEWLINE if one_off_target is not None: # set first target input (which is rolled over)NEWLINE input_vector[:, 0, self.target_positions] = one_off_targetNEWLINE else:NEWLINE input_vector = input_vector[:, 1:]NEWLINENEWLINE # shift targetNEWLINE return input_vectorNEWLINENEWLINE def encode(self, x: Dict[str, torch.Tensor]) -> HiddenState:NEWLINE """NEWLINE Encode sequence into hidden stateNEWLINE """NEWLINE # encode using rnnNEWLINE assert x["encoder_lengths"].min() > 0NEWLINE encoder_lengths = x["encoder_lengths"] - 1NEWLINE input_vector = self.construct_input_vector(x["encoder_cat"], x["encoder_cont"])NEWLINE _, hidden_state = self.rnn(NEWLINE input_vector, lengths=encoder_lengths, enforce_sorted=FalseNEWLINE ) # second ouput is not needed (hidden state)NEWLINE return hidden_stateNEWLINENEWLINE def decode_all(NEWLINE self,NEWLINE x: torch.Tensor,NEWLINE hidden_state: HiddenState,NEWLINE lengths: torch.Tensor = None,NEWLINE ):NEWLINE decoder_output, hidden_state = self.rnn(x, hidden_state, lengths=lengths, enforce_sorted=False)NEWLINE if isinstance(self.hparams.target, str): # single targetNEWLINE output = self.output_projector(decoder_output)NEWLINE else:NEWLINE output = [projector(decoder_output) for projector in self.output_projector]NEWLINE return output, hidden_stateNEWLINENEWLINE def decode(NEWLINE self,NEWLINE input_vector: torch.Tensor,NEWLINE target_scale: torch.Tensor,NEWLINE decoder_lengths: torch.Tensor,NEWLINE hidden_state: HiddenState,NEWLINE n_samples: int = None,NEWLINE ) -> Tuple[torch.Tensor, bool]:NEWLINE """NEWLINE Decode hidden state of RNN into prediction. If n_smaples is given,NEWLINE decode not by using actual values but rather byNEWLINE sampling new targets from past predictions iterativelyNEWLINE """NEWLINE if self.training:NEWLINE output, _ = self.decode_all(input_vector, hidden_state, lengths=decoder_lengths)NEWLINE output = self.transform_output(output, target_scale=target_scale)NEWLINE else:NEWLINE # run in eval, i.e. 
simulation modeNEWLINE target_pos = self.target_positionsNEWLINE lagged_target_positions = self.lagged_target_positionsNEWLINENEWLINE # define function to run at every decoding stepNEWLINE def decode_one(NEWLINE idx,NEWLINE lagged_targets,NEWLINE hidden_state,NEWLINE ):NEWLINE x = input_vector[:, [idx]]NEWLINE x[:, 0, target_pos] = lagged_targets[-1]NEWLINE for lag, lag_positions in lagged_target_positions.items():NEWLINE if idx > lag:NEWLINE x[:, 0, lag_positions] = lagged_targets[-lag]NEWLINE prediction, hidden_state = self.decode_all(x, hidden_state)NEWLINE prediction = apply_to_list(prediction, lambda x: x[:, 0]) # select first time stepNEWLINE return prediction, hidden_stateNEWLINENEWLINE # make predictions which are fed into next stepNEWLINE output = self.decode_autoregressive(NEWLINE decode_one,NEWLINE first_target=input_vector[:, 0, target_pos],NEWLINE first_hidden_state=hidden_state,NEWLINE target_scale=target_scale,NEWLINE n_decoder_steps=input_vector.size(1),NEWLINE )NEWLINE return outputNEWLINENEWLINE def forward(self, x: Dict[str, torch.Tensor], n_samples: int = None) -> Dict[str, torch.Tensor]:NEWLINE """NEWLINE Forward networkNEWLINE """NEWLINE hidden_state = self.encode(x)NEWLINE # decodeNEWLINE input_vector = self.construct_input_vector(NEWLINE x["decoder_cat"],NEWLINE x["decoder_cont"],NEWLINE one_off_target=x["encoder_cont"][NEWLINE torch.arange(x["encoder_cont"].size(0), device=x["encoder_cont"].device),NEWLINE x["encoder_lengths"] - 1,NEWLINE self.target_positions.unsqueeze(-1),NEWLINE ].T,NEWLINE )NEWLINENEWLINE output = self.decode(NEWLINE input_vector,NEWLINE decoder_lengths=x["decoder_lengths"],NEWLINE target_scale=x["target_scale"],NEWLINE hidden_state=hidden_state,NEWLINE )NEWLINE # return relevant partNEWLINE return self.to_network_output(prediction=output)NEWLINE
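
# Usage sketch (not part of the original module): building a RecurrentNetwork
# from a TimeSeriesDataSet via from_dataset(), as the docstring above describes.
# The synthetic DataFrame, column names, and hyperparameter values are
# illustrative assumptions, not taken from the original code.
if __name__ == "__main__":
    import pandas as pd

    data = pd.DataFrame(
        {
            "time_idx": list(range(30)) * 2,
            "series": ["a"] * 30 + ["b"] * 30,
            "value": [float(i % 7) for i in range(60)],
        }
    )
    dataset = TimeSeriesDataSet(
        data,
        time_idx="time_idx",
        target="value",
        group_ids=["series"],
        max_encoder_length=10,
        max_prediction_length=5,
        time_varying_unknown_reals=["value"],  # target must be among the encoder reals
    )
    # cell_type may be "LSTM" (default) or "GRU"; the loss defaults to MAE().
    model = RecurrentNetwork.from_dataset(dataset, cell_type="GRU", hidden_size=16)
    print(model.hparams)
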
# -*- coding: utf-8 -*-
"""
.. _ex-read-neo:

===============================================
How to use data in neural ensemble (NEO) format
===============================================

This example shows how to create an MNE-Python `~mne.io.Raw` object from data
in the `neural ensemble <https://neo.readthedocs.io>`__ format. For general
information on creating MNE-Python's data objects from NumPy arrays, see
:ref:`tut-creating-data-structures`.
"""

# %%

import neo
import mne

# %%
# This example uses NEO's ``ExampleIO`` object for creating fake data. The data
# will be all zeros, so the plot won't be very interesting, but it should
# demonstrate the steps to using NEO data. For actual data and different file
# formats, consult the NEO documentation.

reader = neo.io.ExampleIO('fakedata.nof')
block = reader.read(lazy=False)[0]  # get the first block
segment = block.segments[0]  # get data from first (and only) segment
signals = segment.analogsignals[0]  # get first (multichannel) signal

data = signals.rescale('V').magnitude.T
sfreq = signals.sampling_rate.magnitude
ch_names = [f'Neo {(idx + 1):02}' for idx in range(signals.shape[1])]
ch_types = ['eeg'] * len(ch_names)  # if not specified, type 'misc' is assumed

info = mne.create_info(ch_names=ch_names, ch_types=ch_types, sfreq=sfreq)
raw = mne.io.RawArray(data, info)
raw.plot(show_scrollbars=False)
import botometer, constantsNEWLINEfrom botometer import NoTimelineErrorNEWLINEfrom requests import ConnectionError, HTTPError, TimeoutNEWLINEfrom urllib3.exceptions import ReadTimeoutError, ProtocolError, SSLErrorNEWLINEimport tweepyNEWLINEimport sysNEWLINEimport osNEWLINEimport globNEWLINEimport csvNEWLINEimport pandas as pdNEWLINEimport smtplibNEWLINEimport randomNEWLINEimport timeNEWLINENEWLINENEWLINEclass BotometerClient:NEWLINENEWLINE def __init__(self, filename):NEWLINENEWLINE self.bot_meter = botometer.Botometer(wait_on_ratelimit=True,NEWLINE mashape_key=constants.mashape_key,NEWLINE **constants.botometer_auth)NEWLINENEWLINE self.master_file_name = 'MasterIDs.csv'NEWLINE # Store all the ids we get an error on so they aren't checked againNEWLINE self.error_ids_file_name = 'ErrorIDs.csv'NEWLINE self.unique_ids_file_name = 'UniqueIDs.csv'NEWLINENEWLINE # Time so we can take how long it takes to scrape all these idsNEWLINE self.start_time = time.time()NEWLINE self.streaming_file_name = filenameNEWLINE self.create_master_file()NEWLINE self.create_error_file()NEWLINE self.tweepy_api = constants.apiNEWLINENEWLINE self.error_df = BotometerClient.load_error_ids_df(self.error_ids_file_name)NEWLINE self.master_df = BotometerClient.load_master_ids_df(self.master_file_name)NEWLINE self.df = self.get_all_ids()NEWLINENEWLINE def start_bot_collection(self):NEWLINE # Get botometer scores for every id in the streamNEWLINE print('Starting Client....')NEWLINE number_of_accounts_to_check = len(self.df)NEWLINENEWLINE self.df.reset_index(drop=True, inplace=True)NEWLINENEWLINE for index, row in self.df.iterrows():NEWLINE if index % 10 == 0:NEWLINE print('On index: ', index, ' out of ', number_of_accounts_to_check)NEWLINENEWLINE tweet_text = row['status_text']NEWLINE tweet_time = row['status_created_at']NEWLINE user_id = row['user_id']NEWLINE tweet_count = row['stream_tweet_count']NEWLINENEWLINE try:NEWLINE result, payload = self.bot_meter.check_account(user_id,NEWLINE full_user_object=True,NEWLINE return_user_data=True)NEWLINE cap = result['cap']['universal']NEWLINE bot_score = result['display_scores']['universal']NEWLINE print('cap: ', cap)NEWLINE print('\n')NEWLINE # print('bot score: ', bot_score)NEWLINENEWLINE if cap > 0.70:NEWLINE self.send_tweet(payload['user']['screen_name'], cap)NEWLINENEWLINE # Save to Master, Mentions, and TimelineNEWLINE self.save_to_master(user_id, bot_score, cap, tweet_count,NEWLINE tweet_time, tweet_text, payload['user'])NEWLINENEWLINE except tweepy.TweepError as exc:NEWLINE # Save this user_id so we don't check it againNEWLINE self.save_to_error_ids(user_id)NEWLINE print('Error encountered for ', user_id)NEWLINE print('Error response: ', exc.response)NEWLINE print('Error reason: ', exc.reason)NEWLINE print('Error api code: ', exc.api_code)NEWLINE print('\n')NEWLINENEWLINE except NoTimelineError as err:NEWLINE self.save_to_error_ids(user_id)NEWLINE print('No Timeline error caught: ', err)NEWLINE print('\n')NEWLINENEWLINE except (ConnectionError, HTTPError, Timeout, ReadTimeoutError, ProtocolError, SSLError) as exc:NEWLINE print("New exception: ", exc)NEWLINE # print(exc.reason)NEWLINE time.sleep(120)NEWLINENEWLINE print('\n\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')NEWLINE print('Finished! 
:)')NEWLINE time_diff = int(time.time() - self.start_time)NEWLINE num_ids = str(len(self.df))NEWLINE print('It look {:02d}:{:02d}:{:02d} time to collect ' + num_ids + ' bot scores!'.format(time_diff // 3600, (time_diff % 3600 // 60), time_diff % 60))NEWLINE BotometerClient.send_notification_email()NEWLINE returnNEWLINENEWLINE def send_tweet(self, user, cap):NEWLINENEWLINE low_start_options = ['Beep, Beep, I think I found another bot... {0}'.format(user),NEWLINE 'R2 says {0}'.format(user),NEWLINE 'It looks like {0}'.format(user),NEWLINE 'I\'ve calculated that {}'.format(user)NEWLINE ]NEWLINENEWLINE high_start_options = ['I spy a bot... {0}'.format(user),NEWLINE 'Danger Will Robinson I\'ve found another political bot {0}'.format(user),NEWLINE 'Robot in disguise {0}'.format(user),NEWLINE 'Looks like {0} is breaking the first law: A robot may not injure a human being or, 'NEWLINE 'through inaction, allow a human being to come to harm. {1}'.format(user, user),NEWLINE 'I guess {0} doesn\'t know the Zeroth Law: A robot may not harm humanity, or, by 'NEWLINE 'inaction, allow humanity to come to harm. {1}'.format(user, user)NEWLINE ]NEWLINENEWLINE cap *= 100NEWLINE cap = round(cap, 2)NEWLINENEWLINE if cap < 90:NEWLINE start = random.choice(low_start_options)NEWLINE ending = ' has a botometer score of {0}%, suggesting it could be a bot or bot assisted. ' \NEWLINE '#politicalbots'.format(cap)NEWLINE else:NEWLINE start = random.choice(high_start_options)NEWLINE ending = ' has a botometer score of {0}%, suggesting it is probably a bot. #politicalbots'.format(cap)NEWLINENEWLINE tweet_text = "{0}{1}".format(start, ending)NEWLINE self.tweepy_api.update_status(tweet_text)NEWLINENEWLINE returnNEWLINENEWLINE def save_to_error_ids(self, user_id):NEWLINE error_ids_file = open(self.error_ids_file_name, 'a')NEWLINE error_writer = csv.writer(error_ids_file)NEWLINENEWLINE try:NEWLINE error_writer.writerow([user_id])NEWLINENEWLINE except Exception as exc:NEWLINE print(exc)NEWLINE passNEWLINENEWLINE error_ids_file.close()NEWLINE returnNEWLINENEWLINE def save_to_master(self, user_id, bot_score, cap, tweet_count, tweet_time, tweet_text, user_dict):NEWLINE # Open the csv file created previouslyNEWLINE master_file = open(self.master_file_name, 'a')NEWLINENEWLINE # Create a csv writerNEWLINE master_writer = csv.writer(master_file)NEWLINENEWLINE try:NEWLINE master_writer.writerow([user_id,NEWLINE bot_score,NEWLINE cap,NEWLINE tweet_count,NEWLINE tweet_time,NEWLINE tweet_text,NEWLINE user_dict['favourites_count'],NEWLINE user_dict['statuses_count'],NEWLINE user_dict['description'],NEWLINE user_dict['location'],NEWLINE user_dict['created_at'],NEWLINE user_dict['verified'],NEWLINE user_dict['following'],NEWLINE user_dict['url'],NEWLINE user_dict['listed_count'],NEWLINE user_dict['followers_count'],NEWLINE user_dict['default_profile_image'],NEWLINE user_dict['utc_offset'],NEWLINE user_dict['friends_count'],NEWLINE user_dict['default_profile'],NEWLINE user_dict['name'],NEWLINE user_dict['lang'],NEWLINE user_dict['screen_name'],NEWLINE user_dict['geo_enabled'],NEWLINE user_dict['profile_background_color'],NEWLINE user_dict['profile_image_url'],NEWLINE user_dict['time_zone'],NEWLINE user_dict['listed_count']NEWLINE ])NEWLINENEWLINE except Exception as exc:NEWLINE print(exc)NEWLINE passNEWLINENEWLINE # Close the csv fileNEWLINE master_file.close()NEWLINE returnNEWLINENEWLINE def create_error_file(self):NEWLINE if os.path.isfile(self.error_ids_file_name):NEWLINE print("Error file found")NEWLINE returnNEWLINENEWLINE else:NEWLINE 
error_file = open(self.error_ids_file_name, 'w')NEWLINENEWLINE try:NEWLINE writer = csv.writer(error_file)NEWLINE writer.writerow(['user_id'])NEWLINENEWLINE except Exception as exc:NEWLINE print(exc)NEWLINE passNEWLINENEWLINE error_file.close()NEWLINE returnNEWLINENEWLINE def create_master_file(self):NEWLINE if os.path.isfile(self.master_file_name):NEWLINE print('Master ID file found')NEWLINE returnNEWLINENEWLINE else:NEWLINE print('Creating master ID file...')NEWLINE csv_file = open(self.master_file_name, "w")NEWLINENEWLINE try:NEWLINE writer = csv.writer(csv_file)NEWLINENEWLINE writer.writerow(['user_id',NEWLINE 'bot_score',NEWLINE 'cap',NEWLINE 'tweet_count',NEWLINE 'tweet_time',NEWLINE 'tweet_text',NEWLINE 'user_favourites_count',NEWLINE 'user_statuses_count',NEWLINE 'user_description',NEWLINE 'user_location',NEWLINE 'user_created_at',NEWLINE 'user_verified',NEWLINE 'user_following',NEWLINE 'user_url',NEWLINE 'user_listed_count',NEWLINE 'user_followers_count',NEWLINE 'user_default_profile_image',NEWLINE 'user_utc_offset',NEWLINE 'user_friends_count',NEWLINE 'user_default_profile',NEWLINE 'user_name',NEWLINE 'user_lang',NEWLINE 'user_screen_name',NEWLINE 'user_geo_enabled',NEWLINE 'user_profile_background_color',NEWLINE 'user_profile_image_url',NEWLINE 'user_time_zone',NEWLINE 'user_listed_count'NEWLINE ])NEWLINENEWLINE except Exception as exc:NEWLINE print('Error writing to csv: ', exc)NEWLINENEWLINE returnNEWLINENEWLINE def get_all_ids(self):NEWLINE if self.streaming_file_name is None or self.master_df is None or self.error_df is None:NEWLINE print("Streaming file name, master_df or error_df is NONE!")NEWLINE returnNEWLINENEWLINE # Load streamingData from csvNEWLINE path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.streaming_file_nameNEWLINENEWLINE df = pd.read_csv(path, header=0, low_memory=False, error_bad_lines=False, lineterminator='\n')NEWLINENEWLINE # Calculate the tweet count for each user idNEWLINE df['stream_tweet_count'] = df.groupby('user_id')['user_id'].transform('count')NEWLINENEWLINE # Drop all the columns we don't care aboutNEWLINE column_list = ['status_text', 'status_created_at', 'user_id', 'stream_tweet_count']NEWLINE df = df[column_list]NEWLINE original_size = len(df)NEWLINENEWLINE # Drop duplicate ids since we only need to get the user data onceNEWLINE df = df.drop_duplicates('user_id', keep='last')NEWLINE unique_size = len(df)NEWLINE print('Out of ', original_size, ' tweets there were ', (original_size - unique_size), ' duplicate ID\'s')NEWLINENEWLINE # Drop all ids that are already in master_dfNEWLINE master_id_list = self.master_df.user_id.tolist()NEWLINE df = df[~df.user_id.isin(master_id_list)]NEWLINENEWLINE print('Out of ', unique_size, ' there were ', (unique_size - len(df)), ' ids that already have scores')NEWLINENEWLINE print('Error DF cols: ', list(self.error_df.columns.values))NEWLINENEWLINE error_id_list = self.error_df['user_id\r'].tolist()NEWLINE df = df[~df.user_id.isin(error_id_list)]NEWLINENEWLINE print('After removing error ids we have ', len(df), ' ids to check!')NEWLINENEWLINE # Drop any rows that are missing the required columnsNEWLINE size_before_drop = len(df)NEWLINE df.dropna(subset=['status_text', 'status_created_at', 'user_id', 'stream_tweet_count'])NEWLINENEWLINE print('Dropped', (size_before_drop - len(df)), 'rows with missing data!')NEWLINE print('Collecting bot scores for ', len(df), ' new ids')NEWLINENEWLINE return dfNEWLINENEWLINE ###########################NEWLINE # Start of static methods #NEWLINE 
###########################NEWLINENEWLINE @staticmethodNEWLINE def get_user_data_as_dict(df):NEWLINE # print(df)NEWLINE user_dict = {'favourites_count': df.iloc[0]['user_favourites_count'],NEWLINE 'statuses_count': df.iloc[0]['user_statuses_count'],NEWLINE 'description': df.iloc[0]['user_description'],NEWLINE 'location': df.iloc[0]['user_location'],NEWLINE 'created_at': df.iloc[0]['user_created_at'],NEWLINE 'verified': df.iloc[0]['user_verified'],NEWLINE 'following': df.iloc[0]['user_following'],NEWLINE 'url': df.iloc[0]['user_url'],NEWLINE 'listed_count': df.iloc[0]['user_listed_count'],NEWLINE 'followers_count': df.iloc[0]['user_followers_count'],NEWLINE 'default_profile_image': df.iloc[0]['user_default_profile_image'],NEWLINE 'utc_offset': df.iloc[0]['user_utc_offset'],NEWLINE 'friends_count': df.iloc[0]['user_friends_count'],NEWLINE 'default_profile': df.iloc[0]['user_default_profile'],NEWLINE 'name': df.iloc[0]['user_name'],NEWLINE 'lang': df.iloc[0]['user_lang'],NEWLINE 'screen_name': df.iloc[0]['user_screen_name'],NEWLINE 'geo_enabled': df.iloc[0]['user_geo_enabled'],NEWLINE 'profile_background_color': df.iloc[0]['user_profile_background_color'],NEWLINE 'profile_image_url': df.iloc[0]['user_profile_image_url'],NEWLINE 'time_zone': df.iloc[0]['user_time_zone']}NEWLINENEWLINE return user_dictNEWLINENEWLINE #####################################NEWLINE # Load Data from Streaming CSV File #NEWLINE #####################################NEWLINE @staticmethodNEWLINE def load_master_ids_df(master_file_name):NEWLINE # Read in MasterIDs and remove any values in there from our data frameNEWLINE path = os.path.dirname(os.path.abspath(__file__)) + '/' + master_file_nameNEWLINE master_df = pd.read_csv(path, header=0, low_memory=False, error_bad_lines=False, lineterminator='\n')NEWLINENEWLINE return master_dfNEWLINENEWLINE @staticmethodNEWLINE def load_error_ids_df(error_ids_file_name):NEWLINE # Read in Error IDs and remove any values already createdNEWLINE path = os.path.dirname(os.path.abspath(__file__)) + '/' + error_ids_file_nameNEWLINE error_df = pd.read_csv(path, header=0, low_memory=False, error_bad_lines=False, lineterminator='\n')NEWLINENEWLINE return error_dfNEWLINENEWLINE #################################NEWLINE # One function to rule them all #NEWLINE #################################NEWLINE @staticmethodNEWLINE def start_mining(file_name):NEWLINE print('\nStarting Botometer mining...')NEWLINENEWLINE # Check if the desired csv file existsNEWLINE if os.path.isfile(file_name):NEWLINE print('\nStreaming data found')NEWLINE client = BotometerClient(file_name)NEWLINENEWLINE # Start it upNEWLINE client.start_bot_collection()NEWLINENEWLINE else:NEWLINE print('Error: requested csv file does not exist!')NEWLINE returnNEWLINENEWLINE @staticmethodNEWLINE def show_csv_files():NEWLINE print("\nI found the following csv files...")NEWLINENEWLINE path = os.path.dirname(os.path.abspath(__file__))NEWLINE extension = 'csv'NEWLINE os.chdir(path)NEWLINE results = [i for i in glob.glob('*.{}'.format(extension))]NEWLINE results.sort()NEWLINENEWLINE for result in results:NEWLINE print(result)NEWLINENEWLINE returnNEWLINENEWLINE ###################NEWLINE # Parsing Methods #NEWLINE ###################NEWLINENEWLINE @staticmethodNEWLINE def parse_entities(entities):NEWLINE hashtag_key = 'hashtags'NEWLINE mentions_key = 'user_mentions'NEWLINE url_key = 'urls'NEWLINENEWLINE if hashtag_key in entities:NEWLINE hashtag_dict = entities[hashtag_key]NEWLINE hashtag_text = 
BotometerClient.parse_hashtags(hashtag_dict)NEWLINE else:NEWLINE hashtag_text = ''NEWLINENEWLINE if mentions_key in entities:NEWLINE mentions_dict = entities[mentions_key]NEWLINE mentions_text = BotometerClient.parse_mentions(mentions_dict)NEWLINE else:NEWLINE mentions_text = ''NEWLINENEWLINE if url_key in entities:NEWLINE url_dict = entities[url_key]NEWLINE url_text = BotometerClient.parse_urls(url_dict)NEWLINE else:NEWLINE url_text = ''NEWLINENEWLINE return hashtag_text, mentions_text, url_textNEWLINENEWLINE @staticmethodNEWLINE def parse_hashtags(hashtag_dict):NEWLINE hashtag_text = ''NEWLINE for dictionary in hashtag_dict:NEWLINE if 'text' in dictionary:NEWLINE if hashtag_text != '':NEWLINE hashtag_text += ' ' + dictionary['text']NEWLINE else:NEWLINE hashtag_text += dictionary['text']NEWLINENEWLINE return hashtag_textNEWLINENEWLINE @staticmethodNEWLINE def parse_mentions(mentions_dict):NEWLINE mentions_text = ''NEWLINE for dictionary in mentions_dict:NEWLINE if 'id_str' in dictionary:NEWLINE if mentions_text != '':NEWLINE mentions_text += ' ' + dictionary['id_str']NEWLINE else:NEWLINE mentions_text += dictionary['id_str']NEWLINENEWLINE return mentions_textNEWLINENEWLINE @staticmethodNEWLINE def parse_urls(url_dict):NEWLINE url_text = ''NEWLINE for dictionary in url_dict:NEWLINE if 'url' in dictionary:NEWLINE if url_text != '':NEWLINE url_text += ' ' + dictionary['url']NEWLINE else:NEWLINE url_text += dictionary['url']NEWLINENEWLINE return url_textNEWLINENEWLINE ######################NEWLINE # Email Notification #NEWLINE ######################NEWLINE @staticmethodNEWLINE def send_notification_email():NEWLINE # Email myself when the script finishes so I can start on the next set of dataNEWLINE server = smtplib.SMTP('smtp.gmail.com', 587)NEWLINE server.starttls()NEWLINE server.login(constants.email_address, constants.password)NEWLINENEWLINE subject = 'Botometer Script'NEWLINE text = 'Botometer Script Finished!'NEWLINE message = 'Subject: {}\n\n{}'.format(subject, text)NEWLINE server.sendmail(constants.email_address, constants.real_email, message)NEWLINE server.quit()NEWLINENEWLINE returnNEWLINENEWLINENEWLINElength = len(sys.argv)NEWLINEif length == 1:NEWLINE print('Error: please provide csv file name or type \'showCSVs\' to see the available files or type help for 'NEWLINE 'more information')NEWLINEelif length == 2:NEWLINE arg = sys.argv[1]NEWLINE if arg == 'showCSVs':NEWLINE BotometerClient.show_csv_files()NEWLINE elif arg == 'help':NEWLINE print('Type showCSVs to see a list of the csv files in this directory that can be passed as a parameter')NEWLINE print('Sample call: python3 start_botometer.py StreamData-#maga-#qanon-#roseanne-20180531-105244.csv')NEWLINE else:NEWLINE try:NEWLINE BotometerClient.start_mining(arg)NEWLINE except Exception as e:NEWLINE print('outer exception', e)NEWLINE # print(e.__cause__)NEWLINE print('Botometer exception caught')NEWLINE BotometerClient.send_notification_email()NEWLINENEWLINE
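# The run summary in the script above reports the elapsed wall-clock time as HH:MM:SS together
# with the number of collected Botometer scores. A minimal, self-contained sketch of that
# formatting step (the helper name and the example values are illustrative, not from the script):
import time

def format_run_summary(start_time, num_ids):
    elapsed = int(time.time() - start_time)
    hours, rem = divmod(elapsed, 3600)
    minutes, seconds = divmod(rem, 60)
    return 'It took {:02d}:{:02d}:{:02d} to collect {} bot scores!'.format(
        hours, minutes, seconds, num_ids)

print(format_run_summary(time.time() - 3725, 42))  # -> It took 01:02:05 to collect 42 bot scores!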
"""NEWLINE==============================NEWLINEWFIRST Instruments (pre-alpha)NEWLINE==============================NEWLINENEWLINEWARNING: This model has not yet been validated against other PSFNEWLINE simulations, and uses several approximations (e.g. forNEWLINE mirror polishing errors, which are taken from HST).NEWLINE"""NEWLINENEWLINEimport os.pathNEWLINEimport poppyNEWLINEimport numpy as npNEWLINEfrom . import webbpsf_coreNEWLINEfrom scipy.interpolate import griddataNEWLINEfrom astropy.io import fitsNEWLINEimport loggingNEWLINENEWLINE_log = logging.getLogger('webbpsf')NEWLINEimport pprintNEWLINENEWLINENEWLINEclass WavelengthDependenceInterpolator(object):NEWLINE """WavelengthDependenceInterpolator can be configured withNEWLINE `n_zernikes` worth of Zernike coefficients at up to `n_wavelengths`NEWLINE wavelengths, and will let you `get_aberration_terms` for anyNEWLINE wavelength in range interpolated linearly between measured/knownNEWLINE pointsNEWLINE """NEWLINENEWLINE def __init__(self, n_wavelengths=16, n_zernikes=22):NEWLINE self._n_wavelengths = n_wavelengthsNEWLINE self._n_zernikes = n_zernikesNEWLINE self._aberration_terms = np.zeros((n_wavelengths, n_zernikes), dtype=np.float64)NEWLINE self._wavelengths = []NEWLINENEWLINE def set_aberration_terms(self, wavelength, zernike_array):NEWLINE """Supply a reference `wavelength` and a `zernike_array`NEWLINE (of length `n_zernikes`) where the aberration is knownNEWLINE """NEWLINE n_wavelengths_set = len(self._wavelengths)NEWLINE if wavelength not in self._wavelengths and n_wavelengths_set < self._n_wavelengths:NEWLINE self._wavelengths.append(wavelength)NEWLINE aberration_row_idx = n_wavelengths_set # which is now index of last rowNEWLINE elif wavelength in self._wavelengths:NEWLINE aberration_row_idx = self._wavelengths.index(wavelength)NEWLINE else:NEWLINE # can't add more wavelengths without allocating new _aberration_terms arrayNEWLINE raise ValueError("Already have information at {} wavelengths "NEWLINE "(pass larger n_wavelengths to __init__?)".format(self._n_wavelengths))NEWLINE if len(zernike_array) != self._n_zernikes:NEWLINE raise ValueError("Expected {} aberration terms (pass different "NEWLINE "n_zernikes to __init__?)".format(self._n_zernikes))NEWLINE self._aberration_terms[aberration_row_idx] = zernike_arrayNEWLINENEWLINE def get_aberration_terms(self, wavelength):NEWLINE """Return the Zernike coefficients as interpolated for thisNEWLINE `wavelength`"""NEWLINE # return array of length n_zernikes interpolated for this wavelengthNEWLINE if wavelength in self._wavelengths:NEWLINE # aberration known exactly for this wavelengthNEWLINE aberration_row_idx = self._wavelengths.index(wavelength)NEWLINE return self._aberration_terms[aberration_row_idx]NEWLINE else:NEWLINE # we have to interpolate @ this wavelengthNEWLINE aberration_terms = griddata(self._wavelengths, self._aberration_terms, wavelength, method='linear')NEWLINE if np.any(np.isnan(aberration_terms)):NEWLINE raise RuntimeError("Attempted to get aberrations at wavelength "NEWLINE "outside the range of the reference data")NEWLINE return aberration_termsNEWLINENEWLINENEWLINEclass FieldDependentAberration(poppy.ZernikeWFE):NEWLINE """FieldDependentAberration incorporates aberrations thatNEWLINE are interpolated in wavelength, x, and y pixel positions byNEWLINE computing the Zernike coefficients for a particular wavelengthNEWLINE and position.NEWLINE """NEWLINENEWLINE """By default, `get_aberration_terms` will zero out Z1, Z2, and Z3NEWLINE (piston, tip, and tilt) as they are 
not meaningful for telescopeNEWLINE PSF calculations (the former is irrelevant, the latter two wouldNEWLINE be handled by a distortion solution). ChangeNEWLINE `_omit_piston_tip_tilt` to False to include the Z1-3 terms."""NEWLINE _omit_piston_tip_tilt = TrueNEWLINE _field_position = NoneNEWLINENEWLINE def __init__(self, pixel_width, pixel_height,NEWLINE name="Field-dependent Aberration", radius=1.0, oversample=1, interp_order=3):NEWLINE self.pixel_width, self.pixel_height = pixel_width, pixel_heightNEWLINE self.field_position = pixel_width // 2, pixel_height // 2NEWLINE self._wavelength_interpolators = {}NEWLINE self.pupil_diam = radius * 2.0NEWLINE super(FieldDependentAberration, self).__init__(NEWLINE name=name,NEWLINE verbose=True,NEWLINE radius=radius,NEWLINE oversample=oversample,NEWLINE interp_order=interp_orderNEWLINE )NEWLINENEWLINE def get_opd(self, wave, units='meters'):NEWLINE """Set the Zernike coefficients (for ZernikeWFE.getOPD) basedNEWLINE on the wavelength of the incoming wavefront and the pixelNEWLINE positionNEWLINE """NEWLINE if not isinstance(wave, poppy.Wavefront):NEWLINE wavelength = waveNEWLINE else:NEWLINE wavelength = wave.wavelengthNEWLINE self.coefficients = wavelength * self.get_aberration_terms(wavelength)NEWLINE return super(FieldDependentAberration, self).get_opd(wave, units=units)NEWLINENEWLINE @propertyNEWLINE def field_position(self):NEWLINE return self._field_positionNEWLINENEWLINE @field_position.setterNEWLINE def field_position(self, position):NEWLINE """Set the x and y pixel position on the detector for which toNEWLINE interpolate aberrations"""NEWLINE x_pixel, y_pixel = positionNEWLINE if x_pixel > self.pixel_width or x_pixel < 0:NEWLINE raise ValueError("Requested pixel_x position lies outside "NEWLINE "the detector width ({})".format(x_pixel))NEWLINE if y_pixel > self.pixel_height or y_pixel < 0:NEWLINE raise ValueError("Requested pixel_y position lies outside "NEWLINE "the detector height ({})".format(y_pixel))NEWLINENEWLINE self._field_position = x_pixel, y_pixelNEWLINENEWLINE def add_field_point(self, x_pixel, y_pixel, interpolator):NEWLINE """Supply a wavelength-space interpolator for a pixel positionNEWLINE on the detector"""NEWLINE self._wavelength_interpolators[(x_pixel, y_pixel)] = interpolatorNEWLINENEWLINE def get_aberration_terms(self, wavelength):NEWLINE """Supply the Zernike coefficients for the aberration based onNEWLINE the wavelength and pixel position on the detector"""NEWLINE if self.field_position in self._wavelength_interpolators:NEWLINE # short path: this is a known pointNEWLINE interpolator = self._wavelength_interpolators[self.field_position]NEWLINE coefficients = interpolator.get_aberration_terms(wavelength)NEWLINE else:NEWLINE # get aberrations at all field pointsNEWLINE field_points, aberration_terms = [], []NEWLINE for field_point_coords, point_interpolator in self._wavelength_interpolators.items():NEWLINE field_points.append(field_point_coords)NEWLINE aberration_terms.append(point_interpolator.get_aberration_terms(wavelength))NEWLINE aberration_array = np.asarray(aberration_terms)NEWLINE assert len(aberration_array.shape) == 2, "computed aberration array is not 2D " \NEWLINE "(inconsistent number of Zernike terms " \NEWLINE "at each point?)"NEWLINE coefficients = griddata(NEWLINE np.asarray(field_points),NEWLINE np.asarray(aberration_terms),NEWLINE self.field_position,NEWLINE method='linear'NEWLINE )NEWLINE if np.any(np.isnan(coefficients)):NEWLINE raise RuntimeError("Attempted to get aberrations for an out-of-bounds 
field point")NEWLINE if self._omit_piston_tip_tilt:NEWLINE _log.debug("Omitting piston/tip/tilt")NEWLINE coefficients[:3] = 0.0 # omit piston, tip, and tilt ZernikesNEWLINE return coefficientsNEWLINENEWLINENEWLINEdef _load_wfi_detector_aberrations(filename):NEWLINE from astropy.io import asciiNEWLINE zernike_table = ascii.read(filename)NEWLINE detectors = {}NEWLINENEWLINE def build_detector_from_table(number, zernike_table):NEWLINE """Build a FieldDependentAberration optic for a detector usingNEWLINE Zernikes Z1-Z22 at various wavelengths and field points"""NEWLINE single_detector_info = zernike_table[zernike_table['sca'] == number]NEWLINE field_points = set(single_detector_info['field_point'])NEWLINE interpolators = {}NEWLINE detector = FieldDependentAberration(NEWLINE 4096,NEWLINE 4096,NEWLINE radius=WFIRSTInstrument.PUPIL_RADIUS,NEWLINE name="Field Dependent Aberration (SCA{:02})".format(number)NEWLINE )NEWLINE for field_id in field_points:NEWLINE field_point_rows = single_detector_info[single_detector_info['field_point'] == field_id]NEWLINE local_x, local_y = field_point_rows[0]['local_x'], field_point_rows[0]['local_y']NEWLINE interpolator = build_wavelength_dependence(field_point_rows)NEWLINENEWLINE midpoint_pixel = 4096 / 2NEWLINE # (local_x in mm / 10 um pixel size) -> * 1e2NEWLINE # local_x and _y range from -20.44 to +20.44, so adding to the midpoint pixelNEWLINE # makes sense to place (-20.44, -20.44) at (4, 4)NEWLINE pixx, pixy = (round(midpoint_pixel + local_x * 1e2),NEWLINE round(midpoint_pixel + local_y * 1e2))NEWLINENEWLINE detector.add_field_point(pixx, pixy, interpolator)NEWLINE return detectorNEWLINENEWLINE def build_wavelength_dependence(rows):NEWLINE """Build an interpolator object that interpolates Z1-Z22 inNEWLINE wavelength space"""NEWLINE wavelengths = set(rows['wavelength'])NEWLINE interpolator = WavelengthDependenceInterpolator(n_wavelengths=len(wavelengths),NEWLINE n_zernikes=22)NEWLINE for row in rows:NEWLINE z = np.zeros(22)NEWLINE for idx in range(22):NEWLINE z[idx] = row['Z{}'.format(idx + 1)]NEWLINE interpolator.set_aberration_terms(row['wavelength'] * 1e-6, z)NEWLINENEWLINE return interpolatorNEWLINENEWLINE detector_ids = set(zernike_table['sca'])NEWLINE for detid in detector_ids:NEWLINE detectors["SCA{:02}".format(detid)] = build_detector_from_table(detid, zernike_table)NEWLINENEWLINE return detectorsNEWLINENEWLINENEWLINEclass WFIRSTInstrument(webbpsf_core.SpaceTelescopeInstrument):NEWLINE PUPIL_RADIUS = 2.4 / 2.0NEWLINE """NEWLINE WFIRSTInstrument contains data and functionality common to WFIRSTNEWLINE instruments, such as setting the pupil shapeNEWLINE """NEWLINE telescope = "WFIRST"NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE super(WFIRSTInstrument, self).__init__(*args, **kwargs)NEWLINENEWLINE # slightly different versions of the following two functionsNEWLINE # from the parent superclassNEWLINE # in order to interface with the FieldDependentAberration classNEWLINE @propertyNEWLINE def detector_position(self):NEWLINE """The pixel position in (X, Y) on the detector"""NEWLINE return self._detectors[self._detector].field_positionNEWLINENEWLINE @detector_position.setterNEWLINE def detector_position(self, position):NEWLINE # exact copy of superclass function except we save theNEWLINE # into a different location.NEWLINE try:NEWLINE x, y = map(int, position)NEWLINE except ValueError:NEWLINE raise ValueError("Detector pixel coordinates must be pairs of nonnegative numbers, "NEWLINE "not {}".format(position))NEWLINE if x < 0 or y < 0:NEWLINE 
raise ValueError("Detector pixel coordinates must be nonnegative integers")NEWLINE if x > self._detector_npixels - 1 or y > self._detector_npixels - 1:NEWLINE raise ValueError("The maximum allowed detector pixel "NEWLINE "coordinate value is {}".format(self._detector_npixels - 1))NEWLINENEWLINE self._detectors[self._detector].field_position = (int(position[0]), int(position[1]))NEWLINENEWLINE def _get_aberrations(self):NEWLINE """Get the OpticalElement that applies the field-dependentNEWLINE optical aberrations. (Called in _getOpticalSystem.)"""NEWLINE return self._detectors[self._detector]NEWLINENEWLINE def _get_fits_header(self, result, options):NEWLINE """Populate FITS Header keywords"""NEWLINE super(WFIRSTInstrument, self)._get_fits_header(result, options)NEWLINE result[0].header['DETXPIXL'] = (self.detector_position[0],NEWLINE 'X pixel position (for field dependent aberrations)')NEWLINE result[0].header['DETYPIXL'] = (self.detector_position[1],NEWLINE 'Y pixel position (for field dependent aberrations)')NEWLINE result[0].header['DETECTOR'] = (self.detector, 'Detector selected')NEWLINENEWLINENEWLINEclass WFI(WFIRSTInstrument):NEWLINE """NEWLINE WFI represents to the to-be-named wide field imagerNEWLINE for the WFIRST missionNEWLINENEWLINE WARNING: This model has not yet been validated against other PSFNEWLINE simulations, and uses several approximations (e.g. forNEWLINE mirror polishing errors, which are taken from HST).NEWLINE """NEWLINE # "The H158, F184 and W149 filters and the grism are mounted with proximate cold pupil masks"NEWLINE # from the final draft of the SDT report, page 92, table 3-2NEWLINE UNMASKED_PUPIL_WAVELENGTH_MIN, UNMASKED_PUPIL_WAVELENGTH_MAX = 0.760e-6, 1.454e-6NEWLINE MASKED_PUPIL_WAVELENGTH_MIN, MASKED_PUPIL_WAVELENGTH_MAX = 1.380e-6, 2.000e-6NEWLINENEWLINE def __init__(self, set_pupil_mask_on=None):NEWLINE """NEWLINE Initiate WFINEWLINENEWLINE ParametersNEWLINE -----------NEWLINE set_pupil_mask_on : bool or NoneNEWLINE Set to True or False to force using or not using the cold pupil mask,NEWLINE or to None for the automatic behavior.NEWLINE """NEWLINE pixelscale = 110e-3 # arcsec/px, WFIRST-AFTA SDT report final version (p. 91)NEWLINE super(WFI, self).__init__("WFI", pixelscale=pixelscale)NEWLINENEWLINE self._detector_npixels = 4096NEWLINE self._detectors = _load_wfi_detector_aberrations(os.path.join(self._datapath, 'wim_zernikes_cycle7.csv'))NEWLINE assert len(self._detectors.keys()) > 0NEWLINE self.detector = 'SCA01'NEWLINENEWLINE # Paths to the two possible pupils. 
The correct one is selected based on requestedNEWLINE # wavelengths in _validate_config()NEWLINE self._unmasked_pupil_path = os.path.join(self._WebbPSF_basepath,NEWLINE 'WFIRST_SRR_WFC_Pupil_Mask_Shortwave_2048.fits')NEWLINE self._masked_pupil_path = os.path.join(self._WebbPSF_basepath,NEWLINE 'WFIRST_SRR_WFC_Pupil_Mask_Longwave_2048.fits')NEWLINENEWLINE # Flag to en-/disable automatic selection of the appropriate pupil_maskNEWLINE self.auto_pupil = TrueNEWLINENEWLINE self._pupil_mask = "AUTO"NEWLINE self.pupil_mask_list = ['AUTO', 'COLD_PUPIL', 'UNMASKED']NEWLINENEWLINE self.pupil = self._unmasked_pupil_pathNEWLINE if set_pupil_mask_on is not None:NEWLINE if isinstance(set_pupil_mask_on, bool):NEWLINE self.auto_pupil = FalseNEWLINE _log.info("Using custom pupil mask")NEWLINE if set_pupil_mask_on:NEWLINE self.pupil = self._masked_pupil_pathNEWLINE else:NEWLINE raise TypeError("set_pupil_mask_on parameter must be boolean")NEWLINENEWLINE self.opd_list = [NEWLINE os.path.join(self._WebbPSF_basepath, 'upscaled_HST_OPD.fits'),NEWLINE ]NEWLINE self.pupilopd = self.opd_list[-1]NEWLINENEWLINE def _validate_config(self, **kwargs):NEWLINE """Validates that the WFI is configured sensiblyNEWLINENEWLINE This mainly consists of selecting the masked or unmasked pupilNEWLINE appropriately based on the wavelengths requested.NEWLINE """NEWLINE if self.auto_pupil and self.pupil in (self._unmasked_pupil_path, self._masked_pupil_path):NEWLINE # Does the wavelength range fit entirely in an unmasked filter?NEWLINE wavelengths = np.array(kwargs['wavelengths'])NEWLINE wl_min, wl_max = np.min(wavelengths), np.max(wavelengths)NEWLINE # test shorter filters first; if wl range fits entirely in one of them, it's not goingNEWLINE # to be the (masked) wideband filterNEWLINE if wl_max <= self.UNMASKED_PUPIL_WAVELENGTH_MAX:NEWLINE # use unmasked pupil opticNEWLINE self.pupil = self._unmasked_pupil_pathNEWLINE _log.info("Using the unmasked WFI pupil shape based on wavelengths requested")NEWLINE else:NEWLINE if wl_max > self.MASKED_PUPIL_WAVELENGTH_MAX:NEWLINE _log.warn("Requested wavelength is > 2e-6 m, defaulting to masked pupil shape")NEWLINE # use masked pupil opticNEWLINE self.pupil = self._masked_pupil_pathNEWLINE _log.info("Using the masked WFI pupil shape based on wavelengths requested")NEWLINE else:NEWLINE # If the user has set the pupil to a custom value, let them worry about theNEWLINE # correct shape it should haveNEWLINE passNEWLINE super(WFI, self)._validate_config(**kwargs)NEWLINENEWLINE @propertyNEWLINE def pupil_mask(self):NEWLINE return self._pupil_maskNEWLINENEWLINE @pupil_mask.setterNEWLINE def pupil_mask(self, name):NEWLINE """ Set the pupil maskNEWLINENEWLINE ParametersNEWLINE ------------NEWLINE name : stringNEWLINE Name of setting.NEWLINE Settings:NEWLINE - "AUTO":NEWLINE Automatically select pupilNEWLINE - "COLD_PUPIL":NEWLINE Masked pupil overrideNEWLINE - "UNMASKED":NEWLINE Unmasked pupil overrideNEWLINE """NEWLINENEWLINE if name and isinstance(name, str):NEWLINE name = name.upper()NEWLINE if "AUTO" == name:NEWLINE self.auto_pupil = TrueNEWLINE _log.info("Using default pupil mask.")NEWLINE elif "COLD_PUPIL" == name:NEWLINE self.auto_pupil = FalseNEWLINE _log.info("Using custom pupil mask: Masked Pupil.")NEWLINE self.pupil = self._masked_pupil_pathNEWLINE elif "UNMASKED" == name:NEWLINE self.auto_pupil = FalseNEWLINE _log.info("Using custom pupil mask: Unmasked Pupil.")NEWLINE self.pupil = self._unmasked_pupil_pathNEWLINE else:NEWLINE raise ValueError("Instrument {0} doesn't have a pupil mask called 
'{1}'.".format(self.name, name))NEWLINE else:NEWLINE raise ValueError("Pupil mask setting is not valid or empty.")NEWLINE self._pupil_mask = nameNEWLINENEWLINE def _addAdditionalOptics(self, optsys, **kwargs):NEWLINE return optsys, False, NoneNEWLINENEWLINENEWLINEclass CGI(WFIRSTInstrument):NEWLINE """NEWLINE WFIRST Coronagraph InstrumentNEWLINENEWLINE Simulates the PSF of the WFIRST coronagraph.NEWLINENEWLINE Current functionality is limited to the Shaped Pupil Coronagraph (SPC)NEWLINE observing modes, and these modes are only simulated with static, unaberratedNEWLINE wavefronts, without relay optics and without DM control. The designNEWLINE respresented here is an approximation to a baseline concept, and will beNEWLINE subject to change based on trades studies and technology development.NEWLINENEWLINE ParametersNEWLINE ----------NEWLINE mode : strNEWLINE CGI observing mode. If not specified, the __init__ functionNEWLINE will set this to a default mode 'CHARSPC_F660'NEWLINE pixelscale : floatNEWLINE Detector pixelscale. If not specified, the pixelscale will default toNEWLINE 0.02 arcsec for configurations usint the IMAGER camera and 0.025 arcsecNEWLINE for the IFS.NEWLINE fov_arcsec : floatNEWLINE Field of view in arcseconds. If not specified, the field of view willNEWLINE default to 3.20 arcsec for the IMAGER camera and 1.76 arcsec for the IFS.NEWLINENEWLINE """NEWLINENEWLINE camera_list = ['IMAGER', 'IFS']NEWLINE filter_list = ['F660', 'F721', 'F770', 'F890']NEWLINE apodizer_list = ['CHARSPC', 'DISKSPC']NEWLINE fpm_list = ['CHARSPC_F660_BOWTIE', 'CHARSPC_F770_BOWTIE', 'CHARSPC_F890_BOWTIE', 'DISKSPC_F721_ANNULUS']NEWLINE lyotstop_list = ['LS30D88']NEWLINENEWLINE _mode_table = { # MODE CAMERA FILTER APODIZER FPM LYOT STOPNEWLINE 'CHARSPC_F660': ('IFS', 'F660', 'CHARSPC', 'CHARSPC_F660_BOWTIE', 'LS30D88'),NEWLINE 'CHARSPC_F770': ('IFS', 'F770', 'CHARSPC', 'CHARSPC_F770_BOWTIE', 'LS30D88'),NEWLINE 'CHARSPC_F890': ('IFS', 'F890', 'CHARSPC', 'CHARSPC_F890_BOWTIE', 'LS30D88'),NEWLINE 'DISKSPC_F721': ('IMAGER', 'F721', 'DISKSPC', 'DISKSPC_F721_ANNULUS', 'LS30D88')}NEWLINENEWLINE def __init__(self, mode=None, pixelscale=None, fov_arcsec=None, apply_static_opd=False):NEWLINE super(CGI, self).__init__("CGI", pixelscale=pixelscale)NEWLINENEWLINE self._detector_npixels = 1024NEWLINE self._detectors = {camera: 'placeholder' for camera in self.camera_list}NEWLINENEWLINE self.pupil_mask_list = self.lyotstop_list # alias for use in webbpsf_coreNEWLINE self.image_mask_list = self.fpm_list # alias for use in webbpsf_coreNEWLINE self.pupil = os.path.join(self._WebbPSF_basepath, 'AFTA_CGI_C5_Pupil_onax_256px_flip.fits')NEWLINE if apply_static_opd:NEWLINE self.pupilopd = os.path.join(self._WebbPSF_basepath, 'CGI', 'OPD', 'CGI_static_OPD.fits')NEWLINE else:NEWLINE self.pupilopd = NoneNEWLINE self.aberration_optic = NoneNEWLINE self.options = {'force_coron': True}NEWLINE # Allow the user to pre-emptively override the default instrument FoV and pixel scaleNEWLINE if fov_arcsec is not None:NEWLINE self.fov_arcsec = fov_arcsecNEWLINE self._override_fov = TrueNEWLINE else:NEWLINE self._override_fov = FalseNEWLINE if pixelscale is not None:NEWLINE self._pixelscale = pixelscaleNEWLINE self._override_pixelscale = TrueNEWLINE else:NEWLINE self._override_pixelscale = FalseNEWLINENEWLINE if mode is None:NEWLINE self.print_mode_table()NEWLINE _log.info("Since the mode was not specified at instantiation, defaulting to CHARSPC_F660")NEWLINE self.mode = 'CHARSPC_F660'NEWLINE else:NEWLINE self.mode = modeNEWLINENEWLINE 
@propertyNEWLINE def camera(self):NEWLINE """Currently selected camera name"""NEWLINE return self._cameraNEWLINENEWLINE @camera.setterNEWLINE def camera(self, value):NEWLINE value = value.upper() # force to uppercaseNEWLINE if value not in self.camera_list:NEWLINE raise ValueError("Instrument {0} doesn't have a camera called {1}.".format(self.name, value))NEWLINE self._camera = valueNEWLINE if value == 'IMAGER':NEWLINE if not hasattr(self, 'fov_arcsec') or not self._override_fov:NEWLINE self.fov_arcsec = 3.2NEWLINE if not hasattr(self, 'pixelscale') or not self._override_pixelscale:NEWLINE self.pixelscale = 0.020 # Nyquist at 465 nmNEWLINE else: # default to 'IFS'NEWLINE if not hasattr(self, 'fov_arcsec') or not self._override_fov:NEWLINE self.fov_arcsec = 2 * 0.82 # 2015 SDT report, Section 3.4.1.1.1:NEWLINE # IFS has 76 lenslets across the (2 x 0.82) arcsec FoV.NEWLINE if not hasattr(self, 'pixelscale') or not self._override_pixelscale:NEWLINE self.pixelscale = 0.025 # Nyquist at 600 nmNEWLINENEWLINE # for CGI, there is one detector per camera and it should be set automatically.NEWLINE @propertyNEWLINE def detector(self):NEWLINE return self.cameraNEWLINENEWLINE @detector.setterNEWLINE def detector(self, value):NEWLINE raise RuntimeError("Can't set detector directly for CGI; set camera instead.")NEWLINENEWLINE @propertyNEWLINE def filter(self):NEWLINE """Currently selected filter name"""NEWLINE return self._filterNEWLINENEWLINE @filter.setterNEWLINE def filter(self, value):NEWLINE value = value.upper() # force to uppercaseNEWLINE if value not in self.filter_list:NEWLINE raise ValueError("Instrument {0} doesn't have a filter called {1}.".format(self.name, value))NEWLINE self._filter = valueNEWLINENEWLINE @propertyNEWLINE def apodizer(self):NEWLINE """Currently selected apodizer name"""NEWLINE return self._apodizerNEWLINENEWLINE @apodizer.setterNEWLINE def apodizer(self, value):NEWLINE value = value.upper() # force to uppercaseNEWLINE if value not in self.apodizer_list:NEWLINE raise ValueError("Instrument {0} doesn't have a apodizer called {1}.".format(self.name, value))NEWLINE self._apodizer = valueNEWLINE if value == 'DISKSPC':NEWLINE self._apodizer_fname = \NEWLINE os.path.join(self._datapath, "optics/DISKSPC_SP_256pix.fits.gz")NEWLINE else: # for now, default to CHARSPCNEWLINE self._apodizer_fname = \NEWLINE os.path.join(self._datapath, "optics/CHARSPC_SP_256pix.fits.gz")NEWLINENEWLINE @propertyNEWLINE def fpm(self):NEWLINE """Currently selected FPM name"""NEWLINE return self._fpmNEWLINENEWLINE @fpm.setterNEWLINE def fpm(self, value):NEWLINE value = value.upper() # force to uppercaseNEWLINE if value not in self.fpm_list:NEWLINE raise ValueError("Instrument {0} doesn't have a FPM called {1}.".format(self.name, value))NEWLINE self._fpm = valueNEWLINE if value.startswith('DISKSPC'):NEWLINE self._fpmres = 3NEWLINE self._owa = 20.NEWLINE self._Mfpm = int(np.ceil(self._fpmres * self._owa))NEWLINE self._fpm_fname = \NEWLINE os.path.join(self._datapath,NEWLINE "optics/DISKSPC_FPM_65WA200_360deg_-_FP1res{0:d}_evensamp_D{1:03d}_{2:s}.fits.gz".format(NEWLINE self._fpmres, 2 * self._Mfpm, self.filter))NEWLINE else:NEWLINE self._fpmres = 4NEWLINE self._owa = 9.NEWLINE self._Mfpm = int(np.ceil(self._fpmres * self._owa))NEWLINE self._fpm_fname = \NEWLINE os.path.join(self._datapath,NEWLINE "optics/CHARSPC_FPM_25WA90_2x65deg_-_FP1res{0:d}_evensamp_D{1:03d}_{2:s}.fits.gz".format(NEWLINE self._fpmres, 2 * self._Mfpm, self.filter))NEWLINENEWLINE @propertyNEWLINE def lyotstop(self):NEWLINE """Currently 
selected Lyot stop name"""NEWLINE return self._lyotstopNEWLINENEWLINE @lyotstop.setterNEWLINE def lyotstop(self, value):NEWLINE # preserve case for this one since we're used to that with the lyot mask namesNEWLINE if value not in self.lyotstop_list:NEWLINE raise ValueError("Instrument {0} doesn't have a Lyot mask called {1}.".format(self.name, value))NEWLINE self._lyotstop = valueNEWLINE self._lyotstop_fname = os.path.join(self._datapath, "optics/SPC_LS_30D88_256pix.fits.gz")NEWLINENEWLINE @propertyNEWLINE def mode_list(self):NEWLINE """Available Observation Modes"""NEWLINE keys = self._mode_table.keys()NEWLINE keys = sorted(keys)NEWLINE return keysNEWLINENEWLINE # mode works differently since it's a meta-property that affects the other ones:NEWLINE @propertyNEWLINE def mode(self):NEWLINE """Currently selected mode name"""NEWLINE for modename, settings in self._mode_table.items():NEWLINE if (self.camera == settings[0].upper() and self.filter == settings[1].upper() andNEWLINE self.apodizer == settings[2].upper() and self.fpm == settings[3].upper() andNEWLINE self.lyotstop == settings[4]):NEWLINE return modenameNEWLINE return 'Custom'NEWLINENEWLINE @mode.setterNEWLINE def mode(self, value):NEWLINE if value not in self.mode_list:NEWLINE raise ValueError("Instrument {0} doesn't have a mode called {1}.".format(self.name, value))NEWLINE settings = self._mode_table[value]NEWLINE self.camera = settings[0]NEWLINE self.filter = settings[1]NEWLINE self.apodizer = settings[2]NEWLINE self.fpm = settings[3]NEWLINE self.lyotstop = settings[4]NEWLINE _log.info('Set the following optical configuration:')NEWLINE _log.info('camera = {0}, filter = {1}, apodizer = {2}, fpm = {3}, lyotstop = {4}'.format(\NEWLINE self.camera, self.filter, self.apodizer, self.fpm, self.lyotstop))NEWLINENEWLINE def print_mode_table(self):NEWLINE """Print the table of observing mode options and their associated optical configuration"""NEWLINE _log.info("Printing the table of WFIRST CGI observing modes supported by WebbPSF.")NEWLINE _log.info("Each is defined by a combo of camera, filter, apodizer, "NEWLINE "focal plane mask (FPM), and Lyot stop settings:")NEWLINE _log.info(pprint.pformat(self._mode_table))NEWLINENEWLINE @propertyNEWLINE def detector_position(self):NEWLINE """The pixel position in (X, Y) on the detector"""NEWLINE return 512, 512NEWLINENEWLINE @detector_position.setterNEWLINE def detector_position(self, position):NEWLINE raise RuntimeError("Detector position not adjustable for CGI")NEWLINENEWLINE def _validate_config(self, **kwargs):NEWLINE super(CGI, self)._validate_config(**kwargs)NEWLINENEWLINE def _addAdditionalOptics(self, optsys, oversample=4):NEWLINE """Add coronagraphic or spectrographic optics for WFIRST CGI."""NEWLINENEWLINE trySAM = FalseNEWLINENEWLINE if ('pupil_shift_x' in self.options and self.options['pupil_shift_x'] != 0) or \NEWLINE ('pupil_shift_y' in self.options and self.options['pupil_shift_y'] != 0):NEWLINE shift = (self.options['pupil_shift_x'], self.options['pupil_shift_y'])NEWLINE else:NEWLINE shift = NoneNEWLINENEWLINE # Add the shaped pupil apodizerNEWLINE optsys.add_pupil(transmission=self._apodizer_fname, name=self.apodizer, shift=None)NEWLINENEWLINE # Add the FPMNEWLINE optsys.add_image(transmission=self._fpm_fname, name=self.fpm)NEWLINENEWLINE # Add Lyot stopNEWLINE self.pupil_mask = self.lyotstopNEWLINE optsys.add_pupil(transmission=self._lyotstop_fname, name=self.lyotstop, shift=shift)NEWLINENEWLINE # Cast as MatrixFTCoronagraph; this configures the detectorNEWLINE occ_box_size = 
1.NEWLINE mft_optsys = poppy.MatrixFTCoronagraph(optsys, oversample=oversample, occulter_box=occ_box_size)NEWLINENEWLINE return mft_optsys, trySAM, occ_box_sizeNEWLINENEWLINE def _get_aberrations(self):NEWLINE """Get the OpticalElement that applies the field-dependentNEWLINE optical aberrations. (Called in _getOpticalSystem.)"""NEWLINE return NoneNEWLINENEWLINE def _get_fits_header(self, result, options):NEWLINE """Populate FITS Header keywords"""NEWLINE super(WFIRSTInstrument, self)._get_fits_header(result, options)NEWLINE pupil_hdr = fits.getheader(self.pupil)NEWLINE apodizer_hdr = fits.getheader(self._apodizer_fname)NEWLINE fpm_hdr = fits.getheader(self._fpm_fname)NEWLINE lyotstop_hdr = fits.getheader(self._lyotstop_fname)NEWLINENEWLINE result[0].header.set('MODE', self.mode, comment='Observing mode')NEWLINE result[0].header.set('CAMERA', self.camera, comment='Imager or IFS')NEWLINE result[0].header.set('APODIZER', self.apodizer, comment='Apodizer')NEWLINE result[0].header.set('APODTRAN', os.path.basename(self._apodizer_fname),NEWLINE comment='Apodizer transmission')NEWLINE result[0].header.set('PUPLSCAL', apodizer_hdr['PUPLSCAL'],NEWLINE comment='Apodizer pixel scale in m/pixel')NEWLINE result[0].header.set('PUPLDIAM', apodizer_hdr['PUPLDIAM'],NEWLINE comment='Full apodizer array size, incl padding.')NEWLINE result[0].header.set('FPM', self.fpm, comment='Focal plane mask')NEWLINE result[0].header.set('FPMTRAN', os.path.basename(self._fpm_fname),NEWLINE comment='FPM transmission')NEWLINE result[0].header.set('FPMSCAL', fpm_hdr['PIXSCALE'], comment='FPM spatial sampling, arcsec/pix')NEWLINE result[0].header.set('LYOTSTOP', self.lyotstop, comment='Lyot stop')NEWLINE result[0].header.set('LSTRAN', os.path.basename(self._lyotstop_fname),NEWLINE comment='Lyot stop transmission')NEWLINE result[0].header.set('PUPLSCAL', lyotstop_hdr['PUPLSCAL'],NEWLINE comment='Lyot stop pixel scale in m/pixel')NEWLINE result[0].header.set('PUPLDIAM', lyotstop_hdr['PUPLDIAM'],NEWLINE comment='Lyot stop array size, incl padding.')NEWLINE
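# WavelengthDependenceInterpolator above interpolates a whole vector of Zernike coefficients
# linearly in wavelength through scipy.interpolate.griddata. A small self-contained sketch of
# the same idea, with made-up coefficient values (two Zernike terms, two reference wavelengths):
import numpy as np
from scipy.interpolate import griddata

ref_wavelengths = np.array([1.0e-6, 2.0e-6])       # meters
ref_coefficients = np.array([[10e-9, -5e-9],       # Zernike terms measured at 1.0 um
                             [20e-9, -1e-9]])      # Zernike terms measured at 2.0 um

# Asking for a wavelength halfway between the references returns the element-wise average.
interp = griddata(ref_wavelengths, ref_coefficients, 1.5e-6, method='linear')
print(interp)  # approximately [1.5e-08, -3.0e-09]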
'''NEWLINENotice:NEWLINE ConstantOP only supports CPU.NEWLINE For supporting cross-device, please use ConstantOP2NEWLINE'''NEWLINEimport sysNEWLINEsys.path.append('../') # Add MobulaOP pathNEWLINEimport mobulaNEWLINEimport numpy as npNEWLINENEWLINE# ConstantOP only supports CPU.NEWLINENEWLINENEWLINE@mobula.op.register(need_top_grad=False)NEWLINEclass ConstantOP:NEWLINE def __init__(self, constant):NEWLINE self.constant = self.F.array(constant)NEWLINENEWLINE def forward(self):NEWLINE return self.constantNEWLINENEWLINE def backward(self, dy):NEWLINE return []NEWLINENEWLINE def infer_shape(self, in_shape):NEWLINE return [], [self.constant.shape]NEWLINENEWLINENEWLINE@mobula.op.register(need_top_grad=False)NEWLINEclass ConstantOP2:NEWLINE def __init__(self, constant):NEWLINE self.constant = self.F.array(constant)NEWLINE self.constant_buffer = dict()NEWLINENEWLINE def forward(self, x):NEWLINE ctx = x.contextNEWLINE return self.constant_buffer.get(ctx, self.constant.as_in_context(ctx))NEWLINENEWLINE def backward(self, dy):NEWLINE return [0]NEWLINENEWLINE def infer_shape(self, in_shape):NEWLINE return in_shape, [self.constant.shape]NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE import mxnet as mxNEWLINE import numpy as npNEWLINENEWLINE # ConstantOP only supports CPU.NEWLINE if mx.current_context() == mx.cpu():NEWLINE # NDArrayNEWLINE a = mx.nd.array([1, 2, 3])NEWLINE b = mx.nd.array([4, 5, 6])NEWLINE c = a + ConstantOP[mx.nd.NDArray](b)NEWLINE print(c) # [5,7,9]NEWLINENEWLINE # SymbolNEWLINE a_sym = mx.sym.Variable('a')NEWLINE output_sym = a_sym + ConstantOP[mx.sym.Symbol](b)NEWLINE exe = output_sym.simple_bind(NEWLINE ctx=mx.context.current_context(), a=a.shape)NEWLINE exe.forward(a=np.array([1, 2, 3]))NEWLINENEWLINE print(exe.outputs[0].asnumpy()) # [5,7,9]NEWLINENEWLINE '''NEWLINE ConstantOP2: accept a variable for getting the context informationNEWLINE '''NEWLINENEWLINE # NDArrayNEWLINE a = mx.nd.array([1, 2, 3])NEWLINE b = mx.nd.array([4, 5, 6])NEWLINE c = a + ConstantOP2(a, constant=b)NEWLINE print(c) # [5,7,9]NEWLINENEWLINE # SymbolNEWLINE a_sym = mx.sym.Variable('a')NEWLINE # declare input_type explicitly because the inputs includes mx.sym.Symbol and mx.nd.NDArrayNEWLINE output_sym = a_sym + ConstantOP2[mx.sym.Symbol](a_sym, constant=b)NEWLINE exe = output_sym.simple_bind(ctx=mx.context.current_context(), a=a.shape)NEWLINE exe.forward(a=np.array([1, 2, 3]))NEWLINENEWLINE print(exe.outputs[0].asnumpy()) # [5,7,9]NEWLINE
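# ConstantOP2 above accepts an input variable only so it can learn which device (context) the
# constant should live on. Independent of MobulaOP, the underlying MXNet pattern is just
# as_in_context; a small sketch of that idea (device choice here is illustrative, CPU only):
import mxnet as mx

x = mx.nd.array([1, 2, 3])                     # input tensor, defines the target context
const = mx.nd.array([4, 5, 6])                 # constant, possibly created on another device
const_on_ctx = const.as_in_context(x.context)  # copy the constant onto x's device if needed
print((x + const_on_ctx).asnumpy())            # [5. 7. 9.]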
from mogua.protocols import full_node_protocol, introducer_protocol, wallet_protocolNEWLINEfrom mogua.server.outbound_message import NodeTypeNEWLINEfrom mogua.server.ws_connection import WSMoGuaConnectionNEWLINEfrom mogua.types.mempool_inclusion_status import MempoolInclusionStatusNEWLINEfrom mogua.util.api_decorators import api_request, peer_required, execute_taskNEWLINEfrom mogua.util.errors import ErrNEWLINEfrom mogua.wallet.wallet_node import WalletNodeNEWLINENEWLINENEWLINEclass WalletNodeAPI:NEWLINE wallet_node: WalletNodeNEWLINENEWLINE def __init__(self, wallet_node) -> None:NEWLINE self.wallet_node = wallet_nodeNEWLINENEWLINE @propertyNEWLINE def log(self):NEWLINE return self.wallet_node.logNEWLINENEWLINE @propertyNEWLINE def api_ready(self):NEWLINE return self.wallet_node.logged_inNEWLINENEWLINE @peer_requiredNEWLINE @api_requestNEWLINE async def respond_removals(self, response: wallet_protocol.RespondRemovals, peer: WSMoGuaConnection):NEWLINE passNEWLINENEWLINE async def reject_removals_request(self, response: wallet_protocol.RejectRemovalsRequest, peer: WSMoGuaConnection):NEWLINE """NEWLINE The full node has rejected our request for removals.NEWLINE """NEWLINE passNEWLINENEWLINE @api_requestNEWLINE async def reject_additions_request(self, response: wallet_protocol.RejectAdditionsRequest):NEWLINE """NEWLINE The full node has rejected our request for additions.NEWLINE """NEWLINE passNEWLINENEWLINE @execute_taskNEWLINE @peer_requiredNEWLINE @api_requestNEWLINE async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSMoGuaConnection):NEWLINE """NEWLINE The full node sent as a new peakNEWLINE """NEWLINE await self.wallet_node.new_peak_wallet(peak, peer)NEWLINENEWLINE @api_requestNEWLINE async def reject_block_header(self, response: wallet_protocol.RejectHeaderRequest):NEWLINE """NEWLINE The full node has rejected our request for a header.NEWLINE """NEWLINE passNEWLINENEWLINE @api_requestNEWLINE async def respond_block_header(self, response: wallet_protocol.RespondBlockHeader):NEWLINE passNEWLINENEWLINE @peer_requiredNEWLINE @api_requestNEWLINE async def respond_additions(self, response: wallet_protocol.RespondAdditions, peer: WSMoGuaConnection):NEWLINE passNEWLINENEWLINE @api_requestNEWLINE async def respond_proof_of_weight(self, response: full_node_protocol.RespondProofOfWeight):NEWLINE passNEWLINENEWLINE @peer_requiredNEWLINE @api_requestNEWLINE async def transaction_ack(self, ack: wallet_protocol.TransactionAck, peer: WSMoGuaConnection):NEWLINE """NEWLINE This is an ack for our previous SendTransaction call. This removes the transaction fromNEWLINE the send queue if we have sent it to enough nodes.NEWLINE """NEWLINE assert peer.peer_node_id is not NoneNEWLINE name = peer.peer_node_id.hex()NEWLINE status = MempoolInclusionStatus(ack.status)NEWLINE if self.wallet_node.wallet_state_manager is None or self.wallet_node.backup_initialized is False:NEWLINE return NoneNEWLINE if status == MempoolInclusionStatus.SUCCESS:NEWLINE self.wallet_node.log.info(f"SpendBundle has been received and accepted to mempool by the FullNode. {ack}")NEWLINE elif status == MempoolInclusionStatus.PENDING:NEWLINE self.wallet_node.log.info(f"SpendBundle has been received (and is pending) by the FullNode. {ack}")NEWLINE else:NEWLINE self.wallet_node.log.warning(f"SpendBundle has been rejected by the FullNode. 
{ack}")NEWLINE if ack.error is not None:NEWLINE await self.wallet_node.wallet_state_manager.remove_from_queue(ack.txid, name, status, Err[ack.error])NEWLINE else:NEWLINE await self.wallet_node.wallet_state_manager.remove_from_queue(ack.txid, name, status, None)NEWLINENEWLINE @peer_requiredNEWLINE @api_requestNEWLINE async def respond_peers_introducer(NEWLINE self, request: introducer_protocol.RespondPeersIntroducer, peer: WSMoGuaConnectionNEWLINE ):NEWLINE if not self.wallet_node.has_full_node():NEWLINE await self.wallet_node.wallet_peers.respond_peers(request, peer.get_peer_info(), False)NEWLINE else:NEWLINE await self.wallet_node.wallet_peers.ensure_is_closed()NEWLINENEWLINE if peer is not None and peer.connection_type is NodeType.INTRODUCER:NEWLINE await peer.close()NEWLINENEWLINE @peer_requiredNEWLINE @api_requestNEWLINE async def respond_peers(self, request: full_node_protocol.RespondPeers, peer: WSMoGuaConnection):NEWLINE if not self.wallet_node.has_full_node():NEWLINE self.log.info(f"Wallet received {len(request.peer_list)} peers.")NEWLINE await self.wallet_node.wallet_peers.respond_peers(request, peer.get_peer_info(), True)NEWLINE else:NEWLINE self.log.info(f"Wallet received {len(request.peer_list)} peers, but ignoring, since we have a full node.")NEWLINE await self.wallet_node.wallet_peers.ensure_is_closed()NEWLINE return NoneNEWLINENEWLINE @api_requestNEWLINE async def respond_puzzle_solution(self, request: wallet_protocol.RespondPuzzleSolution):NEWLINE if self.wallet_node.wallet_state_manager is None or self.wallet_node.backup_initialized is False:NEWLINE return NoneNEWLINE await self.wallet_node.wallet_state_manager.puzzle_solution_received(request)NEWLINENEWLINE @api_requestNEWLINE async def reject_puzzle_solution(self, request: wallet_protocol.RejectPuzzleSolution):NEWLINE self.log.warning(f"Reject puzzle solution: {request}")NEWLINENEWLINE @api_requestNEWLINE async def respond_header_blocks(self, request: wallet_protocol.RespondHeaderBlocks):NEWLINE passNEWLINENEWLINE @api_requestNEWLINE async def reject_header_blocks(self, request: wallet_protocol.RejectHeaderBlocks):NEWLINE self.log.warning(f"Reject header blocks: {request}")NEWLINE
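# transaction_ack above maps the ack onto a MempoolInclusionStatus and logs success, pending,
# or rejection before handing the status to the wallet state manager, which decides whether the
# transaction leaves the send queue. A stripped-down, hypothetical sketch of that dispatch
# pattern (the enum values and the queue handling are illustrative, not mogua's actual logic):
from enum import Enum

class InclusionStatus(Enum):
    SUCCESS = 1   # accepted into the mempool
    PENDING = 2   # received, not yet included
    FAILED = 3    # rejected by the full node

def handle_ack(status, txid, queue):
    if status is InclusionStatus.SUCCESS:
        print(f"{txid} accepted to mempool")
    elif status is InclusionStatus.PENDING:
        print(f"{txid} received and pending")
    else:
        print(f"{txid} rejected")
    # in the real wallet the state manager decides removal; here we simply drop the entry
    queue.pop(txid, None)

queue = {"tx1": "payload"}
handle_ack(InclusionStatus.SUCCESS, "tx1", queue)
print(queue)  # {}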
import os
import sys
import pytest
from fastapi.testclient import TestClient
from typer.testing import CliRunner
from sqlalchemy.exc import IntegrityError

# The next line ensures the tests use their own database and settings environment
os.environ["FORCE_ENV_FOR_DYNACONF"] = "testing"  # noqa
# WARNING: ensure imports from `fastapi_sqlmodel_demo` come after this line
from fastapi_sqlmodel_demo import app, settings, db  # noqa
from fastapi_sqlmodel_demo.cli import create_user, cli  # noqa


# each test runs with its cwd set to a fresh temp dir
@pytest.fixture(autouse=True)
def go_to_tmpdir(request):
    # Get the fixture dynamically by its name.
    tmpdir = request.getfixturevalue("tmpdir")
    # ensure locally created test packages can be imported
    sys.path.insert(0, str(tmpdir))
    # Chdir only for the duration of the test.
    with tmpdir.as_cwd():
        yield


@pytest.fixture(scope="function", name="app")
def _app():
    return app


@pytest.fixture(scope="function", name="cli")
def _cli():
    return cli


@pytest.fixture(scope="function", name="settings")
def _settings():
    return settings


@pytest.fixture(scope="function")
def api_client():
    return TestClient(app)


@pytest.fixture(scope="function")
def api_client_authenticated():
    try:
        create_user("admin", "admin", superuser=True)
    except IntegrityError:
        # the admin user already exists from an earlier test run
        pass

    client = TestClient(app)
    token = client.post(
        "/token",
        data={"username": "admin", "password": "admin"},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    ).json()["access_token"]
    client.headers["Authorization"] = f"Bearer {token}"
    return client


@pytest.fixture(scope="function")
def cli_client():
    return CliRunner()


def remove_db():
    # Remove the database file
    try:
        os.remove("testing.db")
    except FileNotFoundError:
        pass


@pytest.fixture(scope="session", autouse=True)
def initialize_db(request):
    db.create_db_and_tables(db.engine)
    request.addfinalizer(remove_db)
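# The fixtures above expose an unauthenticated and a token-authenticated TestClient. A
# hypothetical test module using them might look like the sketch below; the /user/ route and
# the expected status codes are assumptions about the demo app, not taken from conftest.py:
def test_list_users_requires_auth(api_client):
    assert api_client.get("/user/").status_code in (401, 403)


def test_list_users_authenticated(api_client_authenticated):
    response = api_client_authenticated.get("/user/")
    assert response.status_code == 200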
MILLION = 1000000.0
EXP18 = 10 ** 18
EXCHANGE_OSMOSIS = "osmosis_blockchain"
CUR_OSMO = "OSMO"
CUR_CRO = "CRO"

MSG_TYPE_ACKNOWLEDGMENT = "MsgAcknowledgement"
MSG_TYPE_BEGIN_UNLOCKING = "MsgBeginUnlocking"
MSG_TYPE_DELEGATE = "MsgDelegate"
MSG_TYPE_DEPOSIT = "MsgDeposit"
MSG_TYPE_EXIT_POOL = "MsgExitPool"
MSG_TYPE_IBC_TRANSFER = "MsgTransfer"
MSG_TYPE_JOIN_POOL = "MsgJoinPool"
MSG_TYPE_JOIN_SWAP_EXTERN_AMOUNT_IN = "MsgJoinSwapExternAmountIn"
MSG_TYPE_LOCK_TOKENS = "MsgLockTokens"
MSG_TYPE_MSGRECVPACKET = "MsgRecvPacket"
MSG_TYPE_REDELEGATE = "MsgBeginRedelegate"
MSG_TYPE_SEND = "MsgSend"
MSG_TYPE_SET_WITHDRAW_ADDRESS = "MsgSetWithdrawAddress"
MSG_TYPE_SUBMIT_PROPOSAL = "MsgSubmitProposal"
MSG_TYPE_SWAP_IN = "MsgSwapExactAmountIn"
MSG_TYPE_UNDELEGATE = "MsgUndelegate"
MSG_TYPE_UPDATE_CLIENT = "MsgUpdateClient"
MSG_TYPE_VOTE = "MsgVote"
MSG_TYPE_WITHDRAW_COMMISSION = "MsgWithdrawValidatorCommission"
MSG_TYPE_WITHDRAW_REWARD = "MsgWithdrawDelegatorReward"
MSG_TYPE_TIMEOUT = "MsgTimeout"
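# MILLION and EXP18 above look like the usual base-unit divisors: Cosmos-SDK micro-denoms such
# as uosmo carry 6 decimals, while 18-decimal assets divide by 10**18. A tiny sketch of the
# assumed usage (the raw amounts are made up, and the constants above are assumed in scope):
raw_uosmo = 2_500_000          # on-chain amount in uosmo
print(raw_uosmo / MILLION)     # 2.5 OSMO

raw_18dec_amount = 3 * EXP18   # an 18-decimal asset amount in its smallest unit
print(raw_18dec_amount / EXP18)  # 3.0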
from app import socketio
from config import *
from .spi import *
from ..socketio_queue import EmitQueue

from flask_socketio import Namespace

import logging

logger = logging.getLogger("SIO_Server")


class XApiNamespace(Namespace):
    md = None
    td = None
    mq = None
    spi = None
    orders_map = {}

    def __init__(self, namespace=None):
        super(XApiNamespace, self).__init__(namespace)
        self.mq = EmitQueue(socketio)
        self.spi = md_spi(self.mq, self.namespace)

    def start(self):
        # Only start once a client has connected. Two cases can trigger this:
        # 1. a page connected earlier and was never closed, so restarting the server fires it again
        # 2. the server side is already connected, but no notification was sent
        if self.md is None:
            self.md = config_md()
            if enable_md:
                init_md(self.md)
        if self.td is None:
            self.td = config_td()
            init_td(self.td)

    def stop(self):
        if self.md is not None:
            self.md.disconnect()
            self.md = None
        if self.td is not None:
            self.td.disconnect()
            self.td = None

    def connect(self):
        self.spi.set_api(self.md, self.td)
        self.md.register_spi(self.spi)
        if not self.md.is_connected():
            if enable_md:
                self.md.connect()
        self.td.register_spi(self.spi)
        if not self.td.is_connected():
            if enable_td:
                self.td.connect()

    def on_connect(self):
        # Refreshing the page fires this twice, so duplicate reconnects must be guarded against
        logger.info('on_connect')
        self.start()
        self.connect()
        self.spi.emit_is_connected()

    def on_disconnect(self):
        # Market data can only be unsubscribed once every connection has dropped
        logger.info('on_disconnect')

    def on_sub_quote(self, data):
        logger.info('on_sub_quote:%s', data)
        args = data['args']
        if not self.md.is_connected():
            return
        self.md.subscribe(args['instruments'], args['exchange'])

    def on_unsub_quote(self, data):
        logger.info('on_unsub_quote:%s', data)
        args = data['args']
        if not self.md.is_connected():
            return
        self.md.unsubscribe(args['instruments'], args['exchange'])

    def on_send_order(self, data):
        logger.info('on_send_order:%s', data)
        args = data['args']
        if not self.td.is_connected():
            return

        # Default order fields; if the incoming arguments are incomplete, these are used
        _d0 = {
            "InstrumentID": "c1909",
            "Type": "Limit",
            "Side": "Buy",
            "Qty": 1,
            "Price": 100.0,
            "OpenClose": "Open",
            "HedgeFlag": "Speculation",
        }

        _input = args
        # Overlay the incoming arguments on the defaults so the enum lookups below cannot fail
        _d0.update(_input)

        # Replace the enum strings in the original order with their numeric values
        _d1 = {
            'Type': OrderType[_d0["Type"]],
            'Side': OrderSide[_d0["Side"]],
            'OpenClose': OpenCloseType[_d0["OpenClose"]],
            'HedgeFlag': HedgeFlagType[_d0["HedgeFlag"]],
        }
        _d0.update(_d1)
        # LocalID may be absent; fall back to an empty string
        local_id = _d0.get('LocalID', '')

        order_id = self.td.send_order(_d0)

        # LocalID can be left unset, but then the remote side cannot correlate the order
        if len(local_id) > 0:
            self.orders_map[order_id] = local_id

    def on_cancel_order(self, data):
        logger.info('on_cancel_order:%s', data)
        args = data['args']
        if not self.td.is_connected():
            return
        self.td.cancel_order(args["ID"])

    def on_query_account(self, data):
        logger.info('on_query_account')
        query = ReqQueryField()
        self.td.req_query(QueryType.ReqQryTradingAccount, query)

    def on_query_positions(self, data):
        logger.info('on_query_positions')
        query = ReqQueryField()
        self.td.req_query(QueryType.ReqQryInvestorPosition, query)

    def on_query_instrument(self, data):
        logger.info('on_query_instrument')
        args = data['args']

        query = ReqQueryField()
        try:
            exchange_id = args['ExchangeID']
            query.ExchangeID = exchange_id.encode()
        except KeyError:
            # ExchangeID is optional in the request
            pass
        self.td.req_query(QueryType.ReqQryInstrument, query)

    def on_query_order(self, data):
        logger.info('on_query_order')
        args = data['args']

        query = ReqQueryField()
        self.td.req_query(QueryType.ReqQryOrder, query)

    def on_query_settlement_info(self, data):
        logger.info('on_query_settlement_info:%s', data)
        args = data['args']

        query = ReqQueryField()
        query.DateStart = args["TradingDay"]
        self.td.req_query(QueryType.ReqQrySettlementInfo, query)

    def on_query_history_data(self, data):
        logger.info('on_query_history_data:%s', data)
        args = data['args']

        self.spi.emit_rsp_qry_history_data(args)
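# on_send_order above first overlays the caller's arguments on a dict of defaults and then swaps
# the enum names for their values before handing the order to the trading API. A self-contained
# sketch of that merge-then-map step with a toy enum (names and values are illustrative):
from enum import IntEnum

class Side(IntEnum):
    Buy = 1
    Sell = 2

defaults = {"InstrumentID": "c1909", "Side": "Buy", "Qty": 1}
incoming = {"Side": "Sell", "Qty": 3}

order = {**defaults, **incoming}      # caller's fields win, missing fields keep defaults
order["Side"] = Side[order["Side"]]   # look the enum member up by its string name
print(order)                          # {'InstrumentID': 'c1909', 'Side': <Side.Sell: 2>, 'Qty': 3}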
from __future__ import absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7NEWLINENEWLINE# Importing the Kratos LibraryNEWLINEimport KratosMultiphysicsNEWLINENEWLINE# Import applicationsNEWLINEimport KratosMultiphysics.ConvectionDiffusionApplication as KratosConvDiffNEWLINEimport KratosMultiphysics.MultilevelMonteCarloApplication as KratosMLMCNEWLINENEWLINE# Avoid printing of Kratos informationsNEWLINEKratosMultiphysics.Logger.GetDefaultOutput().SetSeverity(KratosMultiphysics.Logger.Severity.WARNING) # avoid printing of Kratos thingsNEWLINENEWLINE# Importing the base classNEWLINEfrom analysis_stage import AnalysisStageNEWLINENEWLINE# Import packagesNEWLINEimport numpy as npNEWLINENEWLINE# Import Monte Carlo libraryNEWLINEimport mc_utilities as mcNEWLINENEWLINE# Import cpickle to pickle the serializerNEWLINEtry:NEWLINE import cpickle as pickle # Use cPickle on Python 2.7NEWLINEexcept ImportError:NEWLINE import pickleNEWLINENEWLINE# Import exaquteNEWLINEfrom exaqute.ExaquteTaskPyCOMPSs import * # to execute with pycompssNEWLINE# from exaqute.ExaquteTaskHyperLoom import * # to execute with the IT4 schedulerNEWLINE# from exaqute.ExaquteTaskLocal import * # to execute with python3NEWLINE'''NEWLINEget_value_from_remote is the equivalent of compss_wait_on: a synchronization pointNEWLINEin future, when everything is integrated with the it4i team, importing exaqute.ExaquteTaskHyperLoom you can launch your code with their scheduler instead of BSCNEWLINE'''NEWLINENEWLINENEWLINE'''Adapt the following class depending on the problem, deriving the MonteCarloAnalysis class from the problem of interest'''NEWLINENEWLINE'''NEWLINEThis Analysis Stage implementation solves the elliptic PDE in (0,1)^2 with zero Dirichlet boundary conditionsNEWLINE-lapl(u) = xi*f, f= -432*x*(x-1)*y*(y-1)NEWLINE f= -432*(x**2+y**2-x-y)NEWLINEwhere xi is a Beta(2,6) random variable, and computes statistic of the QoINEWLINEQ = int_(0,1)^2 u(x,y)dxdyNEWLINE'''NEWLINEclass MonteCarloAnalysis(AnalysisStage):NEWLINE '''Main analysis stage for Monte Carlo simulations'''NEWLINE def __init__(self,input_model,input_parameters,sample):NEWLINE self.sample = sampleNEWLINE super(MonteCarloAnalysis,self).__init__(input_model,input_parameters)NEWLINE self._GetSolver().main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_AREA)NEWLINENEWLINE def _CreateSolver(self):NEWLINE import convection_diffusion_stationary_solverNEWLINE return convection_diffusion_stationary_solver.CreateSolver(self.model,self.project_parameters["solver_settings"])NEWLINENEWLINE def _GetSimulationName(self):NEWLINE return "Monte Carlo Analysis"NEWLINENEWLINE '''Introduce here the stochasticity in the right hand side defining the forcing function and apply the stochastic contribute'''NEWLINE def ModifyInitialProperties(self):NEWLINE for node in self.model.GetModelPart("MLMCLaplacianModelPart").Nodes:NEWLINE coord_x = node.XNEWLINE coord_y = node.YNEWLINE # forcing = -432.0 * coord_x * (coord_x - 1) * coord_y * (coord_y - 1)NEWLINE forcing = -432.0 * (coord_x**2 + coord_y**2 - coord_x - coord_y) # this forcing presents the below commented analytical solutionNEWLINE node.SetSolutionStepValue(KratosMultiphysics.HEAT_FLUX,forcing*self.sample)NEWLINENEWLINENEWLINE##################################################NEWLINE######## END OF CLASS MONTECARLOANALYSIS #########NEWLINE##################################################NEWLINENEWLINENEWLINE'''NEWLINEfunction generating the random sampleNEWLINEhere the sample has a 
beta distribution with parameters alpha = 2.0 and beta = 6.0NEWLINE'''NEWLINEdef GenerateSample():NEWLINE alpha = 2.0NEWLINE beta = 6.0NEWLINE number_samples = 1NEWLINE sample = np.random.beta(alpha,beta,number_samples)NEWLINE return sampleNEWLINENEWLINENEWLINE'''NEWLINEfunction evaluating the QoI of the problem: int_{domain} TEMPERATURE(x,y) dx dyNEWLINEright now we are using the midpoint rule to evaluate the integral: improve!NEWLINE'''NEWLINEdef EvaluateQuantityOfInterest(simulation):NEWLINE """here we evaluate the QoI of the problem: int_{domain} SOLUTION(x,y) dx dyNEWLINE we use the midpoint rule to evaluate the integral"""NEWLINE KratosMultiphysics.CalculateNodalAreaProcess(simulation._GetSolver().main_model_part,2).Execute()NEWLINE Q = 0.0NEWLINE for node in simulation._GetSolver().main_model_part.Nodes:NEWLINE Q = Q + (node.GetSolutionStepValue(KratosMultiphysics.NODAL_AREA)*node.GetSolutionStepValue(KratosMultiphysics.TEMPERATURE))NEWLINE return QNEWLINENEWLINENEWLINE'''NEWLINEfunction called in the main returning a future object (the result class) and an integer (the finer level)NEWLINEinput:NEWLINE pickled_coarse_model : pickled modelNEWLINE pickled_coarse_parameters : pickled parametersNEWLINEoutput:NEWLINE MonteCarloResults class : class of the simulation resultsNEWLINE current_MC_level : level of the current MLMC simulationNEWLINE'''NEWLINEdef ExecuteMonteCarloAnalysis(pickled_model, pickled_parameters):NEWLINE current_MC_level = 0 # MC has only level 0NEWLINE return (ExecuteMonteCarloAnalysis_Task(pickled_model, pickled_parameters),current_MC_level)NEWLINENEWLINENEWLINE'''NEWLINEfunction executing the problemNEWLINEinput:NEWLINE model : serialization of the modelNEWLINE parameters : serialization of the Project ParametersNEWLINEoutput:NEWLINE QoI : Quantity of InterestNEWLINE'''NEWLINE@ExaquteTask(returns=1)NEWLINEdef ExecuteMonteCarloAnalysis_Task(pickled_model, pickled_parameters):NEWLINE '''overwrite the old model serializer with the unpickled one'''NEWLINE model_serializer = pickle.loads(pickled_model)NEWLINE current_model = KratosMultiphysics.Model()NEWLINE model_serializer.Load("ModelSerialization",current_model)NEWLINE del(model_serializer)NEWLINE '''overwrite the old parameters serializer with the unpickled one'''NEWLINE serialized_parameters = pickle.loads(pickled_parameters)NEWLINE current_parameters = KratosMultiphysics.Parameters()NEWLINE serialized_parameters.Load("ParametersSerialization",current_parameters)NEWLINE del(serialized_parameters)NEWLINE '''initialize the MonteCarloResults class'''NEWLINE current_level = 0 # always 0 for MCNEWLINE mc_results_class = mc.MonteCarloResults(current_level)NEWLINE sample = GenerateSample()NEWLINE simulation = MonteCarloAnalysis(current_model,current_parameters,sample)NEWLINE simulation.Run()NEWLINE QoI = EvaluateQuantityOfInterest(simulation)NEWLINE mc_results_class.QoI[current_level].append(QoI) # saving results in the corresponding list, for MC only list of level 0NEWLINE return mc_results_classNEWLINENEWLINENEWLINE'''NEWLINEfunction serializing and pickling the model and the parameters of the problemNEWLINEthe idea is the following:NEWLINEi) from Model/Parameters Kratos object to StreamSerializer Kratos objectNEWLINEii) from StreamSerializer Kratos object to pickle stringNEWLINEiii) from pickle string to StreamSerializer Kratos objectNEWLINEiv) from StreamSerializer Kratos object to Model/Parameters Kratos objectNEWLINEinput:NEWLINE parameter_file_name : path of the Project Parameters fileNEWLINEoutput:NEWLINE 
pickled_model : model serializationNEWLINE pickled_parameters : project parameters serializationNEWLINE'''NEWLINE@ExaquteTask(parameter_file_name=FILE_IN,returns=2)NEWLINEdef SerializeModelParameters_Task(parameter_file_name):NEWLINE with open(parameter_file_name,'r') as parameter_file:NEWLINE parameters = KratosMultiphysics.Parameters(parameter_file.read())NEWLINE local_parameters = parametersNEWLINE model = KratosMultiphysics.Model()NEWLINE # local_parameters["solver_settings"]["model_import_settings"]["input_filename"].SetString(model_part_file_name[:-5])NEWLINE fake_sample = GenerateSample()NEWLINE simulation = MonteCarloAnalysis(model,local_parameters,fake_sample)NEWLINE simulation.Initialize()NEWLINE serialized_model = KratosMultiphysics.StreamSerializer()NEWLINE serialized_model.Save("ModelSerialization",simulation.model)NEWLINE serialized_parameters = KratosMultiphysics.StreamSerializer()NEWLINE serialized_parameters.Save("ParametersSerialization",simulation.project_parameters)NEWLINE # pickle the serialized dataNEWLINE pickled_model = pickle.dumps(serialized_model, 2) # second argument is the pickle protocol and is NECESSARY (according to pybind11 docs)NEWLINE pickled_parameters = pickle.dumps(serialized_parameters, 2)NEWLINE print("\n","#"*50," SERIALIZATION COMPLETED ","#"*50,"\n")NEWLINE return pickled_model,pickled_parametersNEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINENEWLINE '''set the ProjectParameters.json path'''NEWLINE parameter_file_name = "../tests/PoissonSquareTest/parameters_poisson_finer.json"NEWLINE '''create a serialization of the model and of the project parameters'''NEWLINE pickled_model,pickled_parameters = SerializeModelParameters_Task(parameter_file_name)NEWLINE '''customize the settings of the MC simulation'''NEWLINE settings_MC_simulation = KratosMultiphysics.Parameters("""NEWLINE {NEWLINE "tolerance" : 0.1,NEWLINE "cphi" : 5e-1,NEWLINE "batch_size" : 20,NEWLINE "convergence_criteria" : "MC_higher_moments_sequential_stopping_rule"NEWLINE }NEWLINE """)NEWLINE '''construct the MonteCarlo class'''NEWLINE mc_class = mc.MonteCarlo(settings_MC_simulation)NEWLINE '''start the MC algorithm'''NEWLINE while mc_class.convergence is not True:NEWLINE mc_class.InitializeMCPhase()NEWLINE mc_class.ScreeningInfoInitializeMCPhase()NEWLINE for instance in range(mc_class.difference_number_samples[0]):NEWLINE mc_class.AddResults(ExecuteMonteCarloAnalysis(pickled_model,pickled_parameters))NEWLINE mc_class.FinalizeMCPhase()NEWLINE mc_class.ScreeningInfoFinalizeMCPhase()NEWLINENEWLINE mc_class.QoI.mean = get_value_from_remote(mc_class.QoI.mean)NEWLINE print("\nMC mean = ",mc_class.QoI.mean)NEWLINENEWLINENEWLINE ''' The commented block below evaluates the relative L2 error between the numerical solution SOLUTION(x,y,sample) and the analytical solution, which also depends on the sample.NEWLINE The analytical solution is available in case FORCING = sample * -432.0 * (coord_x**2 + coord_y**2 - coord_x - coord_y)'''NEWLINE # model_serializer = pickle.loads(pickled_model)NEWLINE # current_model = KratosMultiphysics.Model()NEWLINE # model_serializer.Load("ModelSerialization",current_model)NEWLINE # del(model_serializer)NEWLINE # serialized_parameters = pickle.loads(pickled_parameters)NEWLINE # current_parameters = KratosMultiphysics.Parameters()NEWLINE # serialized_parameters.Load("ParametersSerialization",current_parameters)NEWLINE # del(serialized_parameters)NEWLINE # sample = 1.0NEWLINE # simulation = MonteCarloAnalysis(current_model,current_parameters,sample)NEWLINE # simulation.Run()NEWLINE # 
KratosMultiphysics.CalculateNodalAreaProcess(simulation._GetSolver().main_model_part,2).Execute()NEWLINE # error = 0.0NEWLINE # L2norm_analyticalsolution = 0.0NEWLINE # for node in simulation._GetSolver().main_model_part.Nodes:NEWLINE # local_error = ((node.GetSolutionStepValue(KratosMultiphysics.TEMPERATURE) - (432.0*simulation.sample*node.X*node.Y*(1-node.X)*(1-node.Y)*0.5))**2) * node.GetSolutionStepValue(KratosMultiphysics.NODAL_AREA)NEWLINE # error = error + local_errorNEWLINE # local_analyticalsolution = (432.0*simulation.sample*node.X*node.Y*(1-node.X)*(1-node.Y)*0.5)**2 * node.GetSolutionStepValue(KratosMultiphysics.NODAL_AREA)NEWLINE # L2norm_analyticalsolution = L2norm_analyticalsolution + local_analyticalsolutionNEWLINE # error = np.sqrt(error)NEWLINE # L2norm_analyticalsolution = np.sqrt(L2norm_analyticalsolution)NEWLINE # print("L2 relative error = ", error/L2norm_analyticalsolution)
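For reference, the Monte Carlo loop in the script above has a simple closed-form counterpart: the commented error check records the analytical solution u(x,y) = 216*xi*x*(1-x)*y*(1-y), so the quantity of interest reduces to Q = int_(0,1)^2 u dx dy = 6*xi with xi ~ Beta(2,6), giving E[Q] = 6 * 2/(2+6) = 1.5. The sketch below is an illustration only, independent of Kratos, mc_utilities and PyCOMPSs: it reuses the batch size and tolerance from the settings above, but replaces the library's MC_higher_moments_sequential_stopping_rule with a simplified standard-error stopping criterion and the PDE solve with the closed-form QoI.

import numpy as np

def generate_sample():
    # xi ~ Beta(2, 6), as in GenerateSample() above
    return np.random.beta(2.0, 6.0)

def evaluate_qoi(sample):
    # closed-form QoI for u(x,y) = 216*sample*x*(1-x)*y*(1-y):
    # int_(0,1)^2 u dx dy = 216*sample*(1/6)*(1/6) = 6*sample
    return 6.0 * sample

def run_mc(tolerance=0.1, batch_size=20, max_batches=500):
    qoi_values = []
    for _ in range(max_batches):
        # add one batch of independent samples
        qoi_values.extend(evaluate_qoi(generate_sample()) for _ in range(batch_size))
        mean = np.mean(qoi_values)
        # simplified stopping rule: 95% confidence half-width below tolerance * |mean|
        half_width = 1.96 * np.std(qoi_values, ddof=1) / np.sqrt(len(qoi_values))
        if half_width < tolerance * abs(mean):
            break
    return mean, len(qoi_values)

if __name__ == '__main__':
    mean, n = run_mc()
    print("MC mean =", mean, "with", n, "samples (exact mean is 1.5)")

In the actual workflow each call to evaluate_qoi() corresponds to a full Kratos solve submitted as an ExaquteTask, which is why the results come back as futures and require get_value_from_remote() before the mean can be printed.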
"""NEWLINECreate SQL statements for QuerySets.NEWLINENEWLINEThe code in here encapsulates all of the SQL construction so that QuerySetsNEWLINEthemselves do not have to (and could be backed by things other than SQLNEWLINEdatabases). The abstraction barrier only works one way: this module has to knowNEWLINEall about the internals of models in order to get the information it needs.NEWLINE"""NEWLINEimport copyNEWLINEimport difflibNEWLINEimport functoolsNEWLINEimport inspectNEWLINEimport sysNEWLINEimport warningsNEWLINEfrom collections import Counter, namedtupleNEWLINEfrom collections.abc import Iterator, MappingNEWLINEfrom itertools import chain, count, productNEWLINEfrom string import ascii_uppercaseNEWLINENEWLINEfrom django.core.exceptions import (NEWLINE EmptyResultSet, FieldDoesNotExist, FieldError,NEWLINE)NEWLINEfrom django.db import DEFAULT_DB_ALIAS, NotSupportedError, connectionsNEWLINEfrom django.db.models.aggregates import CountNEWLINEfrom django.db.models.constants import LOOKUP_SEPNEWLINEfrom django.db.models.expressions import BaseExpression, Col, F, OuterRef, RefNEWLINEfrom django.db.models.fields import FieldNEWLINEfrom django.db.models.fields.related_lookups import MultiColSourceNEWLINEfrom django.db.models.lookups import LookupNEWLINEfrom django.db.models.query_utils import (NEWLINE Q, check_rel_lookup_compatibility, refs_expression,NEWLINE)NEWLINEfrom django.db.models.sql.constants import (NEWLINE INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE,NEWLINE)NEWLINEfrom django.db.models.sql.datastructures import (NEWLINE BaseTable, Empty, Join, MultiJoin,NEWLINE)NEWLINEfrom django.db.models.sql.where import (NEWLINE AND, OR, ExtraWhere, NothingNode, WhereNode,NEWLINE)NEWLINEfrom django.utils.deprecation import RemovedInDjango40WarningNEWLINEfrom django.utils.functional import cached_propertyNEWLINEfrom django.utils.tree import NodeNEWLINENEWLINE__all__ = ['Query', 'RawQuery']NEWLINENEWLINENEWLINEdef get_field_names_from_opts(opts):NEWLINE return set(chain.from_iterable(NEWLINE (f.name, f.attname) if f.concrete else (f.name,)NEWLINE for f in opts.get_fields()NEWLINE ))NEWLINENEWLINENEWLINEdef get_children_from_q(q):NEWLINE for child in q.children:NEWLINE if isinstance(child, Node):NEWLINE yield from get_children_from_q(child)NEWLINE else:NEWLINE yield childNEWLINENEWLINENEWLINEJoinInfo = namedtuple(NEWLINE 'JoinInfo',NEWLINE ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function')NEWLINE)NEWLINENEWLINENEWLINEclass RawQuery:NEWLINE """A single raw SQL query."""NEWLINENEWLINE def __init__(self, sql, using, params=None):NEWLINE self.params = params or ()NEWLINE self.sql = sqlNEWLINE self.using = usingNEWLINE self.cursor = NoneNEWLINENEWLINE # Mirror some properties of a normal query so thatNEWLINE # the compiler can be used to process results.NEWLINE self.low_mark, self.high_mark = 0, None # Used for offset/limitNEWLINE self.extra_select = {}NEWLINE self.annotation_select = {}NEWLINENEWLINE def chain(self, using):NEWLINE return self.clone(using)NEWLINENEWLINE def clone(self, using):NEWLINE return RawQuery(self.sql, using, params=self.params)NEWLINENEWLINE def get_columns(self):NEWLINE if self.cursor is None:NEWLINE self._execute_query()NEWLINE converter = connections[self.using].introspection.identifier_converterNEWLINE return [converter(column_meta[0])NEWLINE for column_meta in self.cursor.description]NEWLINENEWLINE def __iter__(self):NEWLINE # Always execute a new query for a new iterator.NEWLINE # This could be optimized with a cache at the expense of RAM.NEWLINE 
self._execute_query()NEWLINE if not connections[self.using].features.can_use_chunked_reads:NEWLINE # If the database can't use chunked reads we need to make sure weNEWLINE # evaluate the entire query up front.NEWLINE result = list(self.cursor)NEWLINE else:NEWLINE result = self.cursorNEWLINE return iter(result)NEWLINENEWLINE def __repr__(self):NEWLINE return "<%s: %s>" % (self.__class__.__name__, self)NEWLINENEWLINE @propertyNEWLINE def params_type(self):NEWLINE return dict if isinstance(self.params, Mapping) else tupleNEWLINENEWLINE def __str__(self):NEWLINE return self.sql % self.params_type(self.params)NEWLINENEWLINE def _execute_query(self):NEWLINE connection = connections[self.using]NEWLINENEWLINE # Adapt parameters to the database, as much as possible consideringNEWLINE # that the target type isn't known. See #17755.NEWLINE params_type = self.params_typeNEWLINE adapter = connection.ops.adapt_unknown_valueNEWLINE if params_type is tuple:NEWLINE params = tuple(adapter(val) for val in self.params)NEWLINE elif params_type is dict:NEWLINE params = {key: adapter(val) for key, val in self.params.items()}NEWLINE else:NEWLINE raise RuntimeError("Unexpected params type: %s" % params_type)NEWLINENEWLINE self.cursor = connection.cursor()NEWLINE self.cursor.execute(self.sql, params)NEWLINENEWLINENEWLINEclass Query(BaseExpression):NEWLINE """A single SQL query."""NEWLINENEWLINE alias_prefix = 'T'NEWLINE subq_aliases = frozenset([alias_prefix])NEWLINENEWLINE compiler = 'SQLCompiler'NEWLINENEWLINE def __init__(self, model, where=WhereNode, alias_cols=True):NEWLINE self.model = modelNEWLINE self.alias_refcount = {}NEWLINE # alias_map is the most important data structure regarding joins.NEWLINE # It's used for recording which joins exist in the query and whatNEWLINE # types they are. The key is the alias of the joined table (possiblyNEWLINE # the table name) and the value is a Join-like object (seeNEWLINE # sql.datastructures.Join for more information).NEWLINE self.alias_map = {}NEWLINE # Whether to provide alias to columns during reference resolving.NEWLINE self.alias_cols = alias_colsNEWLINE # Sometimes the query contains references to aliases in outer queries (asNEWLINE # a result of split_exclude). 
Correct alias quoting needs to know theseNEWLINE # aliases too.NEWLINE self.external_aliases = set()NEWLINE self.table_map = {} # Maps table names to list of aliases.NEWLINE self.default_cols = TrueNEWLINE self.default_ordering = TrueNEWLINE self.standard_ordering = TrueNEWLINE self.used_aliases = set()NEWLINE self.filter_is_sticky = FalseNEWLINE self.subquery = FalseNEWLINENEWLINE # SQL-related attributesNEWLINE # Select and related select clauses are expressions to use in theNEWLINE # SELECT clause of the query.NEWLINE # The select is used for cases where we want to set up the selectNEWLINE # clause to contain other than default fields (values(), subqueries...)NEWLINE # Note that annotations go to annotations dictionary.NEWLINE self.select = ()NEWLINE self.where = where()NEWLINE self.where_class = whereNEWLINE # The group_by attribute can have one of the following forms:NEWLINE # - None: no group by at all in the queryNEWLINE # - A tuple of expressions: group by (at least) those expressions.NEWLINE # String refs are also allowed for now.NEWLINE # - True: group by all select fields of the modelNEWLINE # See compiler.get_group_by() for details.NEWLINE self.group_by = NoneNEWLINE self.order_by = ()NEWLINE self.low_mark, self.high_mark = 0, None # Used for offset/limitNEWLINE self.distinct = FalseNEWLINE self.distinct_fields = ()NEWLINE self.select_for_update = FalseNEWLINE self.select_for_update_nowait = FalseNEWLINE self.select_for_update_skip_locked = FalseNEWLINE self.select_for_update_of = ()NEWLINENEWLINE self.select_related = FalseNEWLINE # Arbitrary limit for select_related to prevents infinite recursion.NEWLINE self.max_depth = 5NEWLINENEWLINE # Holds the selects defined by a call to values() or values_list()NEWLINE # excluding annotation_select and extra_select.NEWLINE self.values_select = ()NEWLINENEWLINE # SQL annotation-related attributesNEWLINE self.annotations = {} # Maps alias -> Annotation ExpressionNEWLINE self.annotation_select_mask = NoneNEWLINE self._annotation_select_cache = NoneNEWLINENEWLINE # Set combination attributesNEWLINE self.combinator = NoneNEWLINE self.combinator_all = FalseNEWLINE self.combined_queries = ()NEWLINENEWLINE # These are for extensions. 
The contents are more or less appendedNEWLINE # verbatim to the appropriate clause.NEWLINE self.extra = {} # Maps col_alias -> (col_sql, params).NEWLINE self.extra_select_mask = NoneNEWLINE self._extra_select_cache = NoneNEWLINENEWLINE self.extra_tables = ()NEWLINE self.extra_order_by = ()NEWLINENEWLINE # A tuple that is a set of model field names and either True, if theseNEWLINE # are the fields to defer, or False if these are the only fields toNEWLINE # load.NEWLINE self.deferred_loading = (frozenset(), True)NEWLINENEWLINE self._filtered_relations = {}NEWLINENEWLINE self.explain_query = FalseNEWLINE self.explain_format = NoneNEWLINE self.explain_options = {}NEWLINENEWLINE @propertyNEWLINE def output_field(self):NEWLINE if len(self.select) == 1:NEWLINE return self.select[0].fieldNEWLINE elif len(self.annotation_select) == 1:NEWLINE return next(iter(self.annotation_select.values())).output_fieldNEWLINENEWLINE @propertyNEWLINE def has_select_fields(self):NEWLINE return bool(self.select or self.annotation_select_mask or self.extra_select_mask)NEWLINENEWLINE @cached_propertyNEWLINE def base_table(self):NEWLINE for alias in self.alias_map:NEWLINE return aliasNEWLINENEWLINE def __str__(self):NEWLINE """NEWLINE Return the query as a string of SQL with the parameter valuesNEWLINE substituted in (use sql_with_params() to see the unsubstituted string).NEWLINENEWLINE Parameter values won't necessarily be quoted correctly, since that isNEWLINE done by the database interface at execution time.NEWLINE """NEWLINE sql, params = self.sql_with_params()NEWLINE return sql % paramsNEWLINENEWLINE def sql_with_params(self):NEWLINE """NEWLINE Return the query as an SQL string and the parameters that will beNEWLINE substituted into the query.NEWLINE """NEWLINE return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()NEWLINENEWLINE def __deepcopy__(self, memo):NEWLINE """Limit the amount of work when a Query is deepcopied."""NEWLINE result = self.clone()NEWLINE memo[id(self)] = resultNEWLINE return resultNEWLINENEWLINE def get_compiler(self, using=None, connection=None):NEWLINE if using is None and connection is None:NEWLINE raise ValueError("Need either using or connection")NEWLINE if using:NEWLINE connection = connections[using]NEWLINE return connection.ops.compiler(self.compiler)(self, connection, using)NEWLINENEWLINE def get_meta(self):NEWLINE """NEWLINE Return the Options instance (the model._meta) from which to startNEWLINE processing. Normally, this is self.model._meta, but it can be changedNEWLINE by subclasses.NEWLINE """NEWLINE return self.model._metaNEWLINENEWLINE def clone(self):NEWLINE """NEWLINE Return a copy of the current Query. 
A lightweight alternative toNEWLINE to deepcopy().NEWLINE """NEWLINE obj = Empty()NEWLINE obj.__class__ = self.__class__NEWLINE # Copy references to everything.NEWLINE obj.__dict__ = self.__dict__.copy()NEWLINE # Clone attributes that can't use shallow copy.NEWLINE obj.alias_refcount = self.alias_refcount.copy()NEWLINE obj.alias_map = self.alias_map.copy()NEWLINE obj.external_aliases = self.external_aliases.copy()NEWLINE obj.table_map = self.table_map.copy()NEWLINE obj.where = self.where.clone()NEWLINE obj.annotations = self.annotations.copy()NEWLINE if self.annotation_select_mask is None:NEWLINE obj.annotation_select_mask = NoneNEWLINE else:NEWLINE obj.annotation_select_mask = self.annotation_select_mask.copy()NEWLINE # _annotation_select_cache cannot be copied, as doing so breaks theNEWLINE # (necessary) state in which both annotations andNEWLINE # _annotation_select_cache point to the same underlying objects.NEWLINE # It will get re-populated in the cloned queryset the next time it'sNEWLINE # used.NEWLINE obj._annotation_select_cache = NoneNEWLINE obj.extra = self.extra.copy()NEWLINE if self.extra_select_mask is None:NEWLINE obj.extra_select_mask = NoneNEWLINE else:NEWLINE obj.extra_select_mask = self.extra_select_mask.copy()NEWLINE if self._extra_select_cache is None:NEWLINE obj._extra_select_cache = NoneNEWLINE else:NEWLINE obj._extra_select_cache = self._extra_select_cache.copy()NEWLINE if self.select_related is not False:NEWLINE # Use deepcopy because select_related stores fields in nestedNEWLINE # dicts.NEWLINE obj.select_related = copy.deepcopy(obj.select_related)NEWLINE if 'subq_aliases' in self.__dict__:NEWLINE obj.subq_aliases = self.subq_aliases.copy()NEWLINE obj.used_aliases = self.used_aliases.copy()NEWLINE obj._filtered_relations = self._filtered_relations.copy()NEWLINE # Clear the cached_propertyNEWLINE try:NEWLINE del obj.base_tableNEWLINE except AttributeError:NEWLINE passNEWLINE return objNEWLINENEWLINE def chain(self, klass=None):NEWLINE """NEWLINE Return a copy of the current Query that's ready for another operation.NEWLINE The klass argument changes the type of the Query, e.g. UpdateQuery.NEWLINE """NEWLINE obj = self.clone()NEWLINE if klass and obj.__class__ != klass:NEWLINE obj.__class__ = klassNEWLINE if not obj.filter_is_sticky:NEWLINE obj.used_aliases = set()NEWLINE obj.filter_is_sticky = FalseNEWLINE if hasattr(obj, '_setup_query'):NEWLINE obj._setup_query()NEWLINE return objNEWLINENEWLINE def relabeled_clone(self, change_map):NEWLINE clone = self.clone()NEWLINE clone.change_aliases(change_map)NEWLINE return cloneNEWLINENEWLINE def _get_col(self, target, field, alias):NEWLINE if not self.alias_cols:NEWLINE alias = NoneNEWLINE return target.get_col(alias, field)NEWLINENEWLINE def rewrite_cols(self, annotation, col_cnt):NEWLINE # We must make sure the inner query has the referred columns in it.NEWLINE # If we are aggregating over an annotation, then Django uses Ref()NEWLINE # instances to note this. However, if we are annotating over a columnNEWLINE # of a related model, then it might be that column isn't part of theNEWLINE # SELECT clause of the inner query, and we must manually make sureNEWLINE # the column is selected. An example case is:NEWLINE # .aggregate(Sum('author__awards'))NEWLINE # Resolving this expression results in a join to author, but thereNEWLINE # is no guarantee the awards column of author is in the select clauseNEWLINE # of the query. 
Thus we must manually add the column to the innerNEWLINE # query.NEWLINE orig_exprs = annotation.get_source_expressions()NEWLINE new_exprs = []NEWLINE for expr in orig_exprs:NEWLINE # FIXME: These conditions are fairly arbitrary. Identify a betterNEWLINE # method of having expressions decide which code path they shouldNEWLINE # take.NEWLINE if isinstance(expr, Ref):NEWLINE # Its already a Ref to subquery (see resolve_ref() forNEWLINE # details)NEWLINE new_exprs.append(expr)NEWLINE elif isinstance(expr, (WhereNode, Lookup)):NEWLINE # Decompose the subexpressions further. The code here isNEWLINE # copied from the else clause, but this condition must appearNEWLINE # before the contains_aggregate/is_summary condition below.NEWLINE new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)NEWLINE new_exprs.append(new_expr)NEWLINE else:NEWLINE # Reuse aliases of expressions already selected in subquery.NEWLINE for col_alias, selected_annotation in self.annotation_select.items():NEWLINE if selected_annotation == expr:NEWLINE new_expr = Ref(col_alias, expr)NEWLINE breakNEWLINE else:NEWLINE # An expression that is not selected the subquery.NEWLINE if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):NEWLINE # Reference column or another aggregate. Select itNEWLINE # under a non-conflicting alias.NEWLINE col_cnt += 1NEWLINE col_alias = '__col%d' % col_cntNEWLINE self.annotations[col_alias] = exprNEWLINE self.append_annotation_mask([col_alias])NEWLINE new_expr = Ref(col_alias, expr)NEWLINE else:NEWLINE # Some other expression not referencing database valuesNEWLINE # directly. Its subexpression might contain Cols.NEWLINE new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)NEWLINE new_exprs.append(new_expr)NEWLINE annotation.set_source_expressions(new_exprs)NEWLINE return annotation, col_cntNEWLINENEWLINE def get_aggregation(self, using, added_aggregate_names):NEWLINE """NEWLINE Return the dictionary with the values of the existing aggregations.NEWLINE """NEWLINE if not self.annotation_select:NEWLINE return {}NEWLINE existing_annotations = [NEWLINE annotation for alias, annotationNEWLINE in self.annotations.items()NEWLINE if alias not in added_aggregate_namesNEWLINE ]NEWLINE # Decide if we need to use a subquery.NEWLINE #NEWLINE # Existing annotations would cause incorrect results as get_aggregation()NEWLINE # must produce just one result and thus must not use GROUP BY. 
But weNEWLINE # aren't smart enough to remove the existing annotations from theNEWLINE # query, so those would force us to use GROUP BY.NEWLINE #NEWLINE # If the query has limit or distinct, or uses set operations, thenNEWLINE # those operations must be done in a subquery so that the queryNEWLINE # aggregates on the limit and/or distinct results instead of applyingNEWLINE # the distinct and limit after the aggregation.NEWLINE if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations orNEWLINE self.distinct or self.combinator):NEWLINE from django.db.models.sql.subqueries import AggregateQueryNEWLINE outer_query = AggregateQuery(self.model)NEWLINE inner_query = self.clone()NEWLINE inner_query.select_for_update = FalseNEWLINE inner_query.select_related = FalseNEWLINE inner_query.set_annotation_mask(self.annotation_select)NEWLINE if not self.is_sliced and not self.distinct_fields:NEWLINE # Queries with distinct_fields need ordering and when a limitNEWLINE # is applied we must take the slice from the ordered query.NEWLINE # Otherwise no need for ordering.NEWLINE inner_query.clear_ordering(True)NEWLINE if not inner_query.distinct:NEWLINE # If the inner query uses default select and it has someNEWLINE # aggregate annotations, then we must make sure the innerNEWLINE # query is grouped by the main model's primary key. However,NEWLINE # clearing the select clause can alter results if distinct isNEWLINE # used.NEWLINE has_existing_aggregate_annotations = any(NEWLINE annotation for annotation in existing_annotationsNEWLINE if getattr(annotation, 'contains_aggregate', True)NEWLINE )NEWLINE if inner_query.default_cols and has_existing_aggregate_annotations:NEWLINE inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)NEWLINE inner_query.default_cols = FalseNEWLINENEWLINE relabels = {t: 'subquery' for t in inner_query.alias_map}NEWLINE relabels[None] = 'subquery'NEWLINE # Remove any aggregates marked for reduction from the subqueryNEWLINE # and move them to the outer AggregateQuery.NEWLINE col_cnt = 0NEWLINE for alias, expression in list(inner_query.annotation_select.items()):NEWLINE annotation_select_mask = inner_query.annotation_select_maskNEWLINE if expression.is_summary:NEWLINE expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)NEWLINE outer_query.annotations[alias] = expression.relabeled_clone(relabels)NEWLINE del inner_query.annotations[alias]NEWLINE annotation_select_mask.remove(alias)NEWLINE # Make sure the annotation_select wont use cached results.NEWLINE inner_query.set_annotation_mask(inner_query.annotation_select_mask)NEWLINE if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:NEWLINE # In case of Model.objects[0:3].count(), there would be noNEWLINE # field selected in the inner query, yet we must use a subquery.NEWLINE # So, make sure at least one field is selected.NEWLINE inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)NEWLINE try:NEWLINE outer_query.add_subquery(inner_query, using)NEWLINE except EmptyResultSet:NEWLINE return {NEWLINE alias: NoneNEWLINE for alias in outer_query.annotation_selectNEWLINE }NEWLINE else:NEWLINE outer_query = selfNEWLINE self.select = ()NEWLINE self.default_cols = FalseNEWLINE self.extra = {}NEWLINENEWLINE outer_query.clear_ordering(True)NEWLINE outer_query.clear_limits()NEWLINE outer_query.select_for_update = FalseNEWLINE outer_query.select_related = FalseNEWLINE compiler = 
outer_query.get_compiler(using)NEWLINE result = compiler.execute_sql(SINGLE)NEWLINE if result is None:NEWLINE result = [None] * len(outer_query.annotation_select)NEWLINENEWLINE converters = compiler.get_converters(outer_query.annotation_select.values())NEWLINE result = next(compiler.apply_converters((result,), converters))NEWLINENEWLINE return dict(zip(outer_query.annotation_select, result))NEWLINENEWLINE def get_count(self, using):NEWLINE """NEWLINE Perform a COUNT() query using the current filter constraints.NEWLINE """NEWLINE obj = self.clone()NEWLINE obj.add_annotation(Count('*'), alias='__count', is_summary=True)NEWLINE number = obj.get_aggregation(using, ['__count'])['__count']NEWLINE if number is None:NEWLINE number = 0NEWLINE return numberNEWLINENEWLINE def has_filters(self):NEWLINE return self.whereNEWLINENEWLINE def has_results(self, using):NEWLINE q = self.clone()NEWLINE if not q.distinct:NEWLINE if q.group_by is True:NEWLINE q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)NEWLINE q.set_group_by()NEWLINE q.clear_select_clause()NEWLINE q.clear_ordering(True)NEWLINE q.set_limits(high=1)NEWLINE compiler = q.get_compiler(using=using)NEWLINE return compiler.has_results()NEWLINENEWLINE def explain(self, using, format=None, **options):NEWLINE q = self.clone()NEWLINE q.explain_query = TrueNEWLINE q.explain_format = formatNEWLINE q.explain_options = optionsNEWLINE compiler = q.get_compiler(using=using)NEWLINE return '\n'.join(compiler.explain_query())NEWLINENEWLINE def combine(self, rhs, connector):NEWLINE """NEWLINE Merge the 'rhs' query into the current one (with any 'rhs' effectsNEWLINE being applied *after* (that is, "to the right of") anything in theNEWLINE current query. 'rhs' is not modified during a call to this function.NEWLINENEWLINE The 'connector' parameter describes how to connect filters from theNEWLINE 'rhs' query.NEWLINE """NEWLINE assert self.model == rhs.model, \NEWLINE "Cannot combine queries on two different base models."NEWLINE assert not self.is_sliced, \NEWLINE "Cannot combine queries once a slice has been taken."NEWLINE assert self.distinct == rhs.distinct, \NEWLINE "Cannot combine a unique query with a non-unique query."NEWLINE assert self.distinct_fields == rhs.distinct_fields, \NEWLINE "Cannot combine queries with different distinct fields."NEWLINENEWLINE # Work out how to relabel the rhs aliases, if necessary.NEWLINE change_map = {}NEWLINE conjunction = (connector == AND)NEWLINENEWLINE # Determine which existing joins can be reused. When combining theNEWLINE # query with AND we must recreate all joins for m2m filters. WhenNEWLINE # combining with OR we can reuse joins. The reason is that in ANDNEWLINE # case a single row can't fulfill a condition like:NEWLINE # revrel__col=1 & revrel__col=2NEWLINE # But, there might be two different related rows matching thisNEWLINE # condition. In OR case a single True is enough, so single row isNEWLINE # enough, too.NEWLINE #NEWLINE # Note that we will be creating duplicate joins for non-m2m joins inNEWLINE # the AND case. The results will be correct but this creates too manyNEWLINE # joins. 
This is something that could be fixed later on.NEWLINE reuse = set() if conjunction else set(self.alias_map)NEWLINE # Base table must be present in the query - this is the sameNEWLINE # table on both sides.NEWLINE self.get_initial_alias()NEWLINE joinpromoter = JoinPromoter(connector, 2, False)NEWLINE joinpromoter.add_votes(NEWLINE j for j in self.alias_map if self.alias_map[j].join_type == INNER)NEWLINE rhs_votes = set()NEWLINE # Now, add the joins from rhs query into the new query (skipping baseNEWLINE # table).NEWLINE rhs_tables = list(rhs.alias_map)[1:]NEWLINE for alias in rhs_tables:NEWLINE join = rhs.alias_map[alias]NEWLINE # If the left side of the join was already relabeled, use theNEWLINE # updated alias.NEWLINE join = join.relabeled_clone(change_map)NEWLINE new_alias = self.join(join, reuse=reuse)NEWLINE if join.join_type == INNER:NEWLINE rhs_votes.add(new_alias)NEWLINE # We can't reuse the same join again in the query. If we have twoNEWLINE # distinct joins for the same connection in rhs query, then theNEWLINE # combined query must have two joins, too.NEWLINE reuse.discard(new_alias)NEWLINE if alias != new_alias:NEWLINE change_map[alias] = new_aliasNEWLINE if not rhs.alias_refcount[alias]:NEWLINE # The alias was unused in the rhs query. Unref it so that itNEWLINE # will be unused in the new query, too. We have to add andNEWLINE # unref the alias so that join promotion has information ofNEWLINE # the join type for the unused alias.NEWLINE self.unref_alias(new_alias)NEWLINE joinpromoter.add_votes(rhs_votes)NEWLINE joinpromoter.update_join_types(self)NEWLINENEWLINE # Now relabel a copy of the rhs where-clause and add it to the currentNEWLINE # one.NEWLINE w = rhs.where.clone()NEWLINE w.relabel_aliases(change_map)NEWLINE self.where.add(w, connector)NEWLINENEWLINE # Selection columns and extra extensions are those provided by 'rhs'.NEWLINE if rhs.select:NEWLINE self.set_select([col.relabeled_clone(change_map) for col in rhs.select])NEWLINE else:NEWLINE self.select = ()NEWLINENEWLINE if connector == OR:NEWLINE # It would be nice to be able to handle this, but the queries don'tNEWLINE # really make sense (or return consistent value sets). Not worthNEWLINE # the extra complexity when you can write a real query instead.NEWLINE if self.extra and rhs.extra:NEWLINE raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")NEWLINE self.extra.update(rhs.extra)NEWLINE extra_select_mask = set()NEWLINE if self.extra_select_mask is not None:NEWLINE extra_select_mask.update(self.extra_select_mask)NEWLINE if rhs.extra_select_mask is not None:NEWLINE extra_select_mask.update(rhs.extra_select_mask)NEWLINE if extra_select_mask:NEWLINE self.set_extra_mask(extra_select_mask)NEWLINE self.extra_tables += rhs.extra_tablesNEWLINENEWLINE # Ordering uses the 'rhs' ordering, unless it has none, in which caseNEWLINE # the current ordering is used.NEWLINE self.order_by = rhs.order_by or self.order_byNEWLINE self.extra_order_by = rhs.extra_order_by or self.extra_order_byNEWLINENEWLINE def deferred_to_data(self, target, callback):NEWLINE """NEWLINE Convert the self.deferred_loading data structure to an alternate dataNEWLINE structure, describing the field that *will* be loaded. This is used toNEWLINE compute the columns to select from the database and also by theNEWLINE QuerySet class to work out which fields are being initialized on eachNEWLINE model. 
Models that have all their fields included aren't mentioned inNEWLINE the result, only those that have field restrictions in place.NEWLINENEWLINE The "target" parameter is the instance that is populated (in place).NEWLINE The "callback" is a function that is called whenever a (model, field)NEWLINE pair need to be added to "target". It accepts three parameters:NEWLINE "target", and the model and list of fields being added for that model.NEWLINE """NEWLINE field_names, defer = self.deferred_loadingNEWLINE if not field_names:NEWLINE returnNEWLINE orig_opts = self.get_meta()NEWLINE seen = {}NEWLINE must_include = {orig_opts.concrete_model: {orig_opts.pk}}NEWLINE for field_name in field_names:NEWLINE parts = field_name.split(LOOKUP_SEP)NEWLINE cur_model = self.model._meta.concrete_modelNEWLINE opts = orig_optsNEWLINE for name in parts[:-1]:NEWLINE old_model = cur_modelNEWLINE if name in self._filtered_relations:NEWLINE name = self._filtered_relations[name].relation_nameNEWLINE source = opts.get_field(name)NEWLINE if is_reverse_o2o(source):NEWLINE cur_model = source.related_modelNEWLINE else:NEWLINE cur_model = source.remote_field.modelNEWLINE opts = cur_model._metaNEWLINE # Even if we're "just passing through" this model, we must addNEWLINE # both the current model's pk and the related reference fieldNEWLINE # (if it's not a reverse relation) to the things we select.NEWLINE if not is_reverse_o2o(source):NEWLINE must_include[old_model].add(source)NEWLINE add_to_dict(must_include, cur_model, opts.pk)NEWLINE field = opts.get_field(parts[-1])NEWLINE is_reverse_object = field.auto_created and not field.concreteNEWLINE model = field.related_model if is_reverse_object else field.modelNEWLINE model = model._meta.concrete_modelNEWLINE if model == opts.model:NEWLINE model = cur_modelNEWLINE if not is_reverse_o2o(field):NEWLINE add_to_dict(seen, model, field)NEWLINENEWLINE if defer:NEWLINE # We need to load all fields for each model, except those thatNEWLINE # appear in "seen" (for all models that appear in "seen"). The onlyNEWLINE # slight complexity here is handling fields that exist on parentNEWLINE # models.NEWLINE workset = {}NEWLINE for model, values in seen.items():NEWLINE for field in model._meta.local_fields:NEWLINE if field not in values:NEWLINE m = field.model._meta.concrete_modelNEWLINE add_to_dict(workset, m, field)NEWLINE for model, values in must_include.items():NEWLINE # If we haven't included a model in workset, we don't add theNEWLINE # corresponding must_include fields for that model, since anNEWLINE # empty set means "include all fields". That's why there's noNEWLINE # "else" branch here.NEWLINE if model in workset:NEWLINE workset[model].update(values)NEWLINE for model, values in workset.items():NEWLINE callback(target, model, values)NEWLINE else:NEWLINE for model, values in must_include.items():NEWLINE if model in seen:NEWLINE seen[model].update(values)NEWLINE else:NEWLINE # As we've passed through this model, but not explicitlyNEWLINE # included any fields, we have to make sure it's mentionedNEWLINE # so that only the "must include" fields are pulled in.NEWLINE seen[model] = valuesNEWLINE # Now ensure that every model in the inheritance chain is mentionedNEWLINE # in the parent list. 
Again, it must be mentioned to ensure thatNEWLINE # only "must include" fields are pulled in.NEWLINE for model in orig_opts.get_parent_list():NEWLINE seen.setdefault(model, set())NEWLINE for model, values in seen.items():NEWLINE callback(target, model, values)NEWLINENEWLINE def table_alias(self, table_name, create=False, filtered_relation=None):NEWLINE """NEWLINE Return a table alias for the given table_name and whether this is aNEWLINE new alias or not.NEWLINENEWLINE If 'create' is true, a new alias is always created. Otherwise, theNEWLINE most recently created alias for the table (if one exists) is reused.NEWLINE """NEWLINE alias_list = self.table_map.get(table_name)NEWLINE if not create and alias_list:NEWLINE alias = alias_list[0]NEWLINE self.alias_refcount[alias] += 1NEWLINE return alias, FalseNEWLINENEWLINE # Create a new alias for this table.NEWLINE if alias_list:NEWLINE alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)NEWLINE alias_list.append(alias)NEWLINE else:NEWLINE # The first occurrence of a table uses the table name directly.NEWLINE alias = filtered_relation.alias if filtered_relation is not None else table_nameNEWLINE self.table_map[table_name] = [alias]NEWLINE self.alias_refcount[alias] = 1NEWLINE return alias, TrueNEWLINENEWLINE def ref_alias(self, alias):NEWLINE """Increases the reference count for this alias."""NEWLINE self.alias_refcount[alias] += 1NEWLINENEWLINE def unref_alias(self, alias, amount=1):NEWLINE """Decreases the reference count for this alias."""NEWLINE self.alias_refcount[alias] -= amountNEWLINENEWLINE def promote_joins(self, aliases):NEWLINE """NEWLINE Promote recursively the join type of given aliases and its children toNEWLINE an outer join. If 'unconditional' is False, only promote the join ifNEWLINE it is nullable or the parent join is an outer join.NEWLINENEWLINE The children promotion is done to avoid join chains that contain a LOUTERNEWLINE b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,NEWLINE then we must also promote b->c automatically, or otherwise the promotionNEWLINE of a->b doesn't actually change anything in the query results.NEWLINE """NEWLINE aliases = list(aliases)NEWLINE while aliases:NEWLINE alias = aliases.pop(0)NEWLINE if self.alias_map[alias].join_type is None:NEWLINE # This is the base table (first FROM entry) - this tableNEWLINE # isn't really joined at all in the query, so we should notNEWLINE # alter its join type.NEWLINE continueNEWLINE # Only the first alias (skipped above) should have None join_typeNEWLINE assert self.alias_map[alias].join_type is not NoneNEWLINE parent_alias = self.alias_map[alias].parent_aliasNEWLINE parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTERNEWLINE already_louter = self.alias_map[alias].join_type == LOUTERNEWLINE if ((self.alias_map[alias].nullable or parent_louter) andNEWLINE not already_louter):NEWLINE self.alias_map[alias] = self.alias_map[alias].promote()NEWLINE # Join type of 'alias' changed, so re-examine all aliases thatNEWLINE # refer to this one.NEWLINE aliases.extend(NEWLINE join for join in self.alias_mapNEWLINE if self.alias_map[join].parent_alias == alias and join not in aliasesNEWLINE )NEWLINENEWLINE def demote_joins(self, aliases):NEWLINE """NEWLINE Change join type from LOUTER to INNER for all joins in aliases.NEWLINENEWLINE Similarly to promote_joins(), this method must ensure no join chainsNEWLINE containing first an outer, then an inner join are generated. 
If weNEWLINE are demoting b->c join in chain a LOUTER b LOUTER c then we mustNEWLINE demote a->b automatically, or otherwise the demotion of b->c doesn'tNEWLINE actually change anything in the query results. .NEWLINE """NEWLINE aliases = list(aliases)NEWLINE while aliases:NEWLINE alias = aliases.pop(0)NEWLINE if self.alias_map[alias].join_type == LOUTER:NEWLINE self.alias_map[alias] = self.alias_map[alias].demote()NEWLINE parent_alias = self.alias_map[alias].parent_aliasNEWLINE if self.alias_map[parent_alias].join_type == INNER:NEWLINE aliases.append(parent_alias)NEWLINENEWLINE def reset_refcounts(self, to_counts):NEWLINE """NEWLINE Reset reference counts for aliases so that they match the value passedNEWLINE in `to_counts`.NEWLINE """NEWLINE for alias, cur_refcount in self.alias_refcount.copy().items():NEWLINE unref_amount = cur_refcount - to_counts.get(alias, 0)NEWLINE self.unref_alias(alias, unref_amount)NEWLINENEWLINE def change_aliases(self, change_map):NEWLINE """NEWLINE Change the aliases in change_map (which maps old-alias -> new-alias),NEWLINE relabelling any references to them in select columns and the whereNEWLINE clause.NEWLINE """NEWLINE assert set(change_map).isdisjoint(change_map.values())NEWLINENEWLINE # 1. Update references in "select" (normal columns plus aliases),NEWLINE # "group by" and "where".NEWLINE self.where.relabel_aliases(change_map)NEWLINE if isinstance(self.group_by, tuple):NEWLINE self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])NEWLINE self.select = tuple([col.relabeled_clone(change_map) for col in self.select])NEWLINE self.annotations = self.annotations and {NEWLINE key: col.relabeled_clone(change_map) for key, col in self.annotations.items()NEWLINE }NEWLINENEWLINE # 2. Rename the alias in the internal table/alias datastructures.NEWLINE for old_alias, new_alias in change_map.items():NEWLINE if old_alias not in self.alias_map:NEWLINE continueNEWLINE alias_data = self.alias_map[old_alias].relabeled_clone(change_map)NEWLINE self.alias_map[new_alias] = alias_dataNEWLINE self.alias_refcount[new_alias] = self.alias_refcount[old_alias]NEWLINE del self.alias_refcount[old_alias]NEWLINE del self.alias_map[old_alias]NEWLINENEWLINE table_aliases = self.table_map[alias_data.table_name]NEWLINE for pos, alias in enumerate(table_aliases):NEWLINE if alias == old_alias:NEWLINE table_aliases[pos] = new_aliasNEWLINE breakNEWLINE self.external_aliases = {change_map.get(alias, alias)NEWLINE for alias in self.external_aliases}NEWLINENEWLINE def bump_prefix(self, outer_query):NEWLINE """NEWLINE Change the alias prefix to the next letter in the alphabet in a wayNEWLINE that the outer query's aliases and this query's aliases will notNEWLINE conflict. 
Even tables that previously had no alias will get an aliasNEWLINE after this call.NEWLINE """NEWLINE def prefix_gen():NEWLINE """NEWLINE Generate a sequence of characters in alphabetical order:NEWLINE -> 'A', 'B', 'C', ...NEWLINENEWLINE When the alphabet is finished, the sequence will continue with theNEWLINE Cartesian product:NEWLINE -> 'AA', 'AB', 'AC', ...NEWLINE """NEWLINE alphabet = ascii_uppercaseNEWLINE prefix = chr(ord(self.alias_prefix) + 1)NEWLINE yield prefixNEWLINE for n in count(1):NEWLINE seq = alphabet[alphabet.index(prefix):] if prefix else alphabetNEWLINE for s in product(seq, repeat=n):NEWLINE yield ''.join(s)NEWLINE prefix = NoneNEWLINENEWLINE if self.alias_prefix != outer_query.alias_prefix:NEWLINE # No clashes between self and outer query should be possible.NEWLINE returnNEWLINENEWLINE # Explicitly avoid infinite loop. The constant divider is based on howNEWLINE # much depth recursive subquery references add to the stack. This valueNEWLINE # might need to be adjusted when adding or removing function calls fromNEWLINE # the code path in charge of performing these operations.NEWLINE local_recursion_limit = sys.getrecursionlimit() // 16NEWLINE for pos, prefix in enumerate(prefix_gen()):NEWLINE if prefix not in self.subq_aliases:NEWLINE self.alias_prefix = prefixNEWLINE breakNEWLINE if pos > local_recursion_limit:NEWLINE raise RecursionError(NEWLINE 'Maximum recursion depth exceeded: too many subqueries.'NEWLINE )NEWLINE self.subq_aliases = self.subq_aliases.union([self.alias_prefix])NEWLINE outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)NEWLINE self.change_aliases({NEWLINE alias: '%s%d' % (self.alias_prefix, pos)NEWLINE for pos, alias in enumerate(self.alias_map)NEWLINE })NEWLINENEWLINE def get_initial_alias(self):NEWLINE """NEWLINE Return the first alias for this query, after increasing its referenceNEWLINE count.NEWLINE """NEWLINE if self.alias_map:NEWLINE alias = self.base_tableNEWLINE self.ref_alias(alias)NEWLINE else:NEWLINE alias = self.join(BaseTable(self.get_meta().db_table, None))NEWLINE return aliasNEWLINENEWLINE def count_active_tables(self):NEWLINE """NEWLINE Return the number of tables in this query with a non-zero referenceNEWLINE count. After execution, the reference counts are zeroed, so tablesNEWLINE added in compiler will not be seen by this method.NEWLINE """NEWLINE return len([1 for count in self.alias_refcount.values() if count])NEWLINENEWLINE def join(self, join, reuse=None, reuse_with_filtered_relation=False):NEWLINE """NEWLINE Return an alias for the 'join', either reusing an existing alias forNEWLINE that join or creating a new one. 'join' is either aNEWLINE sql.datastructures.BaseTable or Join.NEWLINENEWLINE The 'reuse' parameter can be either None which means all joins areNEWLINE reusable, or it can be a set containing the aliases that can be reused.NEWLINENEWLINE The 'reuse_with_filtered_relation' parameter is used when computingNEWLINE FilteredRelation instances.NEWLINENEWLINE A join is always created as LOUTER if the lhs alias is LOUTER to makeNEWLINE sure chains like t1 LOUTER t2 INNER t3 aren't generated. 
All newNEWLINE joins are created as LOUTER if the join is nullable.NEWLINE """NEWLINE if reuse_with_filtered_relation and reuse:NEWLINE reuse_aliases = [NEWLINE a for a, j in self.alias_map.items()NEWLINE if a in reuse and j.equals(join, with_filtered_relation=False)NEWLINE ]NEWLINE else:NEWLINE reuse_aliases = [NEWLINE a for a, j in self.alias_map.items()NEWLINE if (reuse is None or a in reuse) and j == joinNEWLINE ]NEWLINE if reuse_aliases:NEWLINE if join.table_alias in reuse_aliases:NEWLINE reuse_alias = join.table_aliasNEWLINE else:NEWLINE # Reuse the most recent alias of the joined tableNEWLINE # (a many-to-many relation may be joined multiple times).NEWLINE reuse_alias = reuse_aliases[-1]NEWLINE self.ref_alias(reuse_alias)NEWLINE return reuse_aliasNEWLINENEWLINE # No reuse is possible, so we need a new alias.NEWLINE alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)NEWLINE if join.join_type:NEWLINE if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:NEWLINE join_type = LOUTERNEWLINE else:NEWLINE join_type = INNERNEWLINE join.join_type = join_typeNEWLINE join.table_alias = aliasNEWLINE self.alias_map[alias] = joinNEWLINE return aliasNEWLINENEWLINE def join_parent_model(self, opts, model, alias, seen):NEWLINE """NEWLINE Make sure the given 'model' is joined in the query. If 'model' isn'tNEWLINE a parent of 'opts' or if it is None this method is a no-op.NEWLINENEWLINE The 'alias' is the root alias for starting the join, 'seen' is a dictNEWLINE of model -> alias of existing joins. It must also contain a mappingNEWLINE of None -> some alias. This will be returned in the no-op case.NEWLINE """NEWLINE if model in seen:NEWLINE return seen[model]NEWLINE chain = opts.get_base_chain(model)NEWLINE if not chain:NEWLINE return aliasNEWLINE curr_opts = optsNEWLINE for int_model in chain:NEWLINE if int_model in seen:NEWLINE curr_opts = int_model._metaNEWLINE alias = seen[int_model]NEWLINE continueNEWLINE # Proxy model have elements in base chainNEWLINE # with no parents, assign the new optionsNEWLINE # object and skip to the next base in thatNEWLINE # caseNEWLINE if not curr_opts.parents[int_model]:NEWLINE curr_opts = int_model._metaNEWLINE continueNEWLINE link_field = curr_opts.get_ancestor_link(int_model)NEWLINE join_info = self.setup_joins([link_field.name], curr_opts, alias)NEWLINE curr_opts = int_model._metaNEWLINE alias = seen[int_model] = join_info.joins[-1]NEWLINE return alias or seen[None]NEWLINENEWLINE def add_annotation(self, annotation, alias, is_summary=False):NEWLINE """Add a single annotation expression to the Query."""NEWLINE annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,NEWLINE summarize=is_summary)NEWLINE self.append_annotation_mask([alias])NEWLINE self.annotations[alias] = annotationNEWLINENEWLINE def resolve_expression(self, query, *args, **kwargs):NEWLINE clone = self.clone()NEWLINE # Subqueries need to use a different set of aliases than the outer query.NEWLINE clone.bump_prefix(query)NEWLINE clone.subquery = TrueNEWLINE # It's safe to drop ordering if the queryset isn't using slicing,NEWLINE # distinct(*fields) or select_for_update().NEWLINE if (self.low_mark == 0 and self.high_mark is None andNEWLINE not self.distinct_fields andNEWLINE not self.select_for_update):NEWLINE clone.clear_ordering(True)NEWLINE clone.where.resolve_expression(query, *args, **kwargs)NEWLINE for key, value in clone.annotations.items():NEWLINE resolved = value.resolve_expression(query, *args, 
**kwargs)NEWLINE if hasattr(resolved, 'external_aliases'):NEWLINE resolved.external_aliases.update(clone.alias_map)NEWLINE clone.annotations[key] = resolvedNEWLINE # Outer query's aliases are considered external.NEWLINE clone.external_aliases.update(NEWLINE alias for alias, table in query.alias_map.items()NEWLINE if (NEWLINE isinstance(table, Join) and table.join_field.related_model._meta.db_table != aliasNEWLINE ) or (NEWLINE isinstance(table, BaseTable) and table.table_name != table.table_aliasNEWLINE )NEWLINE )NEWLINE return cloneNEWLINENEWLINE def as_sql(self, compiler, connection):NEWLINE sql, params = self.get_compiler(connection=connection).as_sql()NEWLINE if self.subquery:NEWLINE sql = '(%s)' % sqlNEWLINE return sql, paramsNEWLINENEWLINE def resolve_lookup_value(self, value, can_reuse, allow_joins):NEWLINE if hasattr(value, 'resolve_expression'):NEWLINE value = value.resolve_expression(NEWLINE self, reuse=can_reuse, allow_joins=allow_joins,NEWLINE )NEWLINE elif isinstance(value, (list, tuple)):NEWLINE # The items of the iterable may be expressions and therefore needNEWLINE # to be resolved independently.NEWLINE return type(value)(NEWLINE self.resolve_lookup_value(sub_value, can_reuse, allow_joins)NEWLINE for sub_value in valueNEWLINE )NEWLINE return valueNEWLINENEWLINE def solve_lookup_type(self, lookup):NEWLINE """NEWLINE Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').NEWLINE """NEWLINE lookup_splitted = lookup.split(LOOKUP_SEP)NEWLINE if self.annotations:NEWLINE expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)NEWLINE if expression:NEWLINE return expression_lookups, (), expressionNEWLINE _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())NEWLINE field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]NEWLINE if len(lookup_parts) > 1 and not field_parts:NEWLINE raise FieldError(NEWLINE 'Invalid lookup "%s" for model %s".' %NEWLINE (lookup, self.get_meta().model.__name__)NEWLINE )NEWLINE return lookup_parts, field_parts, FalseNEWLINENEWLINE def check_query_object_type(self, value, opts, field):NEWLINE """NEWLINE Check whether the object passed while querying is of the correct type.NEWLINE If not, raise a ValueError specifying the wrong object.NEWLINE """NEWLINE if hasattr(value, '_meta'):NEWLINE if not check_rel_lookup_compatibility(value._meta.model, opts, field):NEWLINE raise ValueError(NEWLINE 'Cannot query "%s": Must be "%s" instance.' %NEWLINE (value, opts.object_name))NEWLINENEWLINE def check_related_objects(self, field, value, opts):NEWLINE """Check the type of object passed to query relations."""NEWLINE if field.is_relation:NEWLINE # Check that the field and the queryset use the same model in aNEWLINE # query like .filter(author=Author.objects.all()). For example, theNEWLINE # opts would be Author's (from the author field) and value.modelNEWLINE # would be Author.objects.all() queryset's .model (Author also).NEWLINE # The field is the related field on the lhs side.NEWLINE if (isinstance(value, Query) and not value.has_select_fields andNEWLINE not check_rel_lookup_compatibility(value.model, opts, field)):NEWLINE raise ValueError(NEWLINE 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' 
%NEWLINE (value.model._meta.object_name, opts.object_name)NEWLINE )NEWLINE elif hasattr(value, '_meta'):NEWLINE self.check_query_object_type(value, opts, field)NEWLINE elif hasattr(value, '__iter__'):NEWLINE for v in value:NEWLINE self.check_query_object_type(v, opts, field)NEWLINENEWLINE def check_filterable(self, expression):NEWLINE """Raise an error if expression cannot be used in a WHERE clause."""NEWLINE if not getattr(expression, 'filterable', True):NEWLINE raise NotSupportedError(NEWLINE expression.__class__.__name__ + ' is disallowed in the filter 'NEWLINE 'clause.'NEWLINE )NEWLINE if hasattr(expression, 'get_source_expressions'):NEWLINE for expr in expression.get_source_expressions():NEWLINE self.check_filterable(expr)NEWLINENEWLINE def build_lookup(self, lookups, lhs, rhs):NEWLINE """NEWLINE Try to extract transforms and lookup from given lhs.NEWLINENEWLINE The lhs value is something that works like SQLExpression.NEWLINE The rhs value is what the lookup is going to compare against.NEWLINE The lookups is a list of names to extract using get_lookup()NEWLINE and get_transform().NEWLINE """NEWLINE # __exact is the default lookup if one isn't given.NEWLINE *transforms, lookup_name = lookups or ['exact']NEWLINE for name in transforms:NEWLINE lhs = self.try_transform(lhs, name)NEWLINE # First try get_lookup() so that the lookup takes precedence if the lhsNEWLINE # supports both transform and lookup for the name.NEWLINE lookup_class = lhs.get_lookup(lookup_name)NEWLINE if not lookup_class:NEWLINE if lhs.field.is_relation:NEWLINE raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))NEWLINE # A lookup wasn't found. Try to interpret the name as a transformNEWLINE # and do an Exact lookup against it.NEWLINE lhs = self.try_transform(lhs, lookup_name)NEWLINE lookup_name = 'exact'NEWLINE lookup_class = lhs.get_lookup(lookup_name)NEWLINE if not lookup_class:NEWLINE returnNEWLINENEWLINE lookup = lookup_class(lhs, rhs)NEWLINE # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject allNEWLINE # uses of None as a query value unless the lookup supports it.NEWLINE if lookup.rhs is None and not lookup.can_use_none_as_rhs:NEWLINE if lookup_name not in ('exact', 'iexact'):NEWLINE raise ValueError("Cannot use None as a query value")NEWLINE return lhs.get_lookup('isnull')(lhs, True)NEWLINENEWLINE # For Oracle '' is equivalent to null. The check must be done at thisNEWLINE # stage because join promotion can't be done in the compiler. UsingNEWLINE # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.NEWLINE # A similar thing is done in is_nullable(), too.NEWLINE if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls andNEWLINE lookup_name == 'exact' and lookup.rhs == ''):NEWLINE return lhs.get_lookup('isnull')(lhs, True)NEWLINENEWLINE return lookupNEWLINENEWLINE def try_transform(self, lhs, name):NEWLINE """NEWLINE Helper method for build_lookup(). Try to fetch and initializeNEWLINE a transform for name parameter from lhs.NEWLINE """NEWLINE transform_class = lhs.get_transform(name)NEWLINE if transform_class:NEWLINE return transform_class(lhs)NEWLINE else:NEWLINE output_field = lhs.output_field.__class__NEWLINE suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups())NEWLINE if suggested_lookups:NEWLINE suggestion = ', perhaps you meant %s?' 
% ' or '.join(suggested_lookups)NEWLINE else:NEWLINE suggestion = '.'NEWLINE raise FieldError(NEWLINE "Unsupported lookup '%s' for %s or join on the field not "NEWLINE "permitted%s" % (name, output_field.__name__, suggestion)NEWLINE )NEWLINENEWLINE def build_filter(self, filter_expr, branch_negated=False, current_negated=False,NEWLINE can_reuse=None, allow_joins=True, split_subq=True,NEWLINE reuse_with_filtered_relation=False, check_filterable=True):NEWLINE """NEWLINE Build a WhereNode for a single filter clause but don't add itNEWLINE to this Query. Query.add_q() will then add this filter to the whereNEWLINE Node.NEWLINENEWLINE The 'branch_negated' tells us if the current branch contains anyNEWLINE negations. This will be used to determine if subqueries are needed.NEWLINENEWLINE The 'current_negated' is used to determine if the current filter isNEWLINE negated or not and this will be used to determine if IS NULL filteringNEWLINE is needed.NEWLINENEWLINE The difference between current_negated and branch_negated is thatNEWLINE branch_negated is set on first negation, but current_negated isNEWLINE flipped for each negation.NEWLINENEWLINE Note that add_filter will not do any negating itself, that is doneNEWLINE upper in the code by add_q().NEWLINENEWLINE The 'can_reuse' is a set of reusable joins for multijoins.NEWLINENEWLINE If 'reuse_with_filtered_relation' is True, then only joins in can_reuseNEWLINE will be reused.NEWLINENEWLINE The method will create a filter clause that can be added to the currentNEWLINE query. However, if the filter isn't added to the query then the callerNEWLINE is responsible for unreffing the joins used.NEWLINE """NEWLINE if isinstance(filter_expr, dict):NEWLINE raise FieldError("Cannot parse keyword query as dict")NEWLINE if isinstance(filter_expr, Q):NEWLINE return self._add_q(NEWLINE filter_expr,NEWLINE branch_negated=branch_negated,NEWLINE current_negated=current_negated,NEWLINE used_aliases=can_reuse,NEWLINE allow_joins=allow_joins,NEWLINE split_subq=split_subq,NEWLINE check_filterable=check_filterable,NEWLINE )NEWLINE if hasattr(filter_expr, 'resolve_expression'):NEWLINE if not getattr(filter_expr, 'conditional', False):NEWLINE raise TypeError('Cannot filter against a non-conditional expression.')NEWLINE condition = self.build_lookup(NEWLINE ['exact'], filter_expr.resolve_expression(self, allow_joins=allow_joins), TrueNEWLINE )NEWLINE clause = self.where_class()NEWLINE clause.add(condition, AND)NEWLINE return clause, []NEWLINE arg, value = filter_exprNEWLINE if not arg:NEWLINE raise FieldError("Cannot parse keyword query %r" % arg)NEWLINE lookups, parts, reffed_expression = self.solve_lookup_type(arg)NEWLINENEWLINE if check_filterable:NEWLINE self.check_filterable(reffed_expression)NEWLINENEWLINE if not allow_joins and len(parts) > 1:NEWLINE raise FieldError("Joined field references are not permitted in this query")NEWLINENEWLINE pre_joins = self.alias_refcount.copy()NEWLINE value = self.resolve_lookup_value(value, can_reuse, allow_joins)NEWLINE used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)}NEWLINENEWLINE if check_filterable:NEWLINE self.check_filterable(value)NEWLINENEWLINE clause = self.where_class()NEWLINE if reffed_expression:NEWLINE condition = self.build_lookup(lookups, reffed_expression, value)NEWLINE clause.add(condition, AND)NEWLINE return clause, []NEWLINENEWLINE opts = self.get_meta()NEWLINE alias = self.get_initial_alias()NEWLINE allow_many = not branch_negated or not split_subqNEWLINENEWLINE try:NEWLINE 
join_info = self.setup_joins(NEWLINE parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many,NEWLINE reuse_with_filtered_relation=reuse_with_filtered_relation,NEWLINE )NEWLINENEWLINE # Prevent iterator from being consumed by check_related_objects()NEWLINE if isinstance(value, Iterator):NEWLINE value = list(value)NEWLINE self.check_related_objects(join_info.final_field, value, join_info.opts)NEWLINENEWLINE # split_exclude() needs to know which joins were generated for theNEWLINE # lookup partsNEWLINE self._lookup_joins = join_info.joinsNEWLINE except MultiJoin as e:NEWLINE return self.split_exclude(filter_expr, can_reuse, e.names_with_path)NEWLINENEWLINE # Update used_joins before trimming since they are reused to determineNEWLINE # which joins could be later promoted to INNER.NEWLINE used_joins.update(join_info.joins)NEWLINE targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)NEWLINE if can_reuse is not None:NEWLINE can_reuse.update(join_list)NEWLINENEWLINE if join_info.final_field.is_relation:NEWLINE # No support for transforms for relational fieldsNEWLINE num_lookups = len(lookups)NEWLINE if num_lookups > 1:NEWLINE raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))NEWLINE if len(targets) == 1:NEWLINE col = self._get_col(targets[0], join_info.final_field, alias)NEWLINE else:NEWLINE col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)NEWLINE else:NEWLINE col = self._get_col(targets[0], join_info.final_field, alias)NEWLINENEWLINE condition = self.build_lookup(lookups, col, value)NEWLINE lookup_type = condition.lookup_nameNEWLINE clause.add(condition, AND)NEWLINENEWLINE require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negatedNEWLINE if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None:NEWLINE require_outer = TrueNEWLINE if (lookup_type != 'isnull' and (NEWLINE self.is_nullable(targets[0]) orNEWLINE self.alias_map[join_list[-1]].join_type == LOUTER)):NEWLINE # The condition added here will be SQL like this:NEWLINE # NOT (col IS NOT NULL), where the first NOT is added inNEWLINE # upper layers of code. The reason for addition is that if colNEWLINE # is null, then col != someval will result in SQL "unknown"NEWLINE # which isn't the same as in Python. The Python None handlingNEWLINE # is wanted, and it can be gotten byNEWLINE # (col IS NULL OR col != someval)NEWLINE # <=>NEWLINE # NOT (col IS NOT NULL AND col = someval).NEWLINE lookup_class = targets[0].get_lookup('isnull')NEWLINE col = self._get_col(targets[0], join_info.targets[0], alias)NEWLINE clause.add(lookup_class(col, False), AND)NEWLINE return clause, used_joins if not require_outer else ()NEWLINENEWLINE def add_filter(self, filter_clause):NEWLINE self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))NEWLINENEWLINE def add_q(self, q_object):NEWLINE """NEWLINE A preprocessor for the internal _add_q(). Responsible for doing finalNEWLINE join promotion.NEWLINE """NEWLINE # For join promotion this case is doing an AND for the added q_objectNEWLINE # and existing conditions. So, any existing inner join forces the joinNEWLINE # type to remain inner. 
Existing outer joins can however be demoted.NEWLINE # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - ifNEWLINE # rel_a doesn't produce any rows, then the whole condition must fail.NEWLINE # So, demotion is OK.NEWLINE existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}NEWLINE clause, _ = self._add_q(q_object, self.used_aliases)NEWLINE if clause:NEWLINE self.where.add(clause, AND)NEWLINE self.demote_joins(existing_inner)NEWLINENEWLINE def build_where(self, filter_expr):NEWLINE return self.build_filter(filter_expr, allow_joins=False)[0]NEWLINENEWLINE def _add_q(self, q_object, used_aliases, branch_negated=False,NEWLINE current_negated=False, allow_joins=True, split_subq=True,NEWLINE check_filterable=True):NEWLINE """Add a Q-object to the current filter."""NEWLINE connector = q_object.connectorNEWLINE current_negated = current_negated ^ q_object.negatedNEWLINE branch_negated = branch_negated or q_object.negatedNEWLINE target_clause = self.where_class(connector=connector,NEWLINE negated=q_object.negated)NEWLINE joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)NEWLINE for child in q_object.children:NEWLINE child_clause, needed_inner = self.build_filter(NEWLINE child, can_reuse=used_aliases, branch_negated=branch_negated,NEWLINE current_negated=current_negated, allow_joins=allow_joins,NEWLINE split_subq=split_subq, check_filterable=check_filterable,NEWLINE )NEWLINE joinpromoter.add_votes(needed_inner)NEWLINE if child_clause:NEWLINE target_clause.add(child_clause, connector)NEWLINE needed_inner = joinpromoter.update_join_types(self)NEWLINE return target_clause, needed_innerNEWLINENEWLINE def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False):NEWLINE """Add a FilteredRelation object to the current filter."""NEWLINE connector = q_object.connectorNEWLINE current_negated ^= q_object.negatedNEWLINE branch_negated = branch_negated or q_object.negatedNEWLINE target_clause = self.where_class(connector=connector, negated=q_object.negated)NEWLINE for child in q_object.children:NEWLINE if isinstance(child, Node):NEWLINE child_clause = self.build_filtered_relation_q(NEWLINE child, reuse=reuse, branch_negated=branch_negated,NEWLINE current_negated=current_negated,NEWLINE )NEWLINE else:NEWLINE child_clause, _ = self.build_filter(NEWLINE child, can_reuse=reuse, branch_negated=branch_negated,NEWLINE current_negated=current_negated,NEWLINE allow_joins=True, split_subq=False,NEWLINE reuse_with_filtered_relation=True,NEWLINE )NEWLINE target_clause.add(child_clause, connector)NEWLINE return target_clauseNEWLINENEWLINE def add_filtered_relation(self, filtered_relation, alias):NEWLINE filtered_relation.alias = aliasNEWLINE lookups = dict(get_children_from_q(filtered_relation.condition))NEWLINE for lookup in chain((filtered_relation.relation_name,), lookups):NEWLINE lookup_parts, field_parts, _ = self.solve_lookup_type(lookup)NEWLINE shift = 2 if not lookup_parts else 1NEWLINE if len(field_parts) > (shift + len(lookup_parts)):NEWLINE raise ValueError(NEWLINE "FilteredRelation's condition doesn't support nested "NEWLINE "relations (got %r)." % lookupNEWLINE )NEWLINE self._filtered_relations[filtered_relation.alias] = filtered_relationNEWLINENEWLINE def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):NEWLINE """NEWLINE Walk the list of names and turns them into PathInfo tuples. 
A singleNEWLINE name in 'names' can generate multiple PathInfos (m2m, for example).NEWLINENEWLINE 'names' is the path of names to travel, 'opts' is the model Options weNEWLINE start the name resolving from, 'allow_many' is as for setup_joins().NEWLINE If fail_on_missing is set to True, then a name that can't be resolvedNEWLINE will generate a FieldError.NEWLINENEWLINE Return a list of PathInfo tuples. In addition return the final fieldNEWLINE (the last used join field) and target (which is a field guaranteed toNEWLINE contain the same value as the final field). Finally, return those namesNEWLINE that weren't found (which are likely transforms and the final lookup).NEWLINE """NEWLINE path, names_with_path = [], []NEWLINE for pos, name in enumerate(names):NEWLINE cur_names_with_path = (name, [])NEWLINE if name == 'pk':NEWLINE name = opts.pk.nameNEWLINENEWLINE field = NoneNEWLINE filtered_relation = NoneNEWLINE try:NEWLINE field = opts.get_field(name)NEWLINE except FieldDoesNotExist:NEWLINE if name in self.annotation_select:NEWLINE field = self.annotation_select[name].output_fieldNEWLINE elif name in self._filtered_relations and pos == 0:NEWLINE filtered_relation = self._filtered_relations[name]NEWLINE field = opts.get_field(filtered_relation.relation_name)NEWLINE if field is not None:NEWLINE # Fields that contain one-to-many relations with a genericNEWLINE # model (like a GenericForeignKey) cannot generate reverseNEWLINE # relations and therefore cannot be used for reverse querying.NEWLINE if field.is_relation and not field.related_model:NEWLINE raise FieldError(NEWLINE "Field %r does not generate an automatic reverse "NEWLINE "relation and therefore cannot be used for reverse "NEWLINE "querying. If it is a GenericForeignKey, consider "NEWLINE "adding a GenericRelation." % nameNEWLINE )NEWLINE try:NEWLINE model = field.model._meta.concrete_modelNEWLINE except AttributeError:NEWLINE # QuerySet.annotate() may introduce fields that aren'tNEWLINE # attached to a model.NEWLINE model = NoneNEWLINE else:NEWLINE # We didn't find the current field, so move position backNEWLINE # one step.NEWLINE pos -= 1NEWLINE if pos == -1 or fail_on_missing:NEWLINE available = sorted([NEWLINE *get_field_names_from_opts(opts),NEWLINE *self.annotation_select,NEWLINE *self._filtered_relations,NEWLINE ])NEWLINE raise FieldError("Cannot resolve keyword '%s' into field. 
"NEWLINE "Choices are: %s" % (name, ", ".join(available)))NEWLINE breakNEWLINE # Check if we need any joins for concrete inheritance cases (theNEWLINE # field lives in parent, but we are currently in one of itsNEWLINE # children)NEWLINE if model is not opts.model:NEWLINE path_to_parent = opts.get_path_to_parent(model)NEWLINE if path_to_parent:NEWLINE path.extend(path_to_parent)NEWLINE cur_names_with_path[1].extend(path_to_parent)NEWLINE opts = path_to_parent[-1].to_optsNEWLINE if hasattr(field, 'get_path_info'):NEWLINE pathinfos = field.get_path_info(filtered_relation)NEWLINE if not allow_many:NEWLINE for inner_pos, p in enumerate(pathinfos):NEWLINE if p.m2m:NEWLINE cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])NEWLINE names_with_path.append(cur_names_with_path)NEWLINE raise MultiJoin(pos + 1, names_with_path)NEWLINE last = pathinfos[-1]NEWLINE path.extend(pathinfos)NEWLINE final_field = last.join_fieldNEWLINE opts = last.to_optsNEWLINE targets = last.target_fieldsNEWLINE cur_names_with_path[1].extend(pathinfos)NEWLINE names_with_path.append(cur_names_with_path)NEWLINE else:NEWLINE # Local non-relational field.NEWLINE final_field = fieldNEWLINE targets = (field,)NEWLINE if fail_on_missing and pos + 1 != len(names):NEWLINE raise FieldError(NEWLINE "Cannot resolve keyword %r into field. Join on '%s'"NEWLINE " not permitted." % (names[pos + 1], name))NEWLINE breakNEWLINE return path, final_field, targets, names[pos + 1:]NEWLINENEWLINE def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,NEWLINE reuse_with_filtered_relation=False):NEWLINE """NEWLINE Compute the necessary table joins for the passage through the fieldsNEWLINE given in 'names'. 'opts' is the Options class for the current modelNEWLINE (which gives the table we are starting from), 'alias' is the alias forNEWLINE the table to start the joining from.NEWLINENEWLINE The 'can_reuse' defines the reverse foreign key joins we can reuse. ItNEWLINE can be None in which case all joins are reusable or a set of aliasesNEWLINE that can be reused. Note that non-reverse foreign keys are alwaysNEWLINE reusable when using setup_joins().NEWLINENEWLINE The 'reuse_with_filtered_relation' can be used to force 'can_reuse'NEWLINE parameter and force the relation on the given connections.NEWLINENEWLINE If 'allow_many' is False, then any reverse foreign key seen willNEWLINE generate a MultiJoin exception.NEWLINENEWLINE Return the final field involved in the joins, the target field (usedNEWLINE for any 'where' constraint), the final 'opts' value, the joins, theNEWLINE field path traveled to generate the joins, and a transform functionNEWLINE that takes a field and alias and is equivalent to `field.get_col(alias)`NEWLINE in the simple case but wraps field transforms if they were included inNEWLINE names.NEWLINENEWLINE The target field is the field containing the concrete value. FinalNEWLINE field can be something different, for example foreign key pointing toNEWLINE that value. 
Final field is needed for example in some valueNEWLINE conversions (convert 'obj' in fk__id=obj to pk val using the foreignNEWLINE key field for example).NEWLINE """NEWLINE joins = [alias]NEWLINE # The transform can't be applied yet, as joins must be trimmed later.NEWLINE # To avoid making every caller of this method look up transformsNEWLINE # directly, compute transforms here and create a partial that convertsNEWLINE # fields to the appropriate wrapped version.NEWLINENEWLINE def final_transformer(field, alias):NEWLINE return field.get_col(alias)NEWLINENEWLINE # Try resolving all the names as fields first. If there's an error,NEWLINE # treat trailing names as lookups until a field can be resolved.NEWLINE last_field_exception = NoneNEWLINE for pivot in range(len(names), 0, -1):NEWLINE try:NEWLINE path, final_field, targets, rest = self.names_to_path(NEWLINE names[:pivot], opts, allow_many, fail_on_missing=True,NEWLINE )NEWLINE except FieldError as exc:NEWLINE if pivot == 1:NEWLINE # The first item cannot be a lookup, so it's safeNEWLINE # to raise the field error here.NEWLINE raiseNEWLINE else:NEWLINE last_field_exception = excNEWLINE else:NEWLINE # The transforms are the remaining items that couldn't beNEWLINE # resolved into fields.NEWLINE transforms = names[pivot:]NEWLINE breakNEWLINE for name in transforms:NEWLINE def transform(field, alias, *, name, previous):NEWLINE try:NEWLINE wrapped = previous(field, alias)NEWLINE return self.try_transform(wrapped, name)NEWLINE except FieldError:NEWLINE # FieldError is raised if the transform doesn't exist.NEWLINE if isinstance(final_field, Field) and last_field_exception:NEWLINE raise last_field_exceptionNEWLINE else:NEWLINE raiseNEWLINE final_transformer = functools.partial(transform, name=name, previous=final_transformer)NEWLINE # Then, add the path to the query's joins. Note that we can't trimNEWLINE # joins at this stage - we will need the information about join typeNEWLINE # of the trimmed joins.NEWLINE for join in path:NEWLINE if join.filtered_relation:NEWLINE filtered_relation = join.filtered_relation.clone()NEWLINE table_alias = filtered_relation.aliasNEWLINE else:NEWLINE filtered_relation = NoneNEWLINE table_alias = NoneNEWLINE opts = join.to_optsNEWLINE if join.direct:NEWLINE nullable = self.is_nullable(join.join_field)NEWLINE else:NEWLINE nullable = TrueNEWLINE connection = Join(NEWLINE opts.db_table, alias, table_alias, INNER, join.join_field,NEWLINE nullable, filtered_relation=filtered_relation,NEWLINE )NEWLINE reuse = can_reuse if join.m2m or reuse_with_filtered_relation else NoneNEWLINE alias = self.join(NEWLINE connection, reuse=reuse,NEWLINE reuse_with_filtered_relation=reuse_with_filtered_relation,NEWLINE )NEWLINE joins.append(alias)NEWLINE if filtered_relation:NEWLINE filtered_relation.path = joins[:]NEWLINE return JoinInfo(final_field, targets, opts, joins, path, final_transformer)NEWLINENEWLINE def trim_joins(self, targets, joins, path):NEWLINE """NEWLINE The 'target' parameter is the final field being joined to, 'joins'NEWLINE is the full list of join aliases. The 'path' contain the PathInfosNEWLINE used to create the joins.NEWLINENEWLINE Return the final target field and table alias and the new activeNEWLINE joins.NEWLINENEWLINE Always trim any direct join if the target column is already in theNEWLINE previous table. 
Can't trim reverse joins as it's unknown if there'sNEWLINE anything on the other side of the join.NEWLINE """NEWLINE joins = joins[:]NEWLINE for pos, info in enumerate(reversed(path)):NEWLINE if len(joins) == 1 or not info.direct:NEWLINE breakNEWLINE if info.filtered_relation:NEWLINE breakNEWLINE join_targets = {t.column for t in info.join_field.foreign_related_fields}NEWLINE cur_targets = {t.column for t in targets}NEWLINE if not cur_targets.issubset(join_targets):NEWLINE breakNEWLINE targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}NEWLINE targets = tuple(targets_dict[t.column] for t in targets)NEWLINE self.unref_alias(joins.pop())NEWLINE return targets, joins[-1], joinsNEWLINENEWLINE @classmethodNEWLINE def _gen_col_aliases(cls, exprs):NEWLINE for expr in exprs:NEWLINE if isinstance(expr, Col):NEWLINE yield expr.aliasNEWLINE else:NEWLINE yield from cls._gen_col_aliases(expr.get_source_expressions())NEWLINENEWLINE def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):NEWLINE if not allow_joins and LOOKUP_SEP in name:NEWLINE raise FieldError("Joined field references are not permitted in this query")NEWLINE annotation = self.annotations.get(name)NEWLINE if annotation is not None:NEWLINE if not allow_joins:NEWLINE for alias in self._gen_col_aliases([annotation]):NEWLINE if isinstance(self.alias_map[alias], Join):NEWLINE raise FieldError(NEWLINE 'Joined field references are not permitted in 'NEWLINE 'this query'NEWLINE )NEWLINE if summarize:NEWLINE # Summarize currently means we are doing an aggregate() queryNEWLINE # which is executed as a wrapped subquery if any of theNEWLINE # aggregate() elements reference an existing annotation. InNEWLINE # that case we need to return a Ref to the subquery's annotation.NEWLINE return Ref(name, self.annotation_select[name])NEWLINE else:NEWLINE return annotationNEWLINE else:NEWLINE field_list = name.split(LOOKUP_SEP)NEWLINE join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse)NEWLINE targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)NEWLINE if not allow_joins and len(join_list) > 1:NEWLINE raise FieldError('Joined field references are not permitted in this query')NEWLINE if len(targets) > 1:NEWLINE raise FieldError("Referencing multicolumn fields with F() objects "NEWLINE "isn't supported")NEWLINE # Verify that the last lookup in name is a field or a transform:NEWLINE # transform_function() raises FieldError if not.NEWLINE join_info.transform_function(targets[0], final_alias)NEWLINE if reuse is not None:NEWLINE reuse.update(join_list)NEWLINE return self._get_col(targets[0], join_info.targets[0], join_list[-1])NEWLINENEWLINE def split_exclude(self, filter_expr, can_reuse, names_with_path):NEWLINE """NEWLINE When doing an exclude against any kind of N-to-many relation, we needNEWLINE to use a subquery. 
This method constructs the nested query, given theNEWLINE original exclude filter (filter_expr) and the portion up to the firstNEWLINE N-to-many relation field.NEWLINENEWLINE For example, if the origin filter is ~Q(child__name='foo'), filter_exprNEWLINE is ('child__name', 'foo') and can_reuse is a set of joins usable forNEWLINE filters in the original query.NEWLINENEWLINE We will turn this into equivalent of:NEWLINE WHERE NOT (pk IN (SELECT parent_id FROM thetableNEWLINE WHERE name = 'foo' AND parent_id IS NOT NULL))NEWLINENEWLINE It might be worth it to consider using WHERE NOT EXISTS as that hasNEWLINE saner null handling, and is easier for the backend's optimizer toNEWLINE handle.NEWLINE """NEWLINE filter_lhs, filter_rhs = filter_exprNEWLINE if isinstance(filter_rhs, OuterRef):NEWLINE filter_expr = (filter_lhs, OuterRef(filter_rhs))NEWLINE elif isinstance(filter_rhs, F):NEWLINE filter_expr = (filter_lhs, OuterRef(filter_rhs.name))NEWLINE # Generate the inner query.NEWLINE query = Query(self.model)NEWLINE query._filtered_relations = self._filtered_relationsNEWLINE query.add_filter(filter_expr)NEWLINE query.clear_ordering(True)NEWLINE # Try to have as simple as possible subquery -> trim leading joins fromNEWLINE # the subquery.NEWLINE trimmed_prefix, contains_louter = query.trim_start(names_with_path)NEWLINENEWLINE # Add extra check to make sure the selected field will not be nullNEWLINE # since we are adding an IN <subquery> clause. This prevents theNEWLINE # database from tripping over IN (...,NULL,...) selects and returningNEWLINE # nothingNEWLINE col = query.select[0]NEWLINE select_field = col.targetNEWLINE alias = col.aliasNEWLINE if self.is_nullable(select_field):NEWLINE lookup_class = select_field.get_lookup('isnull')NEWLINE lookup = lookup_class(select_field.get_col(alias), False)NEWLINE query.where.add(lookup, AND)NEWLINE if alias in can_reuse:NEWLINE pk = select_field.model._meta.pkNEWLINE # Need to add a restriction so that outer query's filters are in effect forNEWLINE # the subquery, too.NEWLINE query.bump_prefix(self)NEWLINE lookup_class = select_field.get_lookup('exact')NEWLINE # Note that the query.select[0].alias is different from aliasNEWLINE # due to bump_prefix above.NEWLINE lookup = lookup_class(pk.get_col(query.select[0].alias),NEWLINE pk.get_col(alias))NEWLINE query.where.add(lookup, AND)NEWLINE query.external_aliases.add(alias)NEWLINENEWLINE condition, needed_inner = self.build_filter(NEWLINE ('%s__in' % trimmed_prefix, query),NEWLINE current_negated=True, branch_negated=True, can_reuse=can_reuse)NEWLINE if contains_louter:NEWLINE or_null_condition, _ = self.build_filter(NEWLINE ('%s__isnull' % trimmed_prefix, True),NEWLINE current_negated=True, branch_negated=True, can_reuse=can_reuse)NEWLINE condition.add(or_null_condition, OR)NEWLINE # Note that the end result will be:NEWLINE # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.NEWLINE # This might look crazy but due to how IN works, this seems to beNEWLINE # correct. If the IS NOT NULL check is removed then outercol NOTNEWLINE # IN will return UNKNOWN. If the IS NULL check is removed, then ifNEWLINE # outercol IS NULL we will not match the row.NEWLINE return condition, needed_innerNEWLINENEWLINE def set_empty(self):NEWLINE self.where.add(NothingNode(), AND)NEWLINENEWLINE def is_empty(self):NEWLINE return any(isinstance(c, NothingNode) for c in self.where.children)NEWLINENEWLINE def set_limits(self, low=None, high=None):NEWLINE """NEWLINE Adjust the limits on the rows retrieved. 
Use low/high to set these,NEWLINE as it makes it more Pythonic to read and write. When the SQL query isNEWLINE created, convert them to the appropriate offset and limit values.NEWLINENEWLINE Apply any limits passed in here to the existing constraints. Add lowNEWLINE to the current low value and clamp both to any existing high value.NEWLINE """NEWLINE if high is not None:NEWLINE if self.high_mark is not None:NEWLINE self.high_mark = min(self.high_mark, self.low_mark + high)NEWLINE else:NEWLINE self.high_mark = self.low_mark + highNEWLINE if low is not None:NEWLINE if self.high_mark is not None:NEWLINE self.low_mark = min(self.high_mark, self.low_mark + low)NEWLINE else:NEWLINE self.low_mark = self.low_mark + lowNEWLINENEWLINE if self.low_mark == self.high_mark:NEWLINE self.set_empty()NEWLINENEWLINE def clear_limits(self):NEWLINE """Clear any existing limits."""NEWLINE self.low_mark, self.high_mark = 0, NoneNEWLINENEWLINE @propertyNEWLINE def is_sliced(self):NEWLINE return self.low_mark != 0 or self.high_mark is not NoneNEWLINENEWLINE def has_limit_one(self):NEWLINE return self.high_mark is not None and (self.high_mark - self.low_mark) == 1NEWLINENEWLINE def can_filter(self):NEWLINE """NEWLINE Return True if adding filters to this instance is still possible.NEWLINENEWLINE Typically, this means no limits or offsets have been put on the results.NEWLINE """NEWLINE return not self.is_slicedNEWLINENEWLINE def clear_select_clause(self):NEWLINE """Remove all fields from SELECT clause."""NEWLINE self.select = ()NEWLINE self.default_cols = FalseNEWLINE self.select_related = FalseNEWLINE self.set_extra_mask(())NEWLINE self.set_annotation_mask(())NEWLINENEWLINE def clear_select_fields(self):NEWLINE """NEWLINE Clear the list of fields to select (but not extra_select columns).NEWLINE Some queryset types completely replace any existing list of selectNEWLINE columns.NEWLINE """NEWLINE self.select = ()NEWLINE self.values_select = ()NEWLINENEWLINE def add_select_col(self, col):NEWLINE self.select += col,NEWLINE self.values_select += col.output_field.name,NEWLINENEWLINE def set_select(self, cols):NEWLINE self.default_cols = FalseNEWLINE self.select = tuple(cols)NEWLINENEWLINE def add_distinct_fields(self, *field_names):NEWLINE """NEWLINE Add and resolve the given fields to the query's "distinct on" clause.NEWLINE """NEWLINE self.distinct_fields = field_namesNEWLINE self.distinct = TrueNEWLINENEWLINE def add_fields(self, field_names, allow_m2m=True):NEWLINE """NEWLINE Add the given (model) fields to the select set. 
Add the field names inNEWLINE the order specified.NEWLINE """NEWLINE alias = self.get_initial_alias()NEWLINE opts = self.get_meta()NEWLINENEWLINE try:NEWLINE cols = []NEWLINE for name in field_names:NEWLINE # Join promotion note - we must not remove any rows here, soNEWLINE # if there is no existing joins, use outer join.NEWLINE join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)NEWLINE targets, final_alias, joins = self.trim_joins(NEWLINE join_info.targets,NEWLINE join_info.joins,NEWLINE join_info.path,NEWLINE )NEWLINE for target in targets:NEWLINE cols.append(join_info.transform_function(target, final_alias))NEWLINE if cols:NEWLINE self.set_select(cols)NEWLINE except MultiJoin:NEWLINE raise FieldError("Invalid field name: '%s'" % name)NEWLINE except FieldError:NEWLINE if LOOKUP_SEP in name:NEWLINE # For lookups spanning over relationships, show the errorNEWLINE # from the model on which the lookup failed.NEWLINE raiseNEWLINE else:NEWLINE names = sorted([NEWLINE *get_field_names_from_opts(opts), *self.extra,NEWLINE *self.annotation_select, *self._filtered_relationsNEWLINE ])NEWLINE raise FieldError("Cannot resolve keyword %r into field. "NEWLINE "Choices are: %s" % (name, ", ".join(names)))NEWLINENEWLINE def add_ordering(self, *ordering):NEWLINE """NEWLINE Add items from the 'ordering' sequence to the query's "order by"NEWLINE clause. These items are either field names (not column names) --NEWLINE possibly with a direction prefix ('-' or '?') -- or OrderByNEWLINE expressions.NEWLINENEWLINE If 'ordering' is empty, clear all ordering from the query.NEWLINE """NEWLINE errors = []NEWLINE for item in ordering:NEWLINE if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):NEWLINE errors.append(item)NEWLINE if getattr(item, 'contains_aggregate', False):NEWLINE raise FieldError(NEWLINE 'Using an aggregate in order_by() without also including 'NEWLINE 'it in annotate() is not allowed: %s' % itemNEWLINE )NEWLINE if errors:NEWLINE raise FieldError('Invalid order_by arguments: %s' % errors)NEWLINE if ordering:NEWLINE self.order_by += orderingNEWLINE else:NEWLINE self.default_ordering = FalseNEWLINENEWLINE def clear_ordering(self, force_empty):NEWLINE """NEWLINE Remove any ordering settings. If 'force_empty' is True, there will beNEWLINE no ordering in the resulting query (not even the model's default).NEWLINE """NEWLINE self.order_by = ()NEWLINE self.extra_order_by = ()NEWLINE if force_empty:NEWLINE self.default_ordering = FalseNEWLINENEWLINE def set_group_by(self):NEWLINE """NEWLINE Expand the GROUP BY clause required by the query.NEWLINENEWLINE This will usually be the set of all non-aggregate fields in theNEWLINE return data. 
If the database backend supports grouping by theNEWLINE primary key, and the query would be equivalent, the optimizationNEWLINE will be made automatically.NEWLINE """NEWLINE group_by = list(self.select)NEWLINE if self.annotation_select:NEWLINE for alias, annotation in self.annotation_select.items():NEWLINE signature = inspect.signature(annotation.get_group_by_cols)NEWLINE if 'alias' not in signature.parameters:NEWLINE annotation_class = annotation.__class__NEWLINE msg = (NEWLINE '`alias=None` must be added to the signature of 'NEWLINE '%s.%s.get_group_by_cols().'NEWLINE ) % (annotation_class.__module__, annotation_class.__qualname__)NEWLINE warnings.warn(msg, category=RemovedInDjango40Warning)NEWLINE group_by_cols = annotation.get_group_by_cols()NEWLINE else:NEWLINE group_by_cols = annotation.get_group_by_cols(alias=alias)NEWLINE group_by.extend(group_by_cols)NEWLINE self.group_by = tuple(group_by)NEWLINENEWLINE def add_select_related(self, fields):NEWLINE """NEWLINE Set up the select_related data structure so that we only selectNEWLINE certain related models (as opposed to all models, whenNEWLINE self.select_related=True).NEWLINE """NEWLINE if isinstance(self.select_related, bool):NEWLINE field_dict = {}NEWLINE else:NEWLINE field_dict = self.select_relatedNEWLINE for field in fields:NEWLINE d = field_dictNEWLINE for part in field.split(LOOKUP_SEP):NEWLINE d = d.setdefault(part, {})NEWLINE self.select_related = field_dictNEWLINENEWLINE def add_extra(self, select, select_params, where, params, tables, order_by):NEWLINE """NEWLINE Add data to the various extra_* attributes for user-created additionsNEWLINE to the query.NEWLINE """NEWLINE if select:NEWLINE # We need to pair any placeholder markers in the 'select'NEWLINE # dictionary with their parameters in 'select_params' so thatNEWLINE # subsequent updates to the select dictionary also adjust theNEWLINE # parameters appropriately.NEWLINE select_pairs = {}NEWLINE if select_params:NEWLINE param_iter = iter(select_params)NEWLINE else:NEWLINE param_iter = iter([])NEWLINE for name, entry in select.items():NEWLINE entry = str(entry)NEWLINE entry_params = []NEWLINE pos = entry.find("%s")NEWLINE while pos != -1:NEWLINE if pos == 0 or entry[pos - 1] != '%':NEWLINE entry_params.append(next(param_iter))NEWLINE pos = entry.find("%s", pos + 2)NEWLINE select_pairs[name] = (entry, entry_params)NEWLINE self.extra.update(select_pairs)NEWLINE if where or params:NEWLINE self.where.add(ExtraWhere(where, params), AND)NEWLINE if tables:NEWLINE self.extra_tables += tuple(tables)NEWLINE if order_by:NEWLINE self.extra_order_by = order_byNEWLINENEWLINE def clear_deferred_loading(self):NEWLINE """Remove any fields from the deferred loading set."""NEWLINE self.deferred_loading = (frozenset(), True)NEWLINENEWLINE def add_deferred_loading(self, field_names):NEWLINE """NEWLINE Add the given list of model field names to the set of fields toNEWLINE exclude from loading from the database when automatic column selectionNEWLINE is done. Add the new field names to any existing field names thatNEWLINE are deferred (or removed from any existing field names that are markedNEWLINE as the only ones for immediate loading).NEWLINE """NEWLINE # Fields on related models are stored in the literal double-underscoreNEWLINE # format, so that we can use a set datastructure. 
We do the foo__barNEWLINE # splitting and handling when computing the SQL column names (as part ofNEWLINE # get_columns()).NEWLINE existing, defer = self.deferred_loadingNEWLINE if defer:NEWLINE # Add to existing deferred names.NEWLINE self.deferred_loading = existing.union(field_names), TrueNEWLINE else:NEWLINE # Remove names from the set of any existing "immediate load" names.NEWLINE self.deferred_loading = existing.difference(field_names), FalseNEWLINENEWLINE def add_immediate_loading(self, field_names):NEWLINE """NEWLINE Add the given list of model field names to the set of fields toNEWLINE retrieve when the SQL is executed ("immediate loading" fields). TheNEWLINE field names replace any existing immediate loading field names. IfNEWLINE there are field names already specified for deferred loading, removeNEWLINE those names from the new field_names before storing the new namesNEWLINE for immediate loading. (That is, immediate loading overrides anyNEWLINE existing immediate values, but respects existing deferrals.)NEWLINE """NEWLINE existing, defer = self.deferred_loadingNEWLINE field_names = set(field_names)NEWLINE if 'pk' in field_names:NEWLINE field_names.remove('pk')NEWLINE field_names.add(self.get_meta().pk.name)NEWLINENEWLINE if defer:NEWLINE # Remove any existing deferred names from the current set beforeNEWLINE # setting the new names.NEWLINE self.deferred_loading = field_names.difference(existing), FalseNEWLINE else:NEWLINE # Replace any existing "immediate load" field names.NEWLINE self.deferred_loading = frozenset(field_names), FalseNEWLINENEWLINE def get_loaded_field_names(self):NEWLINE """NEWLINE If any fields are marked to be deferred, return a dictionary mappingNEWLINE models to a set of names in those fields that will be loaded. If aNEWLINE model is not in the returned dictionary, none of its fields areNEWLINE deferred.NEWLINENEWLINE If no fields are marked for deferral, return an empty dictionary.NEWLINE """NEWLINE # We cache this because we call this function multiple timesNEWLINE # (compiler.fill_related_selections, query.iterator)NEWLINE try:NEWLINE return self._loaded_field_names_cacheNEWLINE except AttributeError:NEWLINE collection = {}NEWLINE self.deferred_to_data(collection, self.get_loaded_field_names_cb)NEWLINE self._loaded_field_names_cache = collectionNEWLINE return collectionNEWLINENEWLINE def get_loaded_field_names_cb(self, target, model, fields):NEWLINE """Callback used by get_deferred_field_names()."""NEWLINE target[model] = {f.attname for f in fields}NEWLINENEWLINE def set_annotation_mask(self, names):NEWLINE """Set the mask of annotations that will be returned by the SELECT."""NEWLINE if names is None:NEWLINE self.annotation_select_mask = NoneNEWLINE else:NEWLINE self.annotation_select_mask = set(names)NEWLINE self._annotation_select_cache = NoneNEWLINENEWLINE def append_annotation_mask(self, names):NEWLINE if self.annotation_select_mask is not None:NEWLINE self.set_annotation_mask(self.annotation_select_mask.union(names))NEWLINENEWLINE def set_extra_mask(self, names):NEWLINE """NEWLINE Set the mask of extra select items that will be returned by SELECT.NEWLINE Don't remove them from the Query since they might be used later.NEWLINE """NEWLINE if names is None:NEWLINE self.extra_select_mask = NoneNEWLINE else:NEWLINE self.extra_select_mask = set(names)NEWLINE self._extra_select_cache = NoneNEWLINENEWLINE def set_values(self, fields):NEWLINE self.select_related = FalseNEWLINE self.clear_deferred_loading()NEWLINE self.clear_select_fields()NEWLINENEWLINE if 
self.group_by is True:NEWLINE self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)NEWLINE self.set_group_by()NEWLINE self.clear_select_fields()NEWLINENEWLINE if fields:NEWLINE field_names = []NEWLINE extra_names = []NEWLINE annotation_names = []NEWLINE if not self.extra and not self.annotations:NEWLINE # Shortcut - if there are no extra or annotations, thenNEWLINE # the values() clause must be just field names.NEWLINE field_names = list(fields)NEWLINE else:NEWLINE self.default_cols = FalseNEWLINE for f in fields:NEWLINE if f in self.extra_select:NEWLINE extra_names.append(f)NEWLINE elif f in self.annotation_select:NEWLINE annotation_names.append(f)NEWLINE else:NEWLINE field_names.append(f)NEWLINE self.set_extra_mask(extra_names)NEWLINE self.set_annotation_mask(annotation_names)NEWLINE else:NEWLINE field_names = [f.attname for f in self.model._meta.concrete_fields]NEWLINENEWLINE self.values_select = tuple(field_names)NEWLINE self.add_fields(field_names, True)NEWLINENEWLINE @propertyNEWLINE def annotation_select(self):NEWLINE """NEWLINE Return the dictionary of aggregate columns that are not masked andNEWLINE should be used in the SELECT clause. Cache this result for performance.NEWLINE """NEWLINE if self._annotation_select_cache is not None:NEWLINE return self._annotation_select_cacheNEWLINE elif not self.annotations:NEWLINE return {}NEWLINE elif self.annotation_select_mask is not None:NEWLINE self._annotation_select_cache = {NEWLINE k: v for k, v in self.annotations.items()NEWLINE if k in self.annotation_select_maskNEWLINE }NEWLINE return self._annotation_select_cacheNEWLINE else:NEWLINE return self.annotationsNEWLINENEWLINE @propertyNEWLINE def extra_select(self):NEWLINE if self._extra_select_cache is not None:NEWLINE return self._extra_select_cacheNEWLINE if not self.extra:NEWLINE return {}NEWLINE elif self.extra_select_mask is not None:NEWLINE self._extra_select_cache = {NEWLINE k: v for k, v in self.extra.items()NEWLINE if k in self.extra_select_maskNEWLINE }NEWLINE return self._extra_select_cacheNEWLINE else:NEWLINE return self.extraNEWLINENEWLINE def trim_start(self, names_with_path):NEWLINE """NEWLINE Trim joins from the start of the join path. The candidates for trimNEWLINE are the PathInfos in names_with_path structure that are m2m joins.NEWLINENEWLINE Also set the select column so the start matches the join.NEWLINENEWLINE This method is meant to be used for generating the subquery joins &NEWLINE cols in split_exclude().NEWLINENEWLINE Return a lookup usable for doing outerq.filter(lookup=self) and aNEWLINE boolean indicating if the joins in the prefix contain a LEFT OUTER join.NEWLINE _"""NEWLINE all_paths = []NEWLINE for _, paths in names_with_path:NEWLINE all_paths.extend(paths)NEWLINE contains_louter = FalseNEWLINE # Trim and operate only on tables that were generated forNEWLINE # the lookup part of the query. 
That is, avoid trimmingNEWLINE # joins generated for F() expressions.NEWLINE lookup_tables = [NEWLINE t for t in self.alias_mapNEWLINE if t in self._lookup_joins or t == self.base_tableNEWLINE ]NEWLINE for trimmed_paths, path in enumerate(all_paths):NEWLINE if path.m2m:NEWLINE breakNEWLINE if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:NEWLINE contains_louter = TrueNEWLINE alias = lookup_tables[trimmed_paths]NEWLINE self.unref_alias(alias)NEWLINE # The path.join_field is a Rel, lets get the other side's fieldNEWLINE join_field = path.join_field.fieldNEWLINE # Build the filter prefix.NEWLINE paths_in_prefix = trimmed_pathsNEWLINE trimmed_prefix = []NEWLINE for name, path in names_with_path:NEWLINE if paths_in_prefix - len(path) < 0:NEWLINE breakNEWLINE trimmed_prefix.append(name)NEWLINE paths_in_prefix -= len(path)NEWLINE trimmed_prefix.append(NEWLINE join_field.foreign_related_fields[0].name)NEWLINE trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)NEWLINE # Lets still see if we can trim the first join from the inner queryNEWLINE # (that is, self). We can't do this for:NEWLINE # - LEFT JOINs because we would miss those rows that have nothing onNEWLINE # the outer side,NEWLINE # - INNER JOINs from filtered relations because we would miss theirNEWLINE # filters.NEWLINE first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]NEWLINE if first_join.join_type != LOUTER and not first_join.filtered_relation:NEWLINE select_fields = [r[0] for r in join_field.related_fields]NEWLINE select_alias = lookup_tables[trimmed_paths + 1]NEWLINE self.unref_alias(lookup_tables[trimmed_paths])NEWLINE extra_restriction = join_field.get_extra_restriction(NEWLINE self.where_class, None, lookup_tables[trimmed_paths + 1])NEWLINE if extra_restriction:NEWLINE self.where.add(extra_restriction, AND)NEWLINE else:NEWLINE # TODO: It might be possible to trim more joins from the start of theNEWLINE # inner query if it happens to have a longer join chain containing theNEWLINE # values in select_fields. Lets punt this one for now.NEWLINE select_fields = [r[1] for r in join_field.related_fields]NEWLINE select_alias = lookup_tables[trimmed_paths]NEWLINE # The found starting point is likely a Join instead of a BaseTable reference.NEWLINE # But the first entry in the query's FROM clause must not be a JOIN.NEWLINE for table in self.alias_map:NEWLINE if self.alias_refcount[table] > 0:NEWLINE self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)NEWLINE breakNEWLINE self.set_select([f.get_col(select_alias) for f in select_fields])NEWLINE return trimmed_prefix, contains_louterNEWLINENEWLINE def is_nullable(self, field):NEWLINE """NEWLINE Check if the given field should be treated as nullable.NEWLINENEWLINE Some backends treat '' as null and Django treats such fields asNEWLINE nullable for those backends. In such situations field.null can beNEWLINE False even if we should treat the field as nullable.NEWLINE """NEWLINE # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not haveNEWLINE # (nor should it have) knowledge of which connection is going to beNEWLINE # used. 
The proper fix would be to defer all decisions whereNEWLINE # is_nullable() is needed to the compiler stage, but that is not easyNEWLINE # to do currently.NEWLINE return (NEWLINE connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls andNEWLINE field.empty_strings_allowedNEWLINE ) or field.nullNEWLINENEWLINENEWLINEdef get_order_dir(field, default='ASC'):NEWLINE """NEWLINE Return the field name and direction for an order specification. ForNEWLINE example, '-foo' is returned as ('foo', 'DESC').NEWLINENEWLINE The 'default' param is used to indicate which way no prefix (or a '+'NEWLINE prefix) should sort. The '-' prefix always sorts the opposite way.NEWLINE """NEWLINE dirn = ORDER_DIR[default]NEWLINE if field[0] == '-':NEWLINE return field[1:], dirn[1]NEWLINE return field, dirn[0]NEWLINENEWLINENEWLINEdef add_to_dict(data, key, value):NEWLINE """NEWLINE Add "value" to the set of values for "key", whether or not "key" alreadyNEWLINE exists.NEWLINE """NEWLINE if key in data:NEWLINE data[key].add(value)NEWLINE else:NEWLINE data[key] = {value}NEWLINENEWLINENEWLINEdef is_reverse_o2o(field):NEWLINE """NEWLINE Check if the given field is reverse-o2o. The field is expected to be someNEWLINE sort of relation field or related object.NEWLINE """NEWLINE return field.is_relation and field.one_to_one and not field.concreteNEWLINENEWLINENEWLINEclass JoinPromoter:NEWLINE """NEWLINE A class to abstract away join promotion problems for complex filterNEWLINE conditions.NEWLINE """NEWLINENEWLINE def __init__(self, connector, num_children, negated):NEWLINE self.connector = connectorNEWLINE self.negated = negatedNEWLINE if self.negated:NEWLINE if connector == AND:NEWLINE self.effective_connector = ORNEWLINE else:NEWLINE self.effective_connector = ANDNEWLINE else:NEWLINE self.effective_connector = self.connectorNEWLINE self.num_children = num_childrenNEWLINE # Maps of table alias to how many times it is seen as required forNEWLINE # inner and/or outer joins.NEWLINE self.votes = Counter()NEWLINENEWLINE def add_votes(self, votes):NEWLINE """NEWLINE Add single vote per item to self.votes. Parameter can be anyNEWLINE iterable.NEWLINE """NEWLINE self.votes.update(votes)NEWLINENEWLINE def update_join_types(self, query):NEWLINE """NEWLINE Change join types so that the generated query is as efficient asNEWLINE possible, but still correct. So, change as many joins as possibleNEWLINE to INNER, but don't make OUTER joins INNER if that could removeNEWLINE results from the query.NEWLINE """NEWLINE to_promote = set()NEWLINE to_demote = set()NEWLINE # The effective_connector is used so that NOT (a AND b) is treatedNEWLINE # similarly to (a OR b) for join promotion.NEWLINE for table, votes in self.votes.items():NEWLINE # We must use outer joins in OR case when the join isn't containedNEWLINE # in all of the joins. Otherwise the INNER JOIN itself could removeNEWLINE # valid results. Consider the case where a model with rel_a andNEWLINE # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,NEWLINE # if rel_a join doesn't produce any results is null (for exampleNEWLINE # reverse foreign key or null value in direct foreign key), andNEWLINE # there is a matching row in rel_b with col=2, then an INNER joinNEWLINE # to rel_a would remove a valid match from the query. 
So, we needNEWLINE # to promote any existing INNER to LOUTER (it is possible thisNEWLINE # promotion in turn will be demoted later on).NEWLINE if self.effective_connector == 'OR' and votes < self.num_children:NEWLINE to_promote.add(table)NEWLINE # If connector is AND and there is a filter that can match onlyNEWLINE # when there is a joinable row, then use INNER. For example, inNEWLINE # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULLNEWLINE # as join output, then the col=1 or col=2 can't match (asNEWLINE # NULL=anything is always false).NEWLINE # For the OR case, if all children voted for a join to be inner,NEWLINE # then we can use INNER for the join. For example:NEWLINE # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)NEWLINE # then if rel_a doesn't produce any rows, the whole conditionNEWLINE # can't match. Hence we can safely use INNER join.NEWLINE if self.effective_connector == 'AND' or (NEWLINE self.effective_connector == 'OR' and votes == self.num_children):NEWLINE to_demote.add(table)NEWLINE # Finally, what happens in cases where we have:NEWLINE # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0NEWLINE # Now, we first generate the OR clause, and promote joins for itNEWLINE # in the first if branch above. Both rel_a and rel_b are promotedNEWLINE # to LOUTER joins. After that we do the AND case. The OR caseNEWLINE # voted no inner joins but the rel_a__col__gte=0 votes inner joinNEWLINE # for rel_a. We demote it back to INNER join (in AND case a singleNEWLINE # vote is enough). The demotion is OK, if rel_a doesn't produceNEWLINE # rows, then the rel_a__col__gte=0 clause can't be true, and thusNEWLINE # the whole clause must be false. So, it is safe to use INNERNEWLINE # join.NEWLINE # Note that in this example we could just as well have the __gteNEWLINE # clause and the OR clause swapped. Or we could replace the __gteNEWLINE # clause with an OR clause containing rel_a__col=1|rel_a__col=2,NEWLINE # and again we could safely demote to INNER.NEWLINE query.promote_joins(to_promote)NEWLINE query.demote_joins(to_demote)NEWLINE return to_demoteNEWLINE
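# Illustrative sketch (not part of Django's query.py; added for this excerpt only).NEWLINE# It restates, as a tiny standalone function, the join-promotion rule documented inNEWLINE# JoinPromoter.update_join_types() above: under an effective OR connector a join isNEWLINE# promoted to LOUTER unless every child condition voted for it, while under ANDNEWLINE# (or a unanimous OR) it is demoted back to INNER. All names used here, such asNEWLINE# decide_join_types and the 'rel_a'/'rel_b' aliases, are hypothetical.NEWLINEfrom collections import CounterNEWLINENEWLINENEWLINEdef decide_join_types(effective_connector, num_children, votes):NEWLINE    """Return (to_promote, to_demote) alias sets under the rule described above."""NEWLINE    to_promote, to_demote = set(), set()NEWLINE    for table, count in Counter(votes).items():NEWLINE        if effective_connector == 'OR' and count < num_children:NEWLINE            # Not every child needs this join, so an INNER join could dropNEWLINE            # otherwise valid rows -> promote to LEFT OUTER.NEWLINE            to_promote.add(table)NEWLINE        if effective_connector == 'AND' or (NEWLINE                effective_connector == 'OR' and count == num_children):NEWLINE            # A vote under AND (or a unanimous vote under OR) means the joinNEWLINE            # must produce a row for the condition to match -> INNER is safe.NEWLINE            to_demote.add(table)NEWLINE    return to_promote, to_demoteNEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE    # rel_a__col=1 | rel_b__col=2: each child votes only for its own join,NEWLINE    # so both joins are promoted to LEFT OUTER.NEWLINE    assert decide_join_types('OR', 2, ['rel_a', 'rel_b']) == ({'rel_a', 'rel_b'}, set())NEWLINE    # rel_a__col=1 & rel_b__col=2: a single vote per join suffices for INNER.NEWLINE    assert decide_join_types('AND', 2, ['rel_a', 'rel_b']) == (set(), {'rel_a', 'rel_b'})NEWLINE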
from django import formsNEWLINEfrom django.conf import settingsNEWLINEfrom django.contrib.auth.hashers import check_passwordNEWLINEfrom django.core.exceptions import ValidationErrorNEWLINEfrom django.core.validators import RegexValidatorNEWLINEfrom django.db.models import QNEWLINEfrom django.forms import formset_factoryNEWLINEfrom django.utils.html import escapeNEWLINEfrom django.utils.safestring import mark_safeNEWLINEfrom django.utils.timezone import get_current_timezone_nameNEWLINEfrom django.utils.translation import (NEWLINE pgettext, pgettext_lazy, ugettext_lazy as _,NEWLINE)NEWLINEfrom django_countries import Countries, countriesNEWLINEfrom django_countries.fields import LazyTypedChoiceFieldNEWLINEfrom i18nfield.forms import (NEWLINE I18nForm, I18nFormField, I18nFormSetMixin, I18nTextarea, I18nTextInput,NEWLINE)NEWLINEfrom pytz import common_timezones, timezoneNEWLINENEWLINEfrom pretix.base.forms import I18nModelForm, PlaceholderValidator, SettingsFormNEWLINEfrom pretix.base.models import Event, Organizer, TaxRuleNEWLINEfrom pretix.base.models.event import EventMetaValue, SubEventNEWLINEfrom pretix.base.reldate import RelativeDateField, RelativeDateTimeFieldNEWLINEfrom pretix.base.settings import PERSON_NAME_SCHEMESNEWLINEfrom pretix.control.forms import (NEWLINE ExtFileField, MultipleLanguagesWidget, SingleLanguageWidget, SlugWidget,NEWLINE SplitDateTimeField, SplitDateTimePickerWidget,NEWLINE)NEWLINEfrom pretix.multidomain.urlreverse import build_absolute_uriNEWLINEfrom pretix.plugins.banktransfer.payment import BankTransferNEWLINEfrom pretix.presale.style import get_fontsNEWLINENEWLINENEWLINEclass EventWizardFoundationForm(forms.Form):NEWLINE locales = forms.MultipleChoiceField(NEWLINE choices=settings.LANGUAGES,NEWLINE label=_("Use languages"),NEWLINE widget=MultipleLanguagesWidget,NEWLINE help_text=_('Choose all languages that your event should be available in.')NEWLINE )NEWLINE has_subevents = forms.BooleanField(NEWLINE label=_("This is an event series"),NEWLINE help_text=_('Only recommended for advanced users. If this feature is enabled, this will not only be a 'NEWLINE 'single event but a series of very similar events that are handled within a single shop. 'NEWLINE 'The single events inside the series can only differ in date, time, location, prices and 'NEWLINE 'quotas, but not in other settings, and buying tickets across multiple of these events at 'NEWLINE 'the same time is possible. You cannot change this setting for this event later.'),NEWLINE required=False,NEWLINE )NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE self.user = kwargs.pop('user')NEWLINE super().__init__(*args, **kwargs)NEWLINE self.fields['organizer'] = forms.ModelChoiceField(NEWLINE label=_("Organizer"),NEWLINE queryset=Organizer.objects.filter(NEWLINE id__in=self.user.teams.filter(can_create_events=True).values_list('organizer', flat=True)NEWLINE ),NEWLINE widget=forms.RadioSelect,NEWLINE empty_label=None,NEWLINE required=TrueNEWLINE )NEWLINE if len(self.fields['organizer'].choices) == 1:NEWLINE self.fields['organizer'].initial = self.fields['organizer'].queryset.first()NEWLINENEWLINENEWLINEclass EventWizardBasicsForm(I18nModelForm):NEWLINE error_messages = {NEWLINE 'duplicate_slug': _("You already used this slug for a different event. 
Please choose a new one."),NEWLINE    }NEWLINE    timezone = forms.ChoiceField(NEWLINE        choices=((a, a) for a in common_timezones),NEWLINE        label=_("Event timezone"),NEWLINE    )NEWLINE    locale = forms.ChoiceField(NEWLINE        choices=settings.LANGUAGES,NEWLINE        label=_("Default language"),NEWLINE    )NEWLINE    tax_rate = forms.DecimalField(NEWLINE        label=_("Sales tax rate"),NEWLINE        help_text=_("Do you need to pay sales tax on your tickets? In this case, please enter the applicable tax rate "NEWLINE                    "here in percent. If you have a more complicated tax situation, you can add more tax rates and "NEWLINE                    "detailed configuration later."),NEWLINE        required=FalseNEWLINE    )NEWLINENEWLINE    class Meta:NEWLINE        model = EventNEWLINE        fields = [NEWLINE            'name',NEWLINE            'slug',NEWLINE            'currency',NEWLINE            'date_from',NEWLINE            'date_to',NEWLINE            'presale_start',NEWLINE            'presale_end',NEWLINE            'location',NEWLINE        ]NEWLINE        field_classes = {NEWLINE            'date_from': SplitDateTimeField,NEWLINE            'date_to': SplitDateTimeField,NEWLINE            'presale_start': SplitDateTimeField,NEWLINE            'presale_end': SplitDateTimeField,NEWLINE        }NEWLINE        widgets = {NEWLINE            'date_from': SplitDateTimePickerWidget(),NEWLINE            'date_to': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_basics-date_from_0'}),NEWLINE            'presale_start': SplitDateTimePickerWidget(),NEWLINE            'presale_end': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_basics-presale_start_0'}),NEWLINE            'slug': SlugWidget,NEWLINE        }NEWLINENEWLINE    def __init__(self, *args, **kwargs):NEWLINE        self.organizer = kwargs.pop('organizer')NEWLINE        self.locales = kwargs.get('locales')NEWLINE        self.has_subevents = kwargs.pop('has_subevents')NEWLINE        kwargs.pop('user')NEWLINE        super().__init__(*args, **kwargs)NEWLINE        self.initial['timezone'] = get_current_timezone_name()NEWLINE        self.fields['locale'].choices = [(a, b) for a, b in settings.LANGUAGES if a in self.locales]NEWLINE        self.fields['location'].widget.attrs['rows'] = '3'NEWLINE        self.fields['location'].widget.attrs['placeholder'] = _(NEWLINE            'Sample Conference Center\nHeidelberg, Germany'NEWLINE        )NEWLINE        self.fields['slug'].widget.prefix = build_absolute_uri(self.organizer, 'presale:organizer.index')NEWLINE        if self.has_subevents:NEWLINE            del self.fields['presale_start']NEWLINE            del self.fields['presale_end']NEWLINENEWLINE    def clean(self):NEWLINE        data = super().clean()NEWLINE        if data.get('locale') not in self.locales:NEWLINE            raise ValidationError({NEWLINE                'locale': _('Your default locale must also be enabled for your event (see box above).')NEWLINE            })NEWLINE        if data.get('timezone') not in common_timezones:NEWLINE            raise ValidationError({NEWLINE                'timezone': _('Your default timezone must be specified.')NEWLINE            })NEWLINENEWLINE        # change timezoneNEWLINE        zone = timezone(data.get('timezone'))NEWLINE        data['date_from'] = self.reset_timezone(zone, data.get('date_from'))NEWLINE        data['date_to'] = self.reset_timezone(zone, data.get('date_to'))NEWLINE        data['presale_start'] = self.reset_timezone(zone, data.get('presale_start'))NEWLINE        data['presale_end'] = self.reset_timezone(zone, data.get('presale_end'))NEWLINE        return dataNEWLINENEWLINE    @staticmethodNEWLINE    def reset_timezone(tz, dt):NEWLINE        return tz.localize(dt.replace(tzinfo=None)) if dt is not None else NoneNEWLINENEWLINE    def clean_slug(self):NEWLINE        slug = self.cleaned_data['slug']NEWLINE        if Event.objects.filter(slug__iexact=slug, organizer=self.organizer).exists():NEWLINE            raise forms.ValidationError(NEWLINE                self.error_messages['duplicate_slug'],NEWLINE                code='duplicate_slug'NEWLINE            )NEWLINE        return slugNEWLINENEWLINENEWLINEclass 
EventChoiceField(forms.ModelChoiceField):NEWLINE def label_from_instance(self, obj):NEWLINE return mark_safe('{}<br /><span class="text-muted">{} · {}</span>'.format(NEWLINE escape(str(obj)),NEWLINE obj.get_date_range_display() if not obj.has_subevents else _("Event series"),NEWLINE obj.slugNEWLINE ))NEWLINENEWLINENEWLINEclass EventWizardCopyForm(forms.Form):NEWLINENEWLINE @staticmethodNEWLINE def copy_from_queryset(user):NEWLINE return Event.objects.filter(NEWLINE Q(organizer_id__in=user.teams.filter(NEWLINE all_events=True, can_change_event_settings=True, can_change_items=TrueNEWLINE ).values_list('organizer', flat=True)) | Q(id__in=user.teams.filter(NEWLINE can_change_event_settings=True, can_change_items=TrueNEWLINE ).values_list('limit_events__id', flat=True))NEWLINE )NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE kwargs.pop('organizer')NEWLINE kwargs.pop('locales')NEWLINE kwargs.pop('has_subevents')NEWLINE self.user = kwargs.pop('user')NEWLINE super().__init__(*args, **kwargs)NEWLINE self.fields['copy_from_event'] = EventChoiceField(NEWLINE label=_("Copy configuration from"),NEWLINE queryset=EventWizardCopyForm.copy_from_queryset(self.user),NEWLINE widget=forms.RadioSelect,NEWLINE empty_label=_('Do not copy'),NEWLINE required=FalseNEWLINE )NEWLINENEWLINENEWLINEclass EventMetaValueForm(forms.ModelForm):NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE self.property = kwargs.pop('property')NEWLINE super().__init__(*args, **kwargs)NEWLINE self.fields['value'].required = FalseNEWLINE self.fields['value'].widget.attrs['placeholder'] = self.property.defaultNEWLINENEWLINE class Meta:NEWLINE model = EventMetaValueNEWLINE fields = ['value']NEWLINE widgets = {NEWLINE 'value': forms.TextInputNEWLINE }NEWLINENEWLINENEWLINEclass EventUpdateForm(I18nModelForm):NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE self.change_slug = kwargs.pop('change_slug', False)NEWLINE super().__init__(*args, **kwargs)NEWLINE if not self.change_slug:NEWLINE self.fields['slug'].widget.attrs['readonly'] = 'readonly'NEWLINE self.fields['location'].widget.attrs['rows'] = '3'NEWLINE self.fields['location'].widget.attrs['placeholder'] = _(NEWLINE 'Sample Conference Center\nHeidelberg, Germany'NEWLINE )NEWLINENEWLINE def clean_slug(self):NEWLINE if self.change_slug:NEWLINE return self.cleaned_data['slug']NEWLINE return self.instance.slugNEWLINENEWLINE class Meta:NEWLINE model = EventNEWLINE localized_fields = '__all__'NEWLINE fields = [NEWLINE 'name',NEWLINE 'slug',NEWLINE 'currency',NEWLINE 'date_from',NEWLINE 'date_to',NEWLINE 'date_admission',NEWLINE 'is_public',NEWLINE 'presale_start',NEWLINE 'presale_end',NEWLINE 'location',NEWLINE ]NEWLINE field_classes = {NEWLINE 'date_from': SplitDateTimeField,NEWLINE 'date_to': SplitDateTimeField,NEWLINE 'date_admission': SplitDateTimeField,NEWLINE 'presale_start': SplitDateTimeField,NEWLINE 'presale_end': SplitDateTimeField,NEWLINE }NEWLINE widgets = {NEWLINE 'date_from': SplitDateTimePickerWidget(),NEWLINE 'date_to': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_date_from_0'}),NEWLINE 'date_admission': SplitDateTimePickerWidget(attrs={'data-date-default': '#id_date_from_0'}),NEWLINE 'presale_start': SplitDateTimePickerWidget(),NEWLINE 'presale_end': SplitDateTimePickerWidget(attrs={'data-date-after': '#id_presale_start_0'}),NEWLINE }NEWLINENEWLINENEWLINEclass EventSettingsForm(SettingsForm):NEWLINE show_date_to = forms.BooleanField(NEWLINE label=_("Show event end date"),NEWLINE help_text=_("If disabled, only event's start date will be 
displayed to the public."),NEWLINE        required=FalseNEWLINE    )NEWLINE    show_times = forms.BooleanField(NEWLINE        label=_("Show dates with time"),NEWLINE        help_text=_("If disabled, the event's start and end date will be displayed without the time of day."),NEWLINE        required=FalseNEWLINE    )NEWLINE    show_items_outside_presale_period = forms.BooleanField(NEWLINE        label=_("Show items outside presale period"),NEWLINE        help_text=_("Show item details before presale has started and after presale has ended"),NEWLINE        required=FalseNEWLINE    )NEWLINE    display_net_prices = forms.BooleanField(NEWLINE        label=_("Show net prices instead of gross prices in the product list (not recommended!)"),NEWLINE        help_text=_("Independent of your choice, the cart will show gross prices as this is the price that needs to be "NEWLINE                    "paid"),NEWLINE        required=FalseNEWLINE    )NEWLINE    presale_start_show_date = forms.BooleanField(NEWLINE        label=_("Show start date"),NEWLINE        help_text=_("Show the presale start date before presale has started."),NEWLINE        widget=forms.CheckboxInput,NEWLINE        required=FalseNEWLINE    )NEWLINE    last_order_modification_date = RelativeDateTimeField(NEWLINE        label=_('Last date of modifications'),NEWLINE        help_text=_("The last date users can modify details of their orders, such as attendee names or "NEWLINE                    "answers to questions. If you use the event series feature and an order contains tickets for "NEWLINE                    "multiple event dates, the earliest date will be used."),NEWLINE        required=False,NEWLINE    )NEWLINE    timezone = forms.ChoiceField(NEWLINE        choices=((a, a) for a in common_timezones),NEWLINE        label=_("Default timezone"),NEWLINE    )NEWLINE    locales = forms.MultipleChoiceField(NEWLINE        choices=settings.LANGUAGES,NEWLINE        widget=MultipleLanguagesWidget,NEWLINE        label=_("Available languages"),NEWLINE    )NEWLINE    locale = forms.ChoiceField(NEWLINE        choices=settings.LANGUAGES,NEWLINE        widget=SingleLanguageWidget,NEWLINE        label=_("Default language"),NEWLINE    )NEWLINE    show_quota_left = forms.BooleanField(NEWLINE        label=_("Show number of tickets left"),NEWLINE        help_text=_("Publicly show how many tickets of a certain type are still available."),NEWLINE        required=FalseNEWLINE    )NEWLINE    waiting_list_enabled = forms.BooleanField(NEWLINE        label=_("Enable waiting list"),NEWLINE        help_text=_("Once a ticket is sold out, people can add themselves to a waiting list. As soon as a ticket "NEWLINE                    "becomes available again, it will be reserved for the first person on the waiting list and this "NEWLINE                    "person will receive an email notification with a voucher that can be used to buy a ticket."),NEWLINE        required=FalseNEWLINE    )NEWLINE    waiting_list_hours = forms.IntegerField(NEWLINE        label=_("Waiting list response time"),NEWLINE        min_value=6,NEWLINE        help_text=_("If a ticket voucher is sent to a person on the waiting list, it has to be redeemed within this "NEWLINE                    "number of hours until it expires and can be re-assigned to the next person on the list."),NEWLINE        required=False,NEWLINE        widget=forms.NumberInput(),NEWLINE    )NEWLINE    waiting_list_auto = forms.BooleanField(NEWLINE        label=_("Automatic waiting list assignments"),NEWLINE        help_text=_("If ticket capacity becomes free, automatically create a voucher and send it to the first person "NEWLINE                    "on the waiting list for that product. If this is not active, mails will not be sent automatically "NEWLINE                    "but you can send them manually via the control panel. 
If you disable the waiting list but keep "NEWLINE "this option enabled, tickets will still be sent out."),NEWLINE required=False,NEWLINE widget=forms.CheckboxInput(),NEWLINE )NEWLINE attendee_names_asked = forms.BooleanField(NEWLINE label=_("Ask for attendee names"),NEWLINE help_text=_("Ask for a name for all tickets which include admission to the event."),NEWLINE required=False,NEWLINE )NEWLINE attendee_names_required = forms.BooleanField(NEWLINE label=_("Require attendee names"),NEWLINE help_text=_("Require customers to fill in the names of all attendees."),NEWLINE required=False,NEWLINE widget=forms.CheckboxInput(attrs={'data-checkbox-dependency': '#id_settings-attendee_names_asked'}),NEWLINE )NEWLINE name_scheme = forms.ChoiceField(NEWLINE label=_("Name format"),NEWLINE help_text=_("This defines how pretix will ask for human names. Changing this after you already received "NEWLINE "orders might lead to unexpected behaviour when sorting or changing names."),NEWLINE required=True,NEWLINE )NEWLINE attendee_emails_asked = forms.BooleanField(NEWLINE label=_("Ask for email addresses per ticket"),NEWLINE help_text=_("Normally, pretix asks for one email address per order and the order confirmation will be sent "NEWLINE "only to that email address. If you enable this option, the system will additionally ask for "NEWLINE "individual email addresses for every admission ticket. This might be useful if you want to "NEWLINE "obtain individual addresses for every attendee even in case of group orders. However, "NEWLINE "pretix will send the order confirmation only to the one primary email address, not to the "NEWLINE "per-attendee addresses."),NEWLINE required=FalseNEWLINE )NEWLINE attendee_emails_required = forms.BooleanField(NEWLINE label=_("Require email addresses per ticket"),NEWLINE help_text=_("Require customers to fill in individual e-mail addresses for all admission tickets. See the "NEWLINE "above option for more details. One email address for the order confirmation will always be "NEWLINE "required regardless of this setting."),NEWLINE required=False,NEWLINE widget=forms.CheckboxInput(attrs={'data-checkbox-dependency': '#id_settings-attendee_emails_asked'}),NEWLINE )NEWLINE order_email_asked_twice = forms.BooleanField(NEWLINE label=_("Ask for the order email address twice"),NEWLINE help_text=_("Require customers to fill in the primary email address twice to avoid errors."),NEWLINE required=False,NEWLINE )NEWLINE max_items_per_order = forms.IntegerField(NEWLINE min_value=1,NEWLINE label=_("Maximum number of items per order"),NEWLINE help_text=_("Add-on products will not be counted.")NEWLINE )NEWLINE reservation_time = forms.IntegerField(NEWLINE min_value=0,NEWLINE label=_("Reservation period"),NEWLINE help_text=_("The number of minutes the items in a user's cart are reserved for this user."),NEWLINE )NEWLINE imprint_url = forms.URLField(NEWLINE label=_("Imprint URL"),NEWLINE help_text=_("This should point e.g. to a part of your website that has your contact details and legal "NEWLINE "information."),NEWLINE required=False,NEWLINE )NEWLINE confirm_text = I18nFormField(NEWLINE label=_('Confirmation text'),NEWLINE help_text=_('This text needs to be confirmed by the user before a purchase is possible. You could for example 'NEWLINE 'link your terms of service here. 
If you use the Pages feature to publish your terms of service, 'NEWLINE 'you don\'t need this setting since you can configure it there.'),NEWLINE required=False,NEWLINE widget=I18nTextareaNEWLINE )NEWLINE contact_mail = forms.EmailField(NEWLINE label=_("Contact address"),NEWLINE required=False,NEWLINE help_text=_("We'll show this publicly to allow attendees to contact you.")NEWLINE )NEWLINENEWLINE def clean(self):NEWLINE data = super().clean()NEWLINE if data['locale'] not in data['locales']:NEWLINE raise ValidationError({NEWLINE 'locale': _('Your default locale must also be enabled for your event (see box above).')NEWLINE })NEWLINE if data['attendee_names_required'] and not data['attendee_names_asked']:NEWLINE raise ValidationError({NEWLINE 'attendee_names_required': _('You cannot require specifying attendee names if you do not ask for them.')NEWLINE })NEWLINE if data['attendee_emails_required'] and not data['attendee_emails_asked']:NEWLINE raise ValidationError({NEWLINE 'attendee_emails_required': _('You have to ask for attendee emails if you want to make them required.')NEWLINE })NEWLINE return dataNEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE super().__init__(*args, **kwargs)NEWLINE self.fields['confirm_text'].widget.attrs['rows'] = '3'NEWLINE self.fields['confirm_text'].widget.attrs['placeholder'] = _(NEWLINE 'e.g. I hereby confirm that I have read and agree with the event organizer\'s terms of service 'NEWLINE 'and agree with them.'NEWLINE )NEWLINE self.fields['name_scheme'].choices = (NEWLINE (k, _('Ask for {fields}, display like {example}').format(NEWLINE fields=' + '.join(str(vv[1]) for vv in v['fields']),NEWLINE example=v['concatenation'](v['sample'])NEWLINE ))NEWLINE for k, v in PERSON_NAME_SCHEMES.items()NEWLINE )NEWLINENEWLINENEWLINEclass CancelSettingsForm(SettingsForm):NEWLINE cancel_allow_user = forms.BooleanField(NEWLINE label=_("Customers can cancel their unpaid orders"),NEWLINE required=FalseNEWLINE )NEWLINE cancel_allow_user_until = RelativeDateTimeField(NEWLINE label=_("Do not allow cancellations after"),NEWLINE required=FalseNEWLINE )NEWLINE cancel_allow_user_paid = forms.BooleanField(NEWLINE label=_("Customers can cancel their paid orders"),NEWLINE help_text=_("Paid money will be automatically paid back if the payment method allows it. "NEWLINE "Otherwise, a manual refund will be created for you to process manually."),NEWLINE required=FalseNEWLINE )NEWLINE cancel_allow_user_paid_keep = forms.DecimalField(NEWLINE label=_("Keep a fixed cancellation fee"),NEWLINE required=FalseNEWLINE )NEWLINE cancel_allow_user_paid_keep_fees = forms.BooleanField(NEWLINE label=_("Keep payment, shipping and service fees"),NEWLINE required=FalseNEWLINE )NEWLINE cancel_allow_user_paid_keep_percentage = forms.DecimalField(NEWLINE label=_("Keep a percentual cancellation fee"),NEWLINE required=FalseNEWLINE )NEWLINE cancel_allow_user_paid_until = RelativeDateTimeField(NEWLINE label=_("Do not allow cancellations after"),NEWLINE required=FalseNEWLINE )NEWLINENEWLINENEWLINEclass PaymentSettingsForm(SettingsForm):NEWLINE payment_term_days = forms.IntegerField(NEWLINE label=_('Payment term in days'),NEWLINE help_text=_("The number of days after placing an order the user has to pay to preserve their reservation. If "NEWLINE "you use slow payment methods like bank transfer, we recommend 14 days. 
If you only use real-time "NEWLINE "payment methods, we recommend still setting two or three days to allow people to retry failed "NEWLINE "payments."),NEWLINE )NEWLINE payment_term_last = RelativeDateField(NEWLINE label=_('Last date of payments'),NEWLINE help_text=_("The last date any payments are accepted. This has precedence over the number of "NEWLINE "days configured above. If you use the event series feature and an order contains tickets for "NEWLINE "multiple dates, the earliest date will be used."),NEWLINE required=False,NEWLINE )NEWLINE payment_term_weekdays = forms.BooleanField(NEWLINE label=_('Only end payment terms on weekdays'),NEWLINE help_text=_("If this is activated and the payment term of any order ends on a Saturday or Sunday, it will be "NEWLINE "moved to the next Monday instead. This is required in some countries by civil law. This will "NEWLINE "not effect the last date of payments configured above."),NEWLINE required=False,NEWLINE )NEWLINE payment_term_expire_automatically = forms.BooleanField(NEWLINE label=_('Automatically expire unpaid orders'),NEWLINE help_text=_("If checked, all unpaid orders will automatically go from 'pending' to 'expired' "NEWLINE "after the end of their payment deadline. This means that those tickets go back to "NEWLINE "the pool and can be ordered by other people."),NEWLINE required=FalseNEWLINE )NEWLINE payment_term_accept_late = forms.BooleanField(NEWLINE label=_('Accept late payments'),NEWLINE help_text=_("Accept payments for orders even when they are in 'expired' state as long as enough "NEWLINE "capacity is available. No payments will ever be accepted after the 'Last date of payments' "NEWLINE "configured above."),NEWLINE required=FalseNEWLINE )NEWLINE tax_rate_default = forms.ModelChoiceField(NEWLINE queryset=TaxRule.objects.none(),NEWLINE label=_('Tax rule for payment fees'),NEWLINE required=False,NEWLINE help_text=_("The tax rule that applies for additional fees you configured for single payment methods. This "NEWLINE "will set the tax rate and reverse charge rules, other settings of the tax rule are ignored.")NEWLINE )NEWLINE payment_explanation = I18nFormField(NEWLINE widget=I18nTextarea,NEWLINE widget_kwargs={'attrs': {NEWLINE 'rows': 3,NEWLINE }},NEWLINE required=False,NEWLINE label=_("Guidance text"),NEWLINE help_text=_("This text will be shown above the payment options. 
You can explain the choices to the user here, "NEWLINE "if you want.")NEWLINE )NEWLINENEWLINE def clean(self):NEWLINE cleaned_data = super().clean()NEWLINE payment_term_last = cleaned_data.get('payment_term_last')NEWLINE if payment_term_last and self.obj.presale_end:NEWLINE if payment_term_last.date(self.obj) < self.obj.presale_end.date():NEWLINE self.add_error(NEWLINE 'payment_term_last',NEWLINE _('The last payment date cannot be before the end of presale.'),NEWLINE )NEWLINE return cleaned_dataNEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE super().__init__(*args, **kwargs)NEWLINE self.fields['tax_rate_default'].queryset = self.obj.tax_rules.all()NEWLINENEWLINENEWLINEclass ProviderForm(SettingsForm):NEWLINE """NEWLINE This is a SettingsForm, but if fields are set to required=True, validationNEWLINE errors are only raised if the payment method is enabled.NEWLINE """NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE self.settingspref = kwargs.pop('settingspref')NEWLINE self.provider = kwargs.pop('provider', None)NEWLINE super().__init__(*args, **kwargs)NEWLINENEWLINE def prepare_fields(self):NEWLINE for k, v in self.fields.items():NEWLINE v._required = v.requiredNEWLINE v.required = FalseNEWLINE v.widget.is_required = FalseNEWLINE if isinstance(v, I18nFormField):NEWLINE v._required = v.one_requiredNEWLINE v.one_required = FalseNEWLINE v.widget.enabled_locales = self.localesNEWLINE elif isinstance(v, (RelativeDateTimeField, RelativeDateField)):NEWLINE v.set_event(self.obj)NEWLINENEWLINE if hasattr(v, '_as_type'):NEWLINE self.initial[k] = self.obj.settings.get(k, as_type=v._as_type)NEWLINENEWLINE def clean(self):NEWLINE cleaned_data = super().clean()NEWLINE enabled = cleaned_data.get(self.settingspref + '_enabled')NEWLINE if not enabled:NEWLINE returnNEWLINE for k, v in self.fields.items():NEWLINE val = cleaned_data.get(k)NEWLINE if v._required and not val:NEWLINE self.add_error(k, _('This field is required.'))NEWLINE if self.provider:NEWLINE cleaned_data = self.provider.settings_form_clean(cleaned_data)NEWLINE return cleaned_dataNEWLINENEWLINENEWLINEclass InvoiceSettingsForm(SettingsForm):NEWLINE allcountries = list(countries)NEWLINE allcountries.insert(0, ('', _('Select country')))NEWLINENEWLINE invoice_address_asked = forms.BooleanField(NEWLINE label=_("Ask for invoice address"),NEWLINE required=FalseNEWLINE )NEWLINE invoice_address_required = forms.BooleanField(NEWLINE label=_("Require invoice address"),NEWLINE required=False,NEWLINE widget=forms.CheckboxInput(attrs={'data-checkbox-dependency': '#id_invoice_address_asked'}),NEWLINE )NEWLINE invoice_address_company_required = forms.BooleanField(NEWLINE label=_("Require a business addresses"),NEWLINE help_text=_('This will require users to enter a company name.'),NEWLINE required=False,NEWLINE widget=forms.CheckboxInput(attrs={'data-checkbox-dependency': '#id_invoice_address_required'}),NEWLINE )NEWLINE invoice_name_required = forms.BooleanField(NEWLINE label=_("Require customer name"),NEWLINE required=False,NEWLINE widget=forms.CheckboxInput(NEWLINE attrs={'data-inverse-dependency': '#id_invoice_address_required'}NEWLINE ),NEWLINE )NEWLINE invoice_address_vatid = forms.BooleanField(NEWLINE label=_("Ask for VAT ID"),NEWLINE help_text=_("Does only work if an invoice address is asked for. 
VAT ID is not required."),NEWLINE widget=forms.CheckboxInput(attrs={'data-checkbox-dependency': '#id_invoice_address_asked'}),NEWLINE required=FalseNEWLINE )NEWLINE invoice_address_beneficiary = forms.BooleanField(NEWLINE label=_("Ask for beneficiary"),NEWLINE widget=forms.CheckboxInput(attrs={'data-checkbox-dependency': '#id_invoice_address_asked'}),NEWLINE required=FalseNEWLINE )NEWLINE invoice_include_free = forms.BooleanField(NEWLINE label=_("Show free products on invoices"),NEWLINE help_text=_("Note that invoices will never be generated for orders that contain only free "NEWLINE "products."),NEWLINE required=FalseNEWLINE )NEWLINE invoice_numbers_consecutive = forms.BooleanField(NEWLINE label=_("Generate invoices with consecutive numbers"),NEWLINE help_text=_("If deactivated, the order code will be used in the invoice number."),NEWLINE required=FalseNEWLINE )NEWLINE invoice_numbers_prefix = forms.CharField(NEWLINE label=_("Invoice number prefix"),NEWLINE help_text=_("This will be prepended to invoice numbers. If you leave this field empty, your event slug will "NEWLINE "be used followed by a dash. Attention: If multiple events within the same organization use the "NEWLINE "same value in this field, they will share their number range, i.e. every full number will be "NEWLINE "used at most once over all of your events. This setting only affects future invoices."),NEWLINE required=False,NEWLINE )NEWLINE invoice_generate = forms.ChoiceField(NEWLINE label=_("Generate invoices"),NEWLINE required=False,NEWLINE widget=forms.RadioSelect,NEWLINE choices=(NEWLINE ('False', _('Do not generate invoices')),NEWLINE ('admin', _('Only manually in admin panel')),NEWLINE ('user', _('Automatically on user request')),NEWLINE ('True', _('Automatically for all created orders')),NEWLINE ('paid', _('Automatically on payment')),NEWLINE ),NEWLINE help_text=_("Invoices will never be automatically generated for free orders.")NEWLINE )NEWLINE invoice_attendee_name = forms.BooleanField(NEWLINE label=_("Show attendee names on invoices"),NEWLINE required=FalseNEWLINE )NEWLINE invoice_email_attachment = forms.BooleanField(NEWLINE label=_("Attach invoices to emails"),NEWLINE help_text=_("If invoices are automatically generated for all orders, they will be attached to the order "NEWLINE "confirmation mail. If they are automatically generated on payment, they will be attached to the "NEWLINE "payment confirmation mail. 
If they are not automatically generated, they will not be attached "NEWLINE "to emails."),NEWLINE required=FalseNEWLINE )NEWLINE invoice_renderer = forms.ChoiceField(NEWLINE label=_("Invoice style"),NEWLINE required=True,NEWLINE choices=[]NEWLINE )NEWLINE invoice_address_from_name = forms.CharField(NEWLINE label=_("Company name"),NEWLINE required=False,NEWLINE )NEWLINE invoice_address_from = forms.CharField(NEWLINE label=_("Address line"),NEWLINE widget=forms.Textarea(attrs={NEWLINE 'rows': 2,NEWLINE 'placeholder': _(NEWLINE 'Albert Einstein Road 52'NEWLINE )NEWLINE }),NEWLINE required=False,NEWLINE )NEWLINE invoice_address_from_zipcode = forms.CharField(NEWLINE widget=forms.TextInput(attrs={NEWLINE 'placeholder': '12345'NEWLINE }),NEWLINE required=False,NEWLINE label=_("ZIP code"),NEWLINE )NEWLINE invoice_address_from_city = forms.CharField(NEWLINE widget=forms.TextInput(attrs={NEWLINE 'placeholder': _('Random City')NEWLINE }),NEWLINE required=False,NEWLINE label=_("City"),NEWLINE )NEWLINE invoice_address_from_country = forms.ChoiceField(NEWLINE choices=allcountries,NEWLINE required=False,NEWLINE label=_("Country"),NEWLINE )NEWLINE invoice_address_from_tax_id = forms.CharField(NEWLINE required=False,NEWLINE label=_("Domestic tax ID"),NEWLINE )NEWLINE invoice_address_from_vat_id = forms.CharField(NEWLINE required=False,NEWLINE label=_("EU VAT ID"),NEWLINE )NEWLINE invoice_introductory_text = I18nFormField(NEWLINE widget=I18nTextarea,NEWLINE widget_kwargs={'attrs': {NEWLINE 'rows': 3,NEWLINE 'placeholder': _(NEWLINE 'e.g. With this document, we sent you the invoice for your ticket order.'NEWLINE )NEWLINE }},NEWLINE required=False,NEWLINE label=_("Introductory text"),NEWLINE help_text=_("Will be printed on every invoice above the invoice rows.")NEWLINE )NEWLINE invoice_additional_text = I18nFormField(NEWLINE widget=I18nTextarea,NEWLINE widget_kwargs={'attrs': {NEWLINE 'rows': 3,NEWLINE 'placeholder': _(NEWLINE 'e.g. Thank you for your purchase! You can find more information on the event at ...'NEWLINE )NEWLINE }},NEWLINE required=False,NEWLINE label=_("Additional text"),NEWLINE help_text=_("Will be printed on every invoice below the invoice total.")NEWLINE )NEWLINE invoice_footer_text = I18nFormField(NEWLINE widget=I18nTextarea,NEWLINE widget_kwargs={'attrs': {NEWLINE 'rows': 5,NEWLINE 'placeholder': _(NEWLINE 'e.g. 
your bank details, legal details like your VAT ID, registration numbers, etc.'NEWLINE )NEWLINE }},NEWLINE required=False,NEWLINE label=_("Footer"),NEWLINE help_text=_("Will be printed centered and in a smaller font at the end of every invoice page.")NEWLINE )NEWLINE invoice_language = forms.ChoiceField(NEWLINE widget=forms.Select, required=True,NEWLINE label=_("Invoice language"),NEWLINE choices=[('__user__', _('The user\'s language'))] + settings.LANGUAGES,NEWLINE )NEWLINE invoice_logo_image = ExtFileField(NEWLINE label=_('Logo image'),NEWLINE ext_whitelist=(".png", ".jpg", ".gif", ".jpeg"),NEWLINE required=False,NEWLINE help_text=_('We will show your logo with a maximal height and width of 2.5 cm.')NEWLINE )NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE event = kwargs.get('obj')NEWLINE super().__init__(*args, **kwargs)NEWLINE self.fields['invoice_renderer'].choices = [NEWLINE (r.identifier, r.verbose_name) for r in event.get_invoice_renderers().values()NEWLINE ]NEWLINE self.fields['invoice_numbers_prefix'].widget.attrs['placeholder'] = event.slug.upper() + '-'NEWLINE locale_names = dict(settings.LANGUAGES)NEWLINE self.fields['invoice_language'].choices = [('__user__', _('The user\'s language'))] + [(a, locale_names[a]) for a in event.settings.locales]NEWLINENEWLINENEWLINEclass MailSettingsForm(SettingsForm):NEWLINE mail_prefix = forms.CharField(NEWLINE label=_("Subject prefix"),NEWLINE help_text=_("This will be prepended to the subject of all outgoing emails, formatted as [prefix]. "NEWLINE "Choose, for example, a short form of your event name."),NEWLINE required=FalseNEWLINE )NEWLINE mail_from = forms.EmailField(NEWLINE label=_("Sender address"),NEWLINE help_text=_("Sender address for outgoing emails")NEWLINE )NEWLINE mail_bcc = forms.EmailField(NEWLINE label=_("Bcc address"),NEWLINE help_text=_("All emails will be sent to this address as a Bcc copy"),NEWLINE required=FalseNEWLINE )NEWLINENEWLINE mail_text_signature = I18nFormField(NEWLINE label=_("Signature"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("This will be attached to every email. Available placeholders: {event}"),NEWLINE validators=[PlaceholderValidator(['{event}'])],NEWLINE widget_kwargs={'attrs': {NEWLINE 'rows': '4',NEWLINE 'placeholder': _(NEWLINE 'e.g. 
your contact details'NEWLINE )NEWLINE }}NEWLINE )NEWLINE mail_html_renderer = forms.ChoiceField(NEWLINE label=_("HTML mail renderer"),NEWLINE required=True,NEWLINE choices=[]NEWLINE )NEWLINENEWLINE mail_text_order_placed = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {total_with_currency}, {total}, {currency}, {date}, "NEWLINE "{payment_info}, {url}, {invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{total_with_currency}', '{total}', '{currency}', '{date}',NEWLINE '{payment_info}', '{url}', '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE mail_text_order_paid = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {url}, {invoice_name}, {invoice_company}, {payment_info}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{url}', '{invoice_name}', '{invoice_company}', '{payment_info}'])]NEWLINE )NEWLINE mail_text_order_free = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {url}, {invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{url}', '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE mail_text_order_changed = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {url}, {invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{url}', '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE mail_text_resend_link = I18nFormField(NEWLINE label=_("Text (sent by admin)"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {url}, {invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{url}', '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE mail_text_resend_all_links = I18nFormField(NEWLINE label=_("Text (requested by user)"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {orders}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{orders}'])]NEWLINE )NEWLINE mail_days_order_expire_warning = forms.IntegerField(NEWLINE label=_("Number of days"),NEWLINE required=False,NEWLINE min_value=0,NEWLINE help_text=_("This email will be sent out this many days before the order expires. 
If the "NEWLINE "value is 0, the mail will never be sent.")NEWLINE )NEWLINE mail_text_order_expire_warning = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {url}, {expire_date}, {invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{url}', '{expire_date}', '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE mail_text_waiting_list = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {url}, {product}, {hours}, {code}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{url}', '{product}', '{hours}', '{code}'])]NEWLINE )NEWLINE mail_text_order_canceled = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {code}, {url}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{code}', '{url}'])]NEWLINE )NEWLINE mail_text_order_custom_mail = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {expire_date}, {event}, {code}, {date}, {url}, "NEWLINE "{invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{expire_date}', '{event}', '{code}', '{date}', '{url}',NEWLINE '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE mail_text_download_reminder = I18nFormField(NEWLINE label=_("Text"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {url}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{url}'])]NEWLINE )NEWLINE mail_days_download_reminder = forms.IntegerField(NEWLINE label=_("Number of days"),NEWLINE required=False,NEWLINE min_value=0,NEWLINE help_text=_("This email will be sent out this many days before the order event starts. If the "NEWLINE "field is empty, the mail will never be sent.")NEWLINE )NEWLINE mail_text_order_placed_require_approval = I18nFormField(NEWLINE label=_("Received order"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {total_with_currency}, {total}, {currency}, {date}, "NEWLINE "{url}, {invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{total_with_currency}', '{total}', '{currency}', '{date}',NEWLINE '{url}', '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE mail_text_order_approved = I18nFormField(NEWLINE label=_("Approved order"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("This will only be sent out for non-free orders. Free orders will receive the free order "NEWLINE "template from above instead. 
Available placeholders: {event}, {total_with_currency}, {total}, "NEWLINE "{currency}, {date}, {payment_info}, {url}, {invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{total_with_currency}', '{total}', '{currency}', '{date}',NEWLINE '{url}', '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE mail_text_order_denied = I18nFormField(NEWLINE label=_("Denied order"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE help_text=_("Available placeholders: {event}, {total_with_currency}, {total}, {currency}, {date}, "NEWLINE "{comment}, {url}, {invoice_name}, {invoice_company}"),NEWLINE validators=[PlaceholderValidator(['{event}', '{total_with_currency}', '{total}', '{currency}', '{date}',NEWLINE '{comment}', '{url}', '{invoice_name}', '{invoice_company}'])]NEWLINE )NEWLINE smtp_use_custom = forms.BooleanField(NEWLINE label=_("Use custom SMTP server"),NEWLINE help_text=_("All mail related to your event will be sent over the smtp server specified by you."),NEWLINE required=FalseNEWLINE )NEWLINE smtp_host = forms.CharField(NEWLINE label=_("Hostname"),NEWLINE required=False,NEWLINE widget=forms.TextInput(attrs={'placeholder': 'mail.example.org'})NEWLINE )NEWLINE smtp_port = forms.IntegerField(NEWLINE label=_("Port"),NEWLINE required=False,NEWLINE widget=forms.TextInput(attrs={'placeholder': 'e.g. 587, 465, 25, ...'})NEWLINE )NEWLINE smtp_username = forms.CharField(NEWLINE label=_("Username"),NEWLINE widget=forms.TextInput(attrs={'placeholder': 'myuser@example.org'}),NEWLINE required=FalseNEWLINE )NEWLINE smtp_password = forms.CharField(NEWLINE label=_("Password"),NEWLINE required=False,NEWLINE widget=forms.PasswordInput(attrs={NEWLINE 'autocomplete': 'new-password' # see https://bugs.chromium.org/p/chromium/issues/detail?id=370363#c7NEWLINE }),NEWLINE )NEWLINE smtp_use_tls = forms.BooleanField(NEWLINE label=_("Use STARTTLS"),NEWLINE help_text=_("Commonly enabled on port 587."),NEWLINE required=FalseNEWLINE )NEWLINE smtp_use_ssl = forms.BooleanField(NEWLINE label=_("Use SSL"),NEWLINE help_text=_("Commonly enabled on port 465."),NEWLINE required=FalseNEWLINE )NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE event = kwargs.get('obj')NEWLINE super().__init__(*args, **kwargs)NEWLINE self.fields['mail_html_renderer'].choices = [NEWLINE (r.identifier, r.verbose_name) for r in event.get_html_mail_renderers().values()NEWLINE ]NEWLINENEWLINE def clean(self):NEWLINE data = self.cleaned_dataNEWLINE if not data.get('smtp_password') and data.get('smtp_username'):NEWLINE # Leave password unchanged if the username is set and the password field is empty.NEWLINE # This makes it impossible to set an empty password as long as a username is set, butNEWLINE # Python's smtplib does not support password-less schemes anyway.NEWLINE data['smtp_password'] = self.initial.get('smtp_password')NEWLINENEWLINE if data.get('smtp_use_tls') and data.get('smtp_use_ssl'):NEWLINE raise ValidationError(_('You can activate either SSL or STARTTLS security, but not both at the same time.'))NEWLINENEWLINENEWLINEclass DisplaySettingsForm(SettingsForm):NEWLINE primary_color = forms.CharField(NEWLINE label=_("Primary color"),NEWLINE required=False,NEWLINE validators=[NEWLINE RegexValidator(regex='^#[0-9a-fA-F]{6}$',NEWLINE message=_('Please enter the hexadecimal code of a color, e.g. 
#990000.')),NEWLINE ],NEWLINE widget=forms.TextInput(attrs={'class': 'colorpickerfield'})NEWLINE )NEWLINE theme_color_success = forms.CharField(NEWLINE label=_("Accent color for success"),NEWLINE help_text=_("We strongly suggest to use a shade of green."),NEWLINE required=False,NEWLINE validators=[NEWLINE RegexValidator(regex='^#[0-9a-fA-F]{6}$',NEWLINE message=_('Please enter the hexadecimal code of a color, e.g. #990000.')),NEWLINE ],NEWLINE widget=forms.TextInput(attrs={'class': 'colorpickerfield'})NEWLINE )NEWLINE theme_color_danger = forms.CharField(NEWLINE label=_("Accent color for errors"),NEWLINE help_text=_("We strongly suggest to use a dark shade of red."),NEWLINE required=False,NEWLINE validators=[NEWLINE RegexValidator(regex='^#[0-9a-fA-F]{6}$',NEWLINE message=_('Please enter the hexadecimal code of a color, e.g. #990000.')),NEWLINE ],NEWLINE widget=forms.TextInput(attrs={'class': 'colorpickerfield'})NEWLINE )NEWLINE logo_image = ExtFileField(NEWLINE label=_('Logo image'),NEWLINE ext_whitelist=(".png", ".jpg", ".gif", ".jpeg"),NEWLINE required=False,NEWLINE help_text=_('If you provide a logo image, we will by default not show your events name and date 'NEWLINE 'in the page header. We will show your logo with a maximal height of 120 pixels.')NEWLINE )NEWLINE primary_font = forms.ChoiceField(NEWLINE label=_('Font'),NEWLINE choices=[NEWLINE ('Open Sans', 'Open Sans')NEWLINE ],NEWLINE help_text=_('Only respected by modern browsers.')NEWLINE )NEWLINE frontpage_text = I18nFormField(NEWLINE label=_("Frontpage text"),NEWLINE required=False,NEWLINE widget=I18nTextareaNEWLINE )NEWLINE voucher_explanation_text = I18nFormField(NEWLINE label=_("Voucher explanation"),NEWLINE required=False,NEWLINE widget=I18nTextarea,NEWLINE widget_kwargs={'attrs': {'rows': '2'}},NEWLINE help_text=_("This text will be shown next to the input for a voucher code. You can use it e.g. to explain "NEWLINE "how to obtain a voucher code.")NEWLINE )NEWLINE show_variations_expanded = forms.BooleanField(NEWLINE label=_("Show variations of a product expanded by default"),NEWLINE required=FalseNEWLINE )NEWLINE frontpage_subevent_ordering = forms.ChoiceField(NEWLINE label=pgettext('subevent', 'Date ordering'),NEWLINE choices=[NEWLINE ('date_ascending', _('Event start time')),NEWLINE ('date_descending', _('Event start time (descending)')),NEWLINE ('name_ascending', _('Name')),NEWLINE ('name_descending', _('Name (descending)')),NEWLINE ], # When adding a new ordering, remember to also define it in the event modelNEWLINE )NEWLINE meta_noindex = forms.BooleanField(NEWLINE label=_('Ask search engines not to index the ticket shop'),NEWLINE required=FalseNEWLINE )NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE event = kwargs['obj']NEWLINE super().__init__(*args, **kwargs)NEWLINE self.fields['primary_font'].choices += [NEWLINE (a, a) for a in get_fonts()NEWLINE ]NEWLINE if not event.has_subevents:NEWLINE del self.fields['frontpage_subevent_ordering']NEWLINENEWLINENEWLINEclass TicketSettingsForm(SettingsForm):NEWLINE ticket_download = forms.BooleanField(NEWLINE label=_("Use feature"),NEWLINE help_text=_("Use pretix to generate tickets for the user to download and print out."),NEWLINE required=FalseNEWLINE )NEWLINE ticket_download_date = RelativeDateTimeField(NEWLINE label=_("Download date"),NEWLINE help_text=_("Ticket download will be offered after this date. 
If you use the event series feature and an order "NEWLINE "contains tickets for multiple event dates, download of all tickets will be available if at least "NEWLINE "one of the event dates allows it."),NEWLINE required=False,NEWLINE )NEWLINE ticket_download_addons = forms.BooleanField(NEWLINE label=_("Offer to download tickets separately for add-on products"),NEWLINE required=False,NEWLINE )NEWLINE ticket_download_nonadm = forms.BooleanField(NEWLINE label=_("Generate tickets for non-admission products"),NEWLINE required=False,NEWLINE )NEWLINE ticket_download_pending = forms.BooleanField(NEWLINE label=_("Offer to download tickets even before an order is paid"),NEWLINE required=False,NEWLINE )NEWLINENEWLINE def prepare_fields(self):NEWLINE # See clean()NEWLINE for k, v in self.fields.items():NEWLINE v._required = v.requiredNEWLINE v.required = FalseNEWLINE v.widget.is_required = FalseNEWLINE if isinstance(v, I18nFormField):NEWLINE v._required = v.one_requiredNEWLINE v.one_required = FalseNEWLINE v.widget.enabled_locales = self.localesNEWLINENEWLINE def clean(self):NEWLINE # required=True files should only be required if the feature is enabledNEWLINE cleaned_data = super().clean()NEWLINE enabled = cleaned_data.get('ticket_download') == 'True'NEWLINE if not enabled:NEWLINE returnNEWLINE for k, v in self.fields.items():NEWLINE val = cleaned_data.get(k)NEWLINE if v._required and (val is None or val == ""):NEWLINE self.add_error(k, _('This field is required.'))NEWLINENEWLINENEWLINEclass CommentForm(I18nModelForm):NEWLINE class Meta:NEWLINE model = EventNEWLINE fields = ['comment']NEWLINE widgets = {NEWLINE 'comment': forms.Textarea(attrs={NEWLINE 'rows': 3,NEWLINE 'class': 'helper-width-100',NEWLINE }),NEWLINE }NEWLINENEWLINENEWLINEclass CountriesAndEU(Countries):NEWLINE override = {NEWLINE 'ZZ': _('Any country'),NEWLINE 'EU': _('European Union')NEWLINE }NEWLINE first = ['ZZ', 'EU']NEWLINENEWLINENEWLINEclass TaxRuleLineForm(forms.Form):NEWLINE country = LazyTypedChoiceField(NEWLINE choices=CountriesAndEU(),NEWLINE required=FalseNEWLINE )NEWLINE address_type = forms.ChoiceField(NEWLINE choices=[NEWLINE ('', _('Any customer')),NEWLINE ('individual', _('Individual')),NEWLINE ('business', _('Business')),NEWLINE ('business_vat_id', _('Business with valid VAT ID')),NEWLINE ],NEWLINE required=FalseNEWLINE )NEWLINE action = forms.ChoiceField(NEWLINE choices=[NEWLINE ('vat', _('Charge VAT')),NEWLINE ('reverse', _('Reverse charge')),NEWLINE ('no', _('No VAT')),NEWLINE ],NEWLINE )NEWLINENEWLINENEWLINETaxRuleLineFormSet = formset_factory(NEWLINE TaxRuleLineForm,NEWLINE can_order=False, can_delete=True, extra=0NEWLINE)NEWLINENEWLINENEWLINEclass TaxRuleForm(I18nModelForm):NEWLINE class Meta:NEWLINE model = TaxRuleNEWLINE fields = ['name', 'rate', 'price_includes_tax', 'eu_reverse_charge', 'home_country']NEWLINENEWLINENEWLINEclass WidgetCodeForm(forms.Form):NEWLINE subevent = forms.ModelChoiceField(NEWLINE label=pgettext_lazy('subevent', "Date"),NEWLINE required=True,NEWLINE queryset=SubEvent.objects.none()NEWLINE )NEWLINE language = forms.ChoiceField(NEWLINE label=_("Language"),NEWLINE required=True,NEWLINE choices=settings.LANGUAGESNEWLINE )NEWLINE voucher = forms.CharField(NEWLINE label=_("Pre-selected voucher"),NEWLINE required=False,NEWLINE help_text=_("If set, the widget will show products as if this voucher has been entered and when a product is "NEWLINE "bought via the widget, this voucher will be used. 
This can for example be used to provide "NEWLINE "widgets that give discounts or unlock secret products.")NEWLINE )NEWLINE compatibility_mode = forms.BooleanField(NEWLINE label=_("Compatibility mode"),NEWLINE required=False,NEWLINE help_text=_("Our regular widget doesn't work in all website builders. If you run into trouble, try using "NEWLINE "this compatibility mode.")NEWLINE )NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE self.event = kwargs.pop('event')NEWLINE super().__init__(*args, **kwargs)NEWLINENEWLINE if self.event.has_subevents:NEWLINE self.fields['subevent'].queryset = self.event.subevents.all()NEWLINE else:NEWLINE del self.fields['subevent']NEWLINENEWLINE self.fields['language'].choices = [(l, n) for l, n in settings.LANGUAGES if l in self.event.settings.locales]NEWLINENEWLINE def clean_voucher(self):NEWLINE v = self.cleaned_data.get('voucher')NEWLINE if not v:NEWLINE returnNEWLINENEWLINE if not self.event.vouchers.filter(code=v).exists():NEWLINE raise ValidationError(_('The given voucher code does not exist.'))NEWLINENEWLINE return vNEWLINENEWLINENEWLINEclass EventDeleteForm(forms.Form):NEWLINE error_messages = {NEWLINE 'pw_current_wrong': _("The password you entered was not correct."),NEWLINE 'slug_wrong': _("The slug you entered was not correct."),NEWLINE }NEWLINE user_pw = forms.CharField(NEWLINE max_length=255,NEWLINE label=_("Your password"),NEWLINE widget=forms.PasswordInput()NEWLINE )NEWLINE slug = forms.CharField(NEWLINE max_length=255,NEWLINE label=_("Event slug"),NEWLINE )NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE self.event = kwargs.pop('event')NEWLINE self.user = kwargs.pop('user')NEWLINE super().__init__(*args, **kwargs)NEWLINENEWLINE def clean_user_pw(self):NEWLINE user_pw = self.cleaned_data.get('user_pw')NEWLINE if not check_password(user_pw, self.user.password):NEWLINE raise forms.ValidationError(NEWLINE self.error_messages['pw_current_wrong'],NEWLINE code='pw_current_wrong',NEWLINE )NEWLINENEWLINE return user_pwNEWLINENEWLINE def clean_slug(self):NEWLINE slug = self.cleaned_data.get('slug')NEWLINE if slug != self.event.slug:NEWLINE raise forms.ValidationError(NEWLINE self.error_messages['slug_wrong'],NEWLINE code='slug_wrong',NEWLINE )NEWLINE return slugNEWLINENEWLINENEWLINEclass QuickSetupForm(I18nForm):NEWLINE show_quota_left = forms.BooleanField(NEWLINE label=_("Show number of tickets left"),NEWLINE help_text=_("Publicly show how many tickets of a certain type are still available."),NEWLINE required=FalseNEWLINE )NEWLINE waiting_list_enabled = forms.BooleanField(NEWLINE label=_("Waiting list"),NEWLINE help_text=_("Once a ticket is sold out, people can add themselves to a waiting list. As soon as a ticket "NEWLINE "becomes available again, it will be reserved for the first person on the waiting list and this "NEWLINE "person will receive an email notification with a voucher that can be used to buy a ticket."),NEWLINE required=FalseNEWLINE )NEWLINE ticket_download = forms.BooleanField(NEWLINE label=_("Ticket downloads"),NEWLINE help_text=_("Your customers will be able to download their tickets in PDF format."),NEWLINE required=FalseNEWLINE )NEWLINE attendee_names_required = forms.BooleanField(NEWLINE label=_("Require all attendees to fill in their names"),NEWLINE help_text=_("By default, we will ask for names but not require them. 
You can turn this off completely in the "NEWLINE "settings."),NEWLINE required=FalseNEWLINE )NEWLINE imprint_url = forms.URLField(NEWLINE label=_("Imprint URL"),NEWLINE help_text=_("This should point e.g. to a part of your website that has your contact details and legal "NEWLINE "information."),NEWLINE required=False,NEWLINE )NEWLINE contact_mail = forms.EmailField(NEWLINE label=_("Contact address"),NEWLINE required=False,NEWLINE help_text=_("We'll show this publicly to allow attendees to contact you.")NEWLINE )NEWLINE total_quota = forms.IntegerField(NEWLINE label=_("Total capacity"),NEWLINE min_value=0,NEWLINE widget=forms.NumberInput(NEWLINE attrs={NEWLINE 'placeholder': '∞'NEWLINE }NEWLINE ),NEWLINE required=FalseNEWLINE )NEWLINE payment_stripe__enabled = forms.BooleanField(NEWLINE label=_("Payment via Stripe"),NEWLINE help_text=_("Stripe is an online payments processor supporting credit cards and lots of other payment options. "NEWLINE "To accept payments via Stripe, you will need to set up an account with them, which takes less "NEWLINE "than five minutes using their simple interface."),NEWLINE required=FalseNEWLINE )NEWLINE payment_banktransfer__enabled = forms.BooleanField(NEWLINE label=_("Payment by bank transfer"),NEWLINE help_text=_("Your customers will be instructed to wire the money to your account. You can then import your "NEWLINE "bank statements to process the payments within pretix, or mark them as paid manually."),NEWLINE required=FalseNEWLINE )NEWLINE btf = BankTransfer.form_fields()NEWLINE payment_banktransfer_bank_details_type = btf['bank_details_type']NEWLINE payment_banktransfer_bank_details_sepa_name = btf['bank_details_sepa_name']NEWLINE payment_banktransfer_bank_details_sepa_iban = btf['bank_details_sepa_iban']NEWLINE payment_banktransfer_bank_details_sepa_bic = btf['bank_details_sepa_bic']NEWLINE payment_banktransfer_bank_details_sepa_bank = btf['bank_details_sepa_bank']NEWLINE payment_banktransfer_bank_details = btf['bank_details']NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE self.obj = kwargs.pop('event', None)NEWLINE self.locales = self.obj.settings.get('locales') if self.obj else kwargs.pop('locales', None)NEWLINE kwargs['locales'] = self.localesNEWLINE super().__init__(*args, **kwargs)NEWLINE if not self.obj.settings.payment_stripe_connect_client_id:NEWLINE del self.fields['payment_stripe__enabled']NEWLINE self.fields['payment_banktransfer_bank_details'].required = FalseNEWLINE for f in self.fields.values():NEWLINE if 'data-required-if' in f.widget.attrs:NEWLINE del f.widget.attrs['data-required-if']NEWLINENEWLINE def clean(self):NEWLINE cleaned_data = super().clean()NEWLINE if cleaned_data.get('payment_banktransfer__enabled'):NEWLINE provider = BankTransfer(self.obj)NEWLINE cleaned_data = provider.settings_form_clean(cleaned_data)NEWLINE return cleaned_dataNEWLINENEWLINENEWLINEclass QuickSetupProductForm(I18nForm):NEWLINE name = I18nFormField(NEWLINE max_length=255,NEWLINE label=_("Product name"),NEWLINE widget=I18nTextInputNEWLINE )NEWLINE default_price = forms.DecimalField(NEWLINE label=_("Price (optional)"),NEWLINE max_digits=7, decimal_places=2, required=False,NEWLINE localize=True,NEWLINE widget=forms.TextInput(NEWLINE attrs={NEWLINE 'placeholder': _('Free')NEWLINE }NEWLINE ),NEWLINE )NEWLINE quota = forms.IntegerField(NEWLINE label=_("Quantity available"),NEWLINE min_value=0,NEWLINE widget=forms.NumberInput(NEWLINE attrs={NEWLINE 'placeholder': '∞'NEWLINE }NEWLINE ),NEWLINE initial=100,NEWLINE required=FalseNEWLINE 
)NEWLINENEWLINENEWLINEclass BaseQuickSetupProductFormSet(I18nFormSetMixin, forms.BaseFormSet):NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE event = kwargs.pop('event', None)NEWLINE if event:NEWLINE kwargs['locales'] = event.settings.get('locales')NEWLINE super().__init__(*args, **kwargs)NEWLINENEWLINENEWLINEQuickSetupProductFormSet = formset_factory(NEWLINE QuickSetupProductForm,NEWLINE formset=BaseQuickSetupProductFormSet,NEWLINE can_order=False, can_delete=True, extra=0NEWLINE)NEWLINE
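# A minimal usage sketch (an assumption, not taken from the source above): wiring the
# quick-setup product formset into a view. The names `data` (bound form data or None),
# `event`, and the 'products' prefix are illustrative only.
def build_quicksetup_products(data, event):
    formset = QuickSetupProductFormSet(
        data,
        event=event,      # popped by BaseQuickSetupProductFormSet to supply the event's locales
        prefix='products',
    )
    products = []
    if formset.is_valid():
        for form in formset.forms:
            # can_delete=True adds a DELETE checkbox to every form; skip removed rows
            if form.cleaned_data.get('DELETE'):
                continue
            name = form.cleaned_data.get('name')
            if not name:
                continue
            products.append({
                'name': name,
                'price': form.cleaned_data.get('default_price'),
                'quota': form.cleaned_data.get('quota'),
            })
    return formset, products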
#!/usr/bin/env python3

import argparse

from utils import (
    load_yaml, load_wordset, sorted_items, get_morphgnt, parse_verse_ranges)

argparser = argparse.ArgumentParser()
argparser.add_argument("verses", help="verses to cover (e.g. 'John 18:1-11')")
argparser.add_argument("--exclude", help="exclusion list file")
argparser.add_argument(
    "--existing", dest="headwords", help="existing headword file")
argparser.add_argument(
    "--lexicon", dest="lexemes",
    default="../morphological-lexicon/lexemes.yaml",
    help="path to morphological-lexicon lexemes.yaml file "
         "(defaults to ../morphological-lexicon/lexemes.yaml)")
argparser.add_argument(
    "--sblgnt", dest="sblgnt_dir", default="../sblgnt",
    help="path to MorphGNT sblgnt directory (defaults to ../sblgnt)")

args = argparser.parse_args()

verses = parse_verse_ranges(args.verses)

if args.exclude:
    exclusions = load_wordset(args.exclude)
else:
    exclusions = set()

lexemes = load_yaml(args.lexemes)

if args.headwords:
    headwords = load_yaml(args.headwords)
else:
    headwords = {}


for entry in get_morphgnt(verses, args.sblgnt_dir):
    if entry[0] == "WORD":
        lexeme = entry[8]
        if lexeme not in exclusions and lexeme not in headwords:
            pos = entry[2]
            if pos in ["N-", "A-"]:
                if "full-citation-form" in lexemes[lexeme]:
                    headword = lexemes[lexeme]["full-citation-form"]
                else:
                    headword = lexemes[lexeme]["danker-entry"]
                headwords[lexeme] = headword

for lexeme, headword in sorted_items(headwords):
    print("{}: {}".format(lexeme, headword))
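# Example invocation (a sketch, not from the source): the script file name
# make_headwords.py and the file names passed to --exclude/--existing are
# hypothetical; the options themselves are the ones defined above.
#
#   python3 make_headwords.py "John 18:1-11" \
#       --exclude known_words.txt \
#       --existing headwords.yaml \
#       --sblgnt ../sblgnt
#
# For each noun ("N-") or adjective ("A-") lexeme in the requested verses that is
# neither excluded nor already listed, this prints "lexeme: headword", preferring
# the full-citation-form over the danker-entry when both are present.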
from business_register.models import (
    company_models,
    fop_models,
    kved_models,
    pep_models,
    sanction_models,
    declaration_models,
)
# -------------------------------------------------------------------------NEWLINE# Copyright (c) Microsoft Corporation. All rights reserved.NEWLINE# Licensed under the MIT License. See License.txt in the project root forNEWLINE# license information.NEWLINE# --------------------------------------------------------------------------NEWLINEfrom enum import EnumNEWLINEfrom typing import TYPE_CHECKING, Any, Dict, ListNEWLINENEWLINEfrom azure.core.exceptions import HttpResponseErrorNEWLINEfrom azure.core.paging import PageIteratorNEWLINE# from azure.core import CaseInsensitiveEnumMetaNEWLINE# from six import with_metaclassNEWLINENEWLINEfrom ._generated.models import TableServiceStats as GenTableServiceStatsNEWLINEfrom ._generated.models import AccessPolicy as GenAccessPolicyNEWLINEfrom ._generated.models import Logging as GeneratedLoggingNEWLINEfrom ._generated.models import Metrics as GeneratedMetricsNEWLINEfrom ._generated.models import RetentionPolicy as GeneratedRetentionPolicyNEWLINEfrom ._generated.models import CorsRule as GeneratedCorsRuleNEWLINEfrom ._generated.models import QueryOptionsNEWLINEfrom ._deserialize import (NEWLINE _convert_to_entity,NEWLINE _return_context_and_deserialized,NEWLINE _extract_continuation_token,NEWLINE)NEWLINEfrom ._error import _process_table_errorNEWLINEfrom ._constants import NEXT_PARTITION_KEY, NEXT_ROW_KEY, NEXT_TABLE_NAMENEWLINENEWLINEif TYPE_CHECKING:NEWLINE from ._generated.models import TableQueryResponseNEWLINE from ._generated.models import TableServiceProperties as GenTableServicePropertiesNEWLINENEWLINENEWLINEclass TableServiceStats(GenTableServiceStats):NEWLINE """Stats for the serviceNEWLINENEWLINE :param geo_replication: Geo-Replication information for the Secondary Storage Service.NEWLINE :type geo_replication: ~azure.data.tables.models.GeoReplicationNEWLINE """NEWLINENEWLINE def __init__( # pylint: disable=super-init-not-calledNEWLINE self, geo_replication=None, **kwargsNEWLINE ):NEWLINE self.geo_replication = geo_replicationNEWLINENEWLINENEWLINEclass AccessPolicy(GenAccessPolicy):NEWLINE """Access Policy class used by the set and get access policy methods.NEWLINENEWLINE A stored access policy can specify the start time, expiry time, andNEWLINE permissions for the Shared Access Signatures with which it's associated.NEWLINE Depending on how you want to control access to your resource, you canNEWLINE specify all of these parameters within the stored access policy, and omitNEWLINE them from the URL for the Shared Access Signature. Doing so permits you toNEWLINE modify the associated signature's behavior at any time, as well as to revokeNEWLINE it. Or you can specify one or more of the access policy parameters withinNEWLINE the stored access policy, and the others on the URL. Finally, you canNEWLINE specify all of the parameters on the URL. In this case, you can use theNEWLINE stored access policy to revoke the signature, but not to modify its behavior.NEWLINENEWLINE Together the Shared Access Signature and the stored access policy mustNEWLINE include all fields required to authenticate the signature. If any requiredNEWLINE fields are missing, the request will fail. Likewise, if a field is specifiedNEWLINE both in the Shared Access Signature URL and in the stored access policy, theNEWLINE request will fail with status code 400 (Bad Request).NEWLINENEWLINE :param str permission:NEWLINE The permissions associated with the shared access signature. 
TheNEWLINE user is restricted to operations allowed by the permissions.NEWLINE Required unless an id is given referencing a stored access policyNEWLINE which contains this field. This field must be omitted if it has beenNEWLINE specified in an associated stored access policy.NEWLINE :param expiry:NEWLINE The time at which the shared access signature becomes invalid.NEWLINE Required unless an id is given referencing a stored access policyNEWLINE which contains this field. This field must be omitted if it hasNEWLINE been specified in an associated stored access policy. Azure will alwaysNEWLINE convert values to UTC. If a date is passed in without timezone info, itNEWLINE is assumed to be UTC.NEWLINE :type expiry: ~datetime.datetime or strNEWLINE :param start:NEWLINE The time at which the shared access signature becomes valid. IfNEWLINE omitted, start time for this call is assumed to be the time when theNEWLINE storage service receives the request. Azure will always convert valuesNEWLINE to UTC. If a date is passed in without timezone info, it is assumed toNEWLINE be UTC.NEWLINE :type start: ~datetime.datetime or strNEWLINE """NEWLINENEWLINE def __init__( # pylint: disable=super-init-not-calledNEWLINE self, permission=None, expiry=None, start=None, **kwargsNEWLINE ):NEWLINE self.start = startNEWLINE self.expiry = expiryNEWLINE self.permission = permissionNEWLINENEWLINENEWLINEclass TableAnalyticsLogging(GeneratedLogging):NEWLINE """Azure Analytics Logging settings.NEWLINENEWLINE All required parameters must be populated in order to send to Azure.NEWLINENEWLINE :keyword str version: Required. The version of Storage Analytics to configure.NEWLINE :keyword bool delete: Required. Indicates whether all delete requests should be logged.NEWLINE :keyword bool read: Required. Indicates whether all read requests should be logged.NEWLINE :keyword bool write: Required. Indicates whether all write requests should be logged.NEWLINE :keyword ~azure.data.tables.RetentionPolicy retention_policy: Required.NEWLINE The retention policy for the metrics.NEWLINE """NEWLINENEWLINE def __init__( # pylint: disable=super-init-not-calledNEWLINE self, **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...)-> NoneNEWLINENEWLINE self.version = kwargs.get("version", u"1.0")NEWLINE self.delete = kwargs.get("delete", False)NEWLINE self.read = kwargs.get("read", False)NEWLINE self.write = kwargs.get("write", False)NEWLINE self.retention_policy = kwargs.get("retention_policy") or RetentionPolicy()NEWLINENEWLINE @classmethodNEWLINE def _from_generated(cls, generated):NEWLINE if not generated:NEWLINE return cls()NEWLINE return cls(NEWLINE version=generated.version,NEWLINE delete=generated.delete,NEWLINE read=generated.read,NEWLINE write=generated.write,NEWLINE retention_policy=RetentionPolicy._from_generated( # pylint: disable=protected-accessNEWLINE generated.retention_policyNEWLINE )NEWLINE )NEWLINENEWLINENEWLINEclass Metrics(GeneratedMetrics):NEWLINE """A summary of request statistics grouped by API in hour or minute aggregates.NEWLINENEWLINE All required parameters must be populated in order to send to Azure.NEWLINENEWLINE :keyword str version: The version of Storage Analytics to configure.NEWLINE :keyword bool enabled: Required. 
Indicates whether metrics are enabled for the service.NEWLINE :keyword bool include_apis: Indicates whether metrics should generate summaryNEWLINE statistics for called API operations.NEWLINE :keyword ~azure.data.tables.RetentionPolicy retention_policy: Required.NEWLINE The retention policy for the metrics.NEWLINE """NEWLINENEWLINE def __init__( # pylint: disable=super-init-not-calledNEWLINE self,NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE self.version = kwargs.get("version", u"1.0")NEWLINE self.enabled = kwargs.get("enabled", False)NEWLINE self.include_apis = kwargs.get("include_apis")NEWLINE self.retention_policy = kwargs.get("retention_policy") or RetentionPolicy()NEWLINENEWLINE @classmethodNEWLINE def _from_generated(cls, generated):NEWLINE # type: (...) -> MetricsNEWLINE """A summary of request statistics grouped by API in hour or minute aggregates.NEWLINENEWLINE :param Metrics generated: generated MetricsNEWLINE """NEWLINE if not generated:NEWLINE return cls()NEWLINE return cls(NEWLINE version=generated.version,NEWLINE enabled=generated.enabled,NEWLINE include_apis=generated.include_apis,NEWLINE retention_policy=RetentionPolicy._from_generated( # pylint: disable=protected-accessNEWLINE generated.retention_policyNEWLINE )NEWLINE )NEWLINENEWLINENEWLINEclass RetentionPolicy(GeneratedRetentionPolicy):NEWLINE def __init__( # pylint: disable=super-init-not-calledNEWLINE self,NEWLINE enabled=False, # type: boolNEWLINE days=None, # type: intNEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) ->NoneNEWLINE """The retention policy which determines how long the associated data shouldNEWLINE persist.NEWLINENEWLINE All required parameters must be populated in order to send to Azure.NEWLINENEWLINE :param bool enabled: Required. Indicates whether a retention policy is enabledNEWLINE for the storage service.NEWLINE :param int days: Indicates the number of days that metrics or logging orNEWLINE soft-deleted data should be retained. All data older than this value willNEWLINE be deleted.NEWLINE :param Any kwargs:NEWLINE """NEWLINE self.enabled = enabledNEWLINE self.days = daysNEWLINE if self.enabled and (self.days is None):NEWLINE raise ValueError("If policy is enabled, 'days' must be specified.")NEWLINENEWLINE @classmethodNEWLINE def _from_generated(cls, generated, **kwargs): # pylint: disable=unused-argumentNEWLINE # type: (GeneratedRetentionPolicy, Dict[str, Any]) -> RetentionPolicyNEWLINE """The retention policy which determines how long the associated data shouldNEWLINE persist.NEWLINENEWLINE All required parameters must be populated in order to send to Azure.NEWLINENEWLINE :param RetentionPolicy generated: Generated Retention PolicyNEWLINE """NEWLINENEWLINE if not generated:NEWLINE return cls()NEWLINE return cls(NEWLINE enabled=generated.enabled,NEWLINE days=generated.days,NEWLINE )NEWLINENEWLINENEWLINEclass CorsRule(GeneratedCorsRule):NEWLINE """CORS is an HTTP feature that enables a web application running under oneNEWLINE domain to access resources in another domain. Web browsers implement aNEWLINE security restriction known as same-origin policy that prevents a web pageNEWLINE from calling APIs in a different domain; CORS provides a secure way toNEWLINE allow one domain (the origin domain) to call APIs in another domain.NEWLINENEWLINE All required parameters must be populated in order to send to Azure.NEWLINENEWLINE :param list[str] allowed_origins:NEWLINE A list of origin domains that will be allowed via CORS, or "*" to allowNEWLINE all domains. 
The list of must contain at least one entry. Limited to 64NEWLINE origin domains. Each allowed origin can have up to 256 characters.NEWLINE :param list[str] allowed_methods:NEWLINE A list of HTTP methods that are allowed to be executed by the origin.NEWLINE The list of must contain at least one entry. For Azure Storage,NEWLINE permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.NEWLINE :keyword int max_age_in_seconds:NEWLINE The number of seconds that the client/browser should cache aNEWLINE pre-flight response.NEWLINE :keyword list[str] exposed_headers:NEWLINE Defaults to an empty list. A list of response headers to expose to CORSNEWLINE clients. Limited to 64 defined headers and two prefixed headers. EachNEWLINE header can be up to 256 characters.NEWLINE :keyword list[str] allowed_headers:NEWLINE Defaults to an empty list. A list of headers allowed to be part ofNEWLINE the cross-origin request. Limited to 64 defined headers and 2 prefixedNEWLINE headers. Each header can be up to 256 characters.NEWLINE """NEWLINENEWLINE def __init__( # pylint: disable=super-init-not-calledNEWLINE self,NEWLINE allowed_origins, # type: List[str]NEWLINE allowed_methods, # type: List[str]NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...)-> NoneNEWLINENEWLINE self.allowed_origins = ",".join(allowed_origins)NEWLINE self.allowed_methods = ",".join(allowed_methods)NEWLINE self.allowed_headers = ",".join(kwargs.get("allowed_headers", []))NEWLINE self.exposed_headers = ",".join(kwargs.get("exposed_headers", []))NEWLINE self.max_age_in_seconds = kwargs.get("max_age_in_seconds", 0)NEWLINENEWLINE @classmethodNEWLINE def _from_generated(cls, generated):NEWLINE return cls(NEWLINE [generated.allowed_origins],NEWLINE [generated.allowed_methods],NEWLINE allowed_headers=[generated.allowed_headers],NEWLINE exposed_headers=[generated.exposed_headers],NEWLINE max_age_in_seconds=generated.max_age_in_seconds,NEWLINE )NEWLINENEWLINENEWLINEclass TablePropertiesPaged(PageIterator):NEWLINE """An iterable of Table properties.NEWLINENEWLINE :param callable command: Function to retrieve the next page of items.NEWLINE :keyword int results_per_page: The maximum number of results retrieved per API call.NEWLINE :keyword str filter: The filter to apply to results.NEWLINE :keyword str continuation_token: An opaque continuation token.NEWLINE """NEWLINENEWLINE def __init__(self, command, **kwargs):NEWLINE super(TablePropertiesPaged, self).__init__(NEWLINE self._get_next_cb,NEWLINE self._extract_data_cb,NEWLINE continuation_token=kwargs.get("continuation_token") or "",NEWLINE )NEWLINE self._command = commandNEWLINE self._headers = NoneNEWLINE self._response = NoneNEWLINE self.results_per_page = kwargs.get("results_per_page")NEWLINE self.filter = kwargs.get("filter")NEWLINE self._location_mode = NoneNEWLINENEWLINE def _get_next_cb(self, continuation_token, **kwargs):NEWLINE query_options = QueryOptions(top=self.results_per_page, filter=self.filter)NEWLINE try:NEWLINE return self._command(NEWLINE query_options=query_options,NEWLINE next_table_name=continuation_token or None,NEWLINE cls=kwargs.pop("cls", None) or _return_context_and_deserialized,NEWLINE use_location=self._location_mode,NEWLINE )NEWLINE except HttpResponseError as error:NEWLINE _process_table_error(error)NEWLINENEWLINE def _extract_data_cb(self, get_next_return):NEWLINE self._location_mode, self._response, self._headers = get_next_returnNEWLINE props_list = [NEWLINE TableItem._from_generated(t, **self._headers) for t in self._response.value # pylint: 
disable=protected-accessNEWLINE ]NEWLINE return self._headers[NEXT_TABLE_NAME] or None, props_listNEWLINENEWLINENEWLINEclass TableEntityPropertiesPaged(PageIterator):NEWLINE """An iterable of TableEntity properties.NEWLINENEWLINE :param callable command: Function to retrieve the next page of items.NEWLINE :param str table: The name of the table.NEWLINE :keyword int results_per_page: The maximum number of results retrieved per API call.NEWLINE :keyword str filter: The filter to apply to results.NEWLINE :keyword str select: The select filter to apply to results.NEWLINE :keyword str continuation_token: An opaque continuation token.NEWLINE """NEWLINENEWLINE def __init__(self, command, table, **kwargs):NEWLINE super(TableEntityPropertiesPaged, self).__init__(NEWLINE self._get_next_cb,NEWLINE self._extract_data_cb,NEWLINE continuation_token=kwargs.get("continuation_token") or {},NEWLINE )NEWLINE self._command = commandNEWLINE self._headers = NoneNEWLINE self._response = NoneNEWLINE self.table = tableNEWLINE self.results_per_page = kwargs.get("results_per_page")NEWLINE self.filter = kwargs.get("filter")NEWLINE self.select = kwargs.get("select")NEWLINE self._location_mode = NoneNEWLINENEWLINE def _get_next_cb(self, continuation_token, **kwargs):NEWLINE next_partition_key, next_row_key = _extract_continuation_token(NEWLINE continuation_tokenNEWLINE )NEWLINE query_options = QueryOptions(NEWLINE top=self.results_per_page, select=self.select, filter=self.filterNEWLINE )NEWLINE try:NEWLINE return self._command(NEWLINE query_options=query_options,NEWLINE next_row_key=next_row_key,NEWLINE next_partition_key=next_partition_key,NEWLINE table=self.table,NEWLINE cls=kwargs.pop("cls", None) or _return_context_and_deserialized,NEWLINE use_location=self._location_mode,NEWLINE )NEWLINE except HttpResponseError as error:NEWLINE _process_table_error(error)NEWLINENEWLINE def _extract_data_cb(self, get_next_return):NEWLINE self._location_mode, self._response, self._headers = get_next_returnNEWLINE props_list = [_convert_to_entity(t) for t in self._response.value]NEWLINE next_entity = {}NEWLINE if self._headers[NEXT_PARTITION_KEY] or self._headers[NEXT_ROW_KEY]:NEWLINE next_entity = {NEWLINE "PartitionKey": self._headers[NEXT_PARTITION_KEY],NEWLINE "RowKey": self._headers[NEXT_ROW_KEY],NEWLINE }NEWLINE return next_entity or None, props_listNEWLINENEWLINENEWLINEclass TableSasPermissions(object):NEWLINE def __init__(NEWLINE self,NEWLINE _str=None, # type: strNEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> NoneNEWLINE """NEWLINE :keyword bool read:NEWLINE Get entities and query entities.NEWLINE :keyword bool add:NEWLINE Add entities. Add and Update permissions are required for upsert operations.NEWLINE :keyword bool update:NEWLINE Update entities. 
Add and Update permissions are required for upsert operations.NEWLINE :keyword bool delete:NEWLINE Delete entities.NEWLINE :param str _str:NEWLINE A string representing the permissions.NEWLINE """NEWLINE if not _str:NEWLINE _str = ""NEWLINE self.read = kwargs.pop("read", None) or ("r" in _str)NEWLINE self.add = kwargs.pop("add", None) or ("a" in _str)NEWLINE self.update = kwargs.pop("update", None) or ("u" in _str)NEWLINE self.delete = kwargs.pop("delete", None) or ("d" in _str)NEWLINENEWLINE def __or__(self, other):NEWLINE # type: (TableSasPermissions) -> TableSasPermissionsNEWLINE return TableSasPermissions(_str=str(self) + str(other))NEWLINENEWLINE def __add__(self, other):NEWLINE # type: (TableSasPermissions) -> TableSasPermissionsNEWLINE return TableSasPermissions(_str=str(self) + str(other))NEWLINENEWLINE def __str__(self):NEWLINE # type: () -> strNEWLINE return (NEWLINE ("r" if self.read else "")NEWLINE + ("a" if self.add else "")NEWLINE + ("u" if self.update else "")NEWLINE + ("d" if self.delete else "")NEWLINE )NEWLINENEWLINE @classmethodNEWLINE def from_string(NEWLINE cls,NEWLINE permission,NEWLINE **kwargsNEWLINE ):NEWLINE # Type: (str, Dict[str, Any]) -> AccountSasPermissionsNEWLINE """Create AccountSasPermissions from a string.NEWLINENEWLINE To specify read, write, delete, etc. permissions you need only toNEWLINE include the first letter of the word in the string. E.g. for read and writeNEWLINE permissions you would provide a string "rw".NEWLINENEWLINE :param str permission: Specify permissions inNEWLINE the string with the first letter of the word.NEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: An AccountSasPermissions objectNEWLINE :rtype: :class:`~azure.data.tables.AccountSasPermissions`NEWLINE """NEWLINE p_read = "r" in permissionNEWLINE p_add = "a" in permissionNEWLINE p_delete = "d" in permissionNEWLINE p_update = "u" in permissionNEWLINENEWLINE parsed = cls(NEWLINE **dict(kwargs, read=p_read, add=p_add, delete=p_delete, update=p_update)NEWLINE )NEWLINE parsed._str = permission # pylint: disable=protected-access,attribute-defined-outside-initNEWLINE return parsedNEWLINENEWLINENEWLINEdef service_stats_deserialize(generated):NEWLINE # type: (GenTableServiceStats) -> Dict[str, Any]NEWLINE """Deserialize a ServiceStats objects into a dict."""NEWLINE return {NEWLINE "geo_replication": {NEWLINE "status": generated.geo_replication.status, # type: ignoreNEWLINE "last_sync_time": generated.geo_replication.last_sync_time, # type: ignoreNEWLINE }NEWLINE }NEWLINENEWLINENEWLINEdef service_properties_deserialize(generated):NEWLINE # type: (GenTableServiceProperties) -> Dict[str, Any]NEWLINE """Deserialize a ServiceProperties objects into a dict."""NEWLINE return {NEWLINE "analytics_logging": TableAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-accessNEWLINE "hour_metrics": Metrics._from_generated( # pylint: disable=protected-accessNEWLINE generated.hour_metricsNEWLINE ),NEWLINE "minute_metrics": Metrics._from_generated( # pylint: disable=protected-accessNEWLINE generated.minute_metricsNEWLINE ),NEWLINE "cors": [NEWLINE CorsRule._from_generated(cors) # pylint: disable=protected-accessNEWLINE for cors in generated.cors # type: ignoreNEWLINE ],NEWLINE }NEWLINENEWLINENEWLINEclass TableItem(object):NEWLINE """NEWLINE Represents an Azure TableItem.NEWLINE Returned by TableServiceClient.list_tables and TableServiceClient.query_tables.NEWLINENEWLINE :ivar str name: The name of the table.NEWLINE 
"""NEWLINENEWLINE def __init__(self, name, **kwargs): # pylint: disable=unused-argumentNEWLINE # type: (str, Dict[str, Any]) -> NoneNEWLINE """NEWLINE :param str name: Name of the TableNEWLINE """NEWLINE self.name = nameNEWLINENEWLINE # TODO: TableQueryResponse is not the correct typeNEWLINE @classmethodNEWLINE def _from_generated(cls, generated, **kwargs):NEWLINE # type: (TableQueryResponse, Dict[str, Any]) -> TableItemNEWLINE return cls(generated.table_name, **kwargs) # type: ignoreNEWLINENEWLINENEWLINEclass TablePayloadFormat(object):NEWLINE """NEWLINE Specifies the accepted content type of the response payload. More informationNEWLINE can be found here: https://msdn.microsoft.com/en-us/library/azure/dn535600.aspxNEWLINE """NEWLINENEWLINE JSON_NO_METADATA = "application/json;odata=nometadata"NEWLINE """Returns no type information for the entity properties."""NEWLINENEWLINE JSON_MINIMAL_METADATA = "application/json;odata=minimalmetadata"NEWLINE """Returns minimal type information for the entity properties."""NEWLINENEWLINE JSON_FULL_METADATA = "application/json;odata=fullmetadata"NEWLINE """Returns minimal type information for the entity properties plus some extra odata properties."""NEWLINENEWLINENEWLINEclass UpdateMode(str, Enum):NEWLINE REPLACE = "replace"NEWLINE MERGE = "merge"NEWLINENEWLINENEWLINEclass TransactionOperation(str, Enum):NEWLINE CREATE = "create"NEWLINE UPSERT = "upsert"NEWLINE UPDATE = "update"NEWLINE DELETE = "delete"NEWLINENEWLINENEWLINEclass SASProtocol(str, Enum):NEWLINE HTTPS = "https"NEWLINE HTTP = "http"NEWLINENEWLINENEWLINEclass LocationMode(str, Enum):NEWLINE """NEWLINE Specifies the location the request should be sent to. This mode only appliesNEWLINE for RA-GRS accounts which allow secondary read access. All other account typesNEWLINE must use PRIMARY.NEWLINE """NEWLINENEWLINE PRIMARY = "primary" #: Requests should be sent to the primary location.NEWLINE SECONDARY = (NEWLINE "secondary" #: Requests should be sent to the secondary location, if possible.NEWLINE )NEWLINENEWLINENEWLINEclass ResourceTypes(object):NEWLINE """NEWLINE Specifies the resource types that are accessible with the account SAS.NEWLINENEWLINE :param bool service:NEWLINE Access to service-level APIs (e.g., Get/Set Service Properties,NEWLINE Get Service Stats, List Tables)NEWLINE :param bool object:NEWLINE Access to object-level APIs for tables (e.g. Get/Create/Query Entity etc.)NEWLINE """NEWLINENEWLINE def __init__(self, service=False, object=False): # pylint: disable=redefined-builtinNEWLINE # type: (bool, bool) -> NoneNEWLINE self.service = serviceNEWLINE self.object = objectNEWLINE self._str = ("s" if self.service else "") + ("o" if self.object else "")NEWLINENEWLINE def __str__(self):NEWLINE return self._strNEWLINENEWLINE @classmethodNEWLINE def from_string(cls, string):NEWLINE # type: (str) -> ResourceTypesNEWLINE """Create a ResourceTypes from a string.NEWLINENEWLINE To specify service, container, or object you need only toNEWLINE include the first letter of the word in the string. E.g. 
service and container,NEWLINE you would provide a string "sc".NEWLINENEWLINE :param str string: Specify service, container, or object inNEWLINE in the string with the first letter of the word.NEWLINE :return: A ResourceTypes objectNEWLINE :rtype: :class:`~azure.data.tables.ResourceTypes`NEWLINE """NEWLINE res_service = "s" in stringNEWLINE res_object = "o" in stringNEWLINENEWLINE parsed = cls(res_service, res_object)NEWLINE parsed._str = string # pylint: disable = protected-accessNEWLINE return parsedNEWLINENEWLINENEWLINEclass AccountSasPermissions(object):NEWLINE """NEWLINE :class:`~ResourceTypes` class to be used with generate_account_sasNEWLINE function and for the AccessPolicies used with set_*_acl. There are two types ofNEWLINE SAS which may be used to grant resource access. One is to grant access to aNEWLINE specific resource (resource-specific). Another is to grant access to theNEWLINE entire service for a specific account and allow certain operations based onNEWLINE perms found here.NEWLINENEWLINE :ivar bool read:NEWLINE Valid for all signed resources types (Service, Container, and Object).NEWLINE Permits read permissions to the specified resource type.NEWLINE :ivar bool write:NEWLINE Valid for all signed resources types (Service, Container, and Object).NEWLINE Permits write permissions to the specified resource type.NEWLINE :ivar bool delete:NEWLINE Valid for Container and Object resource types, except for queue messages.NEWLINE :ivar bool list:NEWLINE Valid for Service and Container resource types only.NEWLINE :ivar bool add:NEWLINE Valid for the following Object resource types only: queue messages, and append blobs.NEWLINE :ivar bool create:NEWLINE Valid for the following Object resource types only: blobs and files.NEWLINE Users can create new blobs or files, but may not overwrite existingNEWLINE blobs or files.NEWLINE :ivar bool update:NEWLINE Valid for the following Object resource types only: queue messages.NEWLINE :ivar bool process:NEWLINE Valid for the following Object resource type only: queue messages.NEWLINE """NEWLINENEWLINE def __init__(self, **kwargs):NEWLINE self.read = kwargs.pop("read", None)NEWLINE self.write = kwargs.pop("write", None)NEWLINE self.delete = kwargs.pop("delete", None)NEWLINE self.list = kwargs.pop("list", None)NEWLINE self.add = kwargs.pop("add", None)NEWLINE self.create = kwargs.pop("create", None)NEWLINE self.update = kwargs.pop("update", None)NEWLINE self.process = kwargs.pop("process", None)NEWLINE self._str = (NEWLINE ("r" if self.read else "")NEWLINE + ("w" if self.write else "")NEWLINE + ("d" if self.delete else "")NEWLINE + ("l" if self.list else "")NEWLINE + ("a" if self.add else "")NEWLINE + ("c" if self.create else "")NEWLINE + ("u" if self.update else "")NEWLINE + ("p" if self.process else "")NEWLINE )NEWLINENEWLINE def __str__(self):NEWLINE return self._strNEWLINENEWLINE @classmethodNEWLINE def from_string(cls, permission, **kwargs):NEWLINE # type: (str, Dict[str, Any]) -> AccountSasPermissionsNEWLINE """Create AccountSasPermissions from a string.NEWLINENEWLINE To specify read, write, delete, etc. permissions you need only toNEWLINE include the first letter of the word in the string. E.g. 
for read and writeNEWLINE permissions you would provide a string "rw".NEWLINENEWLINE :param permission: Specify permissions in the string with the first letter of the word.NEWLINE :type permission: strNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: An AccountSasPermissions objectNEWLINE :rtype: :class:`~azure.data.tables.AccountSasPermissions`NEWLINE """NEWLINE p_read = "r" in permissionNEWLINE p_write = "w" in permissionNEWLINE p_delete = "d" in permissionNEWLINE p_list = "l" in permissionNEWLINE p_add = "a" in permissionNEWLINE p_create = "c" in permissionNEWLINE p_update = "u" in permissionNEWLINE p_process = "p" in permissionNEWLINENEWLINE parsed = cls(NEWLINE **dict(NEWLINE kwargs,NEWLINE read=p_read,NEWLINE write=p_write,NEWLINE delete=p_delete,NEWLINE list=p_list,NEWLINE add=p_add,NEWLINE create=p_create,NEWLINE update=p_update,NEWLINE process=p_process,NEWLINE )NEWLINE )NEWLINE parsed._str = permission # pylint: disable = protected-accessNEWLINE return parsedNEWLINE
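The permission classes above round-trip between boolean flags and their short string form, and two instances can be combined with | or +. A minimal usage sketch (assuming these classes are exported from the azure.data.tables namespace, as in the published SDK):

from azure.data.tables import TableSasPermissions

read_add = TableSasPermissions.from_string("ra")   # read + add
update_only = TableSasPermissions(update=True)
combined = read_add | update_only                  # rebuilt from the string "ra" + "u"
print(str(combined))                               # -> "rau"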
from .test_auth import enable_ad, load_setup_data, enable_openldap, \NEWLINE OPENLDAP_AUTH_USER_PASSWORD, enable_freeipa, FREEIPA_AUTH_USER_PASSWORDNEWLINEfrom .common import * # NOQANEWLINEimport astNEWLINENEWLINEAGENT_REG_CMD = os.environ.get('RANCHER_AGENT_REG_CMD', "")NEWLINEHOST_COUNT = int(os.environ.get('RANCHER_HOST_COUNT', 1))NEWLINEHOST_NAME = os.environ.get('RANCHER_HOST_NAME', "testsa")NEWLINERANCHER_SERVER_VERSION = os.environ.get('RANCHER_SERVER_VERSION',NEWLINE "master-head")NEWLINErke_config = {"authentication": {"type": "authnConfig", "strategy": "x509"},NEWLINE "ignoreDockerVersion": False,NEWLINE "network": {"type": "networkConfig", "plugin": "canal"},NEWLINE "type": "rancherKubernetesEngineConfig"NEWLINE }NEWLINEAUTO_DEPLOY_CUSTOM_CLUSTER = ast.literal_eval(NEWLINE os.environ.get('RANCHER_AUTO_DEPLOY_CUSTOM_CLUSTER', "True"))NEWLINEKEYPAIR_NAME_PREFIX = os.environ.get('RANCHER_KEYPAIR_NAME_PREFIX', "")NEWLINERANCHER_CLUSTER_NAME = os.environ.get('RANCHER_CLUSTER_NAME', "")NEWLINERANCHER_ELASTIC_SEARCH_ENDPOINT = os.environ.get(NEWLINE 'RANCHER_ELASTIC_SEARCH_ENDPOINT', "")NEWLINEK8S_VERSION = os.environ.get('RANCHER_K8S_VERSION', "")NEWLINENEWLINENEWLINEdef test_add_custom_host():NEWLINE aws_nodes = AmazonWebServices().create_multiple_nodes(NEWLINE HOST_COUNT, random_test_name("testsa" + HOST_NAME))NEWLINE if AGENT_REG_CMD != "":NEWLINE for aws_node in aws_nodes:NEWLINE additional_options = " --address " + aws_node.public_ip_address + \NEWLINE " --internal-address " + \NEWLINE aws_node.private_ip_addressNEWLINE if 'Administrator' == aws_node.ssh_user:NEWLINE agent_cmd_temp = AGENT_REG_CMD.replace('| iex', ' ' + additional_options + ' | iex ')NEWLINE agent_cmd = agent_cmd_temp + additional_optionsNEWLINE else:NEWLINE agent_cmd = AGENT_REG_CMD + additional_optionsNEWLINE aws_node.execute_command(agent_cmd)NEWLINE print("Nodes: " + aws_node.public_ip_address)NEWLINENEWLINENEWLINEdef test_delete_keypair():NEWLINE AmazonWebServices().delete_keypairs(KEYPAIR_NAME_PREFIX)NEWLINENEWLINENEWLINEdef test_deploy_rancher_server():NEWLINE RANCHER_SERVER_CMD = \NEWLINE 'sudo docker run -d --name="rancher-server" ' \NEWLINE '--restart=unless-stopped -p 80:80 -p 443:443 ' \NEWLINE 'rancher/rancher'NEWLINE RANCHER_SERVER_CMD += ":" + RANCHER_SERVER_VERSIONNEWLINE aws_nodes = AmazonWebServices().create_multiple_nodes(NEWLINE 1, random_test_name("testsa" + HOST_NAME))NEWLINE aws_nodes[0].execute_command(RANCHER_SERVER_CMD)NEWLINE time.sleep(120)NEWLINE RANCHER_SERVER_URL = "https://" + aws_nodes[0].public_ip_addressNEWLINE print(RANCHER_SERVER_URL)NEWLINE wait_until_active(RANCHER_SERVER_URL, timeout=300)NEWLINENEWLINE RANCHER_SET_DEBUG_CMD = \NEWLINE "sudo docker exec rancher-server loglevel --set debug"NEWLINE aws_nodes[0].execute_command(RANCHER_SET_DEBUG_CMD)NEWLINENEWLINE token = set_url_password_token(RANCHER_SERVER_URL)NEWLINE admin_client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",NEWLINE token=token, verify=False)NEWLINE if AUTH_PROVIDER:NEWLINE enable_url = \NEWLINE RANCHER_SERVER_URL + "/v3/" + AUTH_PROVIDER + \NEWLINE "Configs/" + AUTH_PROVIDER.lower() + "?action=testAndApply"NEWLINE auth_admin_user = load_setup_data()["admin_user"]NEWLINE auth_user_login_url = \NEWLINE RANCHER_SERVER_URL + "/v3-public/" + AUTH_PROVIDER + "Providers/" \NEWLINE + AUTH_PROVIDER.lower() + "?action=login"NEWLINENEWLINE if AUTH_PROVIDER == "activeDirectory":NEWLINENEWLINE enable_ad(auth_admin_user, token, enable_url=enable_url,NEWLINE password=AUTH_USER_PASSWORD, 
nested=NESTED_GROUP_ENABLED)NEWLINE user_token = login_as_auth_user(NEWLINE load_setup_data()["standard_user"],NEWLINE AUTH_USER_PASSWORD,NEWLINE login_url=auth_user_login_url)["token"]NEWLINE elif AUTH_PROVIDER == "openLdap":NEWLINENEWLINE enable_openldap(auth_admin_user, token, enable_url=enable_url,NEWLINE password=OPENLDAP_AUTH_USER_PASSWORD,NEWLINE nested=NESTED_GROUP_ENABLED)NEWLINE user_token = login_as_auth_user(NEWLINE load_setup_data()["standard_user"],NEWLINE OPENLDAP_AUTH_USER_PASSWORD,NEWLINE login_url=auth_user_login_url)["token"]NEWLINE elif AUTH_PROVIDER == "freeIpa":NEWLINENEWLINE enable_freeipa(auth_admin_user, token, enable_url=enable_url,NEWLINE password=FREEIPA_AUTH_USER_PASSWORD,NEWLINE nested=NESTED_GROUP_ENABLED)NEWLINE user_token = login_as_auth_user(NEWLINE load_setup_data()["standard_user"],NEWLINE FREEIPA_AUTH_USER_PASSWORD,NEWLINE login_url=auth_user_login_url)["token"]NEWLINE else:NEWLINE AUTH_URL = \NEWLINE RANCHER_SERVER_URL + "/v3-public/localproviders/local?action=login"NEWLINE user, user_token = create_user(admin_client, AUTH_URL)NEWLINENEWLINE env_details = "env.CATTLE_TEST_URL='" + RANCHER_SERVER_URL + "'\n"NEWLINE env_details += "env.ADMIN_TOKEN='" + token + "'\n"NEWLINE env_details += "env.USER_TOKEN='" + user_token + "'\n"NEWLINENEWLINE if AUTO_DEPLOY_CUSTOM_CLUSTER:NEWLINE aws_nodes = \NEWLINE AmazonWebServices().create_multiple_nodes(NEWLINE 5, random_test_name("testcustom"))NEWLINE node_roles = [["controlplane"], ["etcd"],NEWLINE ["worker"], ["worker"], ["worker"]]NEWLINE client = rancher.Client(url=RANCHER_SERVER_URL + "/v3",NEWLINE token=user_token, verify=False)NEWLINE if K8S_VERSION != "":NEWLINE rke_config["kubernetesVersion"] = K8S_VERSIONNEWLINE print("the rke config for creating the cluster:")NEWLINE print(rke_config)NEWLINE cluster = client.create_cluster(NEWLINE name=random_name(),NEWLINE driver="rancherKubernetesEngine",NEWLINE rancherKubernetesEngineConfig=rke_config)NEWLINE assert cluster.state == "provisioning"NEWLINE i = 0NEWLINE for aws_node in aws_nodes:NEWLINE docker_run_cmd = \NEWLINE get_custom_host_registration_cmd(NEWLINE client, cluster, node_roles[i], aws_node)NEWLINE aws_node.execute_command(docker_run_cmd)NEWLINE i += 1NEWLINE validate_cluster_state(client, cluster)NEWLINE env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"NEWLINE create_config_file(env_details)NEWLINENEWLINENEWLINEdef test_delete_rancher_server():NEWLINE client = get_admin_client()NEWLINE clusters = client.list_cluster().dataNEWLINE for cluster in clusters:NEWLINE delete_cluster(client, cluster)NEWLINE clusters = client.list_cluster().dataNEWLINE start = time.time()NEWLINE while len(clusters) > 0:NEWLINE time.sleep(30)NEWLINE clusters = client.list_cluster().dataNEWLINE if time.time() - start > MACHINE_TIMEOUT:NEWLINE exceptionMsg = 'Timeout waiting for clusters to be removed'NEWLINE raise Exception(exceptionMsg)NEWLINE ip_address = CATTLE_TEST_URL[8:]NEWLINE print("Ip Address:" + ip_address)NEWLINE filters = [NEWLINE {'Name': 'network-interface.addresses.association.public-ip',NEWLINE 'Values': [ip_address]}]NEWLINE aws_nodes = AmazonWebServices().get_nodes(filters)NEWLINE assert len(aws_nodes) == 1NEWLINE AmazonWebServices().delete_nodes(aws_nodes, wait_for_deleted=True)NEWLINENEWLINENEWLINEdef test_cluster_enable_logging_elasticsearch():NEWLINE client = get_user_client()NEWLINE cluster = get_cluster_by_name(client, RANCHER_CLUSTER_NAME)NEWLINE cluster_name = cluster.nameNEWLINE 
client.create_cluster_logging(name=random_test_name("elasticsearch"),NEWLINE clusterId=cluster.id,NEWLINE elasticsearchConfig={NEWLINE "dateFormat": "YYYY-MM-DD",NEWLINE "sslVerify": False,NEWLINE "sslVersion": "TLSv1_2",NEWLINE "indexPrefix": cluster_name,NEWLINE "endpoint":NEWLINE RANCHER_ELASTIC_SEARCH_ENDPOINT}NEWLINE )NEWLINE projects = client.list_project(name="System",NEWLINE clusterId=cluster.id).dataNEWLINE assert len(projects) == 1NEWLINE project = projects[0]NEWLINE p_client = get_project_client_for_token(project, USER_TOKEN)NEWLINE wait_for_app_to_active(p_client, "rancher-logging")NEWLINE
###############################################################################NEWLINE# Copyright Kitware Inc. and ContributorsNEWLINE# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)NEWLINE# See accompanying Copyright.txt and LICENSE files for detailsNEWLINE###############################################################################NEWLINENEWLINE"""NEWLINEReferences:NEWLINE https://github.com/alykhantejani/nninitNEWLINE"""NEWLINEimport numpy as npNEWLINEimport torchNEWLINEfrom torch.autograd import VariableNEWLINENEWLINENEWLINEdef uniform(tensor, a=0, b=1):NEWLINE """Fills the input Tensor or Variable with values drawn from a uniform U(a,b)NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE a: the lower bound of the uniform distributionNEWLINE b: the upper bound of the uniform distributionNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.uniform(w)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE uniform(tensor.data, a=a, b=b)NEWLINE return tensorNEWLINE else:NEWLINE return tensor.uniform_(a, b)NEWLINENEWLINENEWLINEdef normal(tensor, mean=0, std=1):NEWLINE """Fills the input Tensor or Variable with values drawn from a normalNEWLINE distribution with the given mean and stdNEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE mean: the mean of the normal distributionNEWLINE std: the standard deviation of the normal distributionNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.normal(w)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE normal(tensor.data, mean=mean, std=std)NEWLINE return tensorNEWLINE else:NEWLINE return tensor.normal_(mean, std)NEWLINENEWLINENEWLINEdef constant(tensor, val):NEWLINE """Fills the input Tensor or Variable with the value `val`NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE val: the value to fill the tensor withNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.constant(w)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE constant(tensor.data, val)NEWLINE return tensorNEWLINE else:NEWLINE return tensor.fill_(val)NEWLINENEWLINENEWLINEdef _calculate_fan_in_and_fan_out(tensor):NEWLINE if tensor.ndimension() < 2:NEWLINE raise ValueError(NEWLINE "fan in and fan out can not be computed for tensor of size ", tensor.size())NEWLINENEWLINE if tensor.ndimension() == 2: # LinearNEWLINE fan_in = tensor.size(1)NEWLINE fan_out = tensor.size(0)NEWLINE else:NEWLINE num_input_fmaps = tensor.size(1)NEWLINE num_output_fmaps = tensor.size(0)NEWLINE receptive_field_size = np.prod(tensor.numpy().shape[2:])NEWLINE fan_in = num_input_fmaps * receptive_field_sizeNEWLINE fan_out = num_output_fmaps * receptive_field_sizeNEWLINENEWLINE return fan_in, fan_outNEWLINENEWLINENEWLINEdef xavier_uniform(tensor, gain=1):NEWLINE """NEWLINE Fills the input Tensor or Variable with values according to the methodNEWLINE described in "Understanding the difficulty of training deep feedforwardNEWLINE neural networks" - Glorot, X. 
and Bengio, Y., using a uniform distribution.NEWLINENEWLINE The resulting tensor will have values sampled from U(-a, a) whereNEWLINE a = gain * sqrt(2/(fan_in + fan_out))NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE gain: an optional scaling factor to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.xavier_uniform(w, gain=np.sqrt(2.0))NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE xavier_uniform(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(2.0 / (fan_in + fan_out))NEWLINE a = np.sqrt(3.0) * stdNEWLINE return tensor.uniform_(-a, a)NEWLINENEWLINENEWLINEdef xavier_normal(tensor, gain=1):NEWLINE """Fills the input Tensor or Variable with values according to the methodNEWLINE described in "Understanding the difficulty of trainingNEWLINE deep feedforward neural networks" - Glorot, X. and Bengio, Y., usingNEWLINE a normal distribution.NEWLINENEWLINE The resulting tensor will have values sampled from normal distribution with mean=0 andNEWLINE std = gain * sqrt(2/(fan_in + fan_out))NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE gain: an optional scaling factor to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.xavier_normal(w, gain=np.sqrt(2.0))NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE xavier_normal(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(2.0 / (fan_in + fan_out))NEWLINE return tensor.normal_(0, std)NEWLINENEWLINENEWLINEdef he_uniform(tensor, gain=1):NEWLINE """NEWLINE Fills the input Tensor or Variable with values according to the methodNEWLINE described in "Delving deep into rectifiers: Surpassing human-levelNEWLINE performance on ImageNet classification" - He, K. et al using a uniformNEWLINE distribution.NEWLINENEWLINE The resulting tensor will have values sampled from U(-a, a) where a = gain * sqrt(1/(fan_in))NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE gain: an optional scaling factor to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.he_uniform(w, gain=np.sqrt(2.0))NEWLINE """NEWLINENEWLINE if isinstance(tensor, Variable):NEWLINE he_uniform(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, _ = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(1.0 / fan_in)NEWLINE a = np.sqrt(3.0) * stdNEWLINE return tensor.uniform_(-a, a)NEWLINENEWLINENEWLINEdef he_normal(tensor, gain=1):NEWLINE """NEWLINE Fills the input Tensor or Variable with values according to the methodNEWLINE described in "Delving deep into rectifiers: Surpassing human-levelNEWLINE performance on ImageNet classification" - He, K. 
et al using a normalNEWLINE distribution.NEWLINENEWLINE The resulting tensor will have values sampled from normal distribution withNEWLINE mean=0 and std = gain * sqrt(1/(fan_in))NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE gain: an optional scaling factor to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> he_normal(w, gain=np.sqrt(2.0))NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE he_normal(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, _ = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(1.0 / fan_in)NEWLINE return tensor.normal_(0, std)NEWLINENEWLINENEWLINEdef orthogonal(tensor, gain=1):NEWLINE """Fills the input Tensor or Variable with a (semi) orthogonal matrix.NEWLINE The input tensor must have at least 2 dimensions,NEWLINE and for tensors with more than 2 dimensions the trailing dimensionsNEWLINE are flattened: the tensor is viewed as a 2D matrix with rows equal toNEWLINE the first dimension and columns equal to the product of the remainingNEWLINE dimensions.NEWLINE Reference: "Exact solutions to the nonlinear dynamics of learning inNEWLINE deep linear neural networks" - Saxe, A. et al.NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.Tensor, where n >= 2NEWLINE gain: optional gain to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.orthogonal(w)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE orthogonal(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE if tensor.ndimension() < 2:NEWLINE raise ValueError(NEWLINE "Only tensors with 2 or more dimensions are supported.")NEWLINENEWLINE flattened_shape = (tensor.size(0), int(NEWLINE np.prod(tensor.numpy().shape[1:])))NEWLINE flattened = torch.Tensor(NEWLINE flattened_shape[0], flattened_shape[1]).normal_(0, 1)NEWLINENEWLINE u, s, v = np.linalg.svd(flattened.numpy(), full_matrices=False)NEWLINE if u.shape == flattened.numpy().shape:NEWLINE tensor.view_as(flattened).copy_(torch.from_numpy(u))NEWLINE else:NEWLINE tensor.view_as(flattened).copy_(torch.from_numpy(v))NEWLINENEWLINE tensor.mul_(gain)NEWLINE return tensorNEWLINENEWLINENEWLINEdef sparse(tensor, sparsity, std=0.01):NEWLINE """Fills the 2D input Tensor or Variable as a sparse matrix,NEWLINE where the non-zero elements will be drawn from aNEWLINE normal distribution with mean=0 and std=`std`.NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE sparsity: The fraction of elements in each column to be set to zeroNEWLINE std: the standard deviation of the normal distribution used to generate the non-zero valuesNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.sparse(w, sparsity=0.1)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE sparse(tensor.data, sparsity, std=std)NEWLINE return tensorNEWLINE else:NEWLINE if tensor.ndimension() != 2:NEWLINE raise ValueError(NEWLINE "Sparse initialization only supported for 2D inputs")NEWLINE tensor.normal_(0, std)NEWLINE rows, cols = tensor.size(0), tensor.size(1)NEWLINE num_zeros = int(np.ceil(rows * sparsity)) # entries to zero out in each columnNEWLINENEWLINE for col_idx in range(cols):NEWLINE row_indices = np.arange(rows)NEWLINE np.random.shuffle(row_indices)NEWLINE zero_indices = row_indices[:num_zeros]NEWLINE tensor.numpy()[zero_indices, col_idx] = 0NEWLINENEWLINE return tensorNEWLINENEWLINENEWLINEdef shock_he(tensor, gain=.00001):NEWLINE """NEWLINE 
Adds very small He-initialized values to the current tensor state.NEWLINE Helps the tensor achieve full rank in case it lost it.NEWLINENEWLINE Example:NEWLINE >>> tensor = torch.eye(3, 3)NEWLINE >>> tensor[0, 0] = 0NEWLINE >>> np.linalg.matrix_rank(tensor.numpy())NEWLINE 2NEWLINE >>> _ = shock_he(tensor, gain=.00001)NEWLINE >>> np.linalg.matrix_rank(tensor.numpy())NEWLINE 3NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE shock_he(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, _ = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(1.0 / fan_in)NEWLINE prb = torch.randn(tensor.shape) * stdNEWLINE tensor += prbNEWLINE return tensorNEWLINENEWLINENEWLINEdef shock(tensor, scale=.1):NEWLINE if isinstance(tensor, Variable):NEWLINE shock(tensor.data, scale)NEWLINE return tensorNEWLINE else:NEWLINE # shock by some fraction of the stdNEWLINE std = tensor.std() * scaleNEWLINE prb = torch.randn(tensor.shape) * stdNEWLINE tensor += prbNEWLINE return tensorNEWLINENEWLINENEWLINEdef shock_outward(tensor, scale=.1, a_min=.01):NEWLINE """NEWLINE Send weights away from zero.NEWLINE """NEWLINENEWLINE if isinstance(tensor, Variable):NEWLINE shock_outward(tensor.data, scale)NEWLINE return tensorNEWLINE else:NEWLINE std = max(torch.abs(tensor).max(), a_min) * scaleNEWLINE # perturb outwardNEWLINE offset = np.abs(torch.randn(tensor.shape) * std) * torch.sign(tensor)NEWLINE tensor += offsetNEWLINE return tensorNEWLINE
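A quick numerical sanity check of the He-normal initializer defined above. This is only a sketch and assumes the legacy PyTorch API this module targets, where a plain torch.Tensor is not a Variable:

import numpy as np
import torch

w = torch.Tensor(256, 128)                    # 2D weight, so fan_in = 128
he_normal(w, gain=np.sqrt(2.0))
expected_std = np.sqrt(2.0) * np.sqrt(1.0 / 128)
print(w.numpy().std(), expected_std)          # the two values should be close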
import cursesNEWLINEimport collectionsNEWLINEimport sysNEWLINEimport osNEWLINEfrom time import sleepNEWLINENEWLINE# File syntaxNEWLINE#NEWLINE# When there's not enough space for all elements UI will go into scroll modeNEWLINE#NEWLINE# Syntax:NEWLINE# script.py ui_example.txtNEWLINE#NEWLINE# an object is one line, split by ;NEWLINE# The first part is the Name the second part is the shell actionNEWLINE# Use the sample file to tweak colors.NEWLINE# Valid colors are: black, red, green, yellow, blue, magenta, cyan, whiteNEWLINE# Also valid colors are black2, red2, green2.. those are usually brighter versionsNEWLINE#NEWLINE# To run an inbuilt function just use an action as followed:NEWLINE# Show version;function:Show_versionNEWLINE#NEWLINE# To implement a quit button you can do so:NEWLINE# Quit menu;quitNEWLINE#NEWLINE# For more information check out the github readme: https://github.com/DiscordDigital/ui.py/NEWLINENEWLINEdef RunInbuiltFunction(function_name):NEWLINE if (function_name == "Show_version"):NEWLINE print("Running python version " + sys.version)NEWLINENEWLINEdef generate_sample_file():NEWLINE sample_file = open('sample_ui.txt','w')NEWLINE sample_file.write(NEWLINE """menutext=Sample UI!\nmaxh=3\ntitlecolor=white\nwindow_bg=blue\nobjcolor_text=white\nobjcolor_bg=blue\nobjcolor_sel_text=black\nobjcolor_sel_bg=white\nStart Nano;nano\nShow date;date\nCredits;echo Made by discord.digital\nShow Python version;function:Show_version\nQuit;quit"""NEWLINE )NEWLINE sample_file.close()NEWLINENEWLINEif len(sys.argv) != 2:NEWLINE print("Specify ui file")NEWLINE print("Get started by typing: " + sys.argv[0] + " sample")NEWLINE exit()NEWLINEelif (sys.argv[1] == "sample"):NEWLINE generate_sample_file()NEWLINE print("Created sample_ui.txt")NEWLINE print("Use it like that: " + sys.argv[0] + " sample_ui.txt")NEWLINE exit(0)NEWLINEelse:NEWLINE if not os.path.isfile(sys.argv[1]):NEWLINE print("File not found!")NEWLINE exit()NEWLINENEWLINEscreen = curses.initscr()NEWLINEcurses.curs_set(0)NEWLINEcurses.noecho()NEWLINEscreen.keypad(1)NEWLINEcurses.start_color()NEWLINEcurses.mousemask(1)NEWLINENEWLINEdef convert_text_to_color(text):NEWLINE textup = text.upper()NEWLINE if (textup == "BLACK"):NEWLINE return 0NEWLINE if (textup == "RED"):NEWLINE return 1NEWLINE if (textup == "GREEN"):NEWLINE return 2NEWLINE if (textup == "YELLOW"):NEWLINE return 3NEWLINE if (textup == "BLUE"):NEWLINE return 4NEWLINE if (textup == "MAGENTA"):NEWLINE return 5NEWLINE if (textup == "CYAN"):NEWLINE return 6NEWLINE if (textup == "WHITE"):NEWLINE return 7NEWLINE if (textup == "BLACK2"):NEWLINE return 8NEWLINE if (textup == "RED2"):NEWLINE return 9NEWLINE if (textup == "GREEN2"):NEWLINE return 10NEWLINE if (textup == "YELLOW2"):NEWLINE return 11NEWLINE if (textup == "BLUE2"):NEWLINE return 12NEWLINE if (textup == "MAGENTA2"):NEWLINE return 13NEWLINE if (textup == "CYAN2"):NEWLINE return 14NEWLINE if (textup == "WHITE2"):NEWLINE return 15NEWLINE NEWLINE return 7NEWLINENEWLINEobjects = collections.defaultdict(dict)NEWLINEobject_i = 0NEWLINEmenutext = "Menu"NEWLINEmaxh = 3NEWLINEtitlecolor = "white"NEWLINEwindow_bg = "black"NEWLINEobjcolor_text = "white"NEWLINEobjcolor_bg = "black"NEWLINEobjcolor_sel_text = "black"NEWLINEobjcolor_sel_bg = "white"NEWLINENEWLINEfp = open(sys.argv[1])NEWLINEfor _, line in enumerate(fp):NEWLINE if line.startswith("menutext="):NEWLINE menutext = line.replace('menutext=','').replace('\n','')NEWLINE elif line.startswith("maxh="):NEWLINE maxh = line.replace('maxh=','').replace('\n','')NEWLINE elif 
line.startswith("titlecolor="):NEWLINE titlecolor = line.replace('titlecolor=','').replace('\n','')NEWLINE elif line.startswith("window_bg="):NEWLINE window_bg = line.replace('window_bg=','').replace('\n','')NEWLINE elif line.startswith("objcolor_text="):NEWLINE objcolor_text = line.replace('objcolor_text=','').replace('\n','')NEWLINE elif line.startswith("objcolor_bg="):NEWLINE objcolor_bg = line.replace('objcolor_bg=','').replace('\n','')NEWLINE elif line.startswith("objcolor_sel_text="):NEWLINE objcolor_sel_text = line.replace('objcolor_sel_text=','').replace('\n','')NEWLINE elif line.startswith("objcolor_sel_bg="):NEWLINE objcolor_sel_bg = line.replace('objcolor_sel_bg=','').replace('\n','')NEWLINE else:NEWLINE if (line == '\n'):NEWLINE breakNEWLINE interface = line.split(';')NEWLINE objects[object_i]['Label'] = interface[0].replace('\n','')NEWLINE objects[object_i]['Action'] = interface[1].replace('\n','')NEWLINE object_i = object_i + 1NEWLINEfp.close()NEWLINENEWLINEcolorcode = convert_text_to_color(titlecolor)NEWLINEcolorcode_bg = convert_text_to_color(window_bg)NEWLINEcurses.init_pair(2, colorcode, colorcode_bg)NEWLINEcolorcode_text = convert_text_to_color(objcolor_text)NEWLINEcolorcode_bg = convert_text_to_color(objcolor_bg)NEWLINEcurses.init_pair(3, colorcode_text, colorcode_bg)NEWLINEcolorcode_text = convert_text_to_color(objcolor_sel_text)NEWLINEcolorcode_bg = convert_text_to_color(objcolor_sel_bg)NEWLINEcurses.init_pair(4, colorcode_text, colorcode_bg)NEWLINENEWLINEmaxh = int(maxh)NEWLINENEWLINEscreen.bkgd(' ', curses.color_pair(2))NEWLINENEWLINE_, x = screen.getmaxyx()NEWLINEtitlepad = curses.newpad(1, x-2)NEWLINEtitlepad.addstr(menutext, curses.color_pair(2))NEWLINEtitlepad.bkgd(' ', curses.color_pair(2) | curses.A_BOLD)NEWLINENEWLINEinfopad = curses.newpad(3, 15)NEWLINEinfopad.addstr("Press q to exit", curses.color_pair(2))NEWLINENEWLINEdef create_entry(text,startheight):NEWLINE _, x = screen.getmaxyx()NEWLINE pad = curses.newpad(maxh, x - 2)NEWLINE cheight = int(maxh / 2)NEWLINE tstart = int((x / 2) - (len(text) / 2))-1NEWLINE pad.addstr(cheight,tstart,text)NEWLINE pad.bkgd(' ', curses.color_pair(3))NEWLINE return padNEWLINENEWLINEdef select_entry(pad):NEWLINE global parseoffsetNEWLINE global selectNEWLINE global refreshlistNEWLINE global selectedpadNEWLINE global scrolldirectionNEWLINE global object_iNEWLINE global maxfitobjNEWLINE global resizeNEWLINE if (object_i > maxfitobj) or (parseoffset != 0):NEWLINE selectpad.erase()NEWLINE selectpad.resize(3,len(str(100) + "/") + len(str(object_i)))NEWLINE selectpad.addstr(str(select + 1) + "/" + str(object_i), curses.color_pair(2))NEWLINE selectpad.refresh(0, 0, 1, 2, 1, x-2)NEWLINE if (pad):NEWLINE if (selectedpad != None) and not (resize):NEWLINE deselect_entry(selectedpad)NEWLINE pad['pad'].bkgd(' ', curses.color_pair(4))NEWLINE cheight = int(maxh / 2)NEWLINE tstart = int((x / 2) - (len(pad['label']) / 2))-1NEWLINE pad['pad'].addstr(cheight,tstart,pad['label'])NEWLINE y, _ = pad['pad'].getbegyx()NEWLINE sy, sx = screen.getmaxyx()NEWLINE pad['pad'].refresh(0,0,y,1,sy,sx-2)NEWLINE selectedpad = padNEWLINE else:NEWLINE scrolldirection = "up"NEWLINE parseoffset = parseoffset - 1NEWLINE refreshlist = TrueNEWLINE screen.refresh() NEWLINENEWLINEdef deselect_entry(pad):NEWLINE pad['pad'].bkgd(' ', curses.color_pair(3))NEWLINE cheight = int(maxh / 2)NEWLINE tstart = int((x / 2) - (len(pad['label']) / 2))-1NEWLINE pad['pad'].addstr(cheight,tstart,pad['label'])NEWLINE y, _ = pad['pad'].getbegyx()NEWLINE sy, sx = screen.getmaxyx()NEWLINE 
pad['pad'].refresh(0,0,y,1,sy,sx-2)NEWLINE screen.refresh()NEWLINENEWLINEcurseLoop = TrueNEWLINEpads = FalseNEWLINEaction = FalseNEWLINEselect = 0NEWLINEselectedpad = NoneNEWLINEscroll = FalseNEWLINEparseoffset = 0NEWLINErefreshlist = FalseNEWLINEscrolldirection = "down"NEWLINENEWLINEseltext = "Selecting 0/0"NEWLINEselectpad = curses.newpad(3, len(seltext))NEWLINEselectpad.bkgd(' ', curses.color_pair(3))NEWLINENEWLINEy, x = screen.getmaxyx()NEWLINEscreensize = y - 4NEWLINEmaxfitobj = int(screensize / maxh)NEWLINENEWLINEwhile curseLoop:NEWLINE screen.refresh()NEWLINE resize = curses.is_term_resized(y, x)NEWLINE if resize is True:NEWLINE y, x = screen.getmaxyx()NEWLINE screen.clear()NEWLINE curses.resizeterm(y, x)NEWLINE screensize = y - 4NEWLINE maxfitobj = int(screensize / maxh)NEWLINE pads = FalseNEWLINE screen.refresh()NEWLINE else:NEWLINE try:NEWLINE titlepad.refresh(0, 0, 2, int((x/2)-(len(menutext)/2)), 2, x-2)NEWLINE infopad.refresh(0, 0, 1, x-17, 1, x-2)NEWLINE except:NEWLINE passNEWLINENEWLINE j = 4NEWLINE NEWLINE if (pads == False) or (refreshlist):NEWLINE pads = collections.defaultdict(dict)NEWLINENEWLINE if (object_i > maxfitobj):NEWLINE parserange = range(0 + parseoffset, maxfitobj + parseoffset)NEWLINE else:NEWLINE parserange = range(object_i)NEWLINE NEWLINE for i in parserange:NEWLINE pads[i]['pad'] = create_entry(objects[i]['Label'],j)NEWLINE try:NEWLINE pads[i]['pad'].refresh(0,0,j,1,y,x-2)NEWLINE except:NEWLINE passNEWLINE pads[i]['action'] = objects[i]['Action']NEWLINE pads[i]['label'] = objects[i]['Label']NEWLINE pads[i]['range-start'] = jNEWLINE pads[i]['range-end'] = j + maxhNEWLINE j = j + maxhNEWLINE if (refreshlist):NEWLINE if (scrolldirection == "down"):NEWLINE select = maxfitobj + parseoffset - 1NEWLINE select_entry(pads[select])NEWLINE if (scrolldirection == "up"):NEWLINE select = parseoffsetNEWLINE select_entry(pads[select])NEWLINE else:NEWLINE select = 0NEWLINE select_entry(pads[select])NEWLINE refreshlist = FalseNEWLINENEWLINE event = screen.getch()NEWLINE if event == ord("q"): breakNEWLINE if event == curses.KEY_MOUSE:NEWLINE try:NEWLINE _, _, my, _, _ = curses.getmouse()NEWLINE if (object_i > maxfitobj):NEWLINE parserange = range(0 + parseoffset, maxfitobj + parseoffset)NEWLINE else:NEWLINE parserange = range(object_i)NEWLINE for i in parserange:NEWLINE if (my >= pads[i]['range-start']) and (my < pads[i]['range-end']):NEWLINE if (selectedpad != None):NEWLINE deselect_entry(selectedpad)NEWLINE select_entry(pads[i])NEWLINE action = pads[i]['action']NEWLINE y, _ = pads[i]['pad'].getbegyx()NEWLINE sy, sx = screen.getmaxyx()NEWLINE pads[i]['pad'].refresh(0,0,y,1,sy,sx-2)NEWLINE sleep(0.2)NEWLINE curseLoop = FalseNEWLINE except:NEWLINE passNEWLINE if event == curses.KEY_UP:NEWLINE if (selectedpad == None):NEWLINE select = 0NEWLINE select_entry(pads[select])NEWLINE if (select != 0):NEWLINE select = select - 1NEWLINE select_entry(pads[select])NEWLINENEWLINE if event == curses.KEY_DOWN:NEWLINE if (selectedpad != None):NEWLINE if (select != maxfitobj + parseoffset - 1):NEWLINE if not (select == object_i - 1):NEWLINE select = select + 1NEWLINE deselect_entry(selectedpad)NEWLINE select_entry(pads[select])NEWLINE else:NEWLINE if (select == maxfitobj + parseoffset - 1):NEWLINE if (select != object_i - 1):NEWLINE select = select + 1NEWLINE parseoffset = parseoffset + 1NEWLINE scrolldirection = "down"NEWLINE refreshlist = TrueNEWLINE else:NEWLINE if (object_i == 1):NEWLINE select = 0NEWLINE select_entry(pads[select])NEWLINE else:NEWLINE select = 1NEWLINE 
select_entry(pads[select])NEWLINE if event == 10:NEWLINE if (selectedpad != None):NEWLINE action = objects[select]['Action']NEWLINE curseLoop = FalseNEWLINEcurses.endwin()NEWLINEsleep(0.1)NEWLINEif (action):NEWLINE if action.startswith("function:"):NEWLINE function = action.split(":")[1]NEWLINE RunInbuiltFunction(function)NEWLINE elif (action == "quit"):NEWLINE exit()NEWLINE else:NEWLINE os.system(action)NEWLINE
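For reference, a hypothetical menu file matching the syntax described in the script's header comments (settings first, then one Label;Action entry per line); it is not shipped with the script and the file name is made up:

example_menu = """menutext=Server tools
maxh=3
window_bg=black
Show uptime;uptime
Show Python version;function:Show_version
Quit;quit
"""
with open('tools_ui.txt', 'w') as f:
    f.write(example_menu)
# then run it with: script.py tools_ui.txt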
#!/usr/bin/env python3NEWLINENEWLINE"""NEWLINEusage: steps.py [-h] [--after AFTER] [--before BEFORE] [--tz TZ]NEWLINE [--output OUTPUT] [--account ACCOUNT] [--password PASSWORD]NEWLINE [--subaccount SUBACCOUNT]NEWLINE FLOWNEWLINENEWLINECreates a CSV file of steps within engagements in a Twilio Studio flow, forNEWLINEthe given time period, for the purposes of analyzing paths through an IVR. NEWLINENEWLINEpositional arguments:NEWLINE FLOW Flow SIDNEWLINENEWLINEoptional arguments:NEWLINE -h, --help show this help message and exitNEWLINE --after AFTER yyyy-mm-dd [HH:MM[:SS]]; time defaults to 00:00:00NEWLINE (default: None)NEWLINE --before BEFORE, -b BEFORENEWLINE yyyy-mm-dd [HH:MM[:SS]]; time defaults to 00:00:00NEWLINE (default: None)NEWLINE --tz TZ, -t TZ Time zone name (default: UTC)NEWLINE --output OUTPUT, -o OUTPUTNEWLINE Output file; defaults to terminal (default: None)NEWLINE --account ACCOUNT, -a ACCOUNTNEWLINE Account SID; if not given, value of environmentNEWLINE variable TWILIO_ACCOUNT_SID (default: None)NEWLINE --password PASSWORD, -p PASSWORDNEWLINE Auth token; if not given, value of environmentNEWLINE variable TWILIO_AUTH_TOKEN (default: None)NEWLINE --subaccount SUBACCOUNT, -s SUBACCOUNTNEWLINE If present, subaccount to use (default: None)NEWLINE"""NEWLINENEWLINENEWLINEimport osNEWLINEimport sysNEWLINEfrom datetime import datetimeNEWLINEfrom pytz import timezone, UnknownTimeZoneErrorNEWLINEimport beginNEWLINEfrom twilio.base.exceptions import TwilioRestExceptionNEWLINEfrom twilio.rest import ClientNEWLINENEWLINENEWLINEdef get_datetime(dt_string, tz):NEWLINE """Converts a date/time string into a datetime object with the given time zone."""NEWLINE try:NEWLINE dt = datetime.strptime(dt_string, '%Y-%m-%d')NEWLINE except ValueError:NEWLINE try:NEWLINE dt = datetime.strptime(dt_string, '%Y-%m-%d %H:%M')NEWLINE except ValueError:NEWLINE dt = datetime.strptime(dt_string, '%Y-%m-%d %H:%M:%S')NEWLINE NEWLINE return tz.localize(dt)NEWLINENEWLINENEWLINE@begin.startNEWLINEdef main(NEWLINE flow: "Flow SID",NEWLINE after: "yyyy-mm-dd [HH:MM[:SS]]; time defaults to 00:00:00" = None,NEWLINE before: "yyyy-mm-dd [HH:MM[:SS]]; time defaults to 00:00:00" = None,NEWLINE tz: "Time zone name" = "UTC",NEWLINE output: "Output file; defaults to terminal" = None,NEWLINE account: "Account SID; if not given, value of environment variable TWILIO_ACCOUNT_SID" = None,NEWLINE password: "Auth token; if not given, value of environment variable TWILIO_AUTH_TOKEN" = None,NEWLINE subaccount: "If present, subaccount to use" = NoneNEWLINE ):NEWLINE """NEWLINE Creates a CSV file of steps within engagements in a Twilio Studio flow, for the given time period,NEWLINE for the purposes of analyzing paths through an IVR. 
NEWLINE """NEWLINE if not flow:NEWLINE sys.exit("Error: no Flow SID")NEWLINENEWLINE try:NEWLINE account = account or os.environ['TWILIO_ACCOUNT_SID']NEWLINE password = password or os.environ['TWILIO_AUTH_TOKEN']NEWLINE except KeyError:NEWLINE sys.exit("Error: missing environment variable TWILIO_ACCOUNT_SID and/or TWILIO_AUTH_TOKEN")NEWLINENEWLINE try:NEWLINE tz = timezone(tz)NEWLINE except UnknownTimeZoneError:NEWLINE sys.exit("Invalid timezone: {}".format(tz))NEWLINE NEWLINE try:NEWLINE after = get_datetime(after, tz) if after else NoneNEWLINE except ValueError:NEWLINE sys.exit("Invalid date/time: {}".format(after))NEWLINENEWLINE try:NEWLINE before = get_datetime(before, tz) if before else NoneNEWLINE except ValueError:NEWLINE sys.exit("Invalid date/time: {}".format(before))NEWLINENEWLINE if after and before and after > before:NEWLINE sys.exit("Error: end date/time is before start date/time")NEWLINENEWLINE client = Client(account, password, subaccount)NEWLINENEWLINE # Grab the flow instance.NEWLINE try:NEWLINE flow = client.studio.flows.get(flow).fetch()NEWLINE except TwilioRestException:NEWLINE sys.exit("Error: unable to get Flow {}".format(flow))NEWLINENEWLINE def in_range(engagement):NEWLINE """Does the engagement fall between after and before?"""NEWLINE if after and engagement.date_created < after:NEWLINE return FalseNEWLINE if before and engagement.date_created >= before:NEWLINE return FalseNEWLINE return TrueNEWLINE NEWLINE engagements = filter(in_range, flow.engagements.list())NEWLINENEWLINE output = open(output, 'w') if output else sys.stdoutNEWLINE print("Date/Time,Engagement SID,Contact Address,Step,Event,Next Step", file=output)NEWLINENEWLINE for engagement in engagements:NEWLINE steps = engagement.steps.list()NEWLINE for step in steps:NEWLINE print("{},{},{},{},{},{}".format(NEWLINE step.date_created,NEWLINE engagement.sid,NEWLINE engagement.contact_channel_address,NEWLINE step.transitioned_from,NEWLINE step.name,NEWLINE step.transitioned_toNEWLINE ), file=output)NEWLINENEWLINE output.close()NEWLINE
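A small illustration of the get_datetime helper defined above; the time zone name is just an example, not something the script requires:

from pytz import timezone

eastern = timezone("US/Eastern")
start = get_datetime("2020-03-01 14:30", eastern)
print(start.isoformat())   # 2020-03-01T14:30:00-05:00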
# Copyright 2021 UW-IT, University of WashingtonNEWLINE# SPDX-License-Identifier: Apache-2.0NEWLINENEWLINEfrom .utils import MOCK_SETTINGS, get_mock_manifest, generate_mocksNEWLINEfrom webpack_bridge.templatetags.webpack_bridge import render_webpack_entryNEWLINENEWLINEfrom django.test import TestCaseNEWLINEfrom django.test.utils import override_settingsNEWLINENEWLINENEWLINE@override_settings(**MOCK_SETTINGS)NEWLINEclass TestRenderWebpackEntry(TestCase):NEWLINE def setUp(self):NEWLINE self.mock_manifest = get_mock_manifest()NEWLINENEWLINE def test_render(self):NEWLINE mocks = generate_mocks(self.mock_manifest)NEWLINE with mocks[0], mocks[1]:NEWLINE self.assertEqual(NEWLINE render_webpack_entry(NEWLINE 'home',NEWLINE js='async',NEWLINE css='crossorigin'NEWLINE ),NEWLINE '<script src="/static/home-234xz0jk.js" async></script>\n'NEWLINE '<script src="/static/vendor-4t4g534y.js" async></script>\n'NEWLINE '<link rel="stylesheet" type="text/css"'NEWLINE ' href="/static/other-home-89m07yfg.css" crossorigin>\n'NEWLINE )NEWLINE
#!/usr/bin/env pythonNEWLINENEWLINE# for non-list/dict, CHANGE_TO -> variables replace previous valuesNEWLINE# for 'list' , ADD_TO -> append to listNEWLINE# EXCLUDE_FROM -> remove element from listNEWLINE# for 'dict' , REPLACE_WITH -> replace matching keys NEWLINE# as CHANGE_TO, ADD_TO or EXCLUDE_FROMNEWLINENEWLINEimport pprintNEWLINEimport argparseNEWLINEimport sysNEWLINEimport osNEWLINEimport errnoNEWLINEimport reNEWLINENEWLINEclass dryRun(Exception): passNEWLINENEWLINEclass ConfigParseError(dryRun):NEWLINE def __init__(self, msg, config_filename, orig_exc):NEWLINE self.config_filename = config_filenameNEWLINE self.orig_exc = orig_excNEWLINE Exception.__init__(self, msg)NEWLINENEWLINEclass CmdLine():NEWLINE def __init__(self, argvList):NEWLINE self.json_dir = '../json'NEWLINENEWLINE # env_name: '-e' [dev, qa, uat, prod, ...] NEWLINE # app_name:'-a' [ 'eq', ... ]NEWLINE # app_use: '-u' [ centos_6u6, rh_7.2, ...]NEWLINE # hw_name: '-m' [ hp_dl_360_gen9, hp_dl_360_gen10, ...]NEWLINE # dc_name: '-l' [ rfp, sec, lnd, ....]NEWLINE # host_name: '-n' NEWLINENEWLINE self.env_name = ''NEWLINE self.app_name = ''NEWLINE self.app_use = 'all'NEWLINE self.hw_name = ''NEWLINE self.dc_name = ''NEWLINE self.host_name = ''NEWLINENEWLINE # This is JSON configuration file name and key value to look in toNEWLINE self.configFile = ''NEWLINE self.configKey = ''NEWLINE self.c = {}NEWLINE self.d = []NEWLINENEWLINE self.parseCmdLine(argvList)NEWLINENEWLINE print(self.hw_name)NEWLINENEWLINE # Read YAML in to main_configNEWLINE self.main_config = self.read_config(self.configFile)NEWLINE NEWLINE #print(self.main_config)NEWLINENEWLINE # parse JSONNEWLINE self.parseJSON(self.main_config[self.configKey])NEWLINE self.d.append(self.configFile)NEWLINE print(self.d)NEWLINE dane=self.mergeJSON(self.d)NEWLINE pprint.pprint(dane)NEWLINE print('dane')NEWLINENEWLINE def writeJSON(self, filename, datastore):NEWLINE import jsonNEWLINE if filename:NEWLINE # Writing JSON dataNEWLINE with open(filename, 'w') as f:NEWLINE json.dump(datastore, f)NEWLINENEWLINE def readJSON(self, filename):NEWLINE import yamlNEWLINE ret_dict={}NEWLINENEWLINE if filename:NEWLINE # Read JSON dataNEWLINE with open(filename, 'r') as f:NEWLINE ret_dict=yaml.load(f)NEWLINE NEWLINE return(ret_dict)NEWLINENEWLINE def mergeJSON(self, read_list, ret_dict={}):NEWLINE if not read_list:NEWLINE return(ret_dict)NEWLINENEWLINE for j_data in read_list:NEWLINE # read JSONNEWLINE tmp_dict=self.readJSON(j_data)[j_data.split('/')[-1]]NEWLINENEWLINE if (tmp_dict.has_key('extends')):NEWLINE k_data=tmp_dict.pop('extends')NEWLINENEWLINE ret_dict.update(tmp_dict)NEWLINENEWLINE k=my_func(ret_dict)NEWLINENEWLINE ret_dict=kNEWLINENEWLINE print('KK')NEWLINE pprint.pprint(k)NEWLINE #print('KK-End')NEWLINE return(ret_dict) NEWLINE #ret_dict.update( my_func(tmp_dict) )NEWLINENEWLINENEWLINENEWLINE def parseCmdLine(self, cmdLine):NEWLINE parser = argparse.ArgumentParser(description="parse josn files")NEWLINE parser.add_argument('-m', action="store", dest="model")NEWLINE parser.add_argument('-n', action="store", dest="host_name")NEWLINE parser.add_argument('-u', action="store", dest="unix_os")NEWLINE parser.add_argument('-l', action="store", dest="location")NEWLINE parser.add_argument('-a', action="store", dest="app_name")NEWLINE parser.add_argument('-e', action="store", dest="env_name")NEWLINE parser.add_argument('-r', action='store_true', dest="run" )NEWLINE parser.add_argument('command', nargs='*', action="store")NEWLINENEWLINE args = parser.parse_args(cmdLine)NEWLINE NEWLINE if (not 
args.run):NEWLINE print(args.model)NEWLINE raise dryRunNEWLINENEWLINE # command validationNEWLINE NEWLINE self.hw_name = args.modelNEWLINENEWLINE self.configFile = os.path.join(self.json_dir, self.hw_name)NEWLINE self.configKey = '%s' % (self.hw_name)NEWLINE NEWLINE def read_config(self, config_filename):NEWLINE from yaml import loadNEWLINE from os.path import existsNEWLINENEWLINE if not exists(config_filename): returnNEWLINENEWLINE with open(config_filename) as f:NEWLINE try:NEWLINE return load(f)NEWLINE except ValueError as exc:NEWLINE msg = 'Error parsing %s:\n %s' % (config_filename, exc)NEWLINE raise ConfigParseError(msg, config_filename, exc)NEWLINENEWLINE def parseJSON(self, k):NEWLINE if not k.has_key('extends'):NEWLINE return(self.d)NEWLINENEWLINE for j in k.pop('extends'):NEWLINE m = self.read_config(self.json_dir +'/'+ j )[j]NEWLINE self.parseJSON(m)NEWLINE print( 'Applying ' + self.json_dir +'/'+ j )NEWLINE self.d.append(os.path.join(self.json_dir, j))NEWLINE#NEWLINEdef manage_list(v, nv, r="ADD_TO"):NEWLINE if (r in ['ADD_TO']):NEWLINE try:NEWLINE print('aaaa')NEWLINE print(v)NEWLINE print([x for x in nv if any(re.search(y, x) for y in v)])NEWLINE print(nv)NEWLINE print('bbbb')NEWLINE #v.extend([x for x in nv if not any(re.search(y, x) for y in v)])NEWLINE v.extend(nv)NEWLINE except TypeError as exc:NEWLINE v.extend(nv)NEWLINE NEWLINE if (r in ['EXCLUDE_FROM']):NEWLINE v=reduce(lambda x,y : filter(lambda z: z!=y,x),nv,v)NEWLINE return(v)NEWLINENEWLINEdef my_func(a):NEWLINE ret_dict={}NEWLINE done_key_list=[]NEWLINENEWLINE for k,v in a.iteritems():NEWLINE reserved_word=''NEWLINE if (len(k.split('_')) > 2):NEWLINE reserved_word='_'.join(k.split('_')[:2])NEWLINE if (reserved_word in [NEWLINE 'ADD_TO', NEWLINE 'EXCLUDE_FROM', NEWLINE 'REPLACE_WITH', NEWLINE 'CHANGE_TO'NEWLINE ]):NEWLINE var='_'.join(k.split('_')[2:])NEWLINE else:NEWLINE var=kNEWLINE else:NEWLINE var=kNEWLINE NEWLINE if (isinstance(v, list)):NEWLINE if (reserved_word in ['ADD_TO', 'EXCLUDE_FROM']):NEWLINE done_key_list.append('%s' % (var))NEWLINE print('%s: %s' % ('Done List', ', '.join(done_key_list)))NEWLINE NEWLINE # check ret_dict if var exists or use from previous dict NEWLINENEWLINE # empty listNEWLINE pv=[]NEWLINENEWLINE pprint.pprint(ret_dict)NEWLINE pprint.pprint(a)NEWLINE print(var)NEWLINE if (a.has_key(var)):NEWLINE pv=a[var]NEWLINE pprint.pprint(pv)NEWLINE pprint.pprint(v)NEWLINE print(reserved_word)NEWLINENEWLINE ret_dict[var]=manage_list(pv, v, reserved_word)NEWLINE NEWLINE else:NEWLINE if (var not in done_key_list):NEWLINE ret_dict[var]=vNEWLINE NEWLINE if (not isinstance(v, list) and not isinstance(v, dict)):NEWLINE if (reserved_word in ['REPLACE_WITH']):NEWLINE done_key_list.append(var)NEWLINE ret_dict[var]=a['%s_%s' % (reserved_word, var)]NEWLINE else:NEWLINE if (var not in done_key_list):NEWLINE ret_dict[var]=vNEWLINE NEWLINE if (isinstance(v, dict)):NEWLINE if (reserved_word in ['CHANGE_TO']):NEWLINE done_key_list.append(var)NEWLINE tmp_dict={}NEWLINENEWLINE for k1,v1 in v.iteritems():NEWLINE tmp_dict[k1]=v1NEWLINENEWLINE if (a.has_key(var)):NEWLINE for k1,v1 in a[var].iteritems():NEWLINE tmp_dict[k1]=v1NEWLINE NEWLINE ret_dict[var]=my_func(tmp_dict)NEWLINE else: NEWLINE if (var not in done_key_list):NEWLINE ret_dict[var]=vNEWLINE NEWLINE return(ret_dict)NEWLINENEWLINEu=CmdLine(sys.argv[1:])NEWLINENEWLINEprint('End')NEWLINEpprint.pprint(u.c)NEWLINE
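A rough sketch of the list-merge semantics described in the header comments (ADD_TO_<key> appends to an existing list). The data is hypothetical, and this only illustrates the intended behaviour of my_func under Python 2:

base = {'packages': ['vim', 'curl'], 'ADD_TO_packages': ['git']}
merged = my_func(base)
# expected, roughly: {'packages': ['vim', 'curl', 'git']}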
"""users tableNEWLINENEWLINERevision ID: bd2fc65f4c5eNEWLINERevises: NEWLINECreate Date: 2020-03-09 14:33:46.107957NEWLINENEWLINE"""NEWLINEfrom alembic import opNEWLINEimport sqlalchemy as saNEWLINENEWLINENEWLINE# revision identifiers, used by Alembic.NEWLINErevision = 'bd2fc65f4c5e'NEWLINEdown_revision = NoneNEWLINEbranch_labels = NoneNEWLINEdepends_on = NoneNEWLINENEWLINENEWLINEdef upgrade():NEWLINE # ### commands auto generated by Alembic - please adjust! ###NEWLINE op.create_table('user',NEWLINE sa.Column('id', sa.Integer(), nullable=False),NEWLINE sa.Column('username', sa.String(length=64), nullable=True),NEWLINE sa.Column('email', sa.String(length=120), nullable=True),NEWLINE sa.Column('password_hash', sa.String(length=128), nullable=True),NEWLINE sa.Column('created_on', sa.DateTime(), nullable=True),NEWLINE sa.PrimaryKeyConstraint('id')NEWLINE )NEWLINE op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)NEWLINE op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)NEWLINE # ### end Alembic commands ###NEWLINENEWLINENEWLINEdef downgrade():NEWLINE # ### commands auto generated by Alembic - please adjust! ###NEWLINE op.drop_index(op.f('ix_user_username'), table_name='user')NEWLINE op.drop_index(op.f('ix_user_email'), table_name='user')NEWLINE op.drop_table('user')NEWLINE # ### end Alembic commands ###NEWLINE
"""NEWLINEWSGI config for DjangoECom project.NEWLINENEWLINEIt exposes the WSGI callable as a module-level variable named ``application``.NEWLINENEWLINEFor more information on this file, seeNEWLINEhttps://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/NEWLINE"""NEWLINENEWLINEimport osNEWLINENEWLINEfrom django.core.wsgi import get_wsgi_applicationNEWLINENEWLINEos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoECom.settings')NEWLINENEWLINEapplication = get_wsgi_application()NEWLINE
#ABC089eNEWLINEimport sysNEWLINEinput = sys.stdin.readlineNEWLINEsys.setrecursionlimit(10**6)NEWLINE
#!/usr/bin/env pythonNEWLINE# Licensed under a 3-clause BSD style license - see LICENSE.rstNEWLINENEWLINEimport globNEWLINEimport osNEWLINEimport sysNEWLINENEWLINEimport ah_bootstrapNEWLINEfrom setuptools import setupNEWLINENEWLINE#A dirty hack to get around some early import/configurations ambiguitiesNEWLINEif sys.version_info[0] >= 3:NEWLINE import builtinsNEWLINEelse:NEWLINE import __builtin__ as builtinsNEWLINEbuiltins._ASTROPY_SETUP_ = TrueNEWLINENEWLINEfrom astropy_helpers.setup_helpers import (NEWLINE register_commands, adjust_compiler, get_debug_option, get_package_info)NEWLINEfrom astropy_helpers.git_helpers import get_git_devstrNEWLINEfrom astropy_helpers.version_helpers import generate_version_pyNEWLINENEWLINE# Get some values from the setup.cfgNEWLINEtry:NEWLINE from ConfigParser import ConfigParserNEWLINEexcept ImportError:NEWLINE from configparser import ConfigParserNEWLINEconf = ConfigParser()NEWLINEconf.read(['setup.cfg'])NEWLINEmetadata = dict(conf.items('metadata'))NEWLINENEWLINEPACKAGENAME = metadata.get('package_name', 'packagename')NEWLINEDESCRIPTION = metadata.get('description', 'Astropy affiliated package')NEWLINEAUTHOR = metadata.get('author', '')NEWLINEAUTHOR_EMAIL = metadata.get('author_email', '')NEWLINELICENSE = metadata.get('license', 'unknown')NEWLINEURL = metadata.get('url', 'http://astropy.org')NEWLINENEWLINE# Get the long description from the package's docstringNEWLINE__import__(PACKAGENAME)NEWLINEpackage = sys.modules[PACKAGENAME]NEWLINELONG_DESCRIPTION = package.__doc__NEWLINENEWLINE# Store the package name in a built-in variable so it's easyNEWLINE# to get from other parts of the setup infrastructureNEWLINEbuiltins._ASTROPY_PACKAGE_NAME_ = PACKAGENAMENEWLINENEWLINE# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)NEWLINEVERSION = '0.2.dev'NEWLINENEWLINE# Indicates if this version is a release versionNEWLINERELEASE = 'dev' not in VERSIONNEWLINENEWLINEif not RELEASE:NEWLINE VERSION += get_git_devstr(False)NEWLINENEWLINE# Populate the dict of setup command overrides; this should be done beforeNEWLINE# invoking any other functionality from distutils since it can potentiallyNEWLINE# modify distutils' behavior.NEWLINEcmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)NEWLINENEWLINE# Adjust the compiler in case the default on this platform is to use aNEWLINE# broken one.NEWLINEadjust_compiler(PACKAGENAME)NEWLINENEWLINE# Freeze build information in version.pyNEWLINEgenerate_version_py(PACKAGENAME, VERSION, RELEASE,NEWLINE get_debug_option(PACKAGENAME))NEWLINENEWLINE# Treat everything in scripts except README.rst as a script to be installedNEWLINEscripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))NEWLINE if os.path.basename(fname) != 'README.rst']NEWLINENEWLINENEWLINE# Get configuration information from all of the various subpackages.NEWLINE# See the docstring for setup_helpers.update_package_files for moreNEWLINE# details.NEWLINEpackage_info = get_package_info()NEWLINENEWLINE# Add the project-global dataNEWLINEpackage_info['package_data'].setdefault(PACKAGENAME, [])NEWLINEpackage_info['package_data'][PACKAGENAME].append('data/*')NEWLINENEWLINE# Include all .c files, recursively, including those generated byNEWLINE# Cython, since we can not do this in MANIFEST.in with a "dynamic"NEWLINE# directory name.NEWLINEc_files = []NEWLINEfor root, dirs, files in os.walk(PACKAGENAME):NEWLINE for filename in files:NEWLINE if filename.endswith('.c'):NEWLINE c_files.append(NEWLINE os.path.join(NEWLINE 
os.path.relpath(root, PACKAGENAME), filename))NEWLINEpackage_info['package_data'][PACKAGENAME].extend(c_files)NEWLINENEWLINEsetup(name=PACKAGENAME,NEWLINE      version=VERSION,NEWLINE      description=DESCRIPTION,NEWLINE      scripts=scripts,NEWLINE      install_requires=['astropy','matplotlib>=2.0'],NEWLINE      author=AUTHOR,NEWLINE      author_email=AUTHOR_EMAIL,NEWLINE      license=LICENSE,NEWLINE      url=URL,NEWLINE      long_description=LONG_DESCRIPTION,NEWLINE      cmdclass=cmdclassd,NEWLINE      zip_safe=False,NEWLINE      use_2to3=True,NEWLINE      **package_infoNEWLINE)NEWLINE
"""NEWLINECode to automatically generate inputs for switch.NEWLINENEWLINEDevelopers:NEWLINE    Pedro Andres Sanchez PerezNEWLINE    Sergio Castellanos RodriguezNEWLINE    And others I do not know.NEWLINE"""NEWLINEimport osNEWLINEimport sysNEWLINEimport yamlNEWLINEimport numpy as npNEWLINEimport pandas as pdNEWLINEfrom context import *NEWLINEfrom collections import OrderedDictNEWLINEfrom utils import read_yaml, init_scenarioNEWLINEfrom utils import create_gen_build_cost_new, look_for_fileNEWLINENEWLINENEWLINEdef get_load_data(path=default_path, filename='HighLoads.csv',NEWLINE                  total=False, *args, **kwargs):NEWLINE    """ Read load consumption dataNEWLINENEWLINE    Args:NEWLINE        path (str): path to the file,NEWLINE        filename (str): name of the file,NEWLINE        total (bool): if true returns only the sum of all loads.NEWLINENEWLINE    TODO:NEWLINE        * Migrate this function to utilitiesNEWLINE        * This could be a csv or it could connect to a DB.NEWLINE    """NEWLINE    if filename == 'high':NEWLINE        filename = 'HighLoads.csv'NEWLINE    elif filename == 'low':NEWLINE        filename = 'LowLoads.csv'NEWLINE    elif filename == 'medium':NEWLINE        filename = 'MediumLoads.csv'NEWLINE    else:NEWLINE        # Assume an explicit file name was passed (e.g. the default 'HighLoads.csv').NEWLINE        passNEWLINENEWLINENEWLINE    file_path = os.path.join(path, filename)NEWLINENEWLINE    try:NEWLINE        df = pd.read_csv(file_path)NEWLINE    except FileNotFoundError:NEWLINE        # TODO: Change this to f' string formatNEWLINE        raise FileNotFoundError('File not found. Please verify the file is in: {}'.format(os.path.join(path, filename)))NEWLINENEWLINE    # Calculate the sum of loadsNEWLINE    df['total'] = df.sum(axis=1)NEWLINENEWLINE    try:NEWLINE        # Shift load data to match generation.NEWLINE        df.loc[:, 'hour'] -= 1NEWLINE    except KeyError:NEWLINE        print('Load data has no hour column to shift')NEWLINENEWLINE    # Add datetime indexNEWLINE    df.index = pd.to_datetime(df[['year', 'month', 'day', 'hour']])NEWLINE    df.index.rename('datetime', inplace=True)NEWLINENEWLINE    # Remove columnsNEWLINE    df = df.drop(['year', 'month', 'day', 'hour'], axis=1)NEWLINENEWLINE    df = df.sort_index()NEWLINENEWLINE    if total:NEWLINE        return df[['total']]NEWLINE    else:NEWLINE        return dfNEWLINENEWLINEdef get_peak_day(data, number, freq='MS'):NEWLINE    """ Construct a representative day based on a single timestampNEWLINENEWLINE    Args:NEWLINE        data (pd.DataFrame): data to filter,NEWLINE        number (int): number of timepoints per day.NEWLINENEWLINE    Note:NEWLINE        * Month start is to avoid getting more timepoints in an even divisionNEWLINE    """NEWLINE    years = []NEWLINENEWLINE    # Check that the number of timepoints divides the day evenlyNEWLINE    if 24 % number != 0:NEWLINE        raise ValueError('Odd number of timepoints. 
Use even number')NEWLINENEWLINE # Iterate over all monthsNEWLINE for _, group in data.groupby([pd.Grouper(freq='A'),\NEWLINE pd.Grouper(freq=freq)]):NEWLINENEWLINE delta_t = int(24/number)NEWLINENEWLINE #Temporal fix for duplicatesNEWLINE group = group[~group.index.duplicated(keep='last')]NEWLINENEWLINE # Get index of max valueNEWLINE peak_timestamp = group.idxmax()NEWLINENEWLINE # Convert the max value index to timestampNEWLINE mask = peak_timestamp.strftime('%Y-%m-%d')NEWLINENEWLINE peak_loc = group[mask].index.get_loc(peak_timestamp)NEWLINENEWLINE # Check if delta_t does not jump to next dayNEWLINE if (peak_timestamp.hour + delta_t ) > 23:NEWLINE peak_timestamps = group.loc[mask].iloc[peak_loc::-delta_t]NEWLINE if len(peak_timestamps) < delta_t:NEWLINE missing = number - len(peak_timestamps)NEWLINE peak_timestamps = (peak_timestamps.append(groupNEWLINE .loc[mask]NEWLINE .iloc[peak_loc::delta_t][1:missing+1]))NEWLINE else:NEWLINE peak_timestamps = group.loc[mask].iloc[peak_loc::delta_t]NEWLINE if len(peak_timestamps) < delta_t:NEWLINE missing = number - len(peak_timestamps)NEWLINE peak_timestamps = (peak_timestamps.append(groupNEWLINE .loc[mask]NEWLINE .iloc[peak_loc::-delta_t][1:missing+1]))NEWLINENEWLINE # Sort the index. Why not?NEWLINE peak_timestamps = peak_timestamps.sort_index().reset_index()NEWLINENEWLINE years.append(peak_timestamps)NEWLINENEWLINE output_data = pd.concat(years, sort=True)NEWLINE output_data = output_data.rename(columns={'datetime':'date',NEWLINE 'total':'peak_day'})NEWLINE return (output_data)NEWLINENEWLINEdef get_median_day(data, number, freq='MS'):NEWLINE """ Calculate median day giving a timeseriesNEWLINENEWLINE Args:NEWLINE data (pd.DataFrame): data to filter,NEWLINE number (float): number of days to return.NEWLINENEWLINE Note(s):NEWLINE * Month start is to avoid getting more timepoints in a even divisionNEWLINE """NEWLINENEWLINE years = []NEWLINENEWLINE for _, group in data.groupby([pd.Grouper(freq='A'),\NEWLINE pd.Grouper(freq=freq)]):NEWLINE # Calculate the daily meanNEWLINE grouper = group.groupby(pd.Grouper(freq='D')).mean()NEWLINE if len(grouper) & 1:NEWLINE # Odd number of daysNEWLINE index_median = grouper.loc[grouper==grouper.median()].index[0]NEWLINE else:NEWLINE # Even number of daysNEWLINE index_median = (np.abs(grouper-grouper.median())).idxmin()NEWLINE years.append(group.loc[index_median.strftime('%Y-%m-%d')].iloc[::int((24/number))].reset_index())NEWLINE output_data = pd.concat(years, sort=True)NEWLINE output_data.rename(columns={'datetime': 'date', 'total':'median_day'},\NEWLINE inplace=True)NEWLINENEWLINE return (output_data)NEWLINENEWLINEdef create_investment_period(path=default_path, ext='.tab', **kwargs):NEWLINE """ Create periods file using periods.yaml inputNEWLINENEWLINE Args:NEWLINE path (str): path to file,NEWLINE ext (str): output extension to save the fileNEWLINENEWLINE Note(s):NEWLINE * .tab extension is to match the switch inputs,NEWLINE """NEWLINENEWLINE output_file = output_path + 'periods' + extNEWLINENEWLINE periods = read_yaml(path, 'periods.yaml')NEWLINENEWLINE d = OrderedDict(periods)NEWLINE periods_tab = pd.DataFrame(d)NEWLINE periods_tab = periods_tab.set_index('INVESTMENT_PERIOD')NEWLINE periods_tab.to_csv(output_file, sep='\t')NEWLINENEWLINENEWLINEdef create_rps(path=default_path, filename='rps_targets',NEWLINE output_name='rps_targets', ext='.yaml', output_ext='.tab'):NEWLINE """ Create rps targets file using rps_target.yaml"""NEWLINENEWLINE if output_ext == '.tab': sep='\t'NEWLINENEWLINE output_file = output_path + 
output_name + output_extNEWLINE file_path = os.path.join(path, filename + ext)NEWLINENEWLINE if ext == '.yaml':NEWLINE rps = read_yaml(path, 'rps_targets.yaml')NEWLINENEWLINE d = OrderedDict(rps)NEWLINE rps_tab = pd.DataFrame(d)NEWLINE rps_tab = rps_tab.set_index('PERIOD')NEWLINENEWLINE rps_tab.to_csv(output_file, sep=sep)NEWLINENEWLINENEWLINENEWLINEdef create_timepoints(data, ext='.tab', **kwargs):NEWLINE """ Create timepoints fileNEWLINENEWLINE Args:NEWLINE data (pd.DataFrame): dataframe witht dates ,NEWLINE ext (str): output extension to save the file.NEWLINENEWLINE Note(s):NEWLINE * .tab extension is to match the switch inputs,NEWLINE """NEWLINENEWLINE # Filename conventionNEWLINE if ext == '.tab': sep='\t'NEWLINENEWLINE output_file = output_path + 'timepoints' + extNEWLINENEWLINE # If multiple timeseries included in dataNEWLINE if isinstance(data, list):NEWLINE data = pd.concat(data, sort=True)NEWLINENEWLINE # TODO: Write test to check if columns existNEWLINE data = data[['timestamp', 'TIMESERIES', 'daysinmonth']]NEWLINE data.index.name = 'timepoint_id'NEWLINE data = data.reset_index(drop=True)NEWLINE data = data.rename(columns={'TIMESERIES':'timeseries'})NEWLINE data.index += 1 # To start on 1 instead of 0NEWLINE data.index.name = 'timepoint_id'NEWLINE output_cols = ['timestamp', 'timeseries']NEWLINE data[output_cols].to_csv(output_file, sep=sep)NEWLINENEWLINENEWLINEdef create_strings(data, scale_to_period, identifier='P', ext='.tab',NEWLINE **kwargs):NEWLINE """ Create strings to process filesNEWLINENEWLINE Args:NEWLINE data (pd.DataFrame): dataframe witht dates,NEWLINE scale_to_period (int): difference between period ranges,NEWLINE identifier (str): identifier for each seriesNEWLINE ext (str): output extension to save the file.NEWLINENEWLINE Note(s):NEWLINE * .tab extension is to match the switch inputs,NEWLINE """NEWLINENEWLINE strftime = '%Y%m%d%H' # Strftime for labelNEWLINE data['timestamp'] = data['date'].dt.strftime(strftime)NEWLINE data['TIMESERIES'] = data['date'].dt.strftime('%Y_%m{}'.format(identifier))NEWLINE data['daysinmonth'] = data['date'].dt.daysinmonthNEWLINENEWLINE # TODO: Fix this. 
Probably bug in near futureNEWLINE data['ts_period'] = data['date'].dt.yearNEWLINE data['scale_to_period'] = scale_to_periodNEWLINENEWLINE return (data)NEWLINENEWLINEdef create_timeseries(data, number, ext='.tab', **kwargs):NEWLINE """ Create timeseries output fileNEWLINENEWLINE Args:NEWLINE data (pd.DataFrame): dataframe witht dates,NEWLINE number (int) : number of timepointsNEWLINE ext (str): output extension to save the file.NEWLINENEWLINE Note(s):NEWLINE * .tab extension is to match the switch inputs,NEWLINE """NEWLINENEWLINE # Filename conventionNEWLINE output_file = output_path + 'timeseries' + extNEWLINE if ext == '.tab': sep='\t'NEWLINENEWLINE # If multiple timeseries included in dataNEWLINE if isinstance(data, list):NEWLINE data = pd.concat(data, sort=True)NEWLINENEWLINENEWLINE # Extract unique timeseries_idNEWLINE timeseries = data[['TIMESERIES', 'daysinmonth', 'ts_period',NEWLINE 'scale_to_period']].drop_duplicates('TIMESERIES')NEWLINE timeseries = timeseries.reset_index(drop=True)NEWLINENEWLINE timeseries['ts_duration_of_tp'] = 24/numberNEWLINE timeseries['count'] = timeseries.groupby('ts_period')['TIMESERIES'].transform(len)NEWLINE timeseries['ts_num_tps'] = data[['timestamp', 'TIMESERIES']].groupby('TIMESERIES').count().valuesNEWLINENEWLINE # TODO: Change value of 24 for number of days to represent and 365 forNEWLINE # the total amount of years?NEWLINE scaling = timeseries['scale_to_period']*24*(365/timeseries['count'])/(timeseries['ts_duration_of_tp']*timeseries['ts_num_tps'])NEWLINE timeseries['ts_scale_to_period'] = scalingNEWLINENEWLINE # timeseries.index += 1 # To start on 1 instead of 0NEWLINE timeseries.index.name = 'timepoint_id'NEWLINENEWLINE # Delete unused columnsNEWLINE del timeseries['daysinmonth']NEWLINE del timeseries['scale_to_period']NEWLINE del timeseries['count']NEWLINENEWLINE timeseries.to_csv(output_file, index=False, sep=sep)NEWLINENEWLINEdef create_variablecp(gen_project, data, timeseries_dict, path=default_path, ext='.tab', **kwargs):NEWLINE """ Create variable capacity factor output fileNEWLINENEWLINE Args:NEWLINE data (pd.DataFrame): dataframe witht dates,NEWLINE timeseries_dict (Dict): dictionary with the timeseries for each period,NEWLINE path (string): path to renewable energy file,NEWLINE ext (str): output extension to save the file.NEWLINENEWLINE Note(s):NEWLINE * .tab extension is to match the switch inputs,NEWLINE """NEWLINENEWLINE # Filename conventionNEWLINE output_file = output_path + 'variable_capacity_factors' + extNEWLINE if ext == '.tab': sep='\t'NEWLINENEWLINE # If multiple timeseries included in dataNEWLINE if isinstance(data, list):NEWLINE data = pd.concat(data, sort=True)NEWLINENEWLINE # Check if file exist and removeitNEWLINE if os.path.exists(output_file):NEWLINE os.remove(output_file)NEWLINENEWLINE output_file = output_path + 'variable_capacity_factors' + extNEWLINENEWLINE file_name = 'renewable.csv'NEWLINENEWLINE ren_cap_data = pd.read_csv(os.path.join(path, file_name), index_col=0,NEWLINE parse_dates=True)NEWLINE ren_cap_data = ren_cap_data.loc[ren_cap_data['project_name'].isin(gen_project['GENERATION_PROJECT'])]NEWLINENEWLINE # Quick fix to names. 
I need to include more code hereNEWLINE replaces = [('e', 'é'), ('a', 'á'), ('i', 'í'), ('o', 'ó'), ('u', 'ú') ,NEWLINE ('n', 'ñ')]NEWLINE try:NEWLINE for tupl in replaces:NEWLINE ren_cap_data['GENERATION_PROJECT'] = ren_cap_data['GENERATION_PROJECT'].str.replace(tupl[1], tupl[0])NEWLINE except KeyError:NEWLINE passNEWLINENEWLINE # Extract datetime without year informationNEWLINE filter_dates = pd.DatetimeIndex(data['date'].reset_index(drop=True)).strftime('%m-%d %H:%M:%S')NEWLINENEWLINE ren_tmp = ren_cap_data.copy()NEWLINE # print (ren_tmp.head())NEWLINENEWLINE list1 = []NEWLINE for row, value in timeseries_dict.items():NEWLINE # print (row)NEWLINE tmp2 = pd.concat(value, sort=True)NEWLINE filter_dates = (pd.DatetimeIndex(tmp2['date']NEWLINE .reset_index(drop=True))NEWLINE .strftime('%m-%d %H:%M:%S'))NEWLINE grouped = (ren_tmp[ren_tmp['time'].isin(filter_dates)]NEWLINE .reset_index(drop=True)NEWLINE .groupby('project_name', as_index=False))NEWLINE list1.append(pd.concat([group.reset_index(drop=True)NEWLINE for name, group in grouped], sort=True))NEWLINENEWLINE variable_cap = pd.concat(list1, sort=True)NEWLINE # FIXME: Temporal fixNEWLINE try:NEWLINE del variable_cap['GENERATION_PROJECT']NEWLINE except:NEWLINE passNEWLINE variable_tab = variable_cap.groupby('project_name')NEWLINE for keys in variable_tab.groups.keys():NEWLINE data = variable_tab.get_group(keys).reset_index(drop=True)NEWLINE data.index +=1NEWLINE data.index.name = 'timepoint'NEWLINE data.rename(columns={'capacity_factor': 'gen_max_capacity_factor',NEWLINE 'project_name':'GENERATION_PROJECT'},NEWLINE inplace=True)NEWLINE data.reset_index()[['GENERATION_PROJECT', 'timepoint',NEWLINE 'gen_max_capacity_factor']].to_csv(output_file, sep=sep,NEWLINE index=False, mode='a', header=(notNEWLINE os.path.exists(output_file)))NEWLINENEWLINEdef create_loads(load, data, ext='.tab', **kwargs):NEWLINE """ Create load data output fileNEWLINENEWLINE Args:NEWLINE load (pd.DataFrame): load dataNEWLINE data (pd.DataFrame): dataframe witht dates,NEWLINE ext (str): output extension to save the file.NEWLINENEWLINE Note(s):NEWLINE * .tab extension is to match the switch inputs,NEWLINE """NEWLINE if 'total' in load.columns:NEWLINE del load['total']NEWLINENEWLINE # Filename conventionNEWLINE output_file = output_path + 'loads' + extNEWLINE if ext == '.tab': sep='\t'NEWLINENEWLINE # If multiple timeseries included in dataNEWLINE if isinstance(data, list):NEWLINE data = pd.concat(data, sort=True)NEWLINENEWLINE loads_tmp = load.copy() #[load.year <= 2025]NEWLINENEWLINE # FIXME: This function is weird. 
We will need to change itNEWLINE # for something clearer.NEWLINE # Get data from the datetime providedNEWLINENEWLINE unstack_loads = (loads_tmp.loc[data['date']] # Get filter datesNEWLINE .reset_index(drop=True) # Remove datetimeNEWLINE .unstack(0)) # Change rows and columnsNEWLINENEWLINE # Temporal fix to convert series to dataframeNEWLINE loads_tab = pd.concat([group.reset_index()NEWLINE for _, group in unstack_loads.groupby(level=0)]NEWLINE , sort=True)NEWLINENEWLINE # Renaming columnsNEWLINE loads_tab = loads_tab.rename(columns={'level_0':'LOAD_ZONE',NEWLINE 0:'zone_demand_mw',NEWLINE 'level_1': 'TIMEPOINT'})NEWLINENEWLINE # Restart numbering of timepoint to start from 1NEWLINE loads_tab.loc[:, 'TIMEPOINT'] += 1NEWLINENEWLINE # Change columns orderNEWLINE columns_order = ['LOAD_ZONE', 'TIMEPOINT', 'zone_demand_mw']NEWLINE loads_tab = loads_tab[columns_order]NEWLINENEWLINE # Save output fileNEWLINE loads_tab.to_csv(output_file, sep=sep, index=False)NEWLINENEWLINEdef create_gen_build_cost(gen_project, gen_legacy, ext='.tab',NEWLINE path=default_path,NEWLINE **kwargs):NEWLINE """ Create gen build cost output fileNEWLINENEWLINE Args:NEWLINE data (pd.DataFrame): dataframe witht dates,NEWLINE ext (str): output extension to save the file.NEWLINENEWLINE Note(s):NEWLINE * .tab extension is to match the switch inputs,NEWLINE """NEWLINENEWLINE if ext == '.tab': sep='\t'NEWLINE output_file = output_path + 'gen_build_costs' + extNEWLINENEWLINE periods = read_yaml(path, 'periods.yaml')NEWLINENEWLINE output_costs = []NEWLINE gen_legacy.rename(columns={'PROJECT': 'GENERATION_PROJECT'}, inplace=True)NEWLINE costs_legacy = pd.read_csv(os.path.join(path,'gen_build_costs.tab'), sep='\t')NEWLINE columns = ['GENERATION_PROJECT','gen_overnight_cost', 'gen_fixed_om']NEWLINE output_columns = ['GENERATION_PROJECT', 'build_year', 'gen_overnight_cost',NEWLINE 'gen_fixed_om']NEWLINENEWLINE # Merge to get the build year of the predetermined plantsNEWLINE merged = pd.merge(gen_legacy, costs_legacy[columns], on=['GENERATION_PROJECT'])NEWLINENEWLINE # TODO: Check why we get duplicate values from the previous rowNEWLINE merged.drop_duplicates('GENERATION_PROJECT', inplace=True)NEWLINE old_plants = gen_legacy['GENERATION_PROJECT'].unique()NEWLINE new_plants = gen_project.loc[~gen_project['GENERATION_PROJECT'].isin(old_plants)]NEWLINENEWLINE # First add old plants NEWLINE output_costs.append(merged[output_columns])NEWLINENEWLINE # Add new plantsNEWLINE new_cost = create_gen_build_cost_new(new_plants)NEWLINE output_costs.append(new_cost)NEWLINENEWLINE gen_build_cost = pd.concat(output_costs, sort=True)NEWLINENEWLINE # FIXME: Temporal fix to avoid duplicate entriesNEWLINE df = (gen_build_cost.sort_values('gen_fixed_om')NEWLINE .drop_duplicates(subset=['GENERATION_PROJECT', 'build_year'],NEWLINE keep='last'))NEWLINE df = df.sort_values(by=['GENERATION_PROJECT', 'build_year'])NEWLINE df[output_columns].to_csv(output_file, sep=sep, index=False)NEWLINE # gen_build_cost.to_csv(output_file, sep=sep, index=False)NEWLINENEWLINEdef modify_costs(data, path=default_path):NEWLINE """ Modify cost data to derate itNEWLINENEWLINE Args:NEWLINE data (pd.DataFrame): dataframe witht dates,NEWLINENEWLINE Note(s):NEWLINE * This read the cost table and modify the cost by periodNEWLINE """NEWLINENEWLINE # TODO: Make a more cleaner way to load the fileNEWLINE cost_table = pd.read_csv(os.path.join(path, 'cost_tables.csv'))NEWLINENEWLINE df = data.copy()NEWLINE techo = cost_table['Technology'].unique()NEWLINE for index in 
df.build_year.unique():NEWLINE mask = (df['gen_tech'].isin(techo)) & (df['build_year'] == index)NEWLINE df.loc[mask]NEWLINE cost_table.loc[cost_table['Year'] == index]NEWLINE for tech in df['gen_tech'].unique():NEWLINE if tech in cost_table['Technology'].unique():NEWLINE mask2 = (cost_table['Technology'] == tech) & (cost_table['Year'] == index)NEWLINE df.loc[mask & (df['gen_tech'] == tech)]NEWLINE cost_table.loc[mask2, 'gen_overnight_cost'].values[0]NEWLINE df.loc[mask & (df['gen_tech'] == tech), 'gen_overnight_cost'] = cost_table.loc[mask2, 'gen_overnight_cost'].values[0]NEWLINE return (df)NEWLINENEWLINEdef gen_build_predetermined(existing, path=default_path, ext='.tab'):NEWLINE """ Construct gen build predetermined file"""NEWLINE output_file = output_path + 'gen_build_predetermined' + extNEWLINENEWLINE if ext == '.tab': sep = '\t'NEWLINENEWLINE # FIXME: Check what format is better to readNEWLINE file_name = 'gen_build_predetermined' + extNEWLINENEWLINE file_path = os.path.join(path, file_name)NEWLINENEWLINE gen_legacy = pd.read_csv(file_path, sep=sep) if existing else NoneNEWLINENEWLINE gen_legacy.to_csv(output_file, sep=sep, index=False)NEWLINENEWLINE return gen_legacyNEWLINENEWLINEdef create_fuel_cost(path=default_path, ext='.csv'):NEWLINE """ Create fuel_costs.tab """NEWLINE output_file = output_path + 'fuel_costs' + extNEWLINE periods = read_yaml(path, 'periods.yaml')NEWLINE fuel_costs = pd.read_csv(os.path.join(path, 'fuel_cost.csv'), index_col=0)NEWLINE fuel_cost = fuel_costs.loc[fuel_costs['period'].isin(periods['INVESTMENT_PERIOD'])]NEWLINE fuel_cost.to_csv(output_file)NEWLINENEWLINEif __name__ == "__main__":NEWLINE passNEWLINE
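# Minimal driver sketch for the helpers above. The 'high' load case and the choice of fourNEWLINE# timepoints per day are assumptions for illustration; the real paths and scenario settingsNEWLINE# come from context.py and the periods/rps YAML files.NEWLINEdef example_workflow():NEWLINE    loads = get_load_data(filename='high', total=True)NEWLINE    peaks = get_peak_day(loads['total'], 4)             # 4 timepoints around each monthly peakNEWLINE    peaks = create_strings(peaks, scale_to_period=10)   # label timeseries, periods and scalingNEWLINE    create_investment_period()                          # writes periods.tabNEWLINE    create_timepoints(peaks)                            # writes timepoints.tabNEWLINE    create_timeseries(peaks, 4)                         # writes timeseries.tabNEWLINE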
# -*- coding:utf-8 -*-NEWLINE# @Time: 2021/1/18 8:49NEWLINE# @Author: Zhanyi HouNEWLINE# @Email: 1295752786@qq.comNEWLINE# @File: syntaxana.pyNEWLINE'''NEWLINEpowered by NovalIDENEWLINELexical analysis module from NovalIDE.NEWLINEAuthor: Hou ZhanyiNEWLINEA core building block of the lexical analysis module;NEWLINEit relies on various regular expressions to extract features.NEWLINENEWLINE'''NEWLINEfrom typing import List, Tuple, DictNEWLINEimport reNEWLINENEWLINENEWLINEdef getReplacingDic() -> Dict:NEWLINE    dic = {}NEWLINE    dic[','] = ','NEWLINE    dic['。'] = '.'NEWLINE    dic[';'] = ';'NEWLINE    dic[':'] = ':'NEWLINE    dic['‘'] = '\''NEWLINE    dic['’'] = '\''NEWLINE    dic['“'] = '\"'NEWLINE    dic['”'] = '\"'NEWLINE    dic['【'] = '['NEWLINE    dic['】'] = ']'NEWLINE    dic['('] = '('NEWLINE    dic[')'] = ')'NEWLINE    return dicNEWLINENEWLINENEWLINEdef getIndent(s: str) -> Tuple[str, int]:NEWLINE    s = s.replace('\t', '    ')  # replace each tab with four spacesNEWLINE    s = s.rstrip()NEWLINE    if (len(s) > 0):NEWLINE        for i, ch in enumerate(s):NEWLINE            if (ch != ' '):NEWLINE                return s[i:], iNEWLINE        return "", i + 1NEWLINE    else:NEWLINE        return "", 0NEWLINENEWLINENEWLINEdef removeComment(s: str) -> str:NEWLINE    pos = s.find('#')NEWLINE    if (pos != -1):NEWLINE        return s[:pos]NEWLINE    else:NEWLINE        return sNEWLINENEWLINENEWLINEdef getStringContent(row):NEWLINE    passNEWLINENEWLINENEWLINEdef removeStringContent(row: str) -> str:NEWLINE    row = row.replace('\"', '\'')NEWLINE    if (row.count('\'') >= 2):NEWLINE        s = getAllFromRegex(regex=r'[\'](.*?)[\']', st=row)NEWLINE        for item in s:NEWLINE            row = row.replace('\'%s\'' % item, '\'\'')  # replace the content together with its quotesNEWLINE        return rowNEWLINE    else:NEWLINE        return rowNEWLINENEWLINENEWLINEdef parseVarType(row: str):NEWLINE    getInfoFromRegex(r'[\'](.*?)[\']', row)NEWLINENEWLINENEWLINEdef getAllFromRegex(regex: str, st: str) -> List[str]:NEWLINE    foundList = re.findall(re.compile(regex, re.S), st)NEWLINENEWLINE    return foundListNEWLINENEWLINENEWLINEdef getInfoFromRegex(regex: str, st: str) -> str:  # extract information with a regex; returns an empty string when there is no matchNEWLINE    foundList = re.findall(re.compile(regex, re.S), st)NEWLINE    item = ''NEWLINE    if (foundList != []):NEWLINE        item = foundList[0]NEWLINE    return itemNEWLINENEWLINENEWLINEdef getWordsFromString(s: str) -> list:NEWLINE    if (s != ''):NEWLINE        syms = s.split(',')  # split on commasNEWLINE        for i in range(len(syms)):NEWLINE            syms[i] = syms[i].strip()NEWLINE        return symsNEWLINE    else:NEWLINE        return []NEWLINENEWLINENEWLINEdef countPar(row: str) -> Tuple[int, int, int]:  # count the three kinds of bracketsNEWLINE    lparNum = row.count('(')NEWLINE    rparNum = row.count(')')NEWLINE    lbraceNum = row.count('{')NEWLINE    rbraceNum = row.count('}')NEWLINE    lbracketNum = row.count('[')NEWLINE    rbracketNum = row.count(']')NEWLINENEWLINE    return lparNum - rparNum, lbraceNum - rbraceNum, lbracketNum - rbracketNum  # opening minus closing count for each kindNEWLINENEWLINENEWLINEdef checkPar(row: str) -> int:NEWLINE    a, b, c = countPar(row)NEWLINE    if (a == 0) & (b == 0) & (c == 0):NEWLINE        return 1NEWLINE    else:NEWLINE        if (a < 0) | (b < 0) | (c < 0):NEWLINE            return -1NEWLINE        else:NEWLINE            return 0NEWLINENEWLINENEWLINEdef getBracketedContent(row: str) -> Tuple[str, str, str]:  # get what is inside the outermost bracket of any kind (not only parentheses!)NEWLINE    # Returns: the bracket type, the string inside the brackets, and the content before the bracket.NEWLINE    lst = [-1, -1, -1]NEWLINE    symList = ['(', '[', '{']NEWLINE    symListCouple = [')', ']', '}']NEWLINE    length = len(row)NEWLINE    for i in range(len(lst)):NEWLINE        lst[i] = row.find(symList[i])NEWLINE        if (lst[i] == -1):NEWLINE            lst[i] = lengthNEWLINE    minVal = min(lst)NEWLINE    if (minVal == length):  # no bracket at allNEWLINE        return '', '', row[:minVal]  # so return no bracket type and no bracket content, just what comes before the bracketNEWLINE    else:NEWLINE        pos = lst.index(minVal)  # index of the bracket that appears firstNEWLINE        regex = r'[%s](.*)[%s]' % 
(symList[pos], symListCouple[pos])NEWLINE        return symList[pos], getInfoFromRegex(regex=regex, st=row), row[:minVal]NEWLINENEWLINENEWLINEdef getFuncArgs(row: str) -> List[str]:  # get the function's input parametersNEWLINENEWLINE    s = getInfoFromRegex(regex=r'[(](.*)[)]', st=row)NEWLINENEWLINE    li = getWordsFromString(s)NEWLINENEWLINE    if (len(li) > 0):NEWLINE        if (li[0] == 'self'):  # a leading 'self' parameter is not kept in the resultNEWLINE            li.pop(0)NEWLINE        for i in range(len(li)):NEWLINE            eqSymPos = li[i].find('=')NEWLINENEWLINE            if (eqSymPos != -1):  # if the parameter contains an '=' signNEWLINE                li[i] = li[i][:eqSymPos]  # drop the '=' and the default valueNEWLINE            colonSymPos = li[i].find(':')NEWLINE            if colonSymPos != -1:NEWLINE                li[i] = li[i][:colonSymPos]NEWLINENEWLINE    return liNEWLINENEWLINENEWLINEdef getFuncName(row: str) -> str:  # get the function nameNEWLINE    return getInfoFromRegex(regex=r'def\s(.*?)[(]', st=row)  # note: the pattern matches the function name, accounting for the space after 'def'NEWLINENEWLINENEWLINEdef getLocalVarNames(row: str) -> List[str]:  # get the names of local variablesNEWLINE    li = getInfoFromRegex(regex=r'(.*?)[=]', st=row)  # note: the pattern matches the variable names, which may carry surrounding spacesNEWLINENEWLINE    words = getWordsFromString(li)NEWLINE    result = []NEWLINE    for w in words:  # attribute accesses (names containing a dot) do not countNEWLINE        if (w.find('.') == -1):NEWLINE            result.append(w)NEWLINE    return resultNEWLINENEWLINENEWLINEdef is_number(str_number: str) -> bool:NEWLINE    if (str_number.split(".")[0]).isdigit() or str_number.isdigit() or (str_number.split('-')[-1]).split(".")[NEWLINE        -1].isdigit():NEWLINE        return TrueNEWLINE    else:NEWLINE        return FalseNEWLINENEWLINENEWLINEdef getForVariables(row: str) -> List[str]:NEWLINE    '''NEWLINE    Get the variables defined in a for loop.NEWLINE    '''NEWLINE    s = getInfoFromRegex(r'for(.*?)in', row)NEWLINE    s = s.strip()NEWLINE    return getWordsFromString(s)NEWLINENEWLINENEWLINEdef getVarType(row: str) -> str:NEWLINE    '''NEWLINE    Get the type of a variable, e.g. collection, number and so on.NEWLINE    '''NEWLINE    bracket, content, outer = getBracketedContent(row)NEWLINE    li = outer.split('=')NEWLINE    if (len(li) >= 2):NEWLINENEWLINE        if (li[1].strip() == ''):  # in this case the bracketed value is assigned directlyNEWLINE            if (bracket == '('):NEWLINE                return ':tuple'NEWLINE            elif (bracket == '['):NEWLINE                return ':list'NEWLINE        else:NEWLINE            st = li[1].split(',')[0]NEWLINE            if (is_number(st)):NEWLINE                return ':number'NEWLINENEWLINE    return ''NEWLINENEWLINENEWLINEclass Row():NEWLINE    def __init__(self, pos: int, text: str, indent: int) -> None:NEWLINE        self.pos = posNEWLINE        self.text = textNEWLINE        self.indent = indentNEWLINENEWLINE    def __repr__(self) -> str:NEWLINE        return 'row:' + repr(self.pos) + "\t indent:" + repr(self.indent) + "\t text:" + self.text + '\n'NEWLINENEWLINENEWLINEdef regularize(rawText: List[str]) -> List[Row]:NEWLINE    global kwdTuple, indexList, charStrNEWLINENEWLINE    f = rawText  # the opened file as an array, one element per lineNEWLINE    regularifiedText = ''NEWLINE    rowList = []NEWLINE    currentRow = Row(0, '', 0)  # create a placeholder object so that type checking stays happyNEWLINE    inStaticFunction = FalseNEWLINE    inFunctionDefinition = FalseNEWLINE    skipLine = FalseNEWLINE    currentFuncIndent = 0NEWLINE    currentIndent = 0NEWLINE    funcIndent = 0NEWLINENEWLINE    for i, l in enumerate(f):NEWLINE        l = removeStringContent(l)NEWLINE        l = removeComment(l)NEWLINENEWLINE        if (skipLine == False):NEWLINE            row, currentIndent = getIndent(l)  # get the current line text and indent, trimming leading spaces at the same timeNEWLINE            currentRow = Row(i, row, currentIndent)NEWLINE            rowList.append(currentRow)NEWLINENEWLINE        else:NEWLINE            currentRow.text += l.strip()  # the previous line has not finished yet, so skip indent detection and just appendNEWLINE            rowList.append(Row(i, '', 0))  # this line then carries no content of its ownNEWLINENEWLINE        cp = checkPar(currentRow.text)NEWLINENEWLINE        if (cp == 0):  # brackets are still open, so keep going until a line closes themNEWLINE            skipLine = TrueNEWLINE            if (len(currentRow.text) >= 200):  # too long, bail outNEWLINE                skipLine = FalseNEWLINE            continueNEWLINE        elif (cp == 
-1):  # more closing brackets than opening ones: give up on this caseNEWLINE            skipLine = FalseNEWLINE            continueNEWLINE        else:NEWLINE            skipLine = FalseNEWLINE    return rowListNEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE    regularize(['', '', ''])NEWLINE
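# Quick usage sketch for the lexical helpers above; the sample source lines are invented for illustration.NEWLINEsample = [NEWLINE    'def area(self, width, height=1):',NEWLINE    '    result = width * height',NEWLINE    '    return result',NEWLINE]NEWLINEprint(getFuncName(sample[0]))       # 'area'NEWLINEprint(getFuncArgs(sample[0]))       # ['width', 'height'] (self dropped, default stripped)NEWLINEprint(getLocalVarNames(sample[1]))  # ['result']NEWLINEprint(checkPar('f(x, (y'))          # 0, brackets are still openNEWLINEprint(regularize(sample))           # three Row objects with their text and indentNEWLINE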
"""NEWLINE# https://code.google.com/p/promisedata/source/browse/#svn%2Ftrunk%2Feffort%2FalbrechtNEWLINENEWLINEStandard header:NEWLINENEWLINE"""NEWLINEfrom __future__ import division,print_functionNEWLINEimport sysNEWLINEsys.dont_write_bytecode = TrueNEWLINEfrom lib import *NEWLINENEWLINE"""NEWLINE@attribute Language numericNEWLINE@attribute Hardware numericNEWLINE@attribute Duration numericNEWLINE@attribute KSLOC numericNEWLINE@attribute AdjFP numericNEWLINE@attribute RAWFP numericNEWLINE@attribute EffortMM numericNEWLINE"""NEWLINENEWLINEdef kemerer(weighFeature = None, NEWLINE split = "variance"):NEWLINE vl=1;l=2;n=3;h=4;vh=5;xh=6;_=0NEWLINE return data(indep= [ NEWLINE # 0..5NEWLINE 'Language','Hardware','Duration','KSLOC','AdjFP','RAWFP'],NEWLINE less = ['Effort'],NEWLINE _rows=[NEWLINE [1,1,17,253.6,1217.1,1010,287],NEWLINE [1,2,7,40.5,507.3,457,82.5],NEWLINE [1,3,15,450,2306.8,2284,1107.31],NEWLINE [1,1,18,214.4,788.5,881,86.9],NEWLINE [1,2,13,449.9,1337.6,1583,336.3],NEWLINE [1,4,5,50,421.3,411,84],NEWLINE [2,4,5,43,99.9,97,23.2],NEWLINE [1,2,11,200,993,998,130.3],NEWLINE [1,1,14,289,1592.9,1554,116],NEWLINE [1,1,5,39,240,250,72],NEWLINE [1,1,13,254.2,1611,1603,258.7],NEWLINE [1,5,31,128.6,789,724,230.7],NEWLINE [1,6,20,161.4,690.9,705,157],NEWLINE [1,1,26,164.8,1347.5,1375,246.9],NEWLINE [3,1,14,60.2,1044.3,976,69.9]NEWLINE ],NEWLINE _tunings =[[NEWLINE # vlow low nom high vhigh xhighNEWLINE #scale factors:NEWLINE 'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _ ],[NEWLINE 'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _ ],[NEWLINE 'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _ ],[NEWLINE 'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _ ],[NEWLINE 'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _ ]],NEWLINE weighFeature = weighFeature,NEWLINE _split = split,NEWLINE _isCocomo = FalseNEWLINE )NEWLINENEWLINEdef _kemerer(): print(kemerer())
#!/usr/bin/env python3NEWLINENEWLINEimport reNEWLINENEWLINENEWLINEdef parse(line):NEWLINE m = re.match(r'(.*) (\d+),(\d+) through (\d+),(\d+)', line)NEWLINE if m:NEWLINE op = m.group(1)NEWLINE p0 = [int(m.group(2)), int(m.group(3))]NEWLINE p1 = [int(m.group(4)), int(m.group(5))]NEWLINENEWLINE return op, p0, p1NEWLINENEWLINENEWLINEdef part1(filename):NEWLINE with open(filename) as f:NEWLINE lines = f.readlines()NEWLINENEWLINE grid = [[0] * 1000 for _ in range(1000)]NEWLINENEWLINE for line in lines:NEWLINE op, p0, p1 = parse(line)NEWLINE for i in range(p0[0], p1[0] + 1):NEWLINE for j in range(p0[1], p1[1] + 1):NEWLINE if op == 'turn on':NEWLINE grid[i][j] = 1NEWLINE elif op == 'turn off':NEWLINE grid[i][j] = 0NEWLINE elif op == 'toggle':NEWLINE grid[i][j] = int(not grid[i][j])NEWLINENEWLINE count = 0NEWLINE for i in range(1000):NEWLINE for j in range(1000):NEWLINE if grid[i][j] == 1:NEWLINE count += 1NEWLINENEWLINE print(count)NEWLINENEWLINENEWLINEdef part2(filename):NEWLINE with open(filename) as f:NEWLINE lines = f.readlines()NEWLINENEWLINE grid = [[0] * 1000 for _ in range(1000)]NEWLINENEWLINE for line in lines:NEWLINE op, p0, p1 = parse(line)NEWLINE for i in range(p0[0], p1[0] + 1):NEWLINE for j in range(p0[1], p1[1] + 1):NEWLINE if op == 'turn on':NEWLINE grid[i][j] += 1NEWLINE elif op == 'turn off':NEWLINE grid[i][j] = max(0, grid[i][j] - 1)NEWLINE elif op == 'toggle':NEWLINE grid[i][j] += 2NEWLINENEWLINE count = 0NEWLINE for i in range(1000):NEWLINE for j in range(1000):NEWLINE count += grid[i][j]NEWLINENEWLINE print(count)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE part1('day06input.txt')NEWLINE part2('day06input.txt')
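# Quick sanity check of parse() above with an invented instruction line.NEWLINEprint(parse('turn on 0,0 through 2,2'))   # ('turn on', [0, 0], [2, 2])NEWLINE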
# -*- coding: utf-8 -*-NEWLINE# Copyright (c) 2012-2015, Philip Xu <pyx@xrefactor.com>NEWLINE# License: BSD New, see LICENSE for details.NEWLINEimport pytestNEWLINENEWLINEfrom monad.actions import firstNEWLINEfrom monad.decorators import maybeNEWLINEfrom monad.exceptions import ExtractErrorNEWLINEfrom monad.types import Maybe, Just, NothingNEWLINENEWLINEtest_range = range(-100, 100)NEWLINEunit = Maybe.unitNEWLINENEWLINENEWLINEdef add_1(n):NEWLINE if isinstance(n, int):NEWLINE return unit(n + 1)NEWLINE else:NEWLINE return NothingNEWLINENEWLINENEWLINEdef double(n):NEWLINE if isinstance(n, int):NEWLINE return unit(n * 2)NEWLINE else:NEWLINE return NothingNEWLINENEWLINENEWLINEdef fail(n):NEWLINE return NothingNEWLINENEWLINENEWLINEdef test_local_helper_function_add_one():NEWLINE for n in test_range:NEWLINE assert add_1(n) == unit(n + 1)NEWLINE assert add_1('1') is NothingNEWLINENEWLINENEWLINEdef test_local_helper_function_double():NEWLINE for n in test_range:NEWLINE assert double(n) == unit(n * 2)NEWLINE assert double('1') is NothingNEWLINENEWLINENEWLINEdef test_local_helper_function_fail():NEWLINE for n in test_range:NEWLINE assert fail(n) is NothingNEWLINENEWLINENEWLINEdef test_type():NEWLINE assert unit(1) == Just(1) == Maybe(1)NEWLINE assert Nothing != unit(1)NEWLINE assert type(unit(1)) == type(Just(1)) == type(Maybe(1)) == type(Nothing)NEWLINENEWLINENEWLINEdef test_compare():NEWLINE assert Nothing == NothingNEWLINE for n in test_range:NEWLINE assert unit(n) != NothingNEWLINE assert unit(n) is not NothingNEWLINENEWLINENEWLINEdef test_ordering():NEWLINE assert (Nothing < Nothing) is FalseNEWLINE assert (Nothing > Nothing) is FalseNEWLINE for n in test_range:NEWLINE assert (Nothing > unit(n)) is FalseNEWLINE assert (unit(n) < Nothing) is FalseNEWLINENEWLINENEWLINEdef test_as_context_manager():NEWLINE for n in test_range:NEWLINE with pytest.raises(ExtractError):NEWLINE with unit(n) >> double >> fail >> double as result:NEWLINE assert FalseNEWLINE assert resultNEWLINENEWLINE with pytest.raises(ExtractError):NEWLINE with Nothing as n:NEWLINE assert FalseNEWLINENEWLINE with pytest.raises(ExtractError):NEWLINE with double(n) as result:NEWLINE with Nothing as n:NEWLINE assert FalseNEWLINENEWLINE with pytest.raises(ExtractError):NEWLINE with double(n) as result, Nothing as n:NEWLINE assert FalseNEWLINENEWLINENEWLINEdef test_bool():NEWLINE assert bool(Nothing) is FalseNEWLINE for n in test_range:NEWLINE assert bool(unit(n)) is TrueNEWLINENEWLINENEWLINEdef test_from_value():NEWLINE false_v = [False, None, 0, 0.0, (), [], {}, '', set(), frozenset()]NEWLINE for v in false_v:NEWLINE assert Maybe.from_value(v) is NothingNEWLINENEWLINE true_v = [True, 1, 1.0, (0,), [0], {0: 0}, '0', set('0'), frozenset('0')]NEWLINE for v in true_v:NEWLINE assert Maybe.from_value(v) == unit(v)NEWLINENEWLINENEWLINEdef test_as_iterator():NEWLINE for n in test_range:NEWLINE for i in unit(n):NEWLINE assert i == nNEWLINENEWLINE assert list(unit(n)) == [n]NEWLINENEWLINENEWLINEdef test_bind():NEWLINE assert Nothing.bind(add_1) is NothingNEWLINE for n in test_range:NEWLINE m = unit(n)NEWLINE assert m.bind(fail) is NothingNEWLINENEWLINENEWLINEdef test_bind_operator():NEWLINE for n in test_range:NEWLINE m = unit(n)NEWLINE assert m >> fail is NothingNEWLINE assert fail(n) >> add_1 is NothingNEWLINENEWLINENEWLINEdef test_reversed_bind_operator():NEWLINE for n in test_range:NEWLINE m = unit(n)NEWLINE assert fail << m is NothingNEWLINE assert add_1 << fail(n) is NothingNEWLINENEWLINENEWLINEdef test_chain_bind_operator():NEWLINE for n in 
test_range:NEWLINE m = unit(n)NEWLINE assert m >> fail >> add_1 == NothingNEWLINE assert m >> add_1 >> fail == NothingNEWLINE assert m >> fail >> double == NothingNEWLINE assert m >> double >> fail == NothingNEWLINENEWLINENEWLINEdef test_monad_law_left_identity():NEWLINE for n in test_range:NEWLINE # unit n >>= f == f nNEWLINE f = failNEWLINE assert unit(n) >> f == f(n)NEWLINENEWLINENEWLINEdef test_monad_law_right_identity():NEWLINE for n in test_range:NEWLINE # m >>= unit == mNEWLINE assert Nothing >> unit == NothingNEWLINENEWLINENEWLINEdef test_monad_law_associativity():NEWLINE for n in test_range:NEWLINE # m >>= (\x -> k x >>= h) == (m >>= k) >>= hNEWLINE m = unit(n)NEWLINE k = add_1NEWLINE h = failNEWLINE assert m >> (lambda x: k(x) >> h) == (m >> k) >> hNEWLINE k = failNEWLINE h = doubleNEWLINE assert m >> (lambda x: k(x) >> h) == (m >> k) >> hNEWLINE k = failNEWLINE h = failNEWLINE assert m >> (lambda x: k(x) >> h) == (m >> k) >> hNEWLINENEWLINENEWLINEdef test_maybe_decorator():NEWLINE @maybeNEWLINE def div(a, b):NEWLINE return a / bNEWLINENEWLINE assert div(42, 21) == unit(2)NEWLINE assert div(42, 0) is NothingNEWLINENEWLINENEWLINEdef test_maybe_decorator_with_predicate():NEWLINE @maybe(predicate=bool)NEWLINE def truth(x):NEWLINE return xNEWLINENEWLINE assert truth(42) == unit(42)NEWLINE assert truth(None) is NothingNEWLINE assert add_1(0) >> truth == unit(1)NEWLINE assert add_1(-1) >> truth is NothingNEWLINE assert truth(False) >> double is NothingNEWLINE assert double([]) >> truth is NothingNEWLINENEWLINENEWLINEdef test_maybe_decorator_with_value():NEWLINE @maybe(nothing_on_value=None)NEWLINE def truth(x):NEWLINE return xNEWLINENEWLINE assert truth(42) is not NothingNEWLINE assert truth('') is not NothingNEWLINE assert truth(0) is not NothingNEWLINE assert truth(False) is not NothingNEWLINE assert truth(None) is NothingNEWLINENEWLINENEWLINEdef test_maybe_decorator_combined():NEWLINE @maybe(predicate=bool, nothing_on_value=42)NEWLINE def wrap(x):NEWLINE return xNEWLINENEWLINE assert wrap(True) == unit(True)NEWLINE assert wrap(False) is NothingNEWLINE assert wrap('something') == unit('something')NEWLINE assert wrap('') is NothingNEWLINE assert wrap([False]) == unit([False])NEWLINE assert wrap([]) is NothingNEWLINE assert wrap(1) == unit(1)NEWLINE assert wrap(0) is NothingNEWLINE assert wrap(None) is NothingNEWLINE assert wrap(42) is NothingNEWLINENEWLINENEWLINEdef test_maybe_decorator_none_exception():NEWLINE @maybe(nothing_on_exception=None)NEWLINE def div(a, b):NEWLINE return a / bNEWLINENEWLINE with pytest.raises(ZeroDivisionError):NEWLINE div(42, 0)NEWLINENEWLINENEWLINEdef test_maybe_decorator_empty_seq_exception():NEWLINE for empty in ([], tuple(), set()):NEWLINE @maybe(nothing_on_exception=empty)NEWLINE def div(a, b):NEWLINE return a / bNEWLINENEWLINE with pytest.raises(ZeroDivisionError):NEWLINE div(42, 0)NEWLINENEWLINENEWLINEdef test_maybe_decorator_specific_exception():NEWLINE @maybe(nothing_on_exception=ZeroDivisionError)NEWLINE def div(a, b):NEWLINE return a / bNEWLINENEWLINE assert div(42, 0) is NothingNEWLINENEWLINENEWLINEdef test_maybe_decorator_specific_exception_tuple():NEWLINE @maybe(nothing_on_exception=(IOError, ZeroDivisionError))NEWLINE def div(a, b):NEWLINE if a < 0:NEWLINE raise IOErrorNEWLINE return a / bNEWLINENEWLINE assert div(42, 0) is NothingNEWLINE assert div(-42, 2) is NothingNEWLINENEWLINENEWLINEdef test_first():NEWLINE assert first([Nothing, Just(42)]) == Just(42)NEWLINE assert first([Just(42), Just(43)]) == Just(42)NEWLINE assert first([Nothing, 
Nothing]) == NothingNEWLINE assert first([]) == NothingNEWLINENEWLINENEWLINEdef test_first_default():NEWLINE assert first([Nothing, Nothing], default=Just(42)) == Just(42)NEWLINENEWLINENEWLINEdef test_first_predicate():NEWLINE assert first([False, 0, 2, 1], predicate=bool) == Just(2)NEWLINE assert first([False, 0, ''], predicate=bool) == NothingNEWLINE assert first(range(100), predicate=lambda x: x > 50) == Just(51)NEWLINE assert first(range(100), predicate=lambda x: x > 100) == NothingNEWLINENEWLINENEWLINEdef test_first_wrap_just_only_if_not_already():NEWLINE assert first([False, True], predicate=bool) == Just(True)NEWLINE assert first([False, Just(True)], bool) != Just(Just(True))NEWLINE assert first([False, Just(True)], bool) == Just(True)NEWLINENEWLINENEWLINEdef test_first_is_lazy():NEWLINE def once():NEWLINE yield Just(42)NEWLINE raise ExceptionNEWLINENEWLINE assert first(once()) == Just(42)NEWLINE
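# Tiny illustration of the Maybe chaining exercised by the tests above (assumes the same monad package is importable).NEWLINEfrom monad.types import Just, NothingNEWLINEprint(Just(20) >> (lambda n: Just(n + 1)) >> (lambda n: Just(n * 2)))   # evaluates to Just(42)NEWLINEprint(Nothing >> (lambda n: Just(n + 1)))                               # stays NothingNEWLINE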
#!/usr/bin/python3NEWLINE# -*- coding: utf-8 -*-NEWLINENEWLINE#NEWLINE# Copyright (C) 2017 Kévin MathieuNEWLINE#NEWLINE# This software may be modified and distributed under the termsNEWLINE# of the MIT license. See the LICENSE file for details.NEWLINE#NEWLINENEWLINEimport requestsNEWLINENEWLINENEWLINEclass URL:NEWLINE    def __init__(self, url):NEWLINE        self.baseUrl = urlNEWLINENEWLINE    def call(self, params = None):NEWLINE        req = requests.get(self.baseUrl, params=params)NEWLINENEWLINE        return reqNEWLINE
"""NEWLINEThe bot's main file.NEWLINE"""NEWLINENEWLINEimport osNEWLINEimport platformNEWLINEimport randomNEWLINENEWLINEimport discordNEWLINEfrom discord.ext import commands, tasksNEWLINEfrom discord.ext.commands import BotNEWLINEfrom discord_slash import SlashCommand, SlashContext  # Importing the newly installed library.NEWLINENEWLINEfrom scripts.config import read_configNEWLINENEWLINEconfig = read_config()NEWLINENEWLINETOKEN = config['app_token']NEWLINEAPP_ID = config['app_id']NEWLINENEWLINENEWLINE""" NEWLINESetup bot intents (events restrictions)NEWLINEFor more information about intents, please go to the following websites:NEWLINEhttps://discordpy.readthedocs.io/en/latest/intents.htmlNEWLINEhttps://discordpy.readthedocs.io/en/latest/intents.html#privileged-intentsNEWLINENEWLINENEWLINEDefault Intents:NEWLINEintents.messages = TrueNEWLINEintents.reactions = TrueNEWLINEintents.guilds = TrueNEWLINEintents.emojis = TrueNEWLINEintents.bans = TrueNEWLINEintents.guild_typing = FalseNEWLINEintents.typing = FalseNEWLINEintents.dm_messages = FalseNEWLINEintents.dm_reactions = FalseNEWLINEintents.dm_typing = FalseNEWLINEintents.guild_messages = TrueNEWLINEintents.guild_reactions = TrueNEWLINEintents.integrations = TrueNEWLINEintents.invites = TrueNEWLINEintents.voice_states = FalseNEWLINEintents.webhooks = FalseNEWLINENEWLINEPrivileged Intents (Needs to be enabled on dev page), please use them only if you need them:NEWLINEintents.presences = TrueNEWLINEintents.members = TrueNEWLINE"""NEWLINENEWLINENEWLINEintents = discord.Intents.default()NEWLINEintents.members = TrueNEWLINENEWLINEbot = Bot(command_prefix=config["bot_prefix"], intents=intents)NEWLINEslash = SlashCommand(bot, sync_commands=True)NEWLINENEWLINE# The code in this event is executed when the bot is readyNEWLINE@bot.eventNEWLINEasync def on_ready():NEWLINE    print(f"Logged in as {bot.user.name}")NEWLINE    print(f"Discord.py API version: {discord.__version__}")NEWLINE    print(f"Python version: {platform.python_version()}")NEWLINE    print(f"Running on: {platform.system()} {platform.release()} ({os.name})")NEWLINE    print("-------------------")NEWLINE    status_task.start()NEWLINENEWLINENEWLINE# Setup the game status task of the botNEWLINE@tasks.loop(minutes=1.0)NEWLINEasync def status_task():NEWLINE    statuses = ["image search", ""]NEWLINE    await bot.change_presence(activity=discord.Game(random.choice(statuses)))NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE    for file in os.listdir("./cogs"):NEWLINE        if file.endswith(".py"):NEWLINE            extension = file[:-3]NEWLINE            try:NEWLINE                bot.load_extension(f"cogs.{extension}")NEWLINE                print(f"Loaded extension '{extension}'")NEWLINE            except Exception as e:NEWLINE                exception = f"{type(e).__name__}: {e}"NEWLINE                print(f"Failed to load extension {extension}\n{exception}")NEWLINENEWLINENEWLINE# The code in this event is executed every time someone sends a message, with or without the prefixNEWLINE@bot.eventNEWLINEasync def on_message(message):NEWLINE    # Ignores if a command is being executed by a bot or by the bot itselfNEWLINE    if message.author == bot.user or message.author.bot:NEWLINE        returnNEWLINE    await bot.process_commands(message)NEWLINENEWLINENEWLINE# The code in this event is executed every time a command has been *successfully* executedNEWLINE@bot.eventNEWLINEasync def on_slash_command(ctx: SlashContext):NEWLINE    fullCommandName = ctx.nameNEWLINE    split = fullCommandName.split(" ")NEWLINE    executedCommand = str(split[0])NEWLINE    print(NEWLINE        f"Executed {executedCommand} command in {ctx.guild.name} (ID: {ctx.guild.id}) by 
{ctx.author} (ID: {ctx.author.id})")NEWLINENEWLINENEWLINE# The code in this event is executed every time a valid command raises an errorNEWLINE@bot.eventNEWLINEasync def on_command_error(context, error):NEWLINE    raise errorNEWLINENEWLINENEWLINE# Run the bot with the tokenNEWLINEbot.run(TOKEN)NEWLINE
#!/usr/bin/env python3NEWLINE# Copyright (c) 2020 Bitcoin AssociationNEWLINE# Distributed under the Open BSV software license, see the accompanying file LICENSE.NEWLINENEWLINE# Test mempool eviction based on transaction feeNEWLINENEWLINE# 1. Fill 90% of the mempool with transactions with a high feeNEWLINE# 2. Fill 10% of the mempool with transactions with a lower feeNEWLINE# 3. Send a large transaction (15% of mempool) that has a lower fee thanNEWLINE# most of the transactions in the poolNEWLINE# 4. See what happens...NEWLINENEWLINEfrom test_framework.test_framework import BitcoinTestFrameworkNEWLINEfrom test_framework.authproxy import JSONRPCExceptionNEWLINEfrom test_framework.cdefs import ONE_MEGABYTENEWLINEfrom test_framework.util import bytes_to_hex_str, create_confirmed_utxos, satoshi_roundNEWLINEfrom test_framework.util import assert_equal, assert_raises_rpc_errorNEWLINEimport decimalNEWLINEimport randomNEWLINENEWLINEdef send_tx_with_data(node, utxo, fee, data_size):NEWLINE assert(data_size > 24)NEWLINE send_value = utxo['amount'] - feeNEWLINE inputs = []NEWLINE inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})NEWLINE outputs = {}NEWLINE addr = node.getnewaddress()NEWLINE outputs[addr] = satoshi_round(send_value)NEWLINE data = bytearray(random.getrandbits(8) for _ in range(24)) + bytearray(data_size - 24)NEWLINE outputs["data"] = bytes_to_hex_str(data)NEWLINE rawTxn = node.createrawtransaction(inputs, outputs)NEWLINE signedTxn = node.signrawtransaction(rawTxn)["hex"]NEWLINE return node.sendrawtransaction(signedTxn)NEWLINENEWLINEclass MempoolEvictionPriorityTest(BitcoinTestFramework):NEWLINE mempool_size = 300NEWLINE total_number_of_transactions = 50NEWLINENEWLINE def set_test_params(self):NEWLINE self.setup_clean_chain = TrueNEWLINE self.num_nodes = 1NEWLINE self.extra_args = [["-maxmempool={}".format(self.mempool_size),NEWLINE "-maxmempoolsizedisk=0",NEWLINE "-spendzeroconfchange=0",NEWLINE "-genesisactivationheight=1",NEWLINE "-maxtxsizepolicy=0",NEWLINE '-maxtxfee=1.0']]NEWLINENEWLINE def run_test(self):NEWLINE transaction_overhead = 2048NEWLINE mempool_size = self.mempool_sizeNEWLINE total_number_of_transactions = self.total_number_of_transactionsNEWLINE number_of_good_transactions = total_number_of_transactions * 90 // 100NEWLINE number_of_cheap_transactions = total_number_of_transactions - number_of_good_transactionsNEWLINE last_transaction_factor = total_number_of_transactions * 15 // 100NEWLINE transaction_size = mempool_size * ONE_MEGABYTE // total_number_of_transactions - transaction_overheadNEWLINENEWLINE relayfee = self.nodes[0].getnetworkinfo()['relayfee']NEWLINE utxos = create_confirmed_utxos(relayfee, self.nodes[0], total_number_of_transactions + 1)NEWLINENEWLINE # Transactions with higher fee rateNEWLINE # size: 6MiB, fee: 10,000,000 satoshi (0.1 BSV) --> fee rate: 1.6 sat/byteNEWLINE good_fee = decimal.Decimal('0.1')NEWLINE good_txids = []NEWLINE for i in range(number_of_good_transactions):NEWLINE txid = send_tx_with_data(self.nodes[0], utxos.pop(), good_fee, transaction_size)NEWLINE self.log.debug("Inserted good transaction %d %s", i + 1, txid)NEWLINE good_txids.append(txid)NEWLINENEWLINE assert_equal(len(self.nodes[0].getrawmempool()), number_of_good_transactions)NEWLINE self.log.info("%d transactions successfully arrived to mempool.", number_of_good_transactions)NEWLINENEWLINE # Transactions with lower fee rateNEWLINE # size: 6MiB, fee: 2,500,000 satoshi (0.025 BSV) --> fee rate: 0.4 sat/byteNEWLINE cheap_fee = good_fee / 4NEWLINE cheap_txids = []NEWLINE 
for i in range(number_of_cheap_transactions):NEWLINE txid = send_tx_with_data(self.nodes[0], utxos.pop(), cheap_fee, transaction_size)NEWLINE self.log.debug("Inserted cheap transaction %d %s", i + 1, txid)NEWLINE cheap_txids.append(txid)NEWLINENEWLINE assert_equal(len(self.nodes[0].getrawmempool()), total_number_of_transactions)NEWLINE self.log.info("%d transactions successfully arrived to mempool.", total_number_of_transactions)NEWLINENEWLINE # The mempool should now be full. Insert the last, large transactionNEWLINE # size: 42MiB, fee: 35,000,000 satoshi (0.35 BSV) --> fee rate: 0.8 sat/byteNEWLINE self.log.info("Inserting last transaction")NEWLINE last_fee = last_transaction_factor * good_fee / 2NEWLINE last_size = last_transaction_factor * transaction_sizeNEWLINE assert_raises_rpc_error(NEWLINE -26, 'mempool full',NEWLINE send_tx_with_data, self.nodes[0], utxos.pop(), last_fee, last_size)NEWLINENEWLINE # Now let's see what happens. There should be no cheap transactions in the pool any more.NEWLINE mempool = self.nodes[0].getrawmempool()NEWLINE assert_equal(len(mempool), number_of_good_transactions)NEWLINE self.log.info("%d transactions were evicted.", total_number_of_transactions - len(mempool))NEWLINENEWLINE for txid in cheap_txids:NEWLINE assert(txid not in mempool)NEWLINE self.log.info("All transactions with insufficient fee were evicted.")NEWLINENEWLINEif __name__ == '__main__':NEWLINE MempoolEvictionPriorityTest().main()NEWLINE
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License");NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINE# ==============================================================================NEWLINE"""Functions for reading and updating configuration files."""NEWLINENEWLINEimport tensorflow.compat.v1 as tfNEWLINENEWLINEfrom google.protobuf import text_formatNEWLINENEWLINEfrom object_detection.protos import eval_pb2NEWLINEfrom object_detection.protos import input_reader_pb2NEWLINEfrom object_detection.protos import model_pb2NEWLINEfrom object_detection.protos import pipeline_pb2NEWLINEfrom object_detection.protos import train_pb2NEWLINENEWLINENEWLINEdef get_image_resizer_config(model_config):NEWLINE """Returns the image resizer config from a model config.NEWLINENEWLINE Args:NEWLINE model_config: A model_pb2.DetectionModel.NEWLINENEWLINE Returns:NEWLINE An image_resizer_pb2.ImageResizer.NEWLINENEWLINE Raises:NEWLINE ValueError: If the model type is not recognized.NEWLINE """NEWLINE meta_architecture = model_config.WhichOneof("model")NEWLINE if meta_architecture == "faster_rcnn":NEWLINE return model_config.faster_rcnn.image_resizerNEWLINE if meta_architecture == "ssd":NEWLINE return model_config.ssd.image_resizerNEWLINENEWLINE raise ValueError("Unknown model type: {}".format(meta_architecture))NEWLINENEWLINENEWLINEdef get_spatial_image_size(image_resizer_config):NEWLINE """Returns expected spatial size of the output image from a given config.NEWLINENEWLINE Args:NEWLINE image_resizer_config: An image_resizer_pb2.ImageResizer.NEWLINENEWLINE Returns:NEWLINE A list of two integers of the form [height, width]. `height` and `width` areNEWLINE set -1 if they cannot be determined during graph construction.NEWLINENEWLINE Raises:NEWLINE ValueError: If the model type is not recognized.NEWLINE """NEWLINE if image_resizer_config.HasField("fixed_shape_resizer"):NEWLINE return [image_resizer_config.fixed_shape_resizer.height,NEWLINE image_resizer_config.fixed_shape_resizer.width]NEWLINE if image_resizer_config.HasField("keep_aspect_ratio_resizer"):NEWLINE if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:NEWLINE return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2NEWLINE else:NEWLINE return [-1, -1]NEWLINE raise ValueError("Unknown image resizer type.")NEWLINENEWLINENEWLINEdef get_configs_from_pipeline_file(pipeline_config_path):NEWLINE """Reads configuration from a pipeline_pb2.TrainEvalPipelineConfig.NEWLINENEWLINE Args:NEWLINE pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig textNEWLINE proto.NEWLINENEWLINE Returns:NEWLINE Dictionary of configuration objects. Keys are `model`, `train_config`,NEWLINE `train_input_config`, `eval_config`, `eval_input_config`. 
Value are theNEWLINE corresponding config objects.NEWLINE """NEWLINE pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()NEWLINE with tf.gfile.GFile(pipeline_config_path, "r") as f:NEWLINE proto_str = f.read()NEWLINE text_format.Merge(proto_str, pipeline_config)NEWLINENEWLINE configs = {}NEWLINE configs["model"] = pipeline_config.modelNEWLINE configs["train_config"] = pipeline_config.train_configNEWLINE configs["train_input_config"] = pipeline_config.train_input_readerNEWLINE configs["eval_config"] = pipeline_config.eval_configNEWLINE configs["eval_input_config"] = pipeline_config.eval_input_readerNEWLINENEWLINE return configsNEWLINENEWLINENEWLINEdef create_pipeline_proto_from_configs(configs):NEWLINE """Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.NEWLINENEWLINE This function nearly performs the inverse operation ofNEWLINE get_configs_from_pipeline_file(). Instead of returning a file path, it returnsNEWLINE a `TrainEvalPipelineConfig` object.NEWLINENEWLINE Args:NEWLINE configs: Dictionary of configs. See get_configs_from_pipeline_file().NEWLINENEWLINE Returns:NEWLINE A fully populated pipeline_pb2.TrainEvalPipelineConfig.NEWLINE """NEWLINE pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()NEWLINE pipeline_config.model.CopyFrom(configs["model"])NEWLINE pipeline_config.train_config.CopyFrom(configs["train_config"])NEWLINE pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])NEWLINE pipeline_config.eval_config.CopyFrom(configs["eval_config"])NEWLINE pipeline_config.eval_input_reader.CopyFrom(configs["eval_input_config"])NEWLINE return pipeline_configNEWLINENEWLINENEWLINEdef get_configs_from_multiple_files(model_config_path="",NEWLINE train_config_path="",NEWLINE train_input_config_path="",NEWLINE eval_config_path="",NEWLINE eval_input_config_path=""):NEWLINE """Reads training configuration from multiple config files.NEWLINENEWLINE Args:NEWLINE model_config_path: Path to model_pb2.DetectionModel.NEWLINE train_config_path: Path to train_pb2.TrainConfig.NEWLINE train_input_config_path: Path to input_reader_pb2.InputReader.NEWLINE eval_config_path: Path to eval_pb2.EvalConfig.NEWLINE eval_input_config_path: Path to input_reader_pb2.InputReader.NEWLINENEWLINE Returns:NEWLINE Dictionary of configuration objects. Keys are `model`, `train_config`,NEWLINE `train_input_config`, `eval_config`, `eval_input_config`. 
Key/Values areNEWLINE returned only for valid (non-empty) strings.NEWLINE """NEWLINE configs = {}NEWLINE if model_config_path:NEWLINE model_config = model_pb2.DetectionModel()NEWLINE with tf.gfile.GFile(model_config_path, "r") as f:NEWLINE text_format.Merge(f.read(), model_config)NEWLINE configs["model"] = model_configNEWLINENEWLINE if train_config_path:NEWLINE train_config = train_pb2.TrainConfig()NEWLINE with tf.gfile.GFile(train_config_path, "r") as f:NEWLINE text_format.Merge(f.read(), train_config)NEWLINE configs["train_config"] = train_configNEWLINENEWLINE if train_input_config_path:NEWLINE train_input_config = input_reader_pb2.InputReader()NEWLINE with tf.gfile.GFile(train_input_config_path, "r") as f:NEWLINE text_format.Merge(f.read(), train_input_config)NEWLINE configs["train_input_config"] = train_input_configNEWLINENEWLINE if eval_config_path:NEWLINE eval_config = eval_pb2.EvalConfig()NEWLINE with tf.gfile.GFile(eval_config_path, "r") as f:NEWLINE text_format.Merge(f.read(), eval_config)NEWLINE configs["eval_config"] = eval_configNEWLINENEWLINE if eval_input_config_path:NEWLINE eval_input_config = input_reader_pb2.InputReader()NEWLINE with tf.gfile.GFile(eval_input_config_path, "r") as f:NEWLINE text_format.Merge(f.read(), eval_input_config)NEWLINE configs["eval_input_config"] = eval_input_configNEWLINENEWLINE return configsNEWLINENEWLINENEWLINEdef get_number_of_classes(model_config):NEWLINE """Returns the number of classes for a detection model.NEWLINENEWLINE Args:NEWLINE model_config: A model_pb2.DetectionModel.NEWLINENEWLINE Returns:NEWLINE Number of classes.NEWLINENEWLINE Raises:NEWLINE ValueError: If the model type is not recognized.NEWLINE """NEWLINE meta_architecture = model_config.WhichOneof("model")NEWLINE if meta_architecture == "faster_rcnn":NEWLINE return model_config.faster_rcnn.num_classesNEWLINE if meta_architecture == "ssd":NEWLINE return model_config.ssd.num_classesNEWLINENEWLINE raise ValueError("Expected the model to be one of 'faster_rcnn' or 'ssd'.")NEWLINENEWLINENEWLINEdef get_optimizer_type(train_config):NEWLINE """Returns the optimizer type for training.NEWLINENEWLINE Args:NEWLINE train_config: A train_pb2.TrainConfig.NEWLINENEWLINE Returns:NEWLINE The type of the optimizerNEWLINE """NEWLINE return train_config.optimizer.WhichOneof("optimizer")NEWLINENEWLINENEWLINEdef get_learning_rate_type(optimizer_config):NEWLINE """Returns the learning rate type for training.NEWLINENEWLINE Args:NEWLINE optimizer_config: An optimizer_pb2.Optimizer.NEWLINENEWLINE Returns:NEWLINE The type of the learning rate.NEWLINE """NEWLINE return optimizer_config.learning_rate.WhichOneof("learning_rate")NEWLINENEWLINENEWLINEdef merge_external_params_with_configs(configs, hparams=None, **kwargs):NEWLINE """Updates `configs` dictionary based on supplied parameters.NEWLINENEWLINE This utility is for modifying specific fields in the object detection configs.NEWLINE Say that one would like to experiment with different learning rates, momentumNEWLINE values, or batch sizes. Rather than creating a new config text file for eachNEWLINE experiment, one can use a single base config file, and update particularNEWLINE values.NEWLINENEWLINE Args:NEWLINE configs: Dictionary of configuration objects. See outputs fromNEWLINE get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE hparams: A `HParams`.NEWLINE **kwargs: Extra keyword arguments that are treated the same way asNEWLINE attribute/value pairs in `hparams`. 
Note that hyperparameters with theNEWLINE same names will override keyword arguments.NEWLINENEWLINE Returns:NEWLINE `configs` dictionary.NEWLINE """NEWLINENEWLINE if hparams:NEWLINE kwargs.update(hparams.values())NEWLINE for key, value in kwargs.items():NEWLINE # pylint: disable=g-explicit-bool-comparisonNEWLINE if value == "" or value is None:NEWLINE continueNEWLINE # pylint: enable=g-explicit-bool-comparisonNEWLINE if key == "learning_rate":NEWLINE _update_initial_learning_rate(configs, value)NEWLINE tf.logging.info("Overwriting learning rate: %f", value)NEWLINE if key == "batch_size":NEWLINE _update_batch_size(configs, value)NEWLINE tf.logging.info("Overwriting batch size: %d", value)NEWLINE if key == "momentum_optimizer_value":NEWLINE _update_momentum_optimizer_value(configs, value)NEWLINE tf.logging.info("Overwriting momentum optimizer value: %f", value)NEWLINE if key == "classification_localization_weight_ratio":NEWLINE # Localization weight is fixed to 1.0.NEWLINE _update_classification_localization_weight_ratio(configs, value)NEWLINE if key == "focal_loss_gamma":NEWLINE _update_focal_loss_gamma(configs, value)NEWLINE if key == "focal_loss_alpha":NEWLINE _update_focal_loss_alpha(configs, value)NEWLINE if key == "train_steps":NEWLINE _update_train_steps(configs, value)NEWLINE tf.logging.info("Overwriting train steps: %d", value)NEWLINE if key == "eval_steps":NEWLINE _update_eval_steps(configs, value)NEWLINE tf.logging.info("Overwriting eval steps: %d", value)NEWLINE if key == "train_input_path":NEWLINE _update_input_path(configs["train_input_config"], value)NEWLINE tf.logging.info("Overwriting train input path: %s", value)NEWLINE if key == "eval_input_path":NEWLINE _update_input_path(configs["eval_input_config"], value)NEWLINE tf.logging.info("Overwriting eval input path: %s", value)NEWLINE if key == "label_map_path":NEWLINE _update_label_map_path(configs, value)NEWLINE tf.logging.info("Overwriting label map path: %s", value)NEWLINE if key == "mask_type":NEWLINE _update_mask_type(configs, value)NEWLINE tf.logging.info("Overwritten mask type: %s", value)NEWLINE return configsNEWLINENEWLINENEWLINEdef _update_initial_learning_rate(configs, learning_rate):NEWLINE """Updates `configs` to reflect the new initial learning rate.NEWLINENEWLINE This function updates the initial learning rate. For learning rate schedules,NEWLINE all other defined learning rates in the pipeline config are scaled to maintainNEWLINE their same ratio with the initial learning rate.NEWLINE The configs dictionary is updated in place, and hence not returned.NEWLINENEWLINE Args:NEWLINE configs: Dictionary of configuration objects. See outputs fromNEWLINE get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE learning_rate: Initial learning rate for optimizer.NEWLINENEWLINE Raises:NEWLINE TypeError: if optimizer type is not supported, or if learning rate type isNEWLINE not supported.NEWLINE """NEWLINENEWLINE optimizer_type = get_optimizer_type(configs["train_config"])NEWLINE if optimizer_type == "rms_prop_optimizer":NEWLINE optimizer_config = configs["train_config"].optimizer.rms_prop_optimizerNEWLINE elif optimizer_type == "momentum_optimizer":NEWLINE optimizer_config = configs["train_config"].optimizer.momentum_optimizerNEWLINE elif optimizer_type == "adam_optimizer":NEWLINE optimizer_config = configs["train_config"].optimizer.adam_optimizerNEWLINE else:NEWLINE raise TypeError("Optimizer %s is not supported." 
% optimizer_type)NEWLINENEWLINE learning_rate_type = get_learning_rate_type(optimizer_config)NEWLINE if learning_rate_type == "constant_learning_rate":NEWLINE constant_lr = optimizer_config.learning_rate.constant_learning_rateNEWLINE constant_lr.learning_rate = learning_rateNEWLINE elif learning_rate_type == "exponential_decay_learning_rate":NEWLINE exponential_lr = (NEWLINE optimizer_config.learning_rate.exponential_decay_learning_rate)NEWLINE exponential_lr.initial_learning_rate = learning_rateNEWLINE elif learning_rate_type == "manual_step_learning_rate":NEWLINE manual_lr = optimizer_config.learning_rate.manual_step_learning_rateNEWLINE original_learning_rate = manual_lr.initial_learning_rateNEWLINE learning_rate_scaling = float(learning_rate) / original_learning_rateNEWLINE manual_lr.initial_learning_rate = learning_rateNEWLINE for schedule in manual_lr.schedule:NEWLINE schedule.learning_rate *= learning_rate_scalingNEWLINE elif learning_rate_type == "cosine_decay_learning_rate":NEWLINE cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rateNEWLINE learning_rate_base = cosine_lr.learning_rate_baseNEWLINE warmup_learning_rate = cosine_lr.warmup_learning_rateNEWLINE warmup_scale_factor = warmup_learning_rate / learning_rate_baseNEWLINE cosine_lr.learning_rate_base = learning_rateNEWLINE cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rateNEWLINE else:NEWLINE raise TypeError("Learning rate %s is not supported." % learning_rate_type)NEWLINENEWLINENEWLINEdef _update_batch_size(configs, batch_size):NEWLINE """Updates `configs` to reflect the new training batch size.NEWLINENEWLINE The configs dictionary is updated in place, and hence not returned.NEWLINENEWLINE Args:NEWLINE configs: Dictionary of configuration objects. See outputs fromNEWLINE get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE batch_size: Batch size to use for training (Ideally a power of 2). InputsNEWLINE are rounded, and capped to be 1 or greater.NEWLINE """NEWLINE configs["train_config"].batch_size = max(1, int(round(batch_size)))NEWLINENEWLINENEWLINEdef _update_momentum_optimizer_value(configs, momentum):NEWLINE """Updates `configs` to reflect the new momentum value.NEWLINENEWLINE Momentum is only supported for RMSPropOptimizer and MomentumOptimizer. For anyNEWLINE other optimizer, no changes take place. The configs dictionary is updated inNEWLINE place, and hence not returned.NEWLINENEWLINE Args:NEWLINE configs: Dictionary of configuration objects. See outputs fromNEWLINE get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE momentum: New momentum value. 
Values are clipped at 0.0 and 1.0.NEWLINENEWLINE  Raises:NEWLINE    TypeError: If the optimizer type is not `rms_prop_optimizer` orNEWLINE      `momentum_optimizer`.NEWLINE  """NEWLINE  optimizer_type = get_optimizer_type(configs["train_config"])NEWLINE  if optimizer_type == "rms_prop_optimizer":NEWLINE    optimizer_config = configs["train_config"].optimizer.rms_prop_optimizerNEWLINE  elif optimizer_type == "momentum_optimizer":NEWLINE    optimizer_config = configs["train_config"].optimizer.momentum_optimizerNEWLINE  else:NEWLINE    raise TypeError("Optimizer type must be one of `rms_prop_optimizer` or "NEWLINE                    "`momentum_optimizer`.")NEWLINENEWLINE  optimizer_config.momentum_optimizer_value = min(max(0.0, momentum), 1.0)NEWLINENEWLINENEWLINEdef _update_classification_localization_weight_ratio(configs, ratio):NEWLINE  """Updates the classification/localization weight loss ratio.NEWLINENEWLINE  Detection models usually define a loss weight for both classification andNEWLINE  objectness. This function updates the weights such that the ratio ofNEWLINE  classification weight to localization weight is the ratio provided.NEWLINE  Arbitrarily, localization weight is set to 1.0.NEWLINENEWLINE  Note that in the case of Faster R-CNN, this same ratio is applied to the firstNEWLINE  stage objectness loss weight relative to localization loss weight.NEWLINENEWLINE  The configs dictionary is updated in place, and hence not returned.NEWLINENEWLINE  Args:NEWLINE    configs: Dictionary of configuration objects. See outputs fromNEWLINE      get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE    ratio: Desired ratio of classification (and/or objectness) loss weight toNEWLINE      localization loss weight.NEWLINE  """NEWLINE  meta_architecture = configs["model"].WhichOneof("model")NEWLINE  if meta_architecture == "faster_rcnn":NEWLINE    model = configs["model"].faster_rcnnNEWLINE    model.first_stage_localization_loss_weight = 1.0NEWLINE    model.first_stage_objectness_loss_weight = ratioNEWLINE    model.second_stage_localization_loss_weight = 1.0NEWLINE    model.second_stage_classification_loss_weight = ratioNEWLINE  if meta_architecture == "ssd":NEWLINE    model = configs["model"].ssdNEWLINE    model.loss.localization_weight = 1.0NEWLINE    model.loss.classification_weight = ratioNEWLINENEWLINENEWLINEdef _get_classification_loss(model_config):NEWLINE  """Returns the classification loss for a model."""NEWLINE  meta_architecture = model_config.WhichOneof("model")NEWLINE  if meta_architecture == "faster_rcnn":NEWLINE    model = model_config.faster_rcnnNEWLINE    classification_loss = model.second_stage_classification_lossNEWLINE  elif meta_architecture == "ssd":NEWLINE    model = model_config.ssdNEWLINE    classification_loss = model.loss.classification_lossNEWLINE  else:NEWLINE    raise TypeError("Did not recognize the model architecture.")NEWLINE  return classification_lossNEWLINENEWLINENEWLINEdef _update_focal_loss_gamma(configs, gamma):NEWLINE  """Updates the gamma value for a sigmoid focal loss.NEWLINENEWLINE  The configs dictionary is updated in place, and hence not returned.NEWLINENEWLINE  Args:NEWLINE    configs: Dictionary of configuration objects. 
See outputs fromNEWLINE get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE gamma: Exponent term in focal loss.NEWLINENEWLINE Raises:NEWLINE TypeError: If the classification loss is not `weighted_sigmoid_focal`.NEWLINE """NEWLINE classification_loss = _get_classification_loss(configs["model"])NEWLINE classification_loss_type = classification_loss.WhichOneof(NEWLINE "classification_loss")NEWLINE if classification_loss_type != "weighted_sigmoid_focal":NEWLINE raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")NEWLINE classification_loss.weighted_sigmoid_focal.gamma = gammaNEWLINENEWLINENEWLINEdef _update_focal_loss_alpha(configs, alpha):NEWLINE """Updates the alpha value for a sigmoid focal loss.NEWLINENEWLINE The configs dictionary is updated in place, and hence not returned.NEWLINENEWLINE Args:NEWLINE configs: Dictionary of configuration objects. See outputs fromNEWLINE get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE alpha: Class weight multiplier for sigmoid loss.NEWLINENEWLINE Raises:NEWLINE TypeError: If the classification loss is not `weighted_sigmoid_focal`.NEWLINE """NEWLINE classification_loss = _get_classification_loss(configs["model"])NEWLINE classification_loss_type = classification_loss.WhichOneof(NEWLINE "classification_loss")NEWLINE if classification_loss_type != "weighted_sigmoid_focal":NEWLINE raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")NEWLINE classification_loss.weighted_sigmoid_focal.alpha = alphaNEWLINENEWLINENEWLINEdef _update_train_steps(configs, train_steps):NEWLINE """Updates `configs` to reflect new number of training steps."""NEWLINE configs["train_config"].num_steps = int(train_steps)NEWLINENEWLINENEWLINEdef _update_eval_steps(configs, eval_steps):NEWLINE """Updates `configs` to reflect new number of eval steps per evaluation."""NEWLINE configs["eval_config"].num_examples = int(eval_steps)NEWLINENEWLINENEWLINEdef _update_input_path(input_config, input_path):NEWLINE """Updates input configuration to reflect a new input path.NEWLINENEWLINE The input_config object is updated in place, and hence not returned.NEWLINENEWLINE Args:NEWLINE input_config: A input_reader_pb2.InputReader.NEWLINE input_path: A path to data or list of paths.NEWLINENEWLINE Raises:NEWLINE TypeError: if input reader type is not `tf_record_input_reader`.NEWLINE """NEWLINE input_reader_type = input_config.WhichOneof("input_reader")NEWLINE if input_reader_type == "tf_record_input_reader":NEWLINE input_config.tf_record_input_reader.ClearField("input_path")NEWLINE if isinstance(input_path, list):NEWLINE input_config.tf_record_input_reader.input_path.extend(input_path)NEWLINE else:NEWLINE input_config.tf_record_input_reader.input_path.append(input_path)NEWLINE else:NEWLINE raise TypeError("Input reader type must be `tf_record_input_reader`.")NEWLINENEWLINENEWLINEdef _update_label_map_path(configs, label_map_path):NEWLINE """Updates the label map path for both train and eval input readers.NEWLINENEWLINE The configs dictionary is updated in place, and hence not returned.NEWLINENEWLINE Args:NEWLINE configs: Dictionary of configuration objects. 
See outputs fromNEWLINE get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE label_map_path: New path to `StringIntLabelMap` pbtxt file.NEWLINE """NEWLINE configs["train_input_config"].label_map_path = label_map_pathNEWLINE configs["eval_input_config"].label_map_path = label_map_pathNEWLINENEWLINENEWLINEdef _update_mask_type(configs, mask_type):NEWLINE """Updates the mask type for both train and eval input readers.NEWLINENEWLINE The configs dictionary is updated in place, and hence not returned.NEWLINENEWLINE Args:NEWLINE configs: Dictionary of configuration objects. See outputs fromNEWLINE get_configs_from_pipeline_file() or get_configs_from_multiple_files().NEWLINE mask_type: A string name representing a value ofNEWLINE input_reader_pb2.InstanceMaskTypeNEWLINE """NEWLINE configs["train_input_config"].mask_type = mask_typeNEWLINE configs["eval_input_config"].mask_type = mask_typeNEWLINE
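# Illustrative usage sketch for the configuration utilities above (appended for clarity; not partNEWLINE# of the original module). The pipeline path, output path, and override values are hypothetical;NEWLINE# only get_configs_from_pipeline_file, merge_external_params_with_configs, andNEWLINE# create_pipeline_proto_from_configs are taken from the code above.NEWLINEimport tensorflow.compat.v1 as tfNEWLINEfrom google.protobuf import text_formatNEWLINENEWLINE# Read the base pipeline config, override a couple of hyperparameters, and write the result back.NEWLINEconfigs = get_configs_from_pipeline_file("pipeline.config")NEWLINEconfigs = merge_external_params_with_configs(configs, learning_rate=1e-3, batch_size=8)NEWLINEpipeline_proto = create_pipeline_proto_from_configs(configs)NEWLINEwith tf.gfile.GFile("pipeline_override.config", "w") as f:NEWLINE  f.write(text_format.MessageToString(pipeline_proto))NEWLINE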
#!/usr/bin/env pythonNEWLINE# Licensed under a 3-clause BSD style license - see LICENSE.rstNEWLINENEWLINEimport globNEWLINEimport osNEWLINEimport sysNEWLINENEWLINEimport ah_bootstrapNEWLINEfrom setuptools import setupNEWLINENEWLINE#A dirty hack to get around some early import/configurations ambiguitiesNEWLINEif sys.version_info[0] >= 3:NEWLINE import builtinsNEWLINEelse:NEWLINE import __builtin__ as builtinsNEWLINEbuiltins._ASTROPY_SETUP_ = TrueNEWLINENEWLINEfrom astropy_helpers.setup_helpers import (NEWLINE register_commands, adjust_compiler, get_debug_option, get_package_info)NEWLINEfrom astropy_helpers.git_helpers import get_git_devstrNEWLINEfrom astropy_helpers.version_helpers import generate_version_pyNEWLINENEWLINE# Get some values from the setup.cfgNEWLINEtry:NEWLINE from ConfigParser import ConfigParserNEWLINEexcept ImportError:NEWLINE from configparser import ConfigParserNEWLINEconf = ConfigParser()NEWLINEconf.read(['setup.cfg'])NEWLINEmetadata = dict(conf.items('metadata'))NEWLINENEWLINEPACKAGENAME = metadata.get('package_name', 'packagename')NEWLINEDESCRIPTION = metadata.get('description', 'Astropy affiliated package')NEWLINEAUTHOR = metadata.get('author', '')NEWLINEAUTHOR_EMAIL = metadata.get('author_email', '')NEWLINELICENSE = metadata.get('license', 'unknown')NEWLINEURL = metadata.get('url', 'http://astropy.org')NEWLINENEWLINE# Get the long description from the package's docstringNEWLINE__import__(PACKAGENAME)NEWLINEpackage = sys.modules[PACKAGENAME]NEWLINELONG_DESCRIPTION = package.__doc__NEWLINENEWLINE# Store the package name in a built-in variable so it's easyNEWLINE# to get from other parts of the setup infrastructureNEWLINEbuiltins._ASTROPY_PACKAGE_NAME_ = PACKAGENAMENEWLINENEWLINE# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)NEWLINEVERSION = '0.2.dev'NEWLINENEWLINE# Indicates if this version is a release versionNEWLINERELEASE = 'dev' not in VERSIONNEWLINENEWLINEif not RELEASE:NEWLINE VERSION += get_git_devstr(False)NEWLINENEWLINE# Populate the dict of setup command overrides; this should be done beforeNEWLINE# invoking any other functionality from distutils since it can potentiallyNEWLINE# modify distutils' behavior.NEWLINEcmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)NEWLINENEWLINE# Adjust the compiler in case the default on this platform is to use aNEWLINE# broken one.NEWLINEadjust_compiler(PACKAGENAME)NEWLINENEWLINE# Freeze build information in version.pyNEWLINEgenerate_version_py(PACKAGENAME, VERSION, RELEASE,NEWLINE get_debug_option(PACKAGENAME))NEWLINENEWLINE# Treat everything in scripts except README.rst as a script to be installedNEWLINEscripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))NEWLINE if os.path.basename(fname) != 'README.rst']NEWLINENEWLINENEWLINE# Get configuration information from all of the various subpackages.NEWLINE# See the docstring for setup_helpers.update_package_files for moreNEWLINE# details.NEWLINEpackage_info = get_package_info()NEWLINENEWLINE# Add the project-global dataNEWLINEpackage_info['package_data'].setdefault(PACKAGENAME, [])NEWLINEpackage_info['package_data'][PACKAGENAME].append('data/*')NEWLINENEWLINE# Include all .c files, recursively, including those generated byNEWLINE# Cython, since we can not do this in MANIFEST.in with a "dynamic"NEWLINE# directory name.NEWLINEc_files = []NEWLINEfor root, dirs, files in os.walk(PACKAGENAME):NEWLINE for filename in files:NEWLINE if filename.endswith('.c'):NEWLINE c_files.append(NEWLINE os.path.join(NEWLINE 
os.path.relpath(root, PACKAGENAME), filename))NEWLINEpackage_info['package_data'][PACKAGENAME].extend(c_files)NEWLINENEWLINEsetup(name=PACKAGENAME,NEWLINE version=VERSION,NEWLINE description=DESCRIPTION,NEWLINE scripts=scripts,NEWLINE install_requires=['astropy','matplotlib>=2.0'],NEWLINE author=AUTHOR,NEWLINE author_email=AUTHOR_EMAIL,NEWLINE license=LICENSE,NEWLINE url=URL,NEWLINE long_description=LONG_DESCRIPTION,NEWLINE cmdclass=cmdclassd,NEWLINE zip_safe=False,NEWLINE use_2to3=True,NEWLINE **package_infoNEWLINE)NEWLINE
_base_ = [NEWLINE '../_base_/models/cascade_mask_rcnn_swin_fpn.py',NEWLINE '../_base_/datasets/coco_instance.py',NEWLINE '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'NEWLINE]NEWLINENEWLINEmodel = dict(NEWLINE backbone=dict(NEWLINE embed_dim=96,NEWLINE depths=[2, 2, 18, 2],NEWLINE num_heads=[3, 6, 12, 24],NEWLINE window_size=7,NEWLINE ape=False,NEWLINE drop_path_rate=0.2,NEWLINE patch_norm=True,NEWLINE use_checkpoint=FalseNEWLINE ),NEWLINE neck=dict(in_channels=[96, 192, 384, 768]),NEWLINE roi_head=dict(NEWLINE bbox_head=[NEWLINE dict(NEWLINE type='ConvFCBBoxHead',NEWLINE num_shared_convs=4,NEWLINE num_shared_fcs=1,NEWLINE in_channels=256,NEWLINE conv_out_channels=256,NEWLINE fc_out_channels=1024,NEWLINE roi_feat_size=7,NEWLINE num_classes=4,NEWLINE bbox_coder=dict(NEWLINE type='DeltaXYWHBBoxCoder',NEWLINE target_means=[0., 0., 0., 0.],NEWLINE target_stds=[0.1, 0.1, 0.2, 0.2]),NEWLINE reg_class_agnostic=False,NEWLINE reg_decoded_bbox=True,NEWLINE norm_cfg=dict(type='SyncBN', requires_grad=True),NEWLINE loss_cls=dict(NEWLINE type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),NEWLINE loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),NEWLINE dict(NEWLINE type='ConvFCBBoxHead',NEWLINE num_shared_convs=4,NEWLINE num_shared_fcs=1,NEWLINE in_channels=256,NEWLINE conv_out_channels=256,NEWLINE fc_out_channels=1024,NEWLINE roi_feat_size=7,NEWLINE num_classes=4,NEWLINE bbox_coder=dict(NEWLINE type='DeltaXYWHBBoxCoder',NEWLINE target_means=[0., 0., 0., 0.],NEWLINE target_stds=[0.05, 0.05, 0.1, 0.1]),NEWLINE reg_class_agnostic=False,NEWLINE reg_decoded_bbox=True,NEWLINE norm_cfg=dict(type='SyncBN', requires_grad=True),NEWLINE loss_cls=dict(NEWLINE type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),NEWLINE loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),NEWLINE dict(NEWLINE type='ConvFCBBoxHead',NEWLINE num_shared_convs=4,NEWLINE num_shared_fcs=1,NEWLINE in_channels=256,NEWLINE conv_out_channels=256,NEWLINE fc_out_channels=1024,NEWLINE roi_feat_size=7,NEWLINE num_classes=4,NEWLINE bbox_coder=dict(NEWLINE type='DeltaXYWHBBoxCoder',NEWLINE target_means=[0., 0., 0., 0.],NEWLINE target_stds=[0.033, 0.033, 0.067, 0.067]),NEWLINE reg_class_agnostic=False,NEWLINE reg_decoded_bbox=True,NEWLINE norm_cfg=dict(type='SyncBN', requires_grad=True),NEWLINE loss_cls=dict(NEWLINE type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),NEWLINE loss_bbox=dict(type='GIoULoss', loss_weight=10.0))NEWLINE ]))NEWLINENEWLINEimg_norm_cfg = dict(NEWLINE mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)NEWLINENEWLINE# augmentation strategy originates from DETR / Sparse RCNNNEWLINEtrain_pipeline = [NEWLINE dict(type='LoadImageFromFile'),NEWLINE dict(type='LoadAnnotations', with_bbox=True),NEWLINE dict(type='RandomFlip', flip_ratio=0.5),NEWLINE dict(type='AutoAugment',NEWLINE policies=[NEWLINE [NEWLINE dict(type='Resize',NEWLINE img_scale=[(320, 320), (384, 384), (448, 448),NEWLINE (512, 512), (576, 576)],NEWLINE multiscale_mode='value',NEWLINE keep_ratio=True)NEWLINE ],NEWLINE [NEWLINE dict(type='Resize',NEWLINE img_scale=[(320, 320), (576, 576)],NEWLINE multiscale_mode='value',NEWLINE keep_ratio=True),NEWLINE dict(type='RandomCrop',NEWLINE crop_type='absolute_range',NEWLINE crop_size=(320, 320),NEWLINE allow_negative_crop=True),NEWLINE dict(type='Resize',NEWLINE img_scale=[(320, 320), (384, 384), (448, 448),NEWLINE (512, 512), (576, 576)],NEWLINE multiscale_mode='value',NEWLINE override=True,NEWLINE keep_ratio=True)NEWLINE ]NEWLINE ]),NEWLINE 
dict(type='Normalize', **img_norm_cfg),NEWLINE dict(type='Pad', size_divisor=32),NEWLINE dict(type='DefaultFormatBundle'),NEWLINE dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),NEWLINE]NEWLINEdata = dict(train=dict(pipeline=train_pipeline))NEWLINENEWLINEoptimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,NEWLINE paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),NEWLINE 'relative_position_bias_table': dict(decay_mult=0.),NEWLINE 'norm': dict(decay_mult=0.)}))NEWLINElr_config = dict(step=[16, 22])NEWLINErunner = dict(type='EpochBasedRunnerAmp', max_epochs=12)NEWLINENEWLINE# do not use mmdet version fp16NEWLINEfp16 = NoneNEWLINEoptimizer_config = dict(NEWLINE type="DistOptimizerHook",NEWLINE update_interval=1,NEWLINE grad_clip=None,NEWLINE coalesce=True,NEWLINE bucket_size_mb=-1,NEWLINE use_fp16=True,NEWLINE)NEWLINE
# ------------------------------------------------------------NEWLINE# Copyright (c) All rights reservedNEWLINE# SiLab, Institute of Physics, University of BonnNEWLINE# ------------------------------------------------------------NEWLINE#NEWLINENEWLINEimport unittestNEWLINEimport osNEWLINEimport yamlNEWLINENEWLINEimport numpy as npNEWLINENEWLINEfrom basil.dut import DutNEWLINEfrom basil.utils.sim.utils import cocotb_compile_and_run, cocotb_compile_clean, get_basil_dirNEWLINENEWLINENEWLINEclass TestSram(unittest.TestCase):NEWLINE def setUp(self):NEWLINENEWLINE fw_path = os.path.join(get_basil_dir(), 'firmware/modules')NEWLINE cocotb_compile_and_run([NEWLINE os.path.join(fw_path, 'gpio/gpio.v'),NEWLINE os.path.join(fw_path, 'gpio/gpio_core.v'),NEWLINE os.path.join(fw_path, 'utils/reset_gen.v'),NEWLINE os.path.join(fw_path, 'utils/bus_to_ip.v'),NEWLINE os.path.join(fw_path, 'rrp_arbiter/rrp_arbiter.v'),NEWLINE os.path.join(fw_path, 'utils/ODDR_sim.v'),NEWLINE os.path.join(fw_path, 'utils/generic_fifo.v'),NEWLINE os.path.join(fw_path, 'utils/cdc_pulse_sync.v'),NEWLINE os.path.join(fw_path, 'utils/3_stage_synchronizer.v'),NEWLINE os.path.join(fw_path, 'utils/fx2_to_bus.v'),NEWLINE os.path.join(fw_path, 'pulse_gen/pulse_gen.v'),NEWLINE os.path.join(fw_path, 'pulse_gen/pulse_gen_core.v'),NEWLINE os.path.join(fw_path, 'sram_fifo/sram_fifo_core.v'),NEWLINE os.path.join(fw_path, 'sram_fifo/sram_fifo.v'),NEWLINE os.path.join(os.path.dirname(__file__), '../firmware/src/sram_test.v'),NEWLINE os.path.join(os.path.dirname(__file__), '../tests/tb.v')],NEWLINE top_level='tb',NEWLINE sim_bus='basil.utils.sim.SiLibUsbBusDriver'NEWLINE )NEWLINENEWLINE with open(os.path.join(os.path.dirname(__file__), '../sram_test.yaml'), 'r') as f:NEWLINE cnfg = yaml.safe_load(f)NEWLINENEWLINE # change to simulation interfaceNEWLINE cnfg['transfer_layer'][0]['type'] = 'SiSim'NEWLINENEWLINE self.chip = Dut(cnfg)NEWLINE self.chip.init()NEWLINENEWLINE def test_simple(self):NEWLINE self.chip['CONTROL']['COUNTER_EN'] = 1NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL']['COUNTER_EN'] = 0NEWLINE self.chip['CONTROL'].write()NEWLINE for _ in range(10):NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE ret = self.chip['FIFO'].get_data()NEWLINENEWLINE self.chip['CONTROL']['COUNTER_EN'] = 1NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL']['COUNTER_EN'] = 0NEWLINE for _ in range(10):NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE ret = np.hstack((ret, self.chip['FIFO'].get_data()))NEWLINENEWLINE x = np.arange(175 * 4, dtype=np.uint8)NEWLINE x.dtype = np.uint32NEWLINENEWLINE self.assertTrue(np.alltrue(ret == x))NEWLINENEWLINE self.chip['FIFO'].reset()NEWLINENEWLINE self.chip['CONTROL']['COUNTER_EN'] = 1NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE self.chip['CONTROL']['COUNTER_EN'] = 0NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE ret = np.hstack((ret, self.chip['FIFO'].get_data()))NEWLINENEWLINE x = np.arange(245 * 4, dtype=np.uint8)NEWLINE x.dtype = np.uint32NEWLINENEWLINE self.assertEqual(ret.tolist(), x.tolist())NEWLINENEWLINE def test_full(self):NEWLINE self.chip['CONTROL']['COUNTER_EN'] = 1NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE for _ in range(2):NEWLINE self.chip['FIFO'].get_FIFO_SIZE()NEWLINENEWLINE self.chip['CONTROL']['COUNTER_EN'] = 0NEWLINE 
self.chip['CONTROL'].write()NEWLINENEWLINE for _ in range(10):NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE size = self.chip['FIFO'].get_FIFO_SIZE()NEWLINE self.assertEqual(size, 512)NEWLINENEWLINE ret = self.chip['FIFO'].get_data()NEWLINE ret = np.hstack((ret, self.chip['FIFO'].get_data()))NEWLINENEWLINE x = np.arange(203 * 4, dtype=np.uint8)NEWLINE x.dtype = np.uint32NEWLINENEWLINE self.assertTrue(np.alltrue(ret == x))NEWLINENEWLINE def test_overflow(self):NEWLINE self.chip['CONTROL']['COUNTER_EN'] = 1NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE for _ in range(20):NEWLINE self.chip['FIFO'].get_FIFO_SIZE()NEWLINENEWLINE self.chip['CONTROL']['COUNTER_EN'] = 0NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE for _ in range(10):NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE ret = self.chip['FIFO'].get_data()NEWLINE while(self.chip['FIFO'].get_FIFO_SIZE()):NEWLINE ret = np.hstack((ret, self.chip['FIFO'].get_data()))NEWLINENEWLINE x = np.arange((128 + 1023) * 4, dtype=np.uint8)NEWLINE x.dtype = np.uint32NEWLINENEWLINE self.assertTrue(np.alltrue(ret == x))NEWLINENEWLINE self.chip['PULSE'].set_DELAY(1)NEWLINE self.chip['PULSE'].set_WIDTH(1)NEWLINE self.chip['PULSE'].start()NEWLINENEWLINE ret = self.chip['FIFO'].get_data()NEWLINE x = np.arange((128 + 1023) * 4, (128 + 1023 + 1) * 4, dtype=np.uint8)NEWLINE x.dtype = np.uint32NEWLINENEWLINE self.assertEqual(ret, x)NEWLINENEWLINE def test_single(self):NEWLINENEWLINE self.chip['PULSE'].set_DELAY(1)NEWLINE self.chip['PULSE'].set_WIDTH(1)NEWLINE self.chip['PULSE'].start()NEWLINENEWLINE self.assertEqual(self.chip['FIFO'].get_data().tolist(), [0x03020100])NEWLINENEWLINE self.chip['PULSE'].start()NEWLINENEWLINE self.assertEqual(self.chip['FIFO'].get_data().tolist(), [0x07060504])NEWLINENEWLINE def test_pattern(self):NEWLINE self.chip['PATTERN'] = 0xaa5555aaNEWLINE self.chip['PATTERN'].write()NEWLINENEWLINE self.chip['CONTROL']['PATTERN_EN'] = 1NEWLINE self.chip['CONTROL'].write()NEWLINE self.chip['CONTROL']['PATTERN_EN'] = 0NEWLINE self.chip['CONTROL'].write()NEWLINE for _ in range(5):NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE self.assertEqual(self.chip['FIFO'].get_data().tolist(), [0xaa5555aa] * 35)NEWLINENEWLINE def test_direct(self):NEWLINE self.chip['CONTROL']['COUNTER_DIRECT'] = 1NEWLINE self.chip['CONTROL'].write()NEWLINENEWLINE size = 648NEWLINE base_data_addr = self.chip['FIFO']._conf['base_data_addr']NEWLINENEWLINE ret = self.chip['USB'].read(base_data_addr, size=size)NEWLINE ret = np.hstack((ret, self.chip['USB'].read(base_data_addr, size=size)))NEWLINENEWLINE x = np.arange(size * 2, dtype=np.uint8)NEWLINE self.assertEqual(ret.tolist(), x.tolist())NEWLINENEWLINE def test_continouse(self):NEWLINE self.chip['PULSE'].set_DELAY(35)NEWLINE self.chip['PULSE'].set_WIDTH(3)NEWLINE self.chip['PULSE'].set_repeat(0)NEWLINE self.chip['PULSE'].start()NEWLINENEWLINE i = 0NEWLINE error = FalseNEWLINE for _ in range(100):NEWLINE ret = self.chip['FIFO'].get_data()NEWLINENEWLINE x = np.arange(i * 4, (i + ret.shape[0]) * 4, dtype=np.uint8)NEWLINE x.dtype = np.uint32NEWLINENEWLINE i += ret.shape[0]NEWLINENEWLINE ok = np.alltrue(ret == x)NEWLINE # print 'OK?', ok, ret.shape[0], i, kNEWLINE if not ok:NEWLINE error = TrueNEWLINE breakNEWLINENEWLINE self.assertFalse(error)NEWLINENEWLINE def tearDown(self):NEWLINE self.chip.close() # let it close connection and stop simulatorNEWLINE cocotb_compile_clean()NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE unittest.main()NEWLINE
def read():NEWLINE numbers = []NEWLINE with open('./docs/numbers.txt', 'r', encoding='utf-8') as f:NEWLINE for line in f:NEWLINE numbers.append(int(line))NEWLINE print(numbers)NEWLINENEWLINEdef write():NEWLINE names = ['Jesús', 'Facundo', 'Miguel', 'Christian', 'Adal', 'Karol', 'Nicolás']NEWLINE with open('./docs/names.txt', 'a', encoding='utf-8') as f:NEWLINE for name in names:NEWLINE f.write(name)NEWLINE f.write('\n')NEWLINENEWLINEdef run():NEWLINE write()NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE run()
"""NEWLINEThe 'daal4py.sklearn.ensemble' module implements daal4py-based NEWLINERandomForestClassifier and RandomForestRegressor classes.NEWLINE"""NEWLINEfrom daal4py.sklearn._utils import daal_check_versionNEWLINEif daal_check_version((2020,'P', 3)):NEWLINE from .forest import (RandomForestClassifier, RandomForestRegressor)NEWLINEelse:NEWLINE from .decision_forest import (RandomForestClassifier, RandomForestRegressor)NEWLINEfrom .GBTDAAL import (GBTDAALClassifier, GBTDAALRegressor)NEWLINEfrom .AdaBoostClassifier import AdaBoostClassifierNEWLINENEWLINE__all__ = ['RandomForestClassifier', 'RandomForestRegressor', 'GBTDAALClassifier', 'GBTDAALRegressor', 'AdaBoostClassifier']NEWLINE
NEWLINEimport numpy as npNEWLINEfrom numpy import linalg as laNEWLINEimport pdbNEWLINEimport copyNEWLINEimport itertoolsNEWLINENEWLINENEWLINEclass LMPC(object):NEWLINE """Learning Model Predictive Controller (LMPC)NEWLINE Inputs:NEWLINE - ftocp: Finite Time Optimal Control Prolem object used to compute the predicted trajectoryNEWLINE - l: number of past trajectories used to construct the local safe set and local Q-functionNEWLINE - M: number of data points from each trajectory used to construct the local safe set and local Q-function NEWLINE Methods:NEWLINE - addTrajectory: adds a trajectory to the safe set SS and update value functionNEWLINE - computeCost: computes the cost associated with a feasible trajectoryNEWLINE - solve: uses ftocp and the stored data to comptute the predicted trajectoryNEWLINE - closeToSS: computes the K-nearest neighbors to zt"""NEWLINENEWLINE def __init__(self, ftocp, l, P, verbose, Xf_vertices = None):NEWLINE self.ftocp = ftocpNEWLINE self.SS = []NEWLINE self.uSS = []NEWLINE self.Qfun = []NEWLINE self.l = lNEWLINE self.P = PNEWLINE self.zt = []NEWLINE self.it = 0NEWLINE self.timeVarying = TrueNEWLINE self.itCost = []NEWLINE self.verbose = verboseNEWLINE self.ftocp.verbose = verboseNEWLINE self.Xf_vertices = Xf_verticesNEWLINENEWLINE def addTrajectory(self, x, u):NEWLINE # Add the feasible trajectory x and the associated input sequence u to the safe setNEWLINE self.SS.append(copy.copy(x))NEWLINE self.uSS.append(np.concatenate( (copy.copy(u), np.zeros((u.shape[0],1)) ), axis=1)) # Here concatenating zero as f(xf, 0) = xf by assumptionNEWLINENEWLINE # Compute and store the cost associated with the feasible trajectoryNEWLINE self.Qfun.append(copy.copy(np.arange(x.shape[1]-1,-1,-1)))NEWLINENEWLINE # Update the z vector: this vector will be used to compute the intial guess for the ftocp.NEWLINE # Basically zt will be computed using the optimal multipliers lambda and the data stored in the safe setNEWLINE zIdx = np.min((self.ftocp.N, np.shape(x)[1]-1))NEWLINE self.zt = x[:, zIdx]NEWLINENEWLINE # Compute initial guess for nonlinear solver and store few variablesNEWLINE self.xGuess = np.concatenate((x[:,0:(self.ftocp.N+1)].T.flatten(), u[:,0:(self.ftocp.N)].T.flatten()), axis = 0)NEWLINE self.ftocp.xGuess = self.xGuessNEWLINENEWLINE # Initialize cost varaibles for bookkeepingNEWLINE self.cost = self.Qfun[-1][0]NEWLINE self.itCost.append(self.cost)NEWLINE self.ftocp.optCost = self.cost + 1NEWLINE self.oldIt = self.itNEWLINE NEWLINE # Pass inital guess to ftopc objectNEWLINE self.ftocp.xSol = x[:,0:(self.ftocp.N+1)]NEWLINE self.ftocp.uSol = u[:,0:(self.ftocp.N)]NEWLINE # PrintNEWLINE print "Total time added trajectory: ", self.Qfun[-1][0]NEWLINE print "Total time stored trajectories: ", [self.Qfun[x][0] for x in range(0, self.it+1)]NEWLINENEWLINE # Update time Improvement counterNEWLINE self.timeImprovement = 0NEWLINENEWLINE # Update iteration counterNEWLINE self.it = self.it + 1NEWLINENEWLINE # Update indices of stored data points used to contruct the local safe set and Q-functionNEWLINE self.SSindices =[]NEWLINE Tstar = np.min(self.itCost)NEWLINE for i in range(0, self.it):NEWLINE Tj = np.shape(self.SS[i])[1]-1NEWLINE self.SSindices.append(np.arange(Tj - Tstar + self.ftocp.N, Tj - Tstar + self.ftocp.N+self.P))NEWLINENEWLINE def solve(self, xt, verbose = 5): NEWLINENEWLINE # First retive the data points used to cconstruct the safe set.NEWLINE minIt = np.max([0, self.it - self.l])NEWLINE SSQfun = []NEWLINE SSnext = []NEWLINE # Loop over j-l iterations used to contruct the 
local safe setNEWLINE for i in range(minIt, self.it):NEWLINE # idx associated with the data points from iteration i which are in the local safe setNEWLINE if self.timeVarying == True:NEWLINE idx = self.timeSS(i)NEWLINE else:NEWLINE idx = self.closeToSS(i)NEWLINE # Stored state and cost value (Check if using terminal state or terminal point)NEWLINE if (self.Xf_vertices is not None) and (idx[-1] == np.shape(self.SS[i])[1]-1):NEWLINE augSS = np.concatenate((self.SS[i][:,idx], self.Xf_vertices), axis=1 ) NEWLINE augCost = np.concatenate((self.Qfun[i][idx], np.zeros(self.Xf_vertices.shape[1])), axis=0 ) NEWLINE SSQfun.append( np.concatenate( (augSS, [augCost]), axis=0 ).T )NEWLINENEWLINE # Store the successors of the states into the safe set and the control action. (Note that the vertices of X_f are invariant states)NEWLINE # This matrix will be used to compute the vector zt which represent a feasible guess for the ftocp at time t+1NEWLINE xSSuSS = np.concatenate((self.SS[i], self.uSS[i]), axis = 0)NEWLINE extendedSS = np.concatenate((xSSuSS, np.array([xSSuSS[:,-1]]).T), axis=1)NEWLINE verticesAndInputs = np.concatenate((self.Xf_vertices, np.zeros((self.ftocp.d, self.Xf_vertices.shape[1]))), axis=0)NEWLINE SSnext.append(np.concatenate((extendedSS[:,idx+1], verticesAndInputs), axis = 1).T)NEWLINE else:NEWLINE SSQfun.append( np.concatenate( (self.SS[i][:,idx], [self.Qfun[i][idx]]), axis=0 ).T )NEWLINENEWLINE # Store the successors of the states into the safe set and the control action. NEWLINE # This matrix will be used to compute the vector zt which represent a feasible guess for the ftocp at time t+1NEWLINE xSSuSS = np.concatenate((self.SS[i], self.uSS[i]), axis = 0)NEWLINE extendedSS = np.concatenate((xSSuSS, np.array([xSSuSS[:,-1]]).T), axis=1)NEWLINE SSnext.append(extendedSS[:,idx+1].T)NEWLINENEWLINENEWLINE # From a 3D list to a 2D arrayNEWLINE SSQfun_vector = np.squeeze(list(itertools.chain.from_iterable(SSQfun))).T NEWLINE SSnext_vector = np.squeeze(list(itertools.chain.from_iterable(SSnext))).T NEWLINENEWLINE # Add dimension if neededNEWLINE if SSQfun_vector.ndim == 1:NEWLINE SSQfun_vector = np.array([SSQfun_vector]).TNEWLINE if SSnext_vector.ndim == 1:NEWLINE SSnext_vector = np.array([SSnext_vector]).TNEWLINENEWLINE # Now update ftocp with local safe setNEWLINE self.ftocp.buildNonlinearProgram( SSQfun_vector)NEWLINENEWLINE # Now solve ftocpNEWLINE self.ftocp.solve(xt, self.zt) NEWLINE NEWLINE # Assign inputNEWLINE self.ut = self.ftocp.uSol[:,0]NEWLINENEWLINE # Update guess for the ftocp using optimal predicted trajectory and multipliers lambda NEWLINE if self.ftocp.optCost > 1:NEWLINE xfufNext = np.dot(SSnext_vector, self.ftocp.lamb)NEWLINE # Update ztNEWLINE self.zt = xfufNext[0:self.ftocp.n,0]NEWLINE # Update initial guessNEWLINE xflatOpenLoop = np.concatenate( (self.ftocp.xSol[:,1:(self.ftocp.N+1)].T.flatten(), xfufNext[0:self.ftocp.n,0]), axis = 0)NEWLINE uflatOpenLoop = np.concatenate( (self.ftocp.uSol[:,1:(self.ftocp.N)].T.flatten() , xfufNext[self.ftocp.n:(self.ftocp.n+self.ftocp.d),0]), axis = 0)NEWLINE self.ftocp.xGuess = np.concatenate((xflatOpenLoop, uflatOpenLoop) , axis = 0)NEWLINE NEWLINE def closeToSS(self, it):NEWLINE # TO DO: need to add comments. 
This function is not used in for time-varying, but for space varying.NEWLINE x = self.SS[it]NEWLINE u = self.uSS[it]NEWLINENEWLINE oneVec = np.ones((x.shape[1], 1))NEWLINE ztVec = (np.dot(oneVec, np.array([self.zt]))).TNEWLINE diff = x - ztVecNEWLINENEWLINENEWLINE norm = la.norm(np.array(diff), 1, axis=0)NEWLINE idxMinNorm = np.argsort(norm)NEWLINENEWLINE maxIdn = np.min([x.shape[1], self.P])NEWLINENEWLINE return idxMinNorm[0:maxIdn]NEWLINENEWLINE def timeSS(self, it):NEWLINE # This function computes the indices used to construct the safe setNEWLINE # self.SSindices[it] is initialized when the trajectory is added to the safe set after computing \delta^i and PNEWLINENEWLINE # Read the time indicesNEWLINE currIdx = self.SSindices[it]NEWLINE # By definition we have x_t^j = x_F \forall t > T^j ---> check indices to selectNEWLINE # currIdxShort = currIdx[ (currIdx >0) & (currIdx < np.shape(self.SS[it])[1])]NEWLINE currIdxShort = currIdx[ currIdx < np.shape(self.SS[it])[1] ]NEWLINE NEWLINE if self.verbose == True:NEWLINE print "Time indices selected"NEWLINE print currIdxShortNEWLINENEWLINE # Progress time indicesNEWLINE self.SSindices[it] = self.SSindices[it] + 1NEWLINENEWLINE # If there is just one time index --> add dimensionNEWLINE if np.shape(currIdxShort)[0] < 1:NEWLINE currIdxShort = np.array([np.shape(self.SS[it])[1]-1])NEWLINENEWLINE return currIdxShortNEWLINENEWLINENEWLINE NEWLINE
#!/usr/bin/env pythonNEWLINEfrom setuptools import setupNEWLINENEWLINENEWLINEwith open('README.rst', 'r') as f:NEWLINE    long_description = f.read()NEWLINENEWLINEsetup(NEWLINE    name='githubstars',NEWLINE    version='0.0.6',NEWLINE    description='List repository stars and info through GitHub v4 GraphQL API',NEWLINE    long_description=long_description,NEWLINE    url='https://github.com/hanksudo/githubstars',NEWLINE    author='Hank Wang',NEWLINE    author_email='drapho@gmail.com',NEWLINE    license='MIT',NEWLINE    classifiers=[NEWLINE        'Development Status :: 2 - Pre-Alpha',NEWLINE        'Intended Audience :: Developers',NEWLINE        'Natural Language :: English',NEWLINE        'License :: OSI Approved :: MIT License',NEWLINE        'Programming Language :: Python :: 2',NEWLINE        'Programming Language :: Python :: 2.7',NEWLINE        'Programming Language :: Python :: 3',NEWLINE        'Programming Language :: Python :: 3.4',NEWLINE        'Programming Language :: Python :: 3.5',NEWLINE        'Programming Language :: Python :: 3.6'NEWLINE    ],NEWLINE    keywords='stars github graphql',NEWLINE    py_modules=['githubstars'],NEWLINE    install_requires=[],NEWLINE    entry_points={NEWLINE        'console_scripts': [NEWLINE            'githubstars=githubstars:main'NEWLINE        ]NEWLINE    }NEWLINE)NEWLINE
import unittestNEWLINENEWLINEfrom cubes.sql.mapper import StarSchemaMapper, distill_namingNEWLINEfrom cubes.model import AttributeNEWLINENEWLINEfrom ..common import CubesTestCaseBase, create_providerNEWLINENEWLINEclass MapperTestCase(CubesTestCaseBase):NEWLINE def setUp(self):NEWLINE super(MapperTestCase, self).setUp()NEWLINENEWLINE self.provider = create_provider("mapper_test.json")NEWLINENEWLINE self.cube = self.provider.cube("sales")NEWLINE naming = {NEWLINE "dimension_prefix": "dim_",NEWLINE "dimension_suffix": "_dim"NEWLINE }NEWLINE self.naming = distill_naming(naming)NEWLINE self.mapper = StarSchemaMapper(self.cube, self.naming)NEWLINENEWLINE self.mapper.mappings = {NEWLINE "product.name": "product.product_name",NEWLINE "product.category": "product.category_id",NEWLINE "subcategory.name.en": "subcategory.subcategory_name_en",NEWLINE "subcategory.name.sk": "subcategory.subcategory_name_sk"NEWLINE }NEWLINENEWLINE def test_logical_reference(self):NEWLINENEWLINE dim = self.provider.dimension("date")NEWLINE attr = Attribute("month", dimension=dim)NEWLINE self.assertEqual("date.month", attr.ref)NEWLINENEWLINE dim = self.provider.dimension("product")NEWLINE attr = Attribute("category", dimension=dim)NEWLINE self.assertEqual("product.category", attr.ref)NEWLINENEWLINE dim = self.provider.dimension("flag")NEWLINE attr = Attribute("flag", dimension=dim)NEWLINE self.assertEqual("flag", attr.ref)NEWLINENEWLINE attr = Attribute("measure", dimension=None)NEWLINE self.assertEqual("measure", attr.ref)NEWLINENEWLINE def assertMapping(self, expected, logical_ref, mapper=None):NEWLINE """Create string reference by concatentanig table and column name.NEWLINE No schema is expected (is ignored)."""NEWLINENEWLINE attr = self.cube.attribute(logical_ref)NEWLINE mapper = mapper or self.mapperNEWLINE ref = mapper[attr]NEWLINE sref = ref[1] + "." 
+ ref[2]NEWLINENEWLINE self.assertEqual(expected, sref)NEWLINENEWLINE def test_physical_refs_dimensions(self):NEWLINE """Testing correct default mappings of dimensions (with and withoutNEWLINE explicit default prefix) in physical references."""NEWLINENEWLINE # No dimension prefixNEWLINE self.mapper.naming.dimension_prefix = ""NEWLINE self.mapper.naming.dimension_suffix = ""NEWLINE self.assertMapping("date.year", "date.year")NEWLINE self.assertMapping("sales.flag", "flag")NEWLINE self.assertMapping("sales.amount", "amount")NEWLINENEWLINE # With prefixNEWLINE self.mapper.naming.dimension_prefix = "dm_"NEWLINE self.assertMapping("dm_date.year", "date.year")NEWLINE self.assertMapping("dm_date.month_name", "date.month_name")NEWLINE self.assertMapping("sales.flag", "flag")NEWLINE self.assertMapping("sales.amount", "amount")NEWLINENEWLINE def test_physical_refs_flat_dims(self):NEWLINE self.cube.fact = NoneNEWLINE self.assertMapping("sales.flag", "flag")NEWLINENEWLINE def test_physical_refs_facts(self):NEWLINE """Testing correct mappings of fact attributes in physical references"""NEWLINENEWLINE fact = self.cube.factNEWLINE self.cube.fact = NoneNEWLINE self.assertMapping("sales.amount", "amount")NEWLINE # self.assertEqual("sales.flag", sref("flag.flag"))NEWLINE self.cube.fact = factNEWLINENEWLINE def test_physical_refs_with_mappings_and_locales(self):NEWLINE """Testing mappings of mapped attributes and localized attributes inNEWLINE physical references"""NEWLINENEWLINE self.mapper.mappings = self.cube.mappingsNEWLINE # Test defaultsNEWLINE # Localized mapper is localizing to 'sk', non-localized mapper isNEWLINE # localizing to default 'en'NEWLINE #NEWLINE # Mapper with locale that we haveNEWLINE sk_mapper = StarSchemaMapper(self.cube, self.naming, locale="sk")NEWLINENEWLINE # Mapper with locale that we don't haveNEWLINE de_mapper = StarSchemaMapper(self.cube, self.naming, locale="de")NEWLINENEWLINE self.assertMapping("dim_date_dim.month_name", "date.month_name")NEWLINENEWLINE self.assertMapping("dim_category_dim.category_name_en",NEWLINE "product.category_name")NEWLINENEWLINE self.assertMapping("dim_category_dim.category_name_sk",NEWLINE "product.category_name", sk_mapper)NEWLINENEWLINE # This should default to 'en' since we don't have 'de' locale and theNEWLINE # 'en' locale is the default oneNEWLINE self.assertMapping("dim_category_dim.category_name_en",NEWLINE "product.category_name", de_mapper)NEWLINENEWLINE # Test with mappingNEWLINE self.assertMapping("dim_product_dim.product_name", "product.name")NEWLINE self.assertMapping("dim_product_dim.category_id", "product.category")NEWLINENEWLINE # The product name is not localized, we should get the same for anyNEWLINE # mapperNEWLINE self.assertMapping("dim_product_dim.product_name", "product.name",NEWLINE sk_mapper)NEWLINE self.assertMapping("dim_product_dim.product_name", "product.name",NEWLINE de_mapper)NEWLINENEWLINE self.assertMapping("dim_category_dim.subcategory_name_en",NEWLINE "product.subcategory_name")NEWLINE self.assertMapping("dim_category_dim.subcategory_name_sk",NEWLINE "product.subcategory_name",NEWLINE sk_mapper)NEWLINE self.assertMapping("dim_category_dim.subcategory_name_en",NEWLINE "product.subcategory_name",NEWLINE de_mapper)NEWLINENEWLINE
"""NEWLINEProblem StatementNEWLINENEWLINEAborigines of a mysterious island love to play the Boomerang Game.NEWLINENEWLINEThe game involves n aborigines (n>=2) numbered with consecutive integers fromNEWLINE1 to n. Before the game starts, all participants form a ring so their numbersNEWLINEin a clockwise order match consecutive numbers 1,2,...n. The distances betweenNEWLINEthe adjacent participants are equal. When n=2, they stand in exact oppositeNEWLINEpoints of the ring.NEWLINENEWLINEDuring the game, aborigines throw boomerangs in turn. Each aborigines throw aNEWLINEboomerang at the exact opposite point of the ring. The participant number 1NEWLINEthrows a boomerang first. Two things can happen:NEWLINENEWLINEIf the number of aborigines is even, then the boomerang hits the participantNEWLINEstanding at the exact opposite point from the aborigine numbered 1.NEWLINENEWLINEIf the number of participants is odd, the exact opposite point is empty. InNEWLINEthis case, the boomerang flies back and hits the aborigine who threw it.NEWLINENEWLINEThe aborigine who has been hit by a boomerang leaves the game. The remainingNEWLINEparticipants move around the ring so that the distances between the adjacentNEWLINEaborigines becomes equal again. Note: The aborigines do not change theirNEWLINEorder in the ring during this move.NEWLINENEWLINEThe aborigine who was the closest in the clockwise direction to the aborigineNEWLINEwho threw a boomerang last will go next. Again, only one of the two scenariosNEWLINElisted above can happen. After that, the aborigines move again to make theNEWLINEdistance between the adjacent participants equal. The next turn belongs to theNEWLINEclosest participant in a clockwise order and so on.NEWLINENEWLINEThe aborigines will continue to play and throw boomerangs until there is onlyNEWLINEone participant left. He or she will be the winner.NEWLINENEWLINEDetermine the winning aborigine's number for the given number of participants n.NEWLINENEWLINEInput FormatNEWLINENEWLINEThe first line contains the number of test cases: T. 
Each of the next T linesNEWLINEcontains a single integer: n.NEWLINENEWLINEConstraintsNEWLINENEWLINEFor full score:NEWLINE1 <= T <= 100000NEWLINE2 <= n <= 10^18NEWLINENEWLINEFor 40% score:NEWLINE1 <= T <= 100000NEWLINE2 <= n <= 1000NEWLINENEWLINEOutput FormatNEWLINENEWLINEOutput T lines, one corresponding to each test case.NEWLINENEWLINESample InputNEWLINENEWLINE6NEWLINE2NEWLINE3NEWLINE4NEWLINE5NEWLINE10NEWLINE1000NEWLINESample OutputNEWLINENEWLINE1NEWLINE2NEWLINE4NEWLINE5NEWLINE5NEWLINE818NEWLINE"""NEWLINE#!/bin/pythonNEWLINENEWLINEimport sysNEWLINENEWLINENEWLINE# Direct simulation of the game; this only fits the small subtask (n up to 1000),NEWLINE# not the full n <= 10^18 constraint.NEWLINEt = int(raw_input().strip())NEWLINEfor a0 in xrange(t):NEWLINE    N = int(raw_input().strip())NEWLINE    players = [x for x in range(1, N + 1)]NEWLINE    player_index = 0NEWLINE    while len(players) > 1:NEWLINE        length = len(players)NEWLINE        if length % 2 == 0:NEWLINE            # Even ring: the boomerang hits the player directly opposite the thrower.NEWLINE            remove_index = player_index + length / 2NEWLINE            if remove_index >= length:NEWLINE                remove_index = remove_index % lengthNEWLINE            players.pop(remove_index)NEWLINE            if remove_index != 0:NEWLINE                player_index += 1NEWLINE        else:NEWLINE            # Odd ring: the opposite point is empty, so the boomerang returns and hits the thrower.NEWLINE            players.pop(player_index)NEWLINE            if player_index >= len(players):NEWLINE                player_index = player_index - len(players)NEWLINE    print players[0]
"""NEWLINEFunctions for generating vectorized spark UDFsNEWLINEto distribute the prediction of sklearn model predictionsNEWLINEwith PySpark DataFramesNEWLINE"""NEWLINENEWLINEimport pandas as pdNEWLINEimport numpy as npNEWLINENEWLINENEWLINEclass PysparkRequired(ImportError):NEWLINE passNEWLINENEWLINENEWLINEclass PyarrowRequired(ImportError):NEWLINE passNEWLINENEWLINENEWLINE_PYSPARK_INSTALLED = NoneNEWLINE_PYARROW_INSTALLED = NoneNEWLINENEWLINENEWLINEdef _is_pyspark_installed():NEWLINE global _PYSPARK_INSTALLEDNEWLINE if _PYSPARK_INSTALLED is None:NEWLINE try:NEWLINE import pysparkNEWLINENEWLINE _PYSPARK_INSTALLED = TrueNEWLINE except ImportError:NEWLINE _PYSPARK_INSTALLED = FalseNEWLINENEWLINE if _PYSPARK_INSTALLED:NEWLINE import pysparkNEWLINENEWLINE return TrueNEWLINE else:NEWLINE return FalseNEWLINENEWLINENEWLINEdef _is_pyarrow_installed():NEWLINE global _PYARROW_INSTALLEDNEWLINE if _PYARROW_INSTALLED is None:NEWLINE try:NEWLINE import pyarrowNEWLINENEWLINE _PYARROW_INSTALLED = TrueNEWLINE except ImportError:NEWLINE _PYARROW_INSTALLED = FalseNEWLINENEWLINE if _PYARROW_INSTALLED:NEWLINE import pyarrowNEWLINENEWLINE return TrueNEWLINE else:NEWLINE return FalseNEWLINENEWLINENEWLINEdef _get_vals(*cols, feature_type="numpy", names=None):NEWLINE """ Prep input data for prediction method """NEWLINE if feature_type == "numpy":NEWLINE vals = np.transpose([a.values for a in cols])NEWLINE elif feature_type == "pandas":NEWLINE if names is None:NEWLINE raise Exception("Must pass names argument for pandas feature type")NEWLINE vals = pd.DataFrame(np.transpose([a.values for a in cols]), columns=names)NEWLINE elif feature_type == "text":NEWLINE vals = cols[0].valuesNEWLINE else:NEWLINE raise ValueError("Unknown feature_type: {0}".format(feature_type))NEWLINE return valsNEWLINENEWLINENEWLINEdef get_prediction_udf(model, method="predict", feature_type="numpy", names=None):NEWLINE """NEWLINE Build a vectorized PySpark UDF to apply a sklearn model's `predict` orNEWLINE `predict_proba` methods columns in a PySpark DataFrame. HandlesNEWLINE flexible types of feature data for prediction including 2-D numpyNEWLINE arrays ('numpy'), single field text data ('text') and mixed typeNEWLINE pandas DataFrames ('pandas'). The UDF can then be applied as shown in theNEWLINE example below.NEWLINENEWLINE NOTE: This function requires pyarrow and pyspark with appropriateNEWLINE versions for vectorized pandas UDFs and appropriate spark configurationNEWLINE to use pyarrow. Ths requires pyarrow>=0.8.0 and pyspark>=2.3.0.NEWLINE Additionally, the spark version must be 2.3 or higher. 
These requirementsNEWLINE are not enforced by the sk-dist package at setup time.NEWLINENEWLINE Args:NEWLINE model (sklearn Estimator): sklearn model to distributeNEWLINE predictions with PySparkNEWLINE method (str): name of prediction method; either 'predict'NEWLINE or 'predict_proba'NEWLINE feature_type (str): name of feature type; either 'numpy',NEWLINE 'pandas' or 'text'NEWLINE names (array-like): list of ordered column namesNEWLINE (only necessary for 'pandas' feature_typeNEWLINE Returns:NEWLINE PySpark pandas UDF (pyspark.sql.functions.pandas_udf)NEWLINE Example:NEWLINE >>> import pandas as pdNEWLINE >>> from sklearn.datasets import load_digitsNEWLINE >>> from sklearn.linear_model import LogisticRegressionNEWLINE >>> from pyspark.sql import SparkSessionNEWLINE >>> spark = (NEWLINE >>> SparkSessionNEWLINE >>> .builderNEWLINE >>> .getOrCreate()NEWLINE >>> )NEWLINE >>> data = load_digits()NEWLINE >>> X = data["data"]NEWLINE >>> y = data["target"]NEWLINE >>> model = LogisticRegression()NEWLINE >>> model.fit(X, y)NEWLINE >>> predict = get_prediction_udf(model, method="predict")NEWLINE >>> predict_proba = get_prediction_udf(model, method="predict_proba")NEWLINE >>> pdf = pd.DataFrame(X)NEWLINE >>> sdf = spark.createDataFrame(pdf)NEWLINE >>> cols = [F.col(str(c)) for c in sdf.columns]NEWLINE >>> prediction_df = (NEWLINE >>> sdfNEWLINE >>> .withColumn("scores", predict_proba(*cols))NEWLINE >>> .withColumn("preds", predict(*cols))NEWLINE >>> .select("preds", "scores")NEWLINE >>> )NEWLINE >>> prediction_df.show()NEWLINE ... +-----+--------------------+NEWLINE ... |preds| scores|NEWLINE ... +-----+--------------------+NEWLINE ... | 0|[0.99988026795692...|NEWLINE ... | 1|[4.75035277837040...|NEWLINE ... | 2|[2.94811218592164...|NEWLINE ... | 3|[1.63438595023762...|NEWLINE ... | 4|[1.11339868338047...|NEWLINE ... | 5|[1.47300432716012...|NEWLINE ... | 6|[1.08560009259480...|NEWLINE ... | 7|[3.02428232165044...|NEWLINE ... | 8|[7.65445972596079...|NEWLINE ... | 9|[3.97610488897298...|NEWLINE ... | 0|[0.99918670844137...|NEWLINE ... | 1|[2.65336456879078...|NEWLINE ... | 2|[1.85886361541580...|NEWLINE ... | 3|[2.89824009324990...|NEWLINE ... | 4|[2.84813979824305...|NEWLINE ... | 5|[2.70090567992820...|NEWLINE ... | 6|[1.10907772018062...|NEWLINE ... | 7|[3.06455862370095...|NEWLINE ... | 8|[2.38739344440480...|NEWLINE ... | 9|[8.23628591704589...|NEWLINE ... +-----+--------------------+NEWLINE ... only showing top 20 rowsNEWLINE """NEWLINE if not _is_pyspark_installed():NEWLINE raise ImportError("Module pyspark not found")NEWLINE if not _is_pyarrow_installed():NEWLINE raise ImportError("Module pyarrow not found")NEWLINE from pyspark.sql import functions as FNEWLINE from pyspark.sql.types import DoubleType, StringType, IntegerType, ArrayTypeNEWLINENEWLINE if method == "predict":NEWLINENEWLINE def predict_func(*cols):NEWLINE vals = _get_vals(*cols, feature_type=feature_type, names=names)NEWLINE return pd.Series(model.predict(vals))NEWLINENEWLINE return_type = (NEWLINE StringType() if isinstance(model.classes_[0], str) else IntegerType()NEWLINE )NEWLINE predict = F.pandas_udf(predict_func, returnType=return_type)NEWLINE elif method == "predict_proba":NEWLINENEWLINE def predict_func(*cols):NEWLINE vals = _get_vals(*cols, feature_type=feature_type, names=names)NEWLINE return pd.Series(list(model.predict_proba(vals)))NEWLINENEWLINE predict = F.pandas_udf(predict_func, returnType=ArrayType(DoubleType()))NEWLINE else:NEWLINE raise ValueError("Unknown method: {0}".format(method))NEWLINE return predictNEWLINE
from __future__ import divisionNEWLINEfrom __future__ import print_functionNEWLINENEWLINE__author__ = "Michael T. Lash, PhD"NEWLINE__copyright__ = "Copyright 2019, Michael T. Lash"NEWLINE__credits__ = [None]NEWLINE__license__ = "MIT"NEWLINE__version__ = "1.0.1"NEWLINE__maintainer__ = "Michael T. Lash"NEWLINE__email__ = "michael.lash@ku.edu"NEWLINE__status__ = "Prototype" #"Development", "Production"NEWLINENEWLINEimport datetimeNEWLINEimport timeNEWLINENEWLINEimport tensorflow as tfNEWLINEimport numpy as npNEWLINEimport scipy.sparse as spNEWLINEimport sysNEWLINEimport osNEWLINEimport jsonNEWLINEimport pickle as pklNEWLINEfrom absl import flags,app #Consistent with TF 2.0 APINEWLINEfrom sklearn.metrics import accuracy_scoreNEWLINEfrom tensorflow.keras import backend as KNEWLINENEWLINEfrom invclass.utils import load_data, load_indices, make_modelNEWLINENEWLINENEWLINEseed = 1234NEWLINEtf.random.set_seed(seed)NEWLINENEWLINENEWLINE# SettingsNEWLINE#flags = absl.flagsNEWLINEFLAGS = flags.FLAGSNEWLINENEWLINE#core params..NEWLINEflags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate for optimizer. Optional (default: 0.001)')NEWLINEflags.DEFINE_string('data_path', '', 'Path to the data. Required.')NEWLINEflags.DEFINE_string('data_file', '', 'Name of the file containing the data. Required.')NEWLINEflags.DEFINE_string('file_type', 'csv', 'Type of data file. Either "csv" or "pkl". Optional (default: "csv")')NEWLINEflags.DEFINE_string('util_file', '', 'Name of the file containing index designations. Required.')NEWLINEflags.DEFINE_string('save_file', '', 'Name of the file to save the processed data to. Optional.')NEWLINEflags.DEFINE_boolean('classification', True, 'Classification or regression. Default: True (classification).')NEWLINEflags.DEFINE_integer('epochs', 200, 'Number of epochs to train the model. Optional (default: 200)')NEWLINEflags.DEFINE_float('dropout', 0.0, 'Dropout rate (1 - keep probability). Optional (default: 0)')NEWLINEflags.DEFINE_integer('hidden_units', 20, 'Number of hidden nodes in hidden layer. If 0, then logistic regression\NEWLINE                      model is used. Optional (default: 20).')NEWLINEflags.DEFINE_boolean('indirect_model', False, 'Whether or not we are training a model to predict the\NEWLINE                      indirect features or not. Default: False')NEWLINENEWLINEflags.DEFINE_boolean('class_imb',False, 'Boolean. Whether class imbalance exists. Default: False')NEWLINEflags.DEFINE_float('val_prop',0.10,'Proportion of dataset to use for validation. Default: 0.10')NEWLINEflags.DEFINE_float('test_prop',0.10,'Proportion of dataset to use for testing. 
Default: 0.10')NEWLINEflags.DEFINE_float('weight_decay',0.,'Weight decay on l2 regularization of model weights.')NEWLINENEWLINENEWLINENEWLINE#os.environ["CUDA_VISIBLE_DEVICES"]=str(FLAGS.gpu)NEWLINE#GPU_MEM_FRACTION = 0.8NEWLINENEWLINEdef log_dir():NEWLINE NEWLINE if FLAGS.indirect_model:NEWLINE mod_type = 'indirect'NEWLINE else:NEWLINE mod_type = 'reg'NEWLINENEWLINE log_dir = FLAGS.data_path + "/sup-" + FLAGS.data_file.split(".")[-2]NEWLINE log_dir += "/{model_type:s}_{model_size:d}_{lr:0.4f}/".format(NEWLINE model_type=mod_type,NEWLINE model_size=FLAGS.hidden_units,NEWLINE lr=FLAGS.learning_rate)NEWLINE if not os.path.exists(log_dir):NEWLINE os.makedirs(log_dir)NEWLINE return log_dirNEWLINENEWLINENEWLINEdef train(data_dict):NEWLINE NEWLINE train_dat = data_dict['train']NEWLINE val_dat = data_dict['val']NEWLINE model = make_model(data_dict,FLAGS.hidden_units,FLAGS.indirect_model,FLAGS.classification)NEWLINE csv_logger = tf.keras.callbacks.CSVLogger(log_dir()+'training.log')NEWLINE if FLAGS.indirect_model:NEWLINE tr_X = train_dat['X']NEWLINE val_X = val_dat['X']NEWLINENEWLINE X_train = np.hstack([tr_X[:,data_dict['xU_ind']],tr_X[:,data_dict['xD_ind']]])NEWLINE X_val = np.hstack([val_X[:,data_dict['xU_ind']],val_X[:,data_dict['xD_ind']]])NEWLINE NEWLINE Y_train = tr_X[:,data_dict['xI_ind']]NEWLINE Y_val = val_X[:,data_dict['xI_ind']]NEWLINENEWLINE history = model.fit(X_train, Y_train, epochs=FLAGS.epochs, batch_size=64,NEWLINE validation_data=(X_val,Y_val),NEWLINE callbacks = [csv_logger])NEWLINE NEWLINE model.save(log_dir()+"model.h5") NEWLINENEWLINE returnNEWLINE NEWLINE y_train = tf.keras.utils.to_categorical(train_dat['target'])NEWLINE y_val = tf.keras.utils.to_categorical(val_dat['target'])NEWLINE history = model.fit(train_dat['X'], y_train, epochs=FLAGS.epochs, batch_size=64,NEWLINE validation_data=(val_dat['X'],y_val),NEWLINE callbacks = [csv_logger])NEWLINE NEWLINENEWLINE model.save(log_dir()+"model.h5")NEWLINENEWLINE returnNEWLINENEWLINEdef main(argv):NEWLINE print("Loading data...")NEWLINE unch_indices,indir_indices,dir_indices,cost_inc,cost_dec,direct_chg,id_ind,target_ind = load_indices(FLAGS.data_path,FLAGS.util_file)NEWLINE opt_params = {'cost_inc':cost_inc,'cost_dec':cost_dec,'direct_chg':direct_chg}NEWLINENEWLINE data_dict = load_data(FLAGS.data_path,FLAGS.data_file,FLAGS.file_type,NEWLINE unch_indices,indir_indices,dir_indices,id_ind=id_ind,NEWLINE target_ind=target_ind,seed=seed,imbal_classes=FLAGS.class_imb,NEWLINE val_prop=FLAGS.val_prop,test_prop=FLAGS.test_prop,opt_params=opt_params,NEWLINE save_file=FLAGS.save_file)NEWLINENEWLINE print("Done loading. Training model...")NEWLINE train(data_dict=data_dict)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE app.run(main)NEWLINE
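In the indirect_model branch of train(), the inputs are the unchangeable plus directly changeable columns and the targets are the indirectly changeable columns. A minimal, self-contained illustration of that split follows; only the xU_ind/xD_ind/xI_ind names come from the code above, and the concrete index values are made up.

import numpy as np

# Hypothetical feature matrix with 6 columns and made-up index designations.
X = np.arange(24, dtype=float).reshape(4, 6)
xU_ind = [0, 1]   # unchangeable features
xD_ind = [2, 3]   # directly changeable features
xI_ind = [4, 5]   # indirectly changeable features (targets of the indirect model)

# Mirrors the hstack in train(): inputs are [unchangeable | direct], targets are indirect.
X_train = np.hstack([X[:, xU_ind], X[:, xD_ind]])
Y_train = X[:, xI_ind]
print(X_train.shape, Y_train.shape)   # (4, 4) (4, 2)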
#!/usr/bin/env pythonNEWLINE# -*- coding: utf-8 -*-NEWLINENEWLINE#The MIT License (MIT)NEWLINENEWLINE#Copyright (c) 2015 Sondre EngebraatenNEWLINENEWLINE#Permission is hereby granted, free of charge, to any person obtaining a copyNEWLINE#of this software and associated documentation files (the "Software"), to dealNEWLINE#in the Software without restriction, including without limitation the rightsNEWLINE#to use, copy, modify, merge, publish, distribute, sublicense, and/or sellNEWLINE#copies of the Software, and to permit persons to whom the Software isNEWLINE#furnished to do so, subject to the following conditions:NEWLINENEWLINE#The above copyright notice and this permission notice shall be included in allNEWLINE#copies or substantial portions of the Software.NEWLINENEWLINE#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORNEWLINE#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,NEWLINE#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THENEWLINE#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERNEWLINE#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,NEWLINE#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THENEWLINE#SOFTWARE.NEWLINENEWLINENEWLINEfrom b2.download_dest import DownloadDestBytesNEWLINENEWLINEfrom .B2BaseFile import B2BaseFileNEWLINENEWLINENEWLINEclass B2SequentialFileMemory(B2BaseFile):NEWLINE def __init__(self, b2fuse, file_info, new_file=False):NEWLINE super(B2SequentialFileMemory, self).__init__(b2fuse, file_info)NEWLINE NEWLINE self._dirty = FalseNEWLINE if new_file:NEWLINE self.data = bytearray()NEWLINE self._dirty = TrueNEWLINE else:NEWLINE download_dest = DownloadDestBytes()NEWLINE self.b2fuse.bucket_api.download_file_by_id(self.file_info['fileId'], download_dest)NEWLINE self.data = bytearray(download_dest.get_bytes_written())NEWLINENEWLINE # def __getitem__(self, key):NEWLINE # if isinstance(key, slice):NEWLINE # return self.data[key.start:key.stop] NEWLINE # return self.data[key]NEWLINENEWLINE def upload(self):NEWLINE if self._dirty:NEWLINE self.b2fuse.bucket_api.upload_bytes(bytes(self.data), self.file_info['fileName'])NEWLINE self.b2fuse._update_directory_structure()NEWLINE self.file_info = self.b2fuse._directories.get_file_info(self.file_info['fileName'])NEWLINENEWLINE self._dirty = FalseNEWLINENEWLINE #def __setitem__(self, key, value):NEWLINE # self.data[key] = valueNEWLINENEWLINE def __len__(self):NEWLINE return len(self.data)NEWLINENEWLINE #def __del__(self):NEWLINE # self.delete()NEWLINENEWLINE def write(self, offset, data):NEWLINE if offset == len(self):NEWLINE self.data.extend(data)NEWLINE elif offset+len(data) < len(self):NEWLINE for i in range(len(data)):NEWLINE self.data[offset+i] = data[i]NEWLINE else:NEWLINE extend_length = offset-len(data)NEWLINE self.data.extend([0 for i in range(extend_length)])NEWLINE self.write(offset, data)NEWLINENEWLINE def read(self, offset, length):NEWLINE return bytes(self.data[offset:offset + length])NEWLINENEWLINE def truncate(self, length):NEWLINE self.data = self.data[:length]NEWLINENEWLINE def set_dirty(self, new_value):NEWLINE self._dirty = new_valueNEWLINENEWLINE def delete(self, delete_online):NEWLINE if delete_online:NEWLINE self.b2fuse.bucket_api.delete_file_version(NEWLINE self.file_info['fileId'], self.file_info['fileName']NEWLINE )NEWLINE del self.dataNEWLINE
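The write()/read() pair above implements offset-based access over an in-memory bytearray. Below is a standalone sketch of the same three cases (append, in-place overwrite, zero-pad past end of buffer); it is an illustration, not the class's actual code path. Note that it pads with offset - len(buf) zeros, whereas the class above computes the pad as offset - len(data), which may be worth double-checking.

def sparse_write(buf, offset, data):
    # buf is a bytearray; mirrors the three branches of B2SequentialFileMemory.write().
    if offset == len(buf):
        buf.extend(data)                            # plain append at end of buffer
    elif offset + len(data) <= len(buf):
        buf[offset:offset + len(data)] = data       # in-place overwrite
    else:
        buf.extend(b"\x00" * (offset - len(buf)))   # zero-pad up to the write offset
        sparse_write(buf, offset, data)

buf = bytearray(b"hello")
sparse_write(buf, 8, b"world")
print(bytes(buf))  # b'hello\x00\x00\x00world'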
from __future__ import absolute_import, division, print_functionNEWLINENEWLINEimport base64NEWLINEfrom collections import defaultdictNEWLINEimport contextlibNEWLINEimport fnmatchNEWLINEfrom glob2 import globNEWLINEimport jsonNEWLINEfrom locale import getpreferredencodingNEWLINEimport loggingNEWLINEimport logging.configNEWLINEimport mmapNEWLINEimport operatorNEWLINEimport osNEWLINEfrom os.path import dirname, getmtime, getsize, isdir, join, isfile, abspath, islinkNEWLINEimport reNEWLINEimport statNEWLINEimport subprocessNEWLINEimport sysNEWLINEimport shutilNEWLINEimport tarfileNEWLINEimport tempfileNEWLINEimport timeNEWLINEimport yamlNEWLINEimport zipfileNEWLINENEWLINEfrom distutils.version import LooseVersionNEWLINEimport filelockNEWLINENEWLINEfrom conda import __version__ as conda_versionNEWLINENEWLINEfrom .conda_interface import hashsum_file, md5_file, unix_path_to_win, win_path_to_unixNEWLINEfrom .conda_interface import PY3, iteritemsNEWLINEfrom .conda_interface import root_dir, pkgs_dirsNEWLINEfrom .conda_interface import string_types, url_path, get_rc_urlsNEWLINEfrom .conda_interface import memoizedNEWLINEfrom .conda_interface import StringIONEWLINEfrom .conda_interface import VersionOrder, MatchSpecNEWLINEfrom .conda_interface import cc_conda_buildNEWLINE# NOQA because it is not used in this file.NEWLINEfrom conda_build.conda_interface import rm_rf as _rm_rf # NOQANEWLINEfrom conda_build.os_utils import externalNEWLINENEWLINEif PY3:NEWLINE import urllib.parse as urlparseNEWLINE import urllib.request as urllibNEWLINE # NOQA because it is not used in this file.NEWLINE from contextlib import ExitStack # NOQANEWLINE PermissionError = PermissionError # NOQANEWLINEelse:NEWLINE import urlparseNEWLINE import urllibNEWLINE # NOQA because it is not used in this file.NEWLINE from contextlib2 import ExitStack # NOQANEWLINE PermissionError = OSErrorNEWLINENEWLINENEWLINEon_win = (sys.platform == 'win32')NEWLINENEWLINEcodec = getpreferredencoding() or 'utf-8'NEWLINEon_win = sys.platform == "win32"NEWLINEroot_script_dir = os.path.join(root_dir, 'Scripts' if on_win else 'bin')NEWLINEmmap_MAP_PRIVATE = 0 if on_win else mmap.MAP_PRIVATENEWLINEmmap_PROT_READ = 0 if on_win else mmap.PROT_READNEWLINEmmap_PROT_WRITE = 0 if on_win else mmap.PROT_WRITENEWLINENEWLINENEWLINEPY_TMPL = """NEWLINE# -*- coding: utf-8 -*-NEWLINEimport reNEWLINEimport sysNEWLINENEWLINEfrom %(module)s import %(import_name)sNEWLINENEWLINEif __name__ == '__main__':NEWLINE sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])NEWLINE sys.exit(%(func)s())NEWLINE"""NEWLINENEWLINENEWLINEdef get_recipe_abspath(recipe):NEWLINE """resolve recipe dir as absolute path. 
If recipe is a tarball rather than a folder,NEWLINE extract it and return the extracted directory.NEWLINENEWLINE Returns the absolute path, and a boolean flag that is true if a tarball has been extractedNEWLINE and needs cleanup.NEWLINE """NEWLINE # Don't use byte literals for paths in Python 2NEWLINE if not PY3:NEWLINE recipe = recipe.decode(getpreferredencoding() or 'utf-8')NEWLINE if isfile(recipe):NEWLINE if recipe.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):NEWLINE recipe_dir = tempfile.mkdtemp()NEWLINE t = tarfile.open(recipe, 'r:*')NEWLINE t.extractall(path=recipe_dir)NEWLINE # At some stage the old build system started to tar up recipes.NEWLINE recipe_tarfile = os.path.join(recipe_dir, 'info', 'recipe.tar')NEWLINE if isfile(recipe_tarfile):NEWLINE t2 = tarfile.open(recipe_tarfile, 'r:*')NEWLINE t2.extractall(path=os.path.join(recipe_dir, 'info'))NEWLINE t2.close()NEWLINE t.close()NEWLINE need_cleanup = TrueNEWLINE else:NEWLINE print("Ignoring non-recipe: %s" % recipe)NEWLINE return (None, None)NEWLINE else:NEWLINE recipe_dir = abspath(os.path.join(os.getcwd(), recipe))NEWLINE need_cleanup = FalseNEWLINE if not os.path.exists(recipe_dir):NEWLINE raise ValueError("Package or recipe at path {0} does not exist".format(recipe_dir))NEWLINE return recipe_dir, need_cleanupNEWLINENEWLINENEWLINE@contextlib.contextmanagerNEWLINEdef try_acquire_locks(locks, timeout):NEWLINE """Try to acquire all locks. If any lock can't be immediately acquired, free all locksNEWLINENEWLINE http://stackoverflow.com/questions/9814008/multiple-mutex-locking-strategies-and-why-libraries-dont-use-address-comparisonNEWLINE """NEWLINE t = time.time()NEWLINE while (time.time() - t < timeout):NEWLINE for lock in locks:NEWLINE try:NEWLINE lock.acquire(timeout=0.1)NEWLINE except filelock.Timeout:NEWLINE for lock in locks:NEWLINE lock.release()NEWLINE breakNEWLINE breakNEWLINE yieldNEWLINE for lock in locks:NEWLINE if lock:NEWLINE lock.release()NEWLINENEWLINENEWLINE# with each of these, we are copying less metadata. This seems to be necessaryNEWLINE# to cope with some shared filesystems with some virtual machine setups.NEWLINE# See https://github.com/conda/conda-build/issues/1426NEWLINEdef _copy_with_shell_fallback(src, dst):NEWLINE is_copied = FalseNEWLINE for func in (shutil.copy2, shutil.copy, shutil.copyfile):NEWLINE try:NEWLINE func(src, dst)NEWLINE is_copied = TrueNEWLINE breakNEWLINE except (IOError, OSError, PermissionError):NEWLINE continueNEWLINE if not is_copied:NEWLINE try:NEWLINE subprocess.check_call('cp -a {} {}'.format(src, dst), shell=True,NEWLINE stderr=subprocess.PIPE, stdout=subprocess.PIPE)NEWLINE except subprocess.CalledProcessError as e:NEWLINE if not os.path.isfile(dst):NEWLINE raise OSError("Failed to copy {} to {}. 
Error was: {}".format(src, dst, e))NEWLINENEWLINENEWLINEdef get_prefix_replacement_paths(src, dst):NEWLINE ssplit = src.split(os.path.sep)NEWLINE dsplit = dst.split(os.path.sep)NEWLINE while ssplit and ssplit[-1] == dsplit[-1]:NEWLINE del ssplit[-1]NEWLINE del dsplit[-1]NEWLINE return os.path.join(*ssplit), os.path.join(*dsplit)NEWLINENEWLINENEWLINEdef copy_into(src, dst, timeout=90, symlinks=False, lock=None, locking=True, clobber=False):NEWLINE """Copy all the files and directories in src to the directory dst"""NEWLINE log = get_logger(__name__)NEWLINE if symlinks and islink(src):NEWLINE try:NEWLINE os.makedirs(os.path.dirname(dst))NEWLINE except OSError:NEWLINE passNEWLINE if os.path.lexists(dst):NEWLINE os.remove(dst)NEWLINE src_base, dst_base = get_prefix_replacement_paths(src, dst)NEWLINE src_target = os.readlink(src)NEWLINE src_replaced = src_target.replace(src_base, dst_base)NEWLINE os.symlink(src_replaced, dst)NEWLINE try:NEWLINE st = os.lstat(src)NEWLINE mode = stat.S_IMODE(st.st_mode)NEWLINE os.lchmod(dst, mode)NEWLINE except:NEWLINE pass # lchmod not availableNEWLINE elif isdir(src):NEWLINE merge_tree(src, dst, symlinks, timeout=timeout, lock=lock, locking=locking, clobber=clobber)NEWLINENEWLINE else:NEWLINE if isdir(dst):NEWLINE dst_fn = os.path.join(dst, os.path.basename(src))NEWLINE else:NEWLINE dst_fn = dstNEWLINENEWLINE if os.path.isabs(src):NEWLINE src_folder = os.path.dirname(src)NEWLINE else:NEWLINE if os.path.sep in dst_fn:NEWLINE src_folder = os.path.dirname(dst_fn)NEWLINE if not os.path.isdir(src_folder):NEWLINE os.makedirs(src_folder)NEWLINE else:NEWLINE src_folder = os.getcwd()NEWLINENEWLINE if os.path.islink(src) and not os.path.exists(os.path.realpath(src)):NEWLINE log.warn('path %s is a broken symlink - ignoring copy', src)NEWLINE returnNEWLINENEWLINE if not lock and locking:NEWLINE lock = get_lock(src_folder, timeout=timeout)NEWLINE locks = [lock] if locking else []NEWLINE with try_acquire_locks(locks, timeout):NEWLINE # if intermediate folders not not exist create themNEWLINE dst_folder = os.path.dirname(dst)NEWLINE if dst_folder and not os.path.exists(dst_folder):NEWLINE try:NEWLINE os.makedirs(dst_folder)NEWLINE except OSError:NEWLINE passNEWLINE try:NEWLINE _copy_with_shell_fallback(src, dst_fn)NEWLINE except shutil.Error:NEWLINE log.debug("skipping %s - already exists in %s",NEWLINE os.path.basename(src), dst)NEWLINENEWLINENEWLINE# http://stackoverflow.com/a/22331852/1170370NEWLINEdef copytree(src, dst, symlinks=False, ignore=None, dry_run=False):NEWLINE if not os.path.exists(dst):NEWLINE os.makedirs(dst)NEWLINE shutil.copystat(src, dst)NEWLINE lst = os.listdir(src)NEWLINE if ignore:NEWLINE excl = ignore(src, lst)NEWLINE lst = [x for x in lst if x not in excl]NEWLINENEWLINE # do not copy lock filesNEWLINE if '.conda_lock' in lst:NEWLINE lst.remove('.conda_lock')NEWLINENEWLINE dst_lst = [os.path.join(dst, item) for item in lst]NEWLINENEWLINE if not dry_run:NEWLINE for idx, item in enumerate(lst):NEWLINE s = os.path.join(src, item)NEWLINE d = dst_lst[idx]NEWLINE if symlinks and os.path.islink(s):NEWLINE if os.path.lexists(d):NEWLINE os.remove(d)NEWLINE os.symlink(os.readlink(s), d)NEWLINE try:NEWLINE st = os.lstat(s)NEWLINE mode = stat.S_IMODE(st.st_mode)NEWLINE os.lchmod(d, mode)NEWLINE except:NEWLINE pass # lchmod not availableNEWLINE elif os.path.isdir(s):NEWLINE copytree(s, d, symlinks, ignore)NEWLINE else:NEWLINE _copy_with_shell_fallback(s, d)NEWLINENEWLINE return dst_lstNEWLINENEWLINENEWLINEdef merge_tree(src, dst, symlinks=False, timeout=90, 
lock=None, locking=True, clobber=False):NEWLINE """NEWLINE Merge src into dst recursively by copying all files from src into dst.NEWLINE Return a list of all files copied.NEWLINENEWLINE Like copytree(src, dst), but raises an error if merging the two treesNEWLINE would overwrite any files.NEWLINE """NEWLINE dst = os.path.normpath(os.path.normcase(dst))NEWLINE src = os.path.normpath(os.path.normcase(src))NEWLINE assert not dst.startswith(src), ("Can't merge/copy source into subdirectory of itself. "NEWLINE "Please create separate spaces for these things.")NEWLINENEWLINE new_files = copytree(src, dst, symlinks=symlinks, dry_run=True)NEWLINE existing = [f for f in new_files if isfile(f)]NEWLINENEWLINE if existing and not clobber:NEWLINE raise IOError("Can't merge {0} into {1}: file exists: "NEWLINE "{2}".format(src, dst, existing[0]))NEWLINENEWLINE locks = []NEWLINE if locking:NEWLINE if not lock:NEWLINE lock = get_lock(src, timeout=timeout)NEWLINE locks = [lock]NEWLINE with try_acquire_locks(locks, timeout):NEWLINE copytree(src, dst, symlinks=symlinks)NEWLINENEWLINENEWLINE# purpose here is that we want *one* lock per location on disk. It can be locked or unlockedNEWLINE# at any time, but the lock within this process should all be tied to the same trackingNEWLINE# mechanism.NEWLINE_lock_folders = (os.path.join(root_dir, 'locks'),NEWLINE os.path.expanduser(os.path.join('~', '.conda_build_locks')))NEWLINENEWLINENEWLINEdef get_lock(folder, timeout=90):NEWLINE fl = NoneNEWLINE try:NEWLINE location = os.path.abspath(os.path.normpath(folder))NEWLINE except OSError:NEWLINE location = folderNEWLINE b_location = locationNEWLINE if hasattr(b_location, 'encode'):NEWLINE b_location = b_location.encode()NEWLINE lock_filename = base64.urlsafe_b64encode(b_location)[:20]NEWLINE if hasattr(lock_filename, 'decode'):NEWLINE lock_filename = lock_filename.decode()NEWLINE for locks_dir in _lock_folders:NEWLINE try:NEWLINE if not os.path.isdir(locks_dir):NEWLINE os.makedirs(locks_dir)NEWLINE lock_file = os.path.join(locks_dir, lock_filename)NEWLINE with open(lock_file, 'w') as f:NEWLINE f.write("")NEWLINE fl = filelock.FileLock(lock_file, timeout)NEWLINE breakNEWLINE except (OSError, IOError):NEWLINE continueNEWLINE else:NEWLINE raise RuntimeError("Could not write locks folder to either system location ({0})"NEWLINE "or user location ({1}). 
Aborting.".format(*_lock_folders))NEWLINE return flNEWLINENEWLINENEWLINEdef get_conda_operation_locks(locking=True, bldpkgs_dirs=None, timeout=90):NEWLINE locks = []NEWLINE bldpkgs_dirs = ensure_list(bldpkgs_dirs)NEWLINE # locks enabled by defaultNEWLINE if locking:NEWLINE _pkgs_dirs = pkgs_dirs[:1]NEWLINE locked_folders = _pkgs_dirs + list(bldpkgs_dirs)NEWLINE for folder in locked_folders:NEWLINE if not os.path.isdir(folder):NEWLINE os.makedirs(folder)NEWLINE lock = get_lock(folder, timeout=timeout)NEWLINE locks.append(lock)NEWLINE # lock used to generally indicate a conda operation occurringNEWLINE locks.append(get_lock('conda-operation', timeout=timeout))NEWLINE return locksNEWLINENEWLINENEWLINEdef relative(f, d='lib'):NEWLINE assert not f.startswith('/'), fNEWLINE assert not d.startswith('/'), dNEWLINE d = d.strip('/').split('/')NEWLINE if d == ['.']:NEWLINE d = []NEWLINE f = dirname(f).split('/')NEWLINE if f == ['']:NEWLINE f = []NEWLINE while d and f and d[0] == f[0]:NEWLINE d.pop(0)NEWLINE f.pop(0)NEWLINE return '/'.join(((['..'] * len(f)) if f else ['.']) + d)NEWLINENEWLINENEWLINEdef tar_xf(tarball, dir_path, mode='r:*'):NEWLINE if tarball.lower().endswith('.tar.z'):NEWLINE uncompress = external.find_executable('uncompress')NEWLINE if not uncompress:NEWLINE uncompress = external.find_executable('gunzip')NEWLINE if not uncompress:NEWLINE sys.exit("""\NEWLINEuncompress (or gunzip) is required to unarchive .z source files.NEWLINE""")NEWLINE check_call_env([uncompress, '-f', tarball])NEWLINE tarball = tarball[:-2]NEWLINE if not PY3 and tarball.endswith('.tar.xz'):NEWLINE unxz = external.find_executable('unxz')NEWLINE if not unxz:NEWLINE sys.exit("""\NEWLINEunxz is required to unarchive .xz source files.NEWLINE""")NEWLINENEWLINE check_call_env([unxz, '-f', '-k', tarball])NEWLINE tarball = tarball[:-3]NEWLINE t = tarfile.open(tarball, mode)NEWLINE if not PY3:NEWLINE t.extractall(path=dir_path.encode(codec))NEWLINE else:NEWLINE t.extractall(path=dir_path)NEWLINE t.close()NEWLINENEWLINENEWLINEdef unzip(zip_path, dir_path):NEWLINE z = zipfile.ZipFile(zip_path)NEWLINE for info in z.infolist():NEWLINE name = info.filenameNEWLINE if name.endswith('/'):NEWLINE continueNEWLINE path = join(dir_path, *name.split('/'))NEWLINE dp = dirname(path)NEWLINE if not isdir(dp):NEWLINE os.makedirs(dp)NEWLINE with open(path, 'wb') as fo:NEWLINE fo.write(z.read(name))NEWLINE unix_attributes = info.external_attr >> 16NEWLINE if unix_attributes:NEWLINE os.chmod(path, unix_attributes)NEWLINE z.close()NEWLINENEWLINENEWLINEdef file_info(path):NEWLINE return {'size': getsize(path),NEWLINE 'md5': md5_file(path),NEWLINE 'sha256': hashsum_file(path, 'sha256'),NEWLINE 'mtime': getmtime(path)}NEWLINENEWLINE# Taken from toolzNEWLINENEWLINENEWLINEdef groupby(key, seq):NEWLINE """ Group a collection by a key functionNEWLINE >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']NEWLINE >>> groupby(len, names) # doctest: +SKIPNEWLINE {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}NEWLINE >>> iseven = lambda x: x % 2 == 0NEWLINE >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIPNEWLINE {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}NEWLINE Non-callable keys imply grouping on a member.NEWLINE >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},NEWLINE ... {'name': 'Bob', 'gender': 'M'},NEWLINE ... 
{'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIPNEWLINE    {'F': [{'gender': 'F', 'name': 'Alice'}],NEWLINE     'M': [{'gender': 'M', 'name': 'Bob'},NEWLINE           {'gender': 'M', 'name': 'Charlie'}]}NEWLINE    See Also:NEWLINE        countbyNEWLINE    """NEWLINE    if not callable(key):NEWLINE        key = getter(key)NEWLINE    d = defaultdict(lambda: [].append)NEWLINE    for item in seq:NEWLINE        d[key(item)](item)NEWLINE    rv = {}NEWLINE    for k, v in iteritems(d):NEWLINE        rv[k] = v.__self__NEWLINE    return rvNEWLINENEWLINENEWLINEdef getter(index):NEWLINE    if isinstance(index, list):NEWLINE        if len(index) == 1:NEWLINE            index = index[0]NEWLINE            return lambda x: (x[index],)NEWLINE        elif index:NEWLINE            return operator.itemgetter(*index)NEWLINE        else:NEWLINE            return lambda x: ()NEWLINE    else:NEWLINE        return operator.itemgetter(index)NEWLINENEWLINENEWLINEdef comma_join(items):NEWLINE    """NEWLINE    Like ', '.join(items) but with andNEWLINENEWLINE    Examples:NEWLINENEWLINE    >>> comma_join(['a'])NEWLINE    'a'NEWLINE    >>> comma_join(['a', 'b'])NEWLINE    'a and b'NEWLINE    >>> comma_join(['a', 'b', 'c'])NEWLINE    'a, b, and c'NEWLINE    """NEWLINE    return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]NEWLINENEWLINENEWLINEdef safe_print_unicode(*args, **kwargs):NEWLINE    """NEWLINE    prints unicode strings to stdout using configurable `errors` handler forNEWLINE    encoding errorsNEWLINENEWLINE    :param args: unicode strings to print to stdoutNEWLINE    :param sep: separator (defaults to ' ')NEWLINE    :param end: ending character (defaults to '\n')NEWLINE    :param errors: error handler for encoding errors (defaults to 'replace')NEWLINE    """NEWLINE    sep = kwargs.pop('sep', u' ')NEWLINE    end = kwargs.pop('end', u'\n')NEWLINE    errors = kwargs.pop('errors', 'replace')NEWLINE    if PY3:NEWLINE        func = sys.stdout.buffer.writeNEWLINE    else:NEWLINE        func = sys.stdout.writeNEWLINE    line = sep.join(args) + endNEWLINE    encoding = sys.stdout.encoding or 'utf8'NEWLINE    func(line.encode(encoding, errors))NEWLINENEWLINENEWLINEdef rec_glob(path, patterns):NEWLINE    result = []NEWLINE    for d_f in os.walk(path):NEWLINE        # ignore the .git folderNEWLINE        # if '.git' in d_f[0]:NEWLINE        #     continueNEWLINE        m = []NEWLINE        for pattern in patterns:NEWLINE            m.extend(fnmatch.filter(d_f[2], pattern))NEWLINE        if m:NEWLINE            result.extend([os.path.join(d_f[0], f) for f in m])NEWLINE    return resultNEWLINENEWLINENEWLINEdef convert_unix_path_to_win(path):NEWLINE    if external.find_executable('cygpath'):NEWLINE        cmd = "cygpath -w {0}".format(path)NEWLINE        if PY3:NEWLINE            path = subprocess.getoutput(cmd)NEWLINE        else:NEWLINE            path = subprocess.check_output(cmd.split()).rstrip().rstrip("\\")NEWLINENEWLINE    else:NEWLINE        path = unix_path_to_win(path)NEWLINE    return pathNEWLINENEWLINENEWLINEdef convert_win_path_to_unix(path):NEWLINE    if external.find_executable('cygpath'):NEWLINE        cmd = "cygpath -u {0}".format(path)NEWLINE        if PY3:NEWLINE            path = subprocess.getoutput(cmd)NEWLINE        else:NEWLINE            path = subprocess.check_output(cmd.split()).rstrip().rstrip("\\")NEWLINENEWLINE    else:NEWLINE        path = win_path_to_unix(path)NEWLINE    return pathNEWLINENEWLINENEWLINE# Used for translating local paths into url (file://) pathsNEWLINE# http://stackoverflow.com/a/14298190/1170370NEWLINEdef path2url(path):NEWLINE    return urlparse.urljoin('file:', urllib.pathname2url(path))NEWLINENEWLINENEWLINEdef get_stdlib_dir(prefix, py_ver):NEWLINE    if sys.platform == 'win32':NEWLINE        lib_dir = os.path.join(prefix, 'Lib')NEWLINE    else:NEWLINE        lib_dir = os.path.join(prefix, 'lib', 'python{}'.format(py_ver))NEWLINE    return lib_dirNEWLINENEWLINENEWLINEdef 
get_site_packages(prefix, py_ver):NEWLINE return os.path.join(get_stdlib_dir(prefix, py_ver), 'site-packages')NEWLINENEWLINENEWLINEdef get_build_folders(croot):NEWLINE # remember, glob is not a regex.NEWLINE return glob(os.path.join(croot, "*" + "[0-9]" * 10 + "*"))NEWLINENEWLINENEWLINEdef prepend_bin_path(env, prefix, prepend_prefix=False):NEWLINE # bin_dirname takes care of bin on *nix, Scripts on winNEWLINE env['PATH'] = join(prefix, bin_dirname) + os.pathsep + env['PATH']NEWLINE if sys.platform == "win32":NEWLINE env['PATH'] = join(prefix, "Library", "mingw-w64", "bin") + os.pathsep + \NEWLINE join(prefix, "Library", "usr", "bin") + os.pathsep + os.pathsep + \NEWLINE join(prefix, "Library", "bin") + os.pathsep + \NEWLINE join(prefix, "Scripts") + os.pathsep + \NEWLINE env['PATH']NEWLINE prepend_prefix = True # windows has Python in the prefix. Use it.NEWLINE if prepend_prefix:NEWLINE env['PATH'] = prefix + os.pathsep + env['PATH']NEWLINE return envNEWLINENEWLINENEWLINE# not currently used. Leaving in because it may be useful for when we do thingsNEWLINE# like load setup.py data, and we need the modules from some prefix other thanNEWLINE# the root prefix, which is what conda-build runs from.NEWLINE@contextlib.contextmanagerNEWLINEdef sys_path_prepended(prefix):NEWLINE path_backup = sys.path[:]NEWLINE if on_win:NEWLINE sys.path.insert(1, os.path.join(prefix, 'lib', 'site-packages'))NEWLINE else:NEWLINE lib_dir = os.path.join(prefix, 'lib')NEWLINE python_dir = glob(os.path.join(lib_dir, 'python[0-9\.]*'))NEWLINE if python_dir:NEWLINE python_dir = python_dir[0]NEWLINE sys.path.insert(1, os.path.join(python_dir, 'site-packages'))NEWLINE try:NEWLINE yieldNEWLINE finally:NEWLINE sys.path = path_backupNEWLINENEWLINENEWLINE@contextlib.contextmanagerNEWLINEdef path_prepended(prefix):NEWLINE old_path = os.environ['PATH']NEWLINE os.environ['PATH'] = prepend_bin_path(os.environ.copy(), prefix, True)['PATH']NEWLINE try:NEWLINE yieldNEWLINE finally:NEWLINE os.environ['PATH'] = old_pathNEWLINENEWLINENEWLINEbin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'NEWLINENEWLINEentry_pat = re.compile('\s*([\w\-\.]+)\s*=\s*([\w.]+):([\w.]+)\s*$')NEWLINENEWLINENEWLINEdef iter_entry_points(items):NEWLINE for item in items:NEWLINE m = entry_pat.match(item)NEWLINE if m is None:NEWLINE sys.exit("Error cound not match entry point: %r" % item)NEWLINE yield m.groups()NEWLINENEWLINENEWLINEdef create_entry_point(path, module, func, config):NEWLINE import_name = func.split('.')[0]NEWLINE pyscript = PY_TMPL % {NEWLINE 'module': module, 'func': func, 'import_name': import_name}NEWLINE if on_win:NEWLINE with open(path + '-script.py', 'w') as fo:NEWLINE if os.path.isfile(os.path.join(config.host_prefix, 'python_d.exe')):NEWLINE fo.write('#!python_d\n')NEWLINE fo.write(pyscript)NEWLINE copy_into(join(dirname(__file__), 'cli-{}.exe'.format(config.arch)),NEWLINE path + '.exe', config.timeout)NEWLINE else:NEWLINE if os.path.islink(path):NEWLINE os.remove(path)NEWLINE with open(path, 'w') as fo:NEWLINE if not config.noarch:NEWLINE fo.write('#!%s\n' % config.build_python)NEWLINE fo.write(pyscript)NEWLINE os.chmod(path, 0o775)NEWLINENEWLINENEWLINEdef create_entry_points(items, config):NEWLINE if not items:NEWLINE returnNEWLINE bin_dir = join(config.host_prefix, bin_dirname)NEWLINE if not isdir(bin_dir):NEWLINE os.mkdir(bin_dir)NEWLINE for cmd, module, func in iter_entry_points(items):NEWLINE create_entry_point(join(bin_dir, cmd), module, func, config)NEWLINENEWLINENEWLINE# Return all files in dir, and all its 
subdirectories, ending in patternNEWLINEdef get_ext_files(start_path, pattern):NEWLINE for root, _, files in os.walk(start_path):NEWLINE for f in files:NEWLINE if f.endswith(pattern):NEWLINE yield os.path.join(root, f)NEWLINENEWLINENEWLINEdef _func_defaulting_env_to_os_environ(func, *popenargs, **kwargs):NEWLINE if 'env' not in kwargs:NEWLINE kwargs = kwargs.copy()NEWLINE env_copy = os.environ.copy()NEWLINE kwargs.update({'env': env_copy})NEWLINE kwargs['env'] = {str(key): str(value) for key, value in kwargs['env'].items()}NEWLINE _args = []NEWLINE if 'stdin' not in kwargs:NEWLINE kwargs['stdin'] = subprocess.PIPENEWLINE for arg in popenargs:NEWLINE # arguments to subprocess need to be bytestringsNEWLINE if sys.version_info.major < 3 and hasattr(arg, 'encode'):NEWLINE arg = arg.encode(codec)NEWLINE elif sys.version_info.major >= 3 and hasattr(arg, 'decode'):NEWLINE arg = arg.decode(codec)NEWLINE _args.append(str(arg))NEWLINE return func(_args, **kwargs)NEWLINENEWLINENEWLINEdef check_call_env(popenargs, **kwargs):NEWLINE return _func_defaulting_env_to_os_environ(subprocess.check_call, *popenargs, **kwargs)NEWLINENEWLINENEWLINEdef check_output_env(popenargs, **kwargs):NEWLINE return _func_defaulting_env_to_os_environ(subprocess.check_output, *popenargs, **kwargs)\NEWLINE .rstrip()NEWLINENEWLINENEWLINE_posix_exes_cache = {}NEWLINENEWLINENEWLINEdef convert_path_for_cygwin_or_msys2(exe, path):NEWLINE "If exe is a Cygwin or MSYS2 executable then filters it through `cygpath -u`"NEWLINE if sys.platform != 'win32':NEWLINE return pathNEWLINE if exe not in _posix_exes_cache:NEWLINE with open(exe, "rb") as exe_file:NEWLINE exe_binary = exe_file.read()NEWLINE msys2_cygwin = re.findall(b'(cygwin1.dll|msys-2.0.dll)', exe_binary)NEWLINE _posix_exes_cache[exe] = True if msys2_cygwin else FalseNEWLINE if _posix_exes_cache[exe]:NEWLINE try:NEWLINE path = check_output_env(['cygpath', '-u',NEWLINE path]).splitlines()[0].decode(getpreferredencoding())NEWLINE except WindowsError:NEWLINE log = get_logger(__name__)NEWLINE log.debug('cygpath executable not found. Passing native path. This is OK for msys2.')NEWLINE return pathNEWLINENEWLINENEWLINEdef print_skip_message(metadata):NEWLINE print("Skipped: {} defines build/skip for this "NEWLINE "configuration.".format(metadata.path))NEWLINENEWLINENEWLINE@memoizedNEWLINEdef package_has_file(package_path, file_path):NEWLINE try:NEWLINE locks = get_conda_operation_locks()NEWLINE with try_acquire_locks(locks, timeout=90):NEWLINE with tarfile.open(package_path) as t:NEWLINE try:NEWLINE # internal paths are always forward slashed on all platformsNEWLINE file_path = file_path.replace('\\', '/')NEWLINE text = t.extractfile(file_path).read()NEWLINE return textNEWLINE except KeyError:NEWLINE return FalseNEWLINE except OSError as e:NEWLINE raise RuntimeError("Could not extract %s (%s)" % (package_path, e))NEWLINE except tarfile.ReadError:NEWLINE raise RuntimeError("Could not extract metadata from %s. "NEWLINE "File probably corrupt." 
% package_path)NEWLINENEWLINENEWLINEdef ensure_list(arg):NEWLINE    if (isinstance(arg, string_types) or not hasattr(arg, '__iter__')):NEWLINE        if arg:NEWLINE            arg = [arg]NEWLINE        else:NEWLINE            arg = []NEWLINE    return argNEWLINENEWLINENEWLINE@contextlib.contextmanagerNEWLINEdef tmp_chdir(dest):NEWLINE    curdir = os.getcwd()NEWLINE    try:NEWLINE        os.chdir(dest)NEWLINE        yieldNEWLINE    finally:NEWLINE        os.chdir(curdir)NEWLINENEWLINENEWLINEdef expand_globs(path_list, root_dir):NEWLINE    log = get_logger(__name__)NEWLINE    files = []NEWLINE    for path in path_list:NEWLINE        if not os.path.isabs(path):NEWLINE            path = os.path.join(root_dir, path)NEWLINE        if os.path.islink(path):NEWLINE            files.append(path.replace(root_dir + os.path.sep, ''))NEWLINE        elif os.path.isdir(path):NEWLINE            files.extend(os.path.join(root, f).replace(root_dir + os.path.sep, '')NEWLINE                         for root, _, fs in os.walk(path) for f in fs)NEWLINE        elif os.path.isfile(path):NEWLINE            files.append(path.replace(root_dir + os.path.sep, ''))NEWLINE        else:NEWLINE            # Files compared to the globs use / as separator independently of the osNEWLINE            glob_files = [f.replace(root_dir + os.path.sep, '')NEWLINE                          for f in glob(path)]NEWLINE            if not glob_files:NEWLINE                log.error('invalid recipe path: {}'.format(path))NEWLINE            files.extend(glob_files)NEWLINE    files = [f.replace(os.path.sep, '/') for f in files]NEWLINE    return filesNEWLINENEWLINENEWLINEdef find_recipe(path):NEWLINE    """recurse through a folder, locating meta.yaml. Raises error if more than one is found.NEWLINENEWLINE    Returns folder containing meta.yaml, to be built.NEWLINENEWLINE    If we have a base level meta.yaml and other supplemental ones, use that first"""NEWLINE    if os.path.isfile(path) and os.path.basename(path) in ["meta.yaml", "conda.yaml"]:NEWLINE        return os.path.dirname(path)NEWLINE    results = rec_glob(path, ["meta.yaml", "conda.yaml"])NEWLINE    if len(results) > 1:NEWLINE        base_recipe = os.path.join(path, "meta.yaml")NEWLINE        if base_recipe in results:NEWLINE            get_logger(__name__).warn("Multiple meta.yaml files found. 
"NEWLINE "The meta.yaml file in the base directory "NEWLINE "will be used.")NEWLINE results = [base_recipe]NEWLINE else:NEWLINE raise IOError("More than one meta.yaml files found in %s" % path)NEWLINE elif not results:NEWLINE raise IOError("No meta.yaml or conda.yaml files found in %s" % path)NEWLINE return results[0]NEWLINENEWLINENEWLINEclass LoggingContext(object):NEWLINE loggers = ['conda', 'binstar', 'install', 'conda.install', 'fetch', 'conda.instructions',NEWLINE 'fetch.progress', 'print', 'progress', 'dotupdate', 'stdoutlog', 'requests',NEWLINE 'conda.core.package_cache', 'conda.plan', 'conda.gateways.disk.delete']NEWLINENEWLINE def __init__(self, level=logging.WARN, handler=None, close=True):NEWLINE self.level = levelNEWLINE self.old_levels = {}NEWLINE self.handler = handlerNEWLINE self.close = closeNEWLINENEWLINE def __enter__(self):NEWLINE for logger in LoggingContext.loggers:NEWLINE log = logging.getLogger(logger)NEWLINE self.old_levels[logger] = log.levelNEWLINE log.setLevel(self.level if ('install' not in logger orNEWLINE self.level < logging.INFO) else self.level + 10)NEWLINE if self.handler:NEWLINE self.logger.addHandler(self.handler)NEWLINENEWLINE def __exit__(self, et, ev, tb):NEWLINE for logger, level in self.old_levels.items():NEWLINE logging.getLogger(logger).setLevel(level)NEWLINE if self.handler:NEWLINE self.logger.removeHandler(self.handler)NEWLINE if self.handler and self.close:NEWLINE self.handler.close()NEWLINE # implicit return of None => don't swallow exceptionsNEWLINENEWLINENEWLINEdef get_installed_packages(path):NEWLINE '''NEWLINE Scan all json files in 'path' and return a dictionary with their contents.NEWLINE Files are assumed to be in 'index.json' format.NEWLINE '''NEWLINE installed = dict()NEWLINE for filename in glob(os.path.join(path, 'conda-meta', '*.json')):NEWLINE with open(filename) as file:NEWLINE data = json.load(file)NEWLINE installed[data['name']] = dataNEWLINE return installedNEWLINENEWLINENEWLINEdef _convert_lists_to_sets(_dict):NEWLINE for k, v in _dict.items():NEWLINE if hasattr(v, 'keys'):NEWLINE _dict[k] = HashableDict(_convert_lists_to_sets(v))NEWLINE elif hasattr(v, '__iter__') and not isinstance(v, string_types):NEWLINE _dict[k] = sorted(list(set(v)))NEWLINE return _dictNEWLINENEWLINENEWLINEclass HashableDict(dict):NEWLINE """use hashable frozen dictionaries for resources and resource types so that they can be in setsNEWLINE """NEWLINE def __init__(self, *args, **kwargs):NEWLINE super(HashableDict, self).__init__(*args, **kwargs)NEWLINE self = _convert_lists_to_sets(self)NEWLINENEWLINE def __hash__(self):NEWLINE return hash(json.dumps(self, sort_keys=True))NEWLINENEWLINENEWLINEdef represent_hashabledict(dumper, data):NEWLINE value = []NEWLINENEWLINE for item_key, item_value in data.items():NEWLINE node_key = dumper.represent_data(item_key)NEWLINE node_value = dumper.represent_data(item_value)NEWLINENEWLINE value.append((node_key, node_value))NEWLINENEWLINE return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)NEWLINENEWLINENEWLINEyaml.add_representer(HashableDict, represent_hashabledict)NEWLINENEWLINENEWLINE# http://stackoverflow.com/a/10743550/1170370NEWLINE@contextlib.contextmanagerNEWLINEdef capture():NEWLINE import sysNEWLINE oldout, olderr = sys.stdout, sys.stderrNEWLINE try:NEWLINE out = [StringIO(), StringIO()]NEWLINE sys.stdout, sys.stderr = outNEWLINE yield outNEWLINE finally:NEWLINE sys.stdout, sys.stderr = oldout, olderrNEWLINE out[0] = out[0].getvalue()NEWLINE out[1] = out[1].getvalue()NEWLINENEWLINENEWLINE# copied 
from conda; added in 4.3, not currently part of exported functionalityNEWLINE@contextlib.contextmanagerNEWLINEdef env_var(name, value, callback=None):NEWLINE # NOTE: will likely want to call reset_context() when using this function, so passNEWLINE # it as callbackNEWLINE name, value = str(name), str(value)NEWLINE saved_env_var = os.environ.get(name)NEWLINE try:NEWLINE os.environ[name] = valueNEWLINE if callback:NEWLINE callback()NEWLINE yieldNEWLINE finally:NEWLINE if saved_env_var:NEWLINE os.environ[name] = saved_env_varNEWLINE else:NEWLINE del os.environ[name]NEWLINE if callback:NEWLINE callback()NEWLINENEWLINENEWLINEdef collect_channels(config, is_host=False):NEWLINE urls = [url_path(config.croot)] + get_rc_urls() + ['local', ]NEWLINE if config.channel_urls:NEWLINE urls.extend(config.channel_urls)NEWLINE # defaults has a very limited set of repo urls. Omit it from the URL list soNEWLINE # that it doesn't fail.NEWLINE if config.is_cross and is_host:NEWLINE urls.remove('defaults')NEWLINE urls.remove('local')NEWLINE return urlsNEWLINENEWLINENEWLINEdef trim_empty_keys(dict_):NEWLINE to_remove = set()NEWLINE negative_means_empty = ('final', 'noarch_python')NEWLINE for k, v in dict_.items():NEWLINE if hasattr(v, 'keys'):NEWLINE trim_empty_keys(v)NEWLINE # empty lists and empty strings, and None are always empty.NEWLINE if v == list() or v == '' or v is None or v == dict():NEWLINE to_remove.add(k)NEWLINE # other things that evaluate as False may not be "empty" - things can be manually set toNEWLINE # false, and we need to keep that setting.NEWLINE if not v and k in negative_means_empty:NEWLINE to_remove.add(k)NEWLINE for k in to_remove:NEWLINE del dict_[k]NEWLINENEWLINENEWLINEdef conda_43():NEWLINE """Conda 4.3 broke compatibility in lots of new fun and exciting ways. This function is forNEWLINE changing conda-build's behavior when conda 4.3 or higher is installed."""NEWLINE return LooseVersion(conda_version) >= LooseVersion('4.3')NEWLINENEWLINENEWLINEdef _increment(version, alpha_ver):NEWLINE try:NEWLINE if alpha_ver:NEWLINE suffix = 'a'NEWLINE else:NEWLINE suffix = '.0a0'NEWLINE last_version = str(int(version) + 1) + suffixNEWLINE except ValueError:NEWLINE last_version = chr(ord(version) + 1)NEWLINE return last_versionNEWLINENEWLINENEWLINEdef apply_pin_expressions(version, min_pin='x.x.x.x.x.x.x', max_pin='x'):NEWLINE pins = [len(p.split('.')) if p else None for p in (min_pin, max_pin)]NEWLINE parsed_version = VersionOrder(version).version[1:]NEWLINE nesting_position = NoneNEWLINE flat_list = []NEWLINE for idx, item in enumerate(parsed_version):NEWLINE if isinstance(item, list):NEWLINE nesting_position = idxNEWLINE flat_list.extend(item)NEWLINE else:NEWLINE flat_list.append(item)NEWLINE versions = ['', '']NEWLINE # first idx is lower bound pin; second is upper bound pin.NEWLINE # pin value is number of places to pin.NEWLINE for p_idx, pin in enumerate(pins):NEWLINE if pin:NEWLINE # flat_list is the blown-out representation of the versionNEWLINE for v_idx, v in enumerate(flat_list[:pin]):NEWLINE # upper bound pinNEWLINE if p_idx == 1 and v_idx == pin - 1:NEWLINE # is the last place an alphabetic character? 
OpenSSL, JPEGNEWLINE alpha_ver = str(flat_list[min(pin, len(flat_list) - 1)]).isalpha()NEWLINE v = _increment(v, alpha_ver)NEWLINE versions[p_idx] += str(v)NEWLINE if v_idx != nesting_position:NEWLINE versions[p_idx] += '.'NEWLINE if versions[p_idx][-1] == '.':NEWLINE versions[p_idx] = versions[p_idx][:-1]NEWLINE if versions[0]:NEWLINE versions[0] = '>=' + versions[0]NEWLINE if versions[1]:NEWLINE versions[1] = '<' + versions[1]NEWLINE return ','.join([v for v in versions if v])NEWLINENEWLINENEWLINEdef filter_files(files_list, prefix, filter_patterns=('(.*[\\\\/])?\.git[\\\\/].*',NEWLINE '(.*[\\\\/])?\.git$',NEWLINE '(.*)?\.DS_Store.*',NEWLINE '(.*)?\.gitignore',NEWLINE 'conda-meta.*',NEWLINE '(.*)?\.gitmodules')):NEWLINE """Remove things like .git from the list of files to be copied"""NEWLINE for pattern in filter_patterns:NEWLINE r = re.compile(pattern)NEWLINE files_list = set(files_list) - set(filter(r.match, files_list))NEWLINE return [f.replace(prefix + os.path.sep, '') for f in files_listNEWLINE if not os.path.isdir(os.path.join(prefix, f)) orNEWLINE os.path.islink(os.path.join(prefix, f))]NEWLINENEWLINENEWLINEdef rm_rf(path, config=None):NEWLINE if on_win:NEWLINE # native windows delete is potentially much fasterNEWLINE try:NEWLINE if os.path.isfile(path):NEWLINE subprocess.check_call('del {}'.format(path), shell=True)NEWLINE elif os.path.isdir(path):NEWLINE subprocess.check_call('rd /s /q {}'.format(path), shell=True)NEWLINE else:NEWLINE passNEWLINE except subprocess.CalledProcessError:NEWLINE passNEWLINE conda_log_level = logging.WARNNEWLINE if config and config.debug:NEWLINE conda_log_level = logging.DEBUGNEWLINE with LoggingContext(conda_log_level):NEWLINE _rm_rf(path)NEWLINENEWLINENEWLINE# https://stackoverflow.com/a/31459386/1170370NEWLINEclass LessThanFilter(logging.Filter):NEWLINE def __init__(self, exclusive_maximum, name=""):NEWLINE super(LessThanFilter, self).__init__(name)NEWLINE self.max_level = exclusive_maximumNEWLINENEWLINE def filter(self, record):NEWLINE # non-zero return means we log this messageNEWLINE return 1 if record.levelno < self.max_level else 0NEWLINENEWLINENEWLINEclass GreaterThanFilter(logging.Filter):NEWLINE def __init__(self, exclusive_minimum, name=""):NEWLINE super(GreaterThanFilter, self).__init__(name)NEWLINE self.min_level = exclusive_minimumNEWLINENEWLINE def filter(self, record):NEWLINE # non-zero return means we log this messageNEWLINE return 1 if record.levelno > self.min_level else 0NEWLINENEWLINENEWLINE# unclutter logs - show messages only onceNEWLINEclass DuplicateFilter(logging.Filter):NEWLINE def __init__(self):NEWLINE self.msgs = set()NEWLINENEWLINE def filter(self, record):NEWLINE log = record.msg not in self.msgsNEWLINE self.msgs.add(record.msg)NEWLINE return int(log)NEWLINENEWLINENEWLINEdedupe_filter = DuplicateFilter()NEWLINEinfo_debug_stdout_filter = LessThanFilter(logging.WARNING)NEWLINEwarning_error_stderr_filter = GreaterThanFilter(logging.INFO)NEWLINENEWLINENEWLINEdef get_logger(name, level=logging.INFO, dedupe=True, add_stdout_stderr_handlers=True):NEWLINE config_file = cc_conda_build.get('log_config_file')NEWLINE # by loading config file here, and then only adding handlers later, peopleNEWLINE # should be able to override conda-build's logger settings here.NEWLINE if config_file:NEWLINE with open(config_file) as f:NEWLINE config_dict = yaml.safe_load(f)NEWLINE logging.config.dictConfig(config_dict)NEWLINE level = config_dict.get('loggers', {}).get(name, {}).get('level', level)NEWLINE log = logging.getLogger(name)NEWLINE 
log.setLevel(level)NEWLINE if dedupe:NEWLINE log.addFilter(dedupe_filter)NEWLINENEWLINE # these are defaults. They can be overridden by configuring a log config yaml file.NEWLINE if not log.handlers and add_stdout_stderr_handlers:NEWLINE stdout_handler = logging.StreamHandler(sys.stdout)NEWLINE stderr_handler = logging.StreamHandler(sys.stderr)NEWLINE stdout_handler.addFilter(info_debug_stdout_filter)NEWLINE stderr_handler.addFilter(warning_error_stderr_filter)NEWLINE stdout_handler.setLevel(level)NEWLINE stderr_handler.setLevel(level)NEWLINE log.addHandler(stdout_handler)NEWLINE log.addHandler(stderr_handler)NEWLINE return logNEWLINENEWLINENEWLINEdef _equivalent(base_value, value, path):NEWLINE equivalent = value == base_valueNEWLINE if isinstance(value, string_types) and isinstance(base_value, string_types):NEWLINE if not os.path.isabs(base_value):NEWLINE base_value = os.path.abspath(os.path.normpath(os.path.join(path, base_value)))NEWLINE if not os.path.isabs(value):NEWLINE value = os.path.abspath(os.path.normpath(os.path.join(path, value)))NEWLINE equivalent |= base_value == valueNEWLINE return equivalentNEWLINENEWLINENEWLINEdef merge_or_update_dict(base, new, path, merge, raise_on_clobber=False):NEWLINE log = get_logger(__name__)NEWLINE for key, value in new.items():NEWLINE base_value = base.get(key, value)NEWLINE if hasattr(value, 'keys'):NEWLINE base_value = merge_or_update_dict(base_value, value, path, merge,NEWLINE raise_on_clobber=raise_on_clobber)NEWLINE base[key] = base_valueNEWLINE elif hasattr(value, '__iter__') and not isinstance(value, string_types):NEWLINE if merge:NEWLINE if base_value and base_value != value:NEWLINE base_value.extend(value)NEWLINE try:NEWLINE base[key] = list(set(base_value))NEWLINE except TypeError:NEWLINE base[key] = base_valueNEWLINE else:NEWLINE base[key] = valueNEWLINE else:NEWLINE if (base_value and merge and not _equivalent(base_value, value, path) andNEWLINE raise_on_clobber):NEWLINE log.debug('clobbering key {} (original value {}) with value {}'.format(key,NEWLINE base_value, value))NEWLINE base[key] = valueNEWLINE return baseNEWLINENEWLINENEWLINEdef merge_dicts_of_lists(dol1, dol2):NEWLINE '''NEWLINE From Alex Martelli: https://stackoverflow.com/a/1495821/3257826NEWLINE '''NEWLINE keys = set(dol1).union(dol2)NEWLINE no = []NEWLINE return dict((k, dol1.get(k, no) + dol2.get(k, no)) for k in keys)NEWLINENEWLINENEWLINEdef prefix_files(prefix):NEWLINE '''NEWLINE Returns a set of all files in prefix.NEWLINE '''NEWLINE res = set()NEWLINE for root, dirs, files in os.walk(prefix):NEWLINE for fn in files:NEWLINE res.add(join(root, fn)[len(prefix) + 1:])NEWLINE for dn in dirs:NEWLINE path = join(root, dn)NEWLINE if islink(path):NEWLINE res.add(path[len(prefix) + 1:])NEWLINE res = set(expand_globs(res, prefix))NEWLINE return resNEWLINENEWLINENEWLINEdef mmap_mmap(fileno, length, tagname=None, flags=0, prot=mmap_PROT_READ | mmap_PROT_WRITE,NEWLINE access=None, offset=0):NEWLINE '''NEWLINE Hides the differences between mmap.mmap on Windows and Unix.NEWLINE Windows has `tagname`.NEWLINE Unix does not, but makes up for it with `flags` and `prot`.NEWLINE On both, the defaule value for `access` is determined from how the fileNEWLINE was opened so must not be passed in at all to get this default behaviourNEWLINE '''NEWLINE if on_win:NEWLINE if access:NEWLINE return mmap.mmap(fileno, length, tagname=tagname, access=access, offset=offset)NEWLINE else:NEWLINE return mmap.mmap(fileno, length, tagname=tagname)NEWLINE else:NEWLINE if access:NEWLINE return 
mmap.mmap(fileno, length, flags=flags, prot=prot, access=access, offset=offset)NEWLINE else:NEWLINE return mmap.mmap(fileno, length, flags=flags, prot=prot)NEWLINENEWLINENEWLINEdef remove_pycache_from_scripts(build_prefix):NEWLINE """Remove pip created pycache directory from bin or Scripts."""NEWLINE if on_win:NEWLINE scripts_path = os.path.join(build_prefix, 'Scripts')NEWLINE else:NEWLINE scripts_path = os.path.join(build_prefix, 'bin')NEWLINENEWLINE for entry in os.listdir(scripts_path):NEWLINE entry_path = os.path.join(scripts_path, entry)NEWLINE if os.path.isdir(entry_path) and entry.strip(os.sep) == '__pycache__':NEWLINE shutil.rmtree(entry_path)NEWLINENEWLINE elif os.path.isfile(entry_path) and entry_path.endswith('.pyc'):NEWLINE os.remove(entry_path)NEWLINENEWLINENEWLINEdef sort_list_in_nested_structure(dictionary, omissions=''):NEWLINE """Recurse through a nested dictionary and sort any lists that are found.NEWLINENEWLINE If the list that is found contains anything but strings, it is skippedNEWLINE as we can't compare lists containing different types. The omissions argumentNEWLINE allows for certain sections of the dictionary to be omitted from sorting.NEWLINE """NEWLINE for field, value in dictionary.items():NEWLINE if isinstance(value, dict):NEWLINE for key in value.keys():NEWLINE section = dictionary[field][key]NEWLINE if isinstance(section, dict):NEWLINE sort_list_in_nested_structure(section)NEWLINE elif (isinstance(section, list) andNEWLINE '{}/{}' .format(field, key) not in omissions andNEWLINE all(isinstance(item, str) for item in section)):NEWLINE section.sort()NEWLINENEWLINE # there's a possibility for nested lists containing dictionariesNEWLINE # in this case we recurse until we find a list to sortNEWLINE elif isinstance(value, list):NEWLINE for element in value:NEWLINE if isinstance(element, dict):NEWLINE sort_list_in_nested_structure(element)NEWLINE try:NEWLINE value.sort()NEWLINE except TypeError:NEWLINE passNEWLINENEWLINENEWLINE# group one: package nameNEWLINE# group two: version (allows _, +, . in version)NEWLINE# group three: build string - mostly not used here. 
Match primarily mattersNEWLINE# to specify when not to add .*NEWLINENEWLINE# if you are seeing mysterious unsatisfiable errors, with the package you're building being theNEWLINE# unsatisfiable part, then you probably need to update this regex.NEWLINENEWLINEspec_needing_star_re = re.compile("([0-9a-zA-Z\.\-\_]+)\s+([0-9a-zA-Z\.\+\_]+)(\s+[0-9a-zA-Z\.\_]+)?") # NOQANEWLINEspec_ver_needing_star_re = re.compile("^([0-9a-zA-Z\.]+)$")NEWLINENEWLINENEWLINEdef ensure_valid_spec(spec):NEWLINE if isinstance(spec, MatchSpec):NEWLINE if (hasattr(spec, 'version') and spec.version andNEWLINE spec_ver_needing_star_re.match(str(spec.version))):NEWLINE if str(spec.name) not in ('python', 'numpy') or str(spec.version) != 'x.x':NEWLINE spec = MatchSpec("{} {}".format(str(spec.name), str(spec.version) + '.*'))NEWLINE else:NEWLINE match = spec_needing_star_re.match(spec)NEWLINE # ignore exact pins (would be a 3rd group)NEWLINE if match and not match.group(3):NEWLINE if match.group(1) in ('python', 'numpy') and match.group(2) == 'x.x':NEWLINE spec = spec_needing_star_re.sub(r"\1 \2", spec)NEWLINE else:NEWLINE if "*" not in spec:NEWLINE spec = spec_needing_star_re.sub(r"\1 \2.*", spec)NEWLINE return specNEWLINENEWLINENEWLINEdef insert_variant_versions(metadata, env):NEWLINE reqs = metadata.get_value('requirements/' + env)NEWLINE for key, val in metadata.config.variant.items():NEWLINE regex = re.compile(r'^(%s)(?:\s*$)' % key.replace('_', '[-_]'))NEWLINE matches = [regex.match(pkg) for pkg in reqs]NEWLINE if any(matches):NEWLINE for i, x in enumerate(matches):NEWLINE if x:NEWLINE del reqs[i]NEWLINE reqs.insert(i, ensure_valid_spec(' '.join((x.group(1), val))))NEWLINENEWLINE xx_re = re.compile("([0-9a-zA-Z\.\-\_]+)\s+x\.x")NEWLINENEWLINE matches = [xx_re.match(pkg) for pkg in reqs]NEWLINE if any(matches):NEWLINE for i, x in enumerate(matches):NEWLINE if x:NEWLINE del reqs[i]NEWLINE reqs.insert(i, ensure_valid_spec(' '.join((x.group(1),NEWLINE metadata.config.variant.get(x.group(1))))))NEWLINE metadata.meta['requirements'][env] = reqsNEWLINE
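A few of the helpers above are easiest to understand from their outputs. The usage sketch below assumes this module is importable as conda_build.utils (inferred from the conda_build imports above); the expected strings are derived by tracing the code rather than taken from documentation.

# Assumed import path; requires conda/conda-build to be installed.
from conda_build.utils import apply_pin_expressions, ensure_list, comma_join

# Pin major.minor on the lower bound and major on the upper bound.
print(apply_pin_expressions('1.2.3', min_pin='x.x', max_pin='x'))
# expected: '>=1.2,<2.0a0' -- the upper bound gets a .0a0 suffix via _increment

print(ensure_list('numpy'))         # ['numpy']
print(ensure_list(None))            # []
print(comma_join(['a', 'b', 'c']))  # 'a, b, and c'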
import itertools as itNEWLINEimport osNEWLINEfrom typing import UnionNEWLINENEWLINEimport pandas as pdNEWLINEfrom memory_profiler import profileNEWLINEfrom opyenxes.data_out.XesXmlSerializer import XesXmlSerializerNEWLINEfrom opyenxes.extension.std.XLifecycleExtension import XLifecycleExtension as xlcNEWLINEfrom opyenxes.factory.XFactory import XFactoryNEWLINEfrom pm4py.objects.conversion.log import converterNEWLINEfrom pm4py.objects.log.exporter.xes import exporterNEWLINEfrom pm4py.objects.log.util import interval_lifecycleNEWLINENEWLINEfrom ..configuration import ConfigurationNEWLINEfrom ..readers.log_reader import LogReaderNEWLINENEWLINENEWLINEclass XesWriter(object):NEWLINE """NEWLINE This class writes a process log in .xes formatNEWLINE """NEWLINENEWLINE # @profile(stream=open('logs/memprof_XesWriter.log', 'a+'))NEWLINE def __init__(self, log: Union[LogReader, pd.DataFrame, list], settings: Configuration):NEWLINE if isinstance(log, pd.DataFrame):NEWLINE self.log = log.valuesNEWLINE elif isinstance(log, LogReader):NEWLINE self.log = log.dataNEWLINE elif isinstance(log, list):NEWLINE self.log = logNEWLINE else:NEWLINE raise Exception(f'Unimplemented type for {type(log)}')NEWLINE self.one_timestamp = settings.read_options.one_timestampNEWLINE self.column_names = settings.read_options.column_namesNEWLINE self.output_file = os.path.join(settings.output, settings.project_name + '.xes')NEWLINE # self.create_xes_file()NEWLINE self.create_xes_file_alternative()NEWLINENEWLINE # @profile(stream=open('logs/memprof_XesWriter.create_xes_file_alternative.log', 'a+'))NEWLINE def create_xes_file_alternative(self):NEWLINE log_df = pd.DataFrame(self.log)NEWLINE log_df.rename(columns={NEWLINE 'task': 'concept:name',NEWLINE 'caseid': 'case:concept:name',NEWLINE 'event_type': 'lifecycle:transition',NEWLINE 'user': 'org:resource',NEWLINE 'end_timestamp': 'time:timestamp'NEWLINE }, inplace=True)NEWLINE log_df.drop(columns=['@@startevent_concept:name',NEWLINE '@@startevent_org:resource',NEWLINE '@@startevent_Activity',NEWLINE '@@startevent_Resource',NEWLINE '@@duration',NEWLINE 'case:variant',NEWLINE 'case:variant-index',NEWLINE 'case:creator',NEWLINE 'Activity',NEWLINE 'Resource'], inplace=True, errors='ignore')NEWLINENEWLINE log_interval = converter.apply(log_df, variant=converter.Variants.TO_EVENT_LOG)NEWLINE log_lifecycle = interval_lifecycle.to_lifecycle(log_interval)NEWLINENEWLINE exporter.apply(log_lifecycle, self.output_file)NEWLINENEWLINE def create_xes_file(self):NEWLINE csv_mapping = {v: k for k, v in self.column_names.items()}NEWLINE log = XFactory.create_log()NEWLINE data = sorted(self.log, key=lambda x: x['caseid'])NEWLINE for key, group in it.groupby(data, key=lambda x: x['caseid']):NEWLINE sort_key = ('end_timestamp' if self.one_timestamp else 'start_timestamp')NEWLINE csv_trace = sorted(list(group), key=lambda x: x[sort_key]) # TODO: why is that "csv_trace" in xes function?NEWLINE events = list()NEWLINE for line in csv_trace:NEWLINE events.extend(self.convert_line_in_event(csv_mapping, line))NEWLINE trace_attribute = XFactory.create_attribute_literal('concept:name', key)NEWLINE trace_attribute_map = XFactory.create_attribute_map()NEWLINE trace_attribute_map[trace_attribute.get_key()] = trace_attributeNEWLINE trace = XFactory.create_trace(attribute=trace_attribute_map)NEWLINE for event in events:NEWLINE trace.append(event)NEWLINE log.append(trace)NEWLINE # log.set_info(classifier, info)NEWLINENEWLINE # Save log in xes formatNEWLINE with open(self.output_file, "w") as file:NEWLINE 
XesXmlSerializer().serialize(log, file)NEWLINENEWLINE def convert_line_in_event(self, csv_mapping, event):NEWLINE """NEWLINE ParametersNEWLINE ----------NEWLINE csv_mapping : dictionary with the type of all attribute.NEWLINE event : dict with the attribute in string formatNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE events : An XEvent with the respective attributeNEWLINENEWLINE """NEWLINE transitions = [{'column': 'Complete Timestamp',NEWLINE 'value': xlc.StandardModel.COMPLETE,NEWLINE 'skiped': 'Start Timestamp'}]NEWLINE if not self.one_timestamp:NEWLINE transitions.insert(0, {'column': 'Start Timestamp',NEWLINE 'value': xlc.StandardModel.START,NEWLINE 'skiped': 'Complete Timestamp'})NEWLINE # TODO: Add the use of extensions and optimize codeNEWLINE events = list()NEWLINE for transition in transitions:NEWLINE attribute_map = XFactory.create_attribute_map()NEWLINE for attr_type, attr_value in event.items():NEWLINE attribute_type = csv_mapping[attr_type]NEWLINE if attribute_type in ["Activity", "Resource"]:NEWLINE if attribute_type == "Activity":NEWLINE attribute = XFactory.create_attribute_literal('concept:name', attr_value, extension=None)NEWLINE attribute_map[attribute.get_key()] = attributeNEWLINE if attribute_type == "Resource":NEWLINE attribute = XFactory.create_attribute_literal('org:resource', attr_value, extension=None)NEWLINE attribute_map[attribute.get_key()] = attributeNEWLINE elif attribute_type == transition['column']:NEWLINE attribute = XFactory.create_attribute_timestamp("time:timestamp", attr_value, extension=None)NEWLINE attribute_map[attribute.get_key()] = attributeNEWLINE attribute2 = XFactory.create_attribute_literal('lifecycle:transition', transition['value'],NEWLINE extension=xlc)NEWLINE attribute_map[attribute2.get_key()] = attribute2NEWLINE elif attribute_type in ['Case ID', 'Event ID', transition['skiped']]:NEWLINE continueNEWLINE else:NEWLINE attribute = XFactory.create_attribute_discrete(attribute_type, int(attr_value))NEWLINE attribute_map[attribute.get_key()] = attributeNEWLINE events.append(XFactory.create_event(attribute_map))NEWLINE return eventsNEWLINE
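create_xes_file_alternative() above works by renaming the DataFrame columns to the identifiers pm4py expects (case:concept:name, concept:name, org:resource, time:timestamp) and then delegating conversion and serialization to pm4py. Here is a minimal sketch of that pipeline on a toy event table, using the same converter and exporter modules imported above; the data, the output file name, and the assumption of a compatible pm4py version are ours.

import pandas as pd
from pm4py.objects.conversion.log import converter
from pm4py.objects.log.exporter.xes import exporter

# Toy event table already using the standard pm4py column names.
toy_df = pd.DataFrame({
    'case:concept:name': ['c1', 'c1', 'c2'],
    'concept:name': ['register', 'approve', 'register'],
    'org:resource': ['alice', 'bob', 'alice'],
    'time:timestamp': pd.to_datetime(['2021-01-01 09:00',
                                      '2021-01-01 10:00',
                                      '2021-01-02 09:00']),
})

event_log = converter.apply(toy_df, variant=converter.Variants.TO_EVENT_LOG)
exporter.apply(event_log, 'toy_log.xes')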
import wxNEWLINENEWLINEfrom meerk40t.gui.scene.sceneconst import (NEWLINE RESPONSE_ABORT,NEWLINE RESPONSE_CHAIN,NEWLINE RESPONSE_CONSUME,NEWLINE)NEWLINEfrom meerk40t.gui.toolwidgets.toolwidget import ToolWidgetNEWLINEfrom meerk40t.svgelements import Ellipse, PathNEWLINENEWLINENEWLINEclass EllipseTool(ToolWidget):NEWLINE """NEWLINE Ellipse Drawing Tool.NEWLINENEWLINE Adds Circle with click and drag.NEWLINE """NEWLINENEWLINE def __init__(self, scene):NEWLINE ToolWidget.__init__(self, scene)NEWLINE self.start_position = NoneNEWLINE self.p1 = NoneNEWLINE self.p2 = NoneNEWLINENEWLINE def process_draw(self, gc: wx.GraphicsContext):NEWLINE if self.p1 is not None and self.p2 is not None:NEWLINE x0 = min(self.p1.real, self.p2.real)NEWLINE y0 = min(self.p1.imag, self.p2.imag)NEWLINE x1 = max(self.p1.real, self.p2.real)NEWLINE y1 = max(self.p1.imag, self.p2.imag)NEWLINE gc.SetPen(self.pen)NEWLINE gc.SetBrush(wx.TRANSPARENT_BRUSH)NEWLINE gc.DrawEllipse(x0, y0, x1 - x0, y1 - y0)NEWLINENEWLINE def event(self, window_pos=None, space_pos=None, event_type=None):NEWLINE response = RESPONSE_CHAINNEWLINE if event_type == "leftdown":NEWLINE self.scene.tool_active = TrueNEWLINE self.p1 = complex(space_pos[0], space_pos[1])NEWLINE response = RESPONSE_CONSUMENEWLINE elif event_type == "move":NEWLINE self.p2 = complex(space_pos[0], space_pos[1])NEWLINE self.scene.request_refresh()NEWLINE response = RESPONSE_CONSUMENEWLINE elif event_type == "leftup":NEWLINE self.scene.tool_active = FalseNEWLINE try:NEWLINE if self.p1 is None:NEWLINE returnNEWLINE self.p2 = complex(space_pos[0], space_pos[1])NEWLINE x0 = min(self.p1.real, self.p2.real)NEWLINE y0 = min(self.p1.imag, self.p2.imag)NEWLINE x1 = max(self.p1.real, self.p2.real)NEWLINE y1 = max(self.p1.imag, self.p2.imag)NEWLINE ellipse = Ellipse(NEWLINE (x1 + x0) / 2.0,NEWLINE (y1 + y0) / 2.0,NEWLINE abs(x0 - x1) / 2,NEWLINE abs(y0 - y1) / 2,NEWLINE stroke="blue",NEWLINE stroke_width=1000,NEWLINE )NEWLINE if not ellipse.is_degenerate():NEWLINE elements = self.scene.context.elementsNEWLINE node = elements.elem_branch.add(shape=ellipse, type="elem ellipse")NEWLINE elements.classify([node])NEWLINE self.p1 = NoneNEWLINE self.p2 = NoneNEWLINE except IndexError:NEWLINE passNEWLINE self.scene.request_refresh()NEWLINE response = RESPONSE_ABORTNEWLINE elif event_type == "lost":NEWLINE self.scene.tool_active = FalseNEWLINE return responseNEWLINE
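EllipseTool above keeps the drag corners as complex numbers and derives the ellipse's centre and radii from the axis-aligned box they span. The same arithmetic, pulled into a small helper that runs without wx or meerk40t (the helper name is ours):

def ellipse_from_corners(p1: complex, p2: complex):
    # Centre and half-extents of the rectangle spanned by the two drag corners,
    # mirroring the math in EllipseTool.event() on "leftup".
    x0, x1 = min(p1.real, p2.real), max(p1.real, p2.real)
    y0, y1 = min(p1.imag, p2.imag), max(p1.imag, p2.imag)
    return (x0 + x1) / 2.0, (y0 + y1) / 2.0, (x1 - x0) / 2.0, (y1 - y0) / 2.0


assert ellipse_from_corners(complex(10, 40), complex(30, 20)) == (20.0, 30.0, 10.0, 10.0)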
#!/usr/bin/env python3NEWLINENEWLINEfrom setuptools import setupNEWLINENEWLINEimport sysNEWLINEimport osNEWLINENEWLINEif sys.version_info[0] != 3:NEWLINE raise Exception("This Program is for Python **VERSION 3** only!")NEWLINENEWLINE__version__ = '0.1.4'NEWLINEhere = os.path.abspath(os.path.dirname(__file__))NEWLINENEWLINE# Get the long description from the relevant fileNEWLINEwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:NEWLINE long_description = f.read()NEWLINENEWLINEsetup(name='gglsbl3',NEWLINE version=__version__,NEWLINE description="Client library for Google Safe Browsing API",NEWLINE classifiers=[NEWLINE 'Programming Language :: Python :: 3',NEWLINE 'Programming Language :: Python :: 3.3',NEWLINE 'Programming Language :: Python :: 3.4',NEWLINE 'Programming Language :: Python :: 3.5',NEWLINE 'Programming Language :: Python :: Implementation :: CPython',NEWLINE 'Intended Audience :: Developers',NEWLINE 'Topic :: Internet',NEWLINE 'Topic :: Software Development :: Libraries :: Python Modules',NEWLINE ],NEWLINE long_description=long_description,NEWLINE keywords='gglsbl3 gglsbl safebrowsing google-safe-browsing googlesafebrowsing',NEWLINE author='Stefan Kuntz',NEWLINE author_email='Stefan.github@gmail.com',NEWLINE url='https://github.com/Stefan-Code/gglsbl3',NEWLINE license='Apache2',NEWLINE packages=['gglsbl3', 'gglsbl3.util'],NEWLINE install_requires=['argparse', 'python3-protobuf', ],NEWLINE scripts=['scripts/gglsbl_client.py'],NEWLINE )NEWLINE
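The interpreter check above only runs when setup.py itself executes; declaring the constraint in the package metadata lets pip reject unsupported interpreters before anything is installed. A sketch of that alternative using setuptools' python_requires; the minimum version shown merely mirrors the classifiers above and is an assumption, not part of the original setup script.

from setuptools import setup

setup(
    name='gglsbl3',
    version='0.1.4',
    packages=['gglsbl3', 'gglsbl3.util'],
    # pip enforces this at install time, so the runtime sys.version check
    # becomes a fallback for people running setup.py directly.
    python_requires='>=3.3',
)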
# Copyright (C) 2018-2021 Intel CorporationNEWLINE# SPDX-License-Identifier: Apache-2.0NEWLINENEWLINEimport numpy as npNEWLINENEWLINEfrom openvino.tools.mo.ops.splice import SpliceNEWLINEfrom openvino.tools.mo.front.common.partial_infer.utils import int64_arrayNEWLINEfrom openvino.tools.mo.graph.graph import Graph, NodeNEWLINEfrom openvino.tools.mo.middle.replacement import MiddleReplacementPatternNEWLINEfrom openvino.tools.mo.ops.assign import AssignNEWLINEfrom openvino.tools.mo.ops.concat import ConcatNEWLINEfrom openvino.tools.mo.ops.const import ConstNEWLINEfrom openvino.tools.mo.ops.crop import CropNEWLINEfrom openvino.tools.mo.ops.read_value import ReadValueNEWLINEfrom openvino.tools.mo.ops.result import ResultNEWLINEfrom openvino.tools.mo.utils.error import ErrorNEWLINENEWLINENEWLINEclass ReplaceMemoryOffsetNodePattern(MiddleReplacementPattern):NEWLINE """NEWLINE Replace MemoryOffset with SpliceNEWLINE """NEWLINE enabled = TrueNEWLINENEWLINE def run_before(self):NEWLINE from openvino.tools.mo.middle.RemoveDuplicationMemory import RemoveMemoryDuplicationPatternNEWLINE return [RemoveMemoryDuplicationPattern]NEWLINENEWLINE def run_after(self):NEWLINE from openvino.tools.mo.middle.split_tdnn_memoryoffset import SplitTdnnMemoryOffsetNEWLINE return [SplitTdnnMemoryOffset]NEWLINENEWLINE @staticmethodNEWLINE def pattern():NEWLINE return dict(NEWLINE nodes=[('op', dict(op='MemoryOffset', has_default=False))],NEWLINE edges=[])NEWLINENEWLINE @staticmethodNEWLINE def replace_pattern(graph: Graph, match: dict):NEWLINE node = match['op']NEWLINE pair_node = Node(graph, node.pair_name)NEWLINENEWLINE if pair_node.has_default:NEWLINE returnNEWLINENEWLINE if node.in_port(0).get_source() is not None:NEWLINE input_node_out_port = node.in_port(0).get_source()NEWLINE op_output_id = node.out_port(0).get_destination().node.idNEWLINE out_node_in_ports = pair_node.out_port(0).get_destinations()NEWLINE else:NEWLINE input_node_out_port = pair_node.in_port(0).get_source()NEWLINE op_output_id = pair_node.out_port(0).get_destination().node.idNEWLINE out_node_in_ports = node.out_port(0).get_destinations()NEWLINENEWLINE in_shape = input_node_out_port.data.get_shape().copy()NEWLINENEWLINE node_id = node.idNEWLINE node_name = node.nameNEWLINE node_t = node.tNEWLINENEWLINE splice = Splice(graph, {'name': node_name,NEWLINE 'id': node_id,NEWLINE 'context': int64_array(range(node_t, 1))NEWLINE if node_t < 0 else int64_array(range(0, node_t+1))}).create_node()NEWLINE splice.in_port(0).connect(input_node_out_port)NEWLINENEWLINE # offset of Crop will be 0 (first element) if node_t < 0 and in_shape[1]*node_t (last element) if node_t > 0NEWLINE crop = Crop(graph, {'name': 'Splice_Crop',NEWLINE 'axis': int64_array([1]),NEWLINE 'offset': int64_array([max(0, in_shape[1] * node_t)]),NEWLINE 'dim': int64_array([in_shape[1]])}).create_node()NEWLINENEWLINE splice.out_port(0).connect(crop.in_port(0))NEWLINE splice.out_port(0).data.set_shape(int64_array([in_shape[0], (abs(node_t) + 1) * in_shape[1]]))NEWLINENEWLINE outs = input_node_out_port.get_destinations()NEWLINE for in_port in outs:NEWLINE out_ = in_port.nodeNEWLINE if out_.op == 'Concat' and out_ == out_node_in_ports[0].node:NEWLINE crop_input = Crop(graph, {'name': 'Splice_Crop',NEWLINE 'axis': int64_array([1]),NEWLINE 'offset': int64_array([-min(0, in_shape[1] * node_t)]),NEWLINE 'dim': int64_array([in_shape[1]])}).create_node()NEWLINE splice.out_port(0).connect(crop_input.in_port(0))NEWLINENEWLINE in_port.disconnect()NEWLINE crop_input.out_port(0).connect(in_port)NEWLINE 
crop_input.out_port(0).data.set_shape(in_shape)NEWLINENEWLINE for dest_port in out_node_in_ports:NEWLINE dest_port.connect(crop.out_port(0))NEWLINENEWLINE graph.remove_node(op_output_id)NEWLINE graph.remove_node(node.id)NEWLINE graph.remove_node(pair_node.id)NEWLINENEWLINENEWLINEclass ReplaceMemoryOffsetWithMemoryNodePattern(MiddleReplacementPattern):NEWLINE """NEWLINE Replace MemoryOffset with Memory if IfDefined used with it to avoid cyclesNEWLINE """NEWLINE enabled = TrueNEWLINE force_shape_inference = TrueNEWLINENEWLINE def run_before(self):NEWLINE from openvino.tools.mo.middle.RemoveDuplicationMemory import RemoveMemoryDuplicationPatternNEWLINE return [RemoveMemoryDuplicationPattern]NEWLINENEWLINE @staticmethodNEWLINE def pattern():NEWLINE return dict(NEWLINE nodes=[('op', dict(op='MemoryOffset', has_default=True))],NEWLINE edges=[])NEWLINENEWLINE @staticmethodNEWLINE def replace_pattern(graph: Graph, match: dict):NEWLINE node = match['op']NEWLINE pair_node = Node(graph, node.pair_name)NEWLINENEWLINE if node.t >= 0:NEWLINE raise Error('Does not support IfDefined with t > 0')NEWLINENEWLINE if node.in_port(0).get_source() is not None:NEWLINE input_port = node.in_port(0).get_source()NEWLINE op_output_id = node.out_port(0).get_destination().node.idNEWLINE out_port = pair_node.out_port(0)NEWLINE node_name = node.nameNEWLINE pair_name = pair_node.nameNEWLINE else:NEWLINE input_port = pair_node.in_port(0).get_source()NEWLINE op_output_id = pair_node.out_port(0).get_destination().node.idNEWLINE out_port = node.out_port(0)NEWLINE node_name = pair_node.nameNEWLINE pair_name = node.nameNEWLINENEWLINE in_shape = input_port.data.get_shape()NEWLINE node_t = abs(node.t)NEWLINENEWLINE init_value_memory_out = Const(graph, {'name': 'init_value_' + pair_name,NEWLINE 'value': np.zeros(int64_array([in_shape[0], in_shape[1]*node_t])),NEWLINE 'shape': int64_array([in_shape[0], in_shape[1]*node_t])}).create_node()NEWLINE memory_out = ReadValue(graph, {'name': pair_name, 'variable_id': node_name+pair_name}).create_node()NEWLINE init_value_memory_out.out_port(0).connect(memory_out.in_port(0))NEWLINENEWLINE if node_t > 1:NEWLINE crop_concat = Crop(graph, {'name': 'Memory_crop', 'dim': np.array([in_shape[1]*(node_t-1)]),NEWLINE 'offset': np.array([in_shape[1]]), 'axis': np.array([1])}).create_node()NEWLINE memory_out.out_port(0).connect(crop_concat.in_port(0))NEWLINE concat = Concat(graph, {'name': 'Memory_concat'}).create_node()NEWLINE concat.add_sequence_of_ports('in', range(2))NEWLINE crop_concat.out_port(0).connect(concat.in_port(0))NEWLINE concat.in_port(1).connect(input_port)NEWLINENEWLINE memory_in = Assign(graph, {'name': node_name, 'variable_id': node_name + pair_name}).create_node()NEWLINE concat.out_port(0).connect(memory_in.in_port(0))NEWLINE out = Result(graph, {'name': 'Memory_output'}).create_node()NEWLINE memory_in.out_port(0).connect(out.in_port(0))NEWLINENEWLINE crop_out = Crop(graph, {'name': 'Memory_crop_out', 'dim': np.array([in_shape[1]]),NEWLINE 'offset': np.array([0]), 'axis': np.array([1])}).create_node()NEWLINE memory_out.out_port(0).connect(crop_out.in_port(0))NEWLINE out_port.get_connection().set_source(crop_out.out_port(0))NEWLINE else:NEWLINE memory_in = Assign(graph, {'name': node_name, 'variable_id': node_name + pair_name}).create_node()NEWLINE memory_in.in_port(0).connect(input_port)NEWLINE out = Result(graph, {'name': 'Memory_output'}).create_node()NEWLINE memory_in.out_port(0).connect(out.in_port(0))NEWLINE 
out_port.get_connection().set_source(memory_out.out_port(0))NEWLINENEWLINE graph.remove_node(op_output_id)NEWLINE graph.remove_node(node.id)NEWLINE graph.remove_node(pair_node.id)NEWLINE
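In ReplaceMemoryOffsetNodePattern above, the Crop that follows the Splice selects one frame-sized block of the spliced tensor: offset 0 (the oldest frame) when t < 0, and in_shape[1] * t (the newest frame) when t > 0. The same offset arithmetic on plain NumPy arrays, outside any OpenVINO graph; the sizes are illustrative.

import numpy as np

N, C = 2, 3                                   # batch size and per-frame feature size
for t in (-2, 2):
    spliced = np.arange(N * (abs(t) + 1) * C).reshape(N, (abs(t) + 1) * C)
    offset = max(0, C * t)                    # 0 for t < 0, C * t for t > 0
    crop = spliced[:, offset:offset + C]      # Crop with dim = C along axis 1
    assert crop.shape == (N, C)
    # t < 0 keeps the first block, t > 0 keeps the last block of the splice
    expected = spliced[:, :C] if t < 0 else spliced[:, -C:]
    assert np.array_equal(crop, expected)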
import os, timeNEWLINEimport numpy as npNEWLINEimport pybullet as pNEWLINEimport gymNEWLINEfrom gym import error, spaces, utilsNEWLINEfrom gym.utils import seedingNEWLINENEWLINEfrom .util import UtilNEWLINEimport pybullet_dataNEWLINEimport mathNEWLINENEWLINEimport randomNEWLINENEWLINEclass Demo2Env(gym.Env):NEWLINE def __init__(self):NEWLINE self.directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'assets')NEWLINENEWLINE # NOTE: these joint queries assume self.tiago already points to a loaded robot;NEWLINE # the URDF is only loaded in build_assistive_env() (called from reset()), so theNEWLINE # robot must be loaded before these lines run.NEWLINE self.numJoints = p.getNumJoints(self.tiago)NEWLINE self.available_joints_indices = [i for i in range(self.numJoints) if p.getJointInfo(self.tiago, i)[2] != p.JOINT_FIXED]NEWLINE self.right_arm_indices = [46, 47, 48, 49, 50, 51, 52]NEWLINE self.lowerlimits = [-1.17, -1.17, -0.78, -0.39, -2.09, -1.41, -2.09]NEWLINE self.upperlimits = [1.57, 1.57, 3.92, 2.35, 2.09, 1.41, 2.09]NEWLINE self.torso_index = [21] # torso lift prismatic jointNEWLINE self.right_gripper_indices = [58, 59]NEWLINE self.right_tool_joint = 56NEWLINE self.iter = 0NEWLINE self.iteration = 0NEWLINE self.dist = 1e30NEWLINE self.time_step = 1 / 240 # renamed from self.step so the step() method below is not shadowedNEWLINE self.reached0 = FalseNEWLINE self.reached1 = FalseNEWLINE self.reached2 = FalseNEWLINE self.robot_forces = 1.0NEWLINE self.robot_gains = 0.05NEWLINE self.distance_weight = 1.0NEWLINE self.action_weight = 0.01NEWLINE self.task_success_threshold = 0.03NEWLINE self.targetpos = np.array([2.2, 2.1, 1.5])NEWLINENEWLINE self.action_robot_len = len(self.available_joints_indices)NEWLINE self.action_human_len = 0NEWLINE self.action_space = spaces.Box(NEWLINE low=np.array([-1.0] * (self.action_robot_len + self.action_human_len), dtype=np.float32),NEWLINE high=np.array([1.0] * (self.action_robot_len + self.action_human_len), dtype=np.float32), dtype=np.float32)NEWLINE self.obs_robot_len = 18 + len(self.available_joints_indices)NEWLINE self.obs_human_len = 19NEWLINE self.observation_space = spaces.Box(low=np.array([-1000000000.0]*(self.obs_robot_len+self.obs_human_len), dtype=np.float32),NEWLINE high=np.array([1000000000.0]*(self.obs_robot_len+self.obs_human_len),NEWLINE dtype=np.float32), dtype=np.float32)NEWLINE self.action_space_robot = spaces.Box(low=np.array([-1.0]*self.action_robot_len, dtype=np.float32), high=np.array([1.0]*self.action_robot_len, dtype=np.float32), dtype=np.float32)NEWLINE self.action_space_human = spaces.Box(low=np.array([-1.0]*self.action_human_len, dtype=np.float32), high=np.array([1.0]*self.action_human_len, dtype=np.float32), dtype=np.float32)NEWLINE self.observation_space_robot = spaces.Box(low=np.array([-1000000000.0]*self.obs_robot_len, dtype=np.float32), high=np.array([1000000000.0]*self.obs_robot_len, dtype=np.float32), dtype=np.float32)NEWLINE self.observation_space_human = spaces.Box(low=np.array([-1000000000.0]*self.obs_human_len, dtype=np.float32), high=np.array([1000000000.0]*self.obs_human_len, dtype=np.float32), dtype=np.float32)NEWLINENEWLINE def step(self, action):NEWLINE self.iteration += 1NEWLINENEWLINE obs = self._get_obs()NEWLINENEWLINENEWLINE # Get human preferences (placeholder: the preference term is not computed in this trimmed-down environment)NEWLINE end_effector_velocity = np.linalg.norm(p.getLinkState(self.tiago, self.right_tool_joint, computeLinkVelocity=True, physicsClientId=self.id)[6])NEWLINE preferences_score = 0.0NEWLINENEWLINE ee_top_center_pos = p.getLinkState(self.tiago, self.right_tool_joint, physicsClientId=self.id)[0] # tool link origin stands in for the top of the cupNEWLINE reward_distance_mouth = -np.linalg.norm(self.targetpos - np.array(ee_top_center_pos)) # Penalize distances between top of cup and mouthNEWLINE reward_action = -np.linalg.norm(action) # Penalize actionsNEWLINENEWLINENEWLINENEWLINE reward = self.distance_weight*reward_distance_mouth + self.action_weight*reward_action + preferences_scoreNEWLINENEWLINENEWLINE # task succeeds when the tool is within task_success_threshold of the targetNEWLINE info = {'task_success': int(-reward_distance_mouth <= 
self.task_success_threshold), 'action_robot_len': self.action_robot_len,NEWLINE 'action_human_len': self.action_human_len,'obs_robot_len': self.obs_robot_len, 'obs_human_len': self.obs_human_len}NEWLINE done = self.iteration >= 200NEWLINENEWLINENEWLINE return obs, reward, done, infoNEWLINENEWLINE def _get_obs(self):NEWLINENEWLINE robot_joint_angles = p.getJointStates(self.tiago, self.available_joints_indices)NEWLINE # Fix joint angles to be in [-pi, pi]NEWLINE robot_joint_angles = (np.array(robot_joint_angles) + np.pi) % (2 * np.pi) - np.piNEWLINE # ee_tc_pos = np.array(p.getLinkState(self.robot, 54, computeForwardKinematics=True, physicsClientId=self.id)[0])NEWLINENEWLINENEWLINE robot_obs = np.concatenate(NEWLINE [ - self.targetpos, robot_joint_angles ]).ravel()NEWLINENEWLINE return robot_obsNEWLINENEWLINE def seed(self, seed=None):NEWLINE self.np_random, seed = seeding.np_random(seed)NEWLINE return [seed]NEWLINENEWLINE def set_seed(self, seed=1000):NEWLINE self.np_random.seed(seed)NEWLINENEWLINE def set_frictions(self, links, lateral_friction=None, spinning_friction=None, rolling_friction=None):NEWLINE if type(links) == int:NEWLINE links = [links]NEWLINE for link in links:NEWLINE if lateral_friction is not None:NEWLINE p.changeDynamics(self.body, link, lateralFriction=lateral_friction, physicsClientId=self.id)NEWLINE if spinning_friction is not None:NEWLINE p.changeDynamics(self.body, link, spinningFriction=spinning_friction, physicsClientId=self.id)NEWLINE if rolling_friction is not None:NEWLINE p.changeDynamics(self.body, link, rollingFriction=rolling_friction, physicsClientId=self.id)NEWLINENEWLINE def build_assistive_env(self):NEWLINE # Build plane, furniture, robot, human, etc. (just like world creation)NEWLINE # Load the ground planeNEWLINE plane = p.loadURDF(os.path.join(self.directory, 'plane', 'plane.urdf'), physicsClientId=self.id)NEWLINENEWLINE # Randomly set friction of the groundNEWLINE self.plane.set_frictions(self.plane, lateral_friction=self.np_random.uniform(0.025, 0.5),NEWLINE spinning_friction=0, rolling_friction=0)NEWLINE # Disable rendering during creationNEWLINE p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0, physicsClientId=self.id)NEWLINE # Create robotNEWLINE self.tiago = p.loadURDF(os.path.join(self.directory, 'tiago_dualhand', 'tiago_dual_modified.urdf'),NEWLINE useFixedBase=True, basePosition=[-10, -10, 0])NEWLINE def reset(self):NEWLINE p.resetSimulation(physicsClientId=self.id)NEWLINENEWLINE if not self.gui:NEWLINE # Reconnect the physics engine to forcefully clear memory when running long training scriptsNEWLINE self.disconnect()NEWLINE self.id = p.connect(p.DIRECT)NEWLINE self.util = Util(self.id, self.np_random)NEWLINE if self.gpu:NEWLINE self.util.enable_gpu()NEWLINE # Configure camera positionNEWLINE p.resetDebugVisualizerCamera(cameraDistance=1.75, cameraYaw=-25, cameraPitch=-45,NEWLINE cameraTargetPosition=[-0.2, 0, 0.4],NEWLINE physicsClientId=self.id)NEWLINE p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0, physicsClientId=self.id)NEWLINE p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0, physicsClientId=self.id)NEWLINE p.setTimeStep(1/240, physicsClientId=self.id)NEWLINE # Disable real time simulation so that the simulation only advances when we call stepSimulationNEWLINE p.setRealTimeSimulation(0, physicsClientId=self.id)NEWLINE p.setGravity(0, 0, -9.81, physicsClientId=self.id)NEWLINENEWLINE self.last_sim_time = NoneNEWLINE self.iteration = 0NEWLINE self.forces = []NEWLINE self.task_success = 0NEWLINE 
self.build_assistive_env()NEWLINENEWLINE # Update robot motor gainsNEWLINENEWLINENEWLINE self.generate_target()NEWLINENEWLINE p.resetDebugVisualizerCamera(cameraDistance=1.10, cameraYaw=55, cameraPitch=-45,NEWLINE cameraTargetPosition=[-0.2, 0, 0.75], physicsClientId=self.id)NEWLINENEWLINENEWLINE target_ee_pos = np.array([-0.2, -0.5, 1.1]) + self.np_random.uniform(-0.05, 0.05, size=3)NEWLINE target_ee_orient = self.get_quaternion(self.robot.toc_ee_orient_rpy[self.task])NEWLINE # self.init_robot_poseNEWLINENEWLINE # Open gripper to hold the toolNEWLINE self.robot.set_gripper_open_position(self.robot.right_gripper_indices, self.robot.gripper_pos[self.task],NEWLINE set_instantly=True)NEWLINENEWLINENEWLINENEWLINE p.setPhysicsEngineParameter(numSubSteps=4, numSolverIterations=10, physicsClientId=self.id)NEWLINENEWLINENEWLINE # Enable renderingNEWLINE p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)NEWLINENEWLINENEWLINE for _ in range(50):NEWLINE p.stepSimulation(physicsClientId=self.id)NEWLINENEWLINE self.init_env_variables()NEWLINE return self._get_obs()NEWLINENEWLINE def init_env_variables(self, reset=False):NEWLINE if len(self.action_space.low) <= 1 or reset:NEWLINE obs_len = len(self._get_obs())NEWLINE self.observation_space.__init__(low=-np.ones(obs_len, dtype=np.float32) * 1000000000,NEWLINE high=np.ones(obs_len, dtype=np.float32) * 1000000000, dtype=np.float32)NEWLINENEWLINE # Define action/obs lengthsNEWLINE self.action_robot_len = len(self.available_joints_indices)NEWLINE self.action_human_len = 0NEWLINE self.obs_robot_len = len(self._get_obs('robot'))NEWLINE self.obs_human_len = 19NEWLINE self.action_space_robot = spaces.Box(low=np.array([-1.0] * self.action_robot_len, dtype=np.float32),NEWLINE high=np.array([1.0] * self.action_robot_len, dtype=np.float32),NEWLINE dtype=np.float32)NEWLINE self.action_space_human = spaces.Box(low=np.array([-1.0] * self.action_human_len, dtype=np.float32),NEWLINE high=np.array([1.0] * self.action_human_len, dtype=np.float32),NEWLINE dtype=np.float32)NEWLINE self.observation_space_robot = spaces.Box(NEWLINE low=np.array([-1000000000.0] * self.obs_robot_len, dtype=np.float32),NEWLINE high=np.array([1000000000.0] * self.obs_robot_len, dtype=np.float32), dtype=np.float32)NEWLINE self.observation_space_human = spaces.Box(NEWLINE low=np.array([-1000000000.0] * self.obs_human_len, dtype=np.float32),NEWLINE high=np.array([1000000000.0] * self.obs_human_len, dtype=np.float32), dtype=np.float32)NEWLINENEWLINE def generate_target(self):NEWLINE # Set targetNEWLINE self.sphere = self.create_sphere(radius=0.01, mass=0.0, pos=self.targetpos, collision=False, rgba=[0, 1, 0, 1])NEWLINE self.update_targets()NEWLINENEWLINE def update_targets(self):NEWLINE # update_targets() is automatically called at each time step for updating any targets in the environment.NEWLINE p.resetBasePositionAndOrientation(self.sphere, self.targetpos, [0, 0, 0, 1])NEWLINENEWLINE def create_sphere(self, radius=0.01, mass=0.0, pos=[0, 0, 0], visual=True, collision=True, rgba=[0, 1, 1, 1], maximal_coordinates=False, return_collision_visual=False):NEWLINE sphere_collision = p.createCollisionShape(shapeType=p.GEOM_SPHERE, radius=radius, physicsClientId=self.id) if collision else -1NEWLINE sphere_visual = p.createVisualShape(shapeType=p.GEOM_SPHERE, radius=radius, rgbaColor=rgba, physicsClientId=self.id) if visual else -1NEWLINE if return_collision_visual:NEWLINE return sphere_collision, sphere_visualNEWLINE sphere = p.createMultiBody(baseMass=mass, 
baseCollisionShapeIndex=sphere_collision, baseVisualShapeIndex=sphere_visual, basePosition=pos, useMaximalCoordinates=maximal_coordinates, physicsClientId=self.id)NEWLINE return sphereNEWLINE # def take_step(self, actions, gains=None, forces=None, action_multiplier=0.05, step_sim=True):NEWLINE # if gains is None:NEWLINE # gains = [a.motor_gains for a in self.agents]NEWLINE # elif type(gains) not in (list, tuple):NEWLINE # gains = [gains]*len(self.agents)NEWLINE # if forces is None:NEWLINE # forces = [a.motor_forces for a in self.agents]NEWLINE # elif type(forces) not in (list, tuple):NEWLINE # forces = [forces]*len(self.agents)NEWLINE # if self.last_sim_time is None:NEWLINE # self.last_sim_time = time.time()NEWLINE # self.iteration += 1NEWLINE # self.forces = []NEWLINE # actions = np.clip(actions, a_min=self.action_space.low, a_max=self.action_space.high)NEWLINE # actions *= action_multiplierNEWLINE # action_index = 0NEWLINE # for i, agent in enumerate(self.agents):NEWLINE # needs_action = not isinstance(agent, Human) or agent.controllableNEWLINE # if needs_action:NEWLINE # agent_action_len = len(agent.controllable_joint_indices)NEWLINE # action = np.copy(actions[action_index:action_index+agent_action_len])NEWLINE # action_index += agent_action_lenNEWLINE # if isinstance(agent, Robot):NEWLINE # action *= agent.action_multiplierNEWLINE # if len(action) != agent_action_len:NEWLINE # print('Received agent actions of length %d does not match expected action length of %d' % (len(action), agent_action_len))NEWLINE # exit()NEWLINE # # Append the new action to the current measured joint anglesNEWLINE # agent_joint_angles = agent.get_joint_angles(agent.controllable_joint_indices)NEWLINE # # Update the target robot/human joint angles based on the proposed action and joint limitsNEWLINE # for _ in range(self.frame_skip):NEWLINE # if needs_action:NEWLINE # below_lower_limits = agent_joint_angles + action < agent.controllable_joint_lower_limitsNEWLINE # above_upper_limits = agent_joint_angles + action > agent.controllable_joint_upper_limitsNEWLINE # action[below_lower_limits] = 0NEWLINE # action[above_upper_limits] = 0NEWLINE # agent_joint_angles[below_lower_limits] = agent.controllable_joint_lower_limits[below_lower_limits]NEWLINE # agent_joint_angles[above_upper_limits] = agent.controllable_joint_upper_limits[above_upper_limits]NEWLINE # if isinstance(agent, Human) and agent.impairment == 'tremor':NEWLINE # if needs_action:NEWLINE # agent.target_joint_angles += actionNEWLINE # agent_joint_angles = agent.target_joint_angles + agent.tremors * (1 if self.iteration % 2 == 0 else -1)NEWLINE # else:NEWLINE # agent_joint_angles += actionNEWLINE # if isinstance(agent, Robot) and agent.action_duplication is not None:NEWLINE # agent_joint_angles = np.concatenate([[a]*d for a, d in zip(agent_joint_angles, self.robot.action_duplication)])NEWLINE # agent.control(agent.all_controllable_joints, agent_joint_angles, agent.gains, agent.forces)NEWLINE # else:NEWLINE # agent.control(agent.controllable_joint_indices, agent_joint_angles, gains[i], forces[i])NEWLINE # if step_sim:NEWLINE # # Update all agent positionsNEWLINE # for _ in range(self.frame_skip):NEWLINE # p.stepSimulation(physicsClientId=self.id)NEWLINE # for agent in self.agents:NEWLINE # if isinstance(agent, Human):NEWLINE # agent.enforce_joint_limits()NEWLINE # if agent.controllable:NEWLINE # agent.enforce_realistic_joint_limits()NEWLINE # self.update_targets()NEWLINE # if self.gui:NEWLINE # # Slow down time so that the simulation matches real timeNEWLINE # 
self.slow_time()
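Demo2Env._get_obs() above folds the measured joint angles into [-pi, pi] with a modulo identity before concatenating them into the observation vector. The wrapping is isolated below so it can be sanity-checked without pybullet (the function name is ours):

import numpy as np

def wrap_to_pi(angles):
    # Same identity as in Demo2Env._get_obs(): map any angle into [-pi, pi).
    return (np.asarray(angles) + np.pi) % (2 * np.pi) - np.pi

assert np.allclose(wrap_to_pi([3 * np.pi / 2, -3 * np.pi / 2, 0.5]),
                   [-np.pi / 2, np.pi / 2, 0.5])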
"""numpy.distutils.fcompilerNEWLINENEWLINEContains FCompiler, an abstract base class that defines the interfaceNEWLINEfor the numpy.distutils Fortran compiler abstraction model.NEWLINENEWLINETerminology:NEWLINENEWLINETo be consistent, where the term 'executable' is used, it means the singleNEWLINEfile, like 'gcc', that is executed, and should be a string. In contrast,NEWLINE'command' means the entire command line, like ['gcc', '-c', 'file.c'], andNEWLINEshould be a list.NEWLINENEWLINEBut note that FCompiler.executables is actually a dictionary of commands.NEWLINENEWLINE"""NEWLINEfrom __future__ import division, absolute_import, print_functionNEWLINENEWLINE__all__ = ['FCompiler', 'new_fcompiler', 'show_fcompilers',NEWLINE 'dummy_fortran_file']NEWLINENEWLINEimport osNEWLINEimport sysNEWLINEimport reNEWLINEimport typesNEWLINEtry:NEWLINE setNEWLINEexcept NameError:NEWLINE from sets import Set as setNEWLINENEWLINEfrom numpy.compat import open_latin1NEWLINENEWLINEfrom distutils.sysconfig import get_python_libNEWLINEfrom distutils.fancy_getopt import FancyGetoptNEWLINEfrom distutils.errors import DistutilsModuleError, \NEWLINE DistutilsExecError, CompileError, LinkError, DistutilsPlatformErrorNEWLINEfrom distutils.util import split_quoted, strtoboolNEWLINENEWLINEfrom numpy.distutils.ccompiler import CCompiler, gen_lib_optionsNEWLINEfrom numpy.distutils import logNEWLINEfrom numpy.distutils.misc_util import is_string, all_strings, is_sequence, \NEWLINE make_temp_file, get_shared_lib_extensionNEWLINEfrom numpy.distutils.environment import EnvironmentConfigNEWLINEfrom numpy.distutils.exec_command import find_executableNEWLINEfrom numpy.distutils.compat import get_exceptionNEWLINENEWLINE__metaclass__ = typeNEWLINENEWLINEclass CompilerNotFound(Exception):NEWLINE passNEWLINENEWLINEdef flaglist(s):NEWLINE if is_string(s):NEWLINE return split_quoted(s)NEWLINE else:NEWLINE return sNEWLINENEWLINEdef str2bool(s):NEWLINE if is_string(s):NEWLINE return strtobool(s)NEWLINE return bool(s)NEWLINENEWLINEdef is_sequence_of_strings(seq):NEWLINE return is_sequence(seq) and all_strings(seq)NEWLINENEWLINEclass FCompiler(CCompiler):NEWLINE """Abstract base class to define the interface that must be implementedNEWLINE by real Fortran compiler classes.NEWLINENEWLINE Methods that subclasses may redefine:NEWLINENEWLINE update_executables(), find_executables(), get_version()NEWLINE get_flags(), get_flags_opt(), get_flags_arch(), get_flags_debug()NEWLINE get_flags_f77(), get_flags_opt_f77(), get_flags_arch_f77(),NEWLINE get_flags_debug_f77(), get_flags_f90(), get_flags_opt_f90(),NEWLINE get_flags_arch_f90(), get_flags_debug_f90(),NEWLINE get_flags_fix(), get_flags_linker_so()NEWLINENEWLINE DON'T call these methods (except get_version) afterNEWLINE constructing a compiler instance or inside any other method.NEWLINE All methods, except update_executables() and find_executables(),NEWLINE may call the get_version() method.NEWLINENEWLINE After constructing a compiler instance, always call customize(dist=None)NEWLINE method that finalizes compiler construction and makes the followingNEWLINE attributes available:NEWLINE compiler_f77NEWLINE compiler_f90NEWLINE compiler_fixNEWLINE linker_soNEWLINE archiverNEWLINE ranlibNEWLINE librariesNEWLINE library_dirsNEWLINE """NEWLINENEWLINE # These are the environment variables and distutils keys used.NEWLINE # Each configuration descripition isNEWLINE # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>)NEWLINE # The hook names are handled by the self._environment_hook 
method.NEWLINE # - names starting with 'self.' call methods in this classNEWLINE # - names starting with 'exe.' return the key in the executables dictNEWLINE # - names like 'flags.YYY' return self.get_flag_YYY()NEWLINE # convert is either None or a function to convert a string to theNEWLINE # appropiate type used.NEWLINENEWLINE distutils_vars = EnvironmentConfig(NEWLINE distutils_section='config_fc',NEWLINE noopt = (None, None, 'noopt', str2bool),NEWLINE noarch = (None, None, 'noarch', str2bool),NEWLINE debug = (None, None, 'debug', str2bool),NEWLINE verbose = (None, None, 'verbose', str2bool),NEWLINE )NEWLINENEWLINE command_vars = EnvironmentConfig(NEWLINE distutils_section='config_fc',NEWLINE compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None),NEWLINE compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None),NEWLINE compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None),NEWLINE version_cmd = ('exe.version_cmd', None, None, None),NEWLINE linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None),NEWLINE linker_exe = ('exe.linker_exe', 'LD', 'ld', None),NEWLINE archiver = (None, 'AR', 'ar', None),NEWLINE ranlib = (None, 'RANLIB', 'ranlib', None),NEWLINE )NEWLINENEWLINE flag_vars = EnvironmentConfig(NEWLINE distutils_section='config_fc',NEWLINE f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist),NEWLINE f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist),NEWLINE free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist),NEWLINE fix = ('flags.fix', None, None, flaglist),NEWLINE opt = ('flags.opt', 'FOPT', 'opt', flaglist),NEWLINE opt_f77 = ('flags.opt_f77', None, None, flaglist),NEWLINE opt_f90 = ('flags.opt_f90', None, None, flaglist),NEWLINE arch = ('flags.arch', 'FARCH', 'arch', flaglist),NEWLINE arch_f77 = ('flags.arch_f77', None, None, flaglist),NEWLINE arch_f90 = ('flags.arch_f90', None, None, flaglist),NEWLINE debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist),NEWLINE debug_f77 = ('flags.debug_f77', None, None, flaglist),NEWLINE debug_f90 = ('flags.debug_f90', None, None, flaglist),NEWLINE flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist),NEWLINE linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist),NEWLINE linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist),NEWLINE ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist),NEWLINE )NEWLINENEWLINE language_map = {'.f': 'f77',NEWLINE '.for': 'f77',NEWLINE '.F': 'f77', # XXX: needs preprocessorNEWLINE '.ftn': 'f77',NEWLINE '.f77': 'f77',NEWLINE '.f90': 'f90',NEWLINE '.F90': 'f90', # XXX: needs preprocessorNEWLINE '.f95': 'f90',NEWLINE }NEWLINE language_order = ['f90', 'f77']NEWLINENEWLINENEWLINE # These will be set by the subclassNEWLINENEWLINE compiler_type = NoneNEWLINE compiler_aliases = ()NEWLINE version_pattern = NoneNEWLINENEWLINE possible_executables = []NEWLINE executables = {NEWLINE 'version_cmd': ["f77", "-v"],NEWLINE 'compiler_f77': ["f77"],NEWLINE 'compiler_f90': ["f90"],NEWLINE 'compiler_fix': ["f90", "-fixed"],NEWLINE 'linker_so': ["f90", "-shared"],NEWLINE 'linker_exe': ["f90"],NEWLINE 'archiver': ["ar", "-cr"],NEWLINE 'ranlib': None,NEWLINE }NEWLINENEWLINE # If compiler does not support compiling Fortran 90 then it canNEWLINE # suggest using another compiler. For example, gnu would suggestNEWLINE # gnu95 compiler type when there are F90 sources.NEWLINE suggested_f90_compiler = NoneNEWLINENEWLINE compile_switch = "-c"NEWLINE object_switch = "-o " # Ending space matters! 
It will be strippedNEWLINE # but if it is missing then object_switchNEWLINE # will be prefixed to object file name byNEWLINE # string concatenation.NEWLINE library_switch = "-o " # Ditto!NEWLINENEWLINE # Switch to specify where module files are created and searchedNEWLINE # for USE statement. Normally it is a string and also here endingNEWLINE # space matters. See above.NEWLINE module_dir_switch = NoneNEWLINENEWLINE # Switch to specify where module files are searched for USE statement.NEWLINE module_include_switch = '-I'NEWLINENEWLINE pic_flags = [] # Flags to create position-independent codeNEWLINENEWLINE src_extensions = ['.for', '.ftn', '.f77', '.f', '.f90', '.f95', '.F', '.F90']NEWLINE obj_extension = ".o"NEWLINENEWLINE shared_lib_extension = get_shared_lib_extension()NEWLINE static_lib_extension = ".a" # or .libNEWLINE static_lib_format = "lib%s%s" # or %s%sNEWLINE shared_lib_format = "%s%s"NEWLINE exe_extension = ""NEWLINENEWLINE _exe_cache = {}NEWLINENEWLINE _executable_keys = ['version_cmd', 'compiler_f77', 'compiler_f90',NEWLINE 'compiler_fix', 'linker_so', 'linker_exe', 'archiver',NEWLINE 'ranlib']NEWLINENEWLINE # This will be set by new_fcompiler when called inNEWLINE # command/{build_ext.py, build_clib.py, config.py} files.NEWLINE c_compiler = NoneNEWLINENEWLINE # extra_{f77,f90}_compile_args are set by build_ext.build_extension methodNEWLINE extra_f77_compile_args = []NEWLINE extra_f90_compile_args = []NEWLINENEWLINE def __init__(self, *args, **kw):NEWLINE CCompiler.__init__(self, *args, **kw)NEWLINE self.distutils_vars = self.distutils_vars.clone(self._environment_hook)NEWLINE self.command_vars = self.command_vars.clone(self._environment_hook)NEWLINE self.flag_vars = self.flag_vars.clone(self._environment_hook)NEWLINE self.executables = self.executables.copy()NEWLINE for e in self._executable_keys:NEWLINE if e not in self.executables:NEWLINE self.executables[e] = NoneNEWLINENEWLINE # Some methods depend on .customize() being called first, soNEWLINE # this keeps track of whether that's happened yet.NEWLINE self._is_customised = FalseNEWLINENEWLINE def __copy__(self):NEWLINE obj = self.__new__(self.__class__)NEWLINE obj.__dict__.update(self.__dict__)NEWLINE obj.distutils_vars = obj.distutils_vars.clone(obj._environment_hook)NEWLINE obj.command_vars = obj.command_vars.clone(obj._environment_hook)NEWLINE obj.flag_vars = obj.flag_vars.clone(obj._environment_hook)NEWLINE obj.executables = obj.executables.copy()NEWLINE return objNEWLINENEWLINE def copy(self):NEWLINE return self.__copy__()NEWLINENEWLINE # Use properties for the attributes used by CCompiler. 
Setting themNEWLINE # as attributes from the self.executables dictionary is error-prone,NEWLINE # so we get them from there each time.NEWLINE def _command_property(key):NEWLINE def fget(self):NEWLINE assert self._is_customisedNEWLINE return self.executables[key]NEWLINE return property(fget=fget)NEWLINE version_cmd = _command_property('version_cmd')NEWLINE compiler_f77 = _command_property('compiler_f77')NEWLINE compiler_f90 = _command_property('compiler_f90')NEWLINE compiler_fix = _command_property('compiler_fix')NEWLINE linker_so = _command_property('linker_so')NEWLINE linker_exe = _command_property('linker_exe')NEWLINE archiver = _command_property('archiver')NEWLINE ranlib = _command_property('ranlib')NEWLINENEWLINE # Make our terminology consistent.NEWLINE def set_executable(self, key, value):NEWLINE self.set_command(key, value)NEWLINENEWLINE def set_commands(self, **kw):NEWLINE for k, v in kw.items():NEWLINE self.set_command(k, v)NEWLINENEWLINE def set_command(self, key, value):NEWLINE if not key in self._executable_keys:NEWLINE raise ValueError(NEWLINE "unknown executable '%s' for class %s" %NEWLINE (key, self.__class__.__name__))NEWLINE if is_string(value):NEWLINE value = split_quoted(value)NEWLINE assert value is None or is_sequence_of_strings(value[1:]), (key, value)NEWLINE self.executables[key] = valueNEWLINENEWLINE ######################################################################NEWLINE ## Methods that subclasses may redefine. But don't call these methods!NEWLINE ## They are private to FCompiler class and may return unexpectedNEWLINE ## results if used elsewhere. So, you have been warned..NEWLINENEWLINE def find_executables(self):NEWLINE """Go through the self.executables dictionary, and attempt toNEWLINE find and assign appropiate executables.NEWLINENEWLINE Executable names are looked for in the environment (environmentNEWLINE variables, the distutils.cfg, and command line), the 0th-element ofNEWLINE the command list, and the self.possible_executables list.NEWLINENEWLINE Also, if the 0th element is "<F77>" or "<F90>", the Fortran 77NEWLINE or the Fortran 90 compiler executable is used, unless overriddenNEWLINE by an environment setting.NEWLINENEWLINE Subclasses should call this if overriden.NEWLINE """NEWLINE assert self._is_customisedNEWLINE exe_cache = self._exe_cacheNEWLINE def cached_find_executable(exe):NEWLINE if exe in exe_cache:NEWLINE return exe_cache[exe]NEWLINE fc_exe = find_executable(exe)NEWLINE exe_cache[exe] = exe_cache[fc_exe] = fc_exeNEWLINE return fc_exeNEWLINE def verify_command_form(name, value):NEWLINE if value is not None and not is_sequence_of_strings(value):NEWLINE raise ValueError(NEWLINE "%s value %r is invalid in class %s" %NEWLINE (name, value, self.__class__.__name__))NEWLINE def set_exe(exe_key, f77=None, f90=None):NEWLINE cmd = self.executables.get(exe_key, None)NEWLINE if not cmd:NEWLINE return NoneNEWLINE # Note that we get cmd[0] here if the environment doesn'tNEWLINE # have anything setNEWLINE exe_from_environ = getattr(self.command_vars, exe_key)NEWLINE if not exe_from_environ:NEWLINE possibles = [f90, f77] + self.possible_executablesNEWLINE else:NEWLINE possibles = [exe_from_environ] + self.possible_executablesNEWLINENEWLINE seen = set()NEWLINE unique_possibles = []NEWLINE for e in possibles:NEWLINE if e == '<F77>':NEWLINE e = f77NEWLINE elif e == '<F90>':NEWLINE e = f90NEWLINE if not e or e in seen:NEWLINE continueNEWLINE seen.add(e)NEWLINE unique_possibles.append(e)NEWLINENEWLINE for exe in unique_possibles:NEWLINE fc_exe = 
cached_find_executable(exe)NEWLINE if fc_exe:NEWLINE cmd[0] = fc_exeNEWLINE return fc_exeNEWLINE self.set_command(exe_key, None)NEWLINE return NoneNEWLINENEWLINE ctype = self.compiler_typeNEWLINE f90 = set_exe('compiler_f90')NEWLINE if not f90:NEWLINE f77 = set_exe('compiler_f77')NEWLINE if f77:NEWLINE log.warn('%s: no Fortran 90 compiler found' % ctype)NEWLINE else:NEWLINE raise CompilerNotFound('%s: f90 nor f77' % ctype)NEWLINE else:NEWLINE f77 = set_exe('compiler_f77', f90=f90)NEWLINE if not f77:NEWLINE log.warn('%s: no Fortran 77 compiler found' % ctype)NEWLINE set_exe('compiler_fix', f90=f90)NEWLINENEWLINE set_exe('linker_so', f77=f77, f90=f90)NEWLINE set_exe('linker_exe', f77=f77, f90=f90)NEWLINE set_exe('version_cmd', f77=f77, f90=f90)NEWLINE set_exe('archiver')NEWLINE set_exe('ranlib')NEWLINENEWLINE def update_executables(elf):NEWLINE """Called at the beginning of customisation. Subclasses shouldNEWLINE override this if they need to set up the executables dictionary.NEWLINENEWLINE Note that self.find_executables() is run afterwards, so theNEWLINE self.executables dictionary values can contain <F77> or <F90> asNEWLINE the command, which will be replaced by the found F77 or F90NEWLINE compiler.NEWLINE """NEWLINE passNEWLINENEWLINE def get_flags(self):NEWLINE """List of flags common to all compiler types."""NEWLINE return [] + self.pic_flagsNEWLINENEWLINE def _get_command_flags(self, key):NEWLINE cmd = self.executables.get(key, None)NEWLINE if cmd is None:NEWLINE return []NEWLINE return cmd[1:]NEWLINENEWLINE def get_flags_f77(self):NEWLINE """List of Fortran 77 specific flags."""NEWLINE return self._get_command_flags('compiler_f77')NEWLINE def get_flags_f90(self):NEWLINE """List of Fortran 90 specific flags."""NEWLINE return self._get_command_flags('compiler_f90')NEWLINE def get_flags_free(self):NEWLINE """List of Fortran 90 free format specific flags."""NEWLINE return []NEWLINE def get_flags_fix(self):NEWLINE """List of Fortran 90 fixed format specific flags."""NEWLINE return self._get_command_flags('compiler_fix')NEWLINE def get_flags_linker_so(self):NEWLINE """List of linker flags to build a shared library."""NEWLINE return self._get_command_flags('linker_so')NEWLINE def get_flags_linker_exe(self):NEWLINE """List of linker flags to build an executable."""NEWLINE return self._get_command_flags('linker_exe')NEWLINE def get_flags_ar(self):NEWLINE """List of archiver flags. 
"""NEWLINE return self._get_command_flags('archiver')NEWLINE def get_flags_opt(self):NEWLINE """List of architecture independent compiler flags."""NEWLINE return []NEWLINE def get_flags_arch(self):NEWLINE """List of architecture dependent compiler flags."""NEWLINE return []NEWLINE def get_flags_debug(self):NEWLINE """List of compiler flags to compile with debugging information."""NEWLINE return []NEWLINENEWLINE get_flags_opt_f77 = get_flags_opt_f90 = get_flags_optNEWLINE get_flags_arch_f77 = get_flags_arch_f90 = get_flags_archNEWLINE get_flags_debug_f77 = get_flags_debug_f90 = get_flags_debugNEWLINENEWLINE def get_libraries(self):NEWLINE """List of compiler libraries."""NEWLINE return self.libraries[:]NEWLINE def get_library_dirs(self):NEWLINE """List of compiler library directories."""NEWLINE return self.library_dirs[:]NEWLINENEWLINE def get_version(self, force=False, ok_status=[0]):NEWLINE assert self._is_customisedNEWLINE version = CCompiler.get_version(self, force=force, ok_status=ok_status)NEWLINE if version is None:NEWLINE raise CompilerNotFound()NEWLINE return versionNEWLINENEWLINE ############################################################NEWLINENEWLINE ## Public methods:NEWLINENEWLINE def customize(self, dist = None):NEWLINE """Customize Fortran compiler.NEWLINENEWLINE This method gets Fortran compiler specific information fromNEWLINE (i) class definition, (ii) environment, (iii) distutils configNEWLINE files, and (iv) command line (later overrides earlier).NEWLINENEWLINE This method should be always called after constructing aNEWLINE compiler instance. But not in __init__ because DistributionNEWLINE instance is needed for (iii) and (iv).NEWLINE """NEWLINE log.info('customize %s' % (self.__class__.__name__))NEWLINENEWLINE self._is_customised = TrueNEWLINENEWLINE self.distutils_vars.use_distribution(dist)NEWLINE self.command_vars.use_distribution(dist)NEWLINE self.flag_vars.use_distribution(dist)NEWLINENEWLINE self.update_executables()NEWLINENEWLINE # find_executables takes care of setting the compiler commands,NEWLINE # version_cmd, linker_so, linker_exe, ar, and ranlibNEWLINE self.find_executables()NEWLINENEWLINE noopt = self.distutils_vars.get('noopt', False)NEWLINE noarch = self.distutils_vars.get('noarch', noopt)NEWLINE debug = self.distutils_vars.get('debug', False)NEWLINENEWLINE f77 = self.command_vars.compiler_f77NEWLINE f90 = self.command_vars.compiler_f90NEWLINENEWLINE f77flags = []NEWLINE f90flags = []NEWLINE freeflags = []NEWLINE fixflags = []NEWLINENEWLINE if f77:NEWLINE f77flags = self.flag_vars.f77NEWLINE if f90:NEWLINE f90flags = self.flag_vars.f90NEWLINE freeflags = self.flag_vars.freeNEWLINE # XXX Assuming that free format is default for f90 compiler.NEWLINE fix = self.command_vars.compiler_fixNEWLINE if fix:NEWLINE fixflags = self.flag_vars.fix + f90flagsNEWLINENEWLINE oflags, aflags, dflags = [], [], []NEWLINE # examine get_flags_<tag>_<compiler> for extra flagsNEWLINE # only add them if the method is different from get_flags_<tag>NEWLINE def get_flags(tag, flags):NEWLINE # note that self.flag_vars.<tag> calls self.get_flags_<tag>()NEWLINE flags.extend(getattr(self.flag_vars, tag))NEWLINE this_get = getattr(self, 'get_flags_' + tag)NEWLINE for name, c, flagvar in [('f77', f77, f77flags),NEWLINE ('f90', f90, f90flags),NEWLINE ('f90', fix, fixflags)]:NEWLINE t = '%s_%s' % (tag, name)NEWLINE if c and this_get is not getattr(self, 'get_flags_' + t):NEWLINE flagvar.extend(getattr(self.flag_vars, t))NEWLINE if not noopt:NEWLINE get_flags('opt', oflags)NEWLINE if not 
noarch:NEWLINE get_flags('arch', aflags)NEWLINE if debug:NEWLINE get_flags('debug', dflags)NEWLINENEWLINE fflags = self.flag_vars.flags + dflags + oflags + aflagsNEWLINENEWLINE if f77:NEWLINE self.set_commands(compiler_f77=[f77]+f77flags+fflags)NEWLINE if f90:NEWLINE self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags)NEWLINE if fix:NEWLINE self.set_commands(compiler_fix=[fix]+fixflags+fflags)NEWLINENEWLINENEWLINE #XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGSNEWLINE linker_so = self.linker_soNEWLINE if linker_so:NEWLINE linker_so_flags = self.flag_vars.linker_soNEWLINE if sys.platform.startswith('aix'):NEWLINE python_lib = get_python_lib(standard_lib=1)NEWLINE ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')NEWLINE python_exp = os.path.join(python_lib, 'config', 'python.exp')NEWLINE linker_so = [ld_so_aix] + linker_so + ['-bI:'+python_exp]NEWLINE self.set_commands(linker_so=linker_so+linker_so_flags)NEWLINENEWLINE linker_exe = self.linker_exeNEWLINE if linker_exe:NEWLINE linker_exe_flags = self.flag_vars.linker_exeNEWLINE self.set_commands(linker_exe=linker_exe+linker_exe_flags)NEWLINENEWLINE ar = self.command_vars.archiverNEWLINE if ar:NEWLINE arflags = self.flag_vars.arNEWLINE self.set_commands(archiver=[ar]+arflags)NEWLINENEWLINE self.set_library_dirs(self.get_library_dirs())NEWLINE self.set_libraries(self.get_libraries())NEWLINENEWLINE def dump_properties(self):NEWLINE """Print out the attributes of a compiler instance."""NEWLINE props = []NEWLINE for key in list(self.executables.keys()) + \NEWLINE ['version', 'libraries', 'library_dirs',NEWLINE 'object_switch', 'compile_switch']:NEWLINE if hasattr(self, key):NEWLINE v = getattr(self, key)NEWLINE props.append((key, None, '= '+repr(v)))NEWLINE props.sort()NEWLINENEWLINE pretty_printer = FancyGetopt(props)NEWLINE for l in pretty_printer.generate_help("%s instance properties:" \NEWLINE % (self.__class__.__name__)):NEWLINE if l[:4]==' --':NEWLINE l = ' ' + l[4:]NEWLINE print(l)NEWLINENEWLINE ###################NEWLINENEWLINE def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):NEWLINE """Compile 'src' to product 'obj'."""NEWLINE src_flags = {}NEWLINE if is_f_file(src) and not has_f90_header(src):NEWLINE flavor = ':f77'NEWLINE compiler = self.compiler_f77NEWLINE src_flags = get_f77flags(src)NEWLINE extra_compile_args = self.extra_f77_compile_args or []NEWLINE elif is_free_format(src):NEWLINE flavor = ':f90'NEWLINE compiler = self.compiler_f90NEWLINE if compiler is None:NEWLINE raise DistutilsExecError('f90 not supported by %s needed for %s'\NEWLINE % (self.__class__.__name__, src))NEWLINE extra_compile_args = self.extra_f90_compile_args or []NEWLINE else:NEWLINE flavor = ':fix'NEWLINE compiler = self.compiler_fixNEWLINE if compiler is None:NEWLINE raise DistutilsExecError('f90 (fixed) not supported by %s needed for %s'\NEWLINE % (self.__class__.__name__, src))NEWLINE extra_compile_args = self.extra_f90_compile_args or []NEWLINE if self.object_switch[-1]==' ':NEWLINE o_args = [self.object_switch.strip(), obj]NEWLINE else:NEWLINE o_args = [self.object_switch.strip()+obj]NEWLINENEWLINE assert self.compile_switch.strip()NEWLINE s_args = [self.compile_switch, src]NEWLINENEWLINE if extra_compile_args:NEWLINE log.info('extra %s options: %r' \NEWLINE % (flavor[1:], ' '.join(extra_compile_args)))NEWLINENEWLINE extra_flags = src_flags.get(self.compiler_type, [])NEWLINE if extra_flags:NEWLINE log.info('using compile options from source: %r' \NEWLINE % ' '.join(extra_flags))NEWLINENEWLINE command = compiler 
+ cc_args + extra_flags + s_args + o_args \NEWLINE + extra_postargs + extra_compile_argsNEWLINENEWLINE display = '%s: %s' % (os.path.basename(compiler[0]) + flavor,NEWLINE src)NEWLINE try:NEWLINE self.spawn(command, display=display)NEWLINE except DistutilsExecError:NEWLINE msg = str(get_exception())NEWLINE raise CompileError(msg)NEWLINENEWLINE def module_options(self, module_dirs, module_build_dir):NEWLINE options = []NEWLINE if self.module_dir_switch is not None:NEWLINE if self.module_dir_switch[-1]==' ':NEWLINE options.extend([self.module_dir_switch.strip(), module_build_dir])NEWLINE else:NEWLINE options.append(self.module_dir_switch.strip()+module_build_dir)NEWLINE else:NEWLINE print('XXX: module_build_dir=%r option ignored' % (module_build_dir))NEWLINE print('XXX: Fix module_dir_switch for ', self.__class__.__name__)NEWLINE if self.module_include_switch is not None:NEWLINE for d in [module_build_dir]+module_dirs:NEWLINE options.append('%s%s' % (self.module_include_switch, d))NEWLINE else:NEWLINE print('XXX: module_dirs=%r option ignored' % (module_dirs))NEWLINE print('XXX: Fix module_include_switch for ', self.__class__.__name__)NEWLINE return optionsNEWLINENEWLINE def library_option(self, lib):NEWLINE return "-l" + libNEWLINE def library_dir_option(self, dir):NEWLINE return "-L" + dirNEWLINENEWLINE def link(self, target_desc, objects,NEWLINE output_filename, output_dir=None, libraries=None,NEWLINE library_dirs=None, runtime_library_dirs=None,NEWLINE export_symbols=None, debug=0, extra_preargs=None,NEWLINE extra_postargs=None, build_temp=None, target_lang=None):NEWLINE objects, output_dir = self._fix_object_args(objects, output_dir)NEWLINE libraries, library_dirs, runtime_library_dirs = \NEWLINE self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)NEWLINENEWLINE lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,NEWLINE libraries)NEWLINE if is_string(output_dir):NEWLINE output_filename = os.path.join(output_dir, output_filename)NEWLINE elif output_dir is not None:NEWLINE raise TypeError("'output_dir' must be a string or None")NEWLINENEWLINE if self._need_link(objects, output_filename):NEWLINE if self.library_switch[-1]==' ':NEWLINE o_args = [self.library_switch.strip(), output_filename]NEWLINE else:NEWLINE o_args = [self.library_switch.strip()+output_filename]NEWLINENEWLINE if is_string(self.objects):NEWLINE ld_args = objects + [self.objects]NEWLINE else:NEWLINE ld_args = objects + self.objectsNEWLINE ld_args = ld_args + lib_opts + o_argsNEWLINE if debug:NEWLINE ld_args[:0] = ['-g']NEWLINE if extra_preargs:NEWLINE ld_args[:0] = extra_preargsNEWLINE if extra_postargs:NEWLINE ld_args.extend(extra_postargs)NEWLINE self.mkpath(os.path.dirname(output_filename))NEWLINE if target_desc == CCompiler.EXECUTABLE:NEWLINE linker = self.linker_exe[:]NEWLINE else:NEWLINE linker = self.linker_so[:]NEWLINE command = linker + ld_argsNEWLINE try:NEWLINE self.spawn(command)NEWLINE except DistutilsExecError:NEWLINE msg = str(get_exception())NEWLINE raise LinkError(msg)NEWLINE else:NEWLINE log.debug("skipping %s (up-to-date)", output_filename)NEWLINENEWLINE def _environment_hook(self, name, hook_name):NEWLINE if hook_name is None:NEWLINE return NoneNEWLINE if is_string(hook_name):NEWLINE if hook_name.startswith('self.'):NEWLINE hook_name = hook_name[5:]NEWLINE hook = getattr(self, hook_name)NEWLINE return hook()NEWLINE elif hook_name.startswith('exe.'):NEWLINE hook_name = hook_name[4:]NEWLINE var = self.executables[hook_name]NEWLINE if var:NEWLINE return var[0]NEWLINE 
else:NEWLINE return NoneNEWLINE elif hook_name.startswith('flags.'):NEWLINE hook_name = hook_name[6:]NEWLINE hook = getattr(self, 'get_flags_' + hook_name)NEWLINE return hook()NEWLINE else:NEWLINE return hook_name()NEWLINENEWLINE ## class FCompilerNEWLINENEWLINE_default_compilers = (NEWLINE # sys.platform mappingsNEWLINE ('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',NEWLINE 'intelvem', 'intelem')),NEWLINE ('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),NEWLINE ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq',NEWLINE 'intele', 'intelem', 'gnu', 'g95', 'pathf95')),NEWLINE ('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')),NEWLINE ('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),NEWLINE ('irix.*', ('mips', 'gnu', 'gnu95',)),NEWLINE ('aix.*', ('ibm', 'gnu', 'gnu95',)),NEWLINE # os.name mappingsNEWLINE ('posix', ('gnu', 'gnu95',)),NEWLINE ('nt', ('gnu', 'gnu95',)),NEWLINE ('mac', ('gnu95', 'gnu', 'pg')),NEWLINE )NEWLINENEWLINEfcompiler_class = NoneNEWLINEfcompiler_aliases = NoneNEWLINENEWLINEdef load_all_fcompiler_classes():NEWLINE """Cache all the FCompiler classes found in modules in theNEWLINE numpy.distutils.fcompiler package.NEWLINE """NEWLINE from glob import globNEWLINE global fcompiler_class, fcompiler_aliasesNEWLINE if fcompiler_class is not None:NEWLINE returnNEWLINE pys = os.path.join(os.path.dirname(__file__), '*.py')NEWLINE fcompiler_class = {}NEWLINE fcompiler_aliases = {}NEWLINE for fname in glob(pys):NEWLINE module_name, ext = os.path.splitext(os.path.basename(fname))NEWLINE module_name = 'numpy.distutils.fcompiler.' + module_nameNEWLINE __import__ (module_name)NEWLINE module = sys.modules[module_name]NEWLINE if hasattr(module, 'compilers'):NEWLINE for cname in module.compilers:NEWLINE klass = getattr(module, cname)NEWLINE desc = (klass.compiler_type, klass, klass.description)NEWLINE fcompiler_class[klass.compiler_type] = descNEWLINE for alias in klass.compiler_aliases:NEWLINE if alias in fcompiler_aliases:NEWLINE raise ValueError("alias %r defined for both %s and %s"NEWLINE % (alias, klass.__name__,NEWLINE fcompiler_aliases[alias][1].__name__))NEWLINE fcompiler_aliases[alias] = descNEWLINENEWLINEdef _find_existing_fcompiler(compiler_types,NEWLINE osname=None, platform=None,NEWLINE requiref90=False,NEWLINE c_compiler=None):NEWLINE from numpy.distutils.core import get_distributionNEWLINE dist = get_distribution(always=True)NEWLINE for compiler_type in compiler_types:NEWLINE v = NoneNEWLINE try:NEWLINE c = new_fcompiler(plat=platform, compiler=compiler_type,NEWLINE c_compiler=c_compiler)NEWLINE c.customize(dist)NEWLINE v = c.get_version()NEWLINE if requiref90 and c.compiler_f90 is None:NEWLINE v = NoneNEWLINE new_compiler = c.suggested_f90_compilerNEWLINE if new_compiler:NEWLINE log.warn('Trying %r compiler as suggested by %r 'NEWLINE 'compiler for f90 support.' % (compiler_type,NEWLINE new_compiler))NEWLINE c = new_fcompiler(plat=platform, compiler=new_compiler,NEWLINE c_compiler=c_compiler)NEWLINE c.customize(dist)NEWLINE v = c.get_version()NEWLINE if v is not None:NEWLINE compiler_type = new_compilerNEWLINE if requiref90 and c.compiler_f90 is None:NEWLINE raise ValueError('%s does not support compiling f90 codes, 'NEWLINE 'skipping.' 
% (c.__class__.__name__))NEWLINE except DistutilsModuleError:NEWLINE log.debug("_find_existing_fcompiler: compiler_type='%s' raised DistutilsModuleError", compiler_type)NEWLINE except CompilerNotFound:NEWLINE log.debug("_find_existing_fcompiler: compiler_type='%s' not found", compiler_type)NEWLINE if v is not None:NEWLINE return compiler_typeNEWLINE return NoneNEWLINENEWLINEdef available_fcompilers_for_platform(osname=None, platform=None):NEWLINE if osname is None:NEWLINE osname = os.nameNEWLINE if platform is None:NEWLINE platform = sys.platformNEWLINE matching_compiler_types = []NEWLINE for pattern, compiler_type in _default_compilers:NEWLINE if re.match(pattern, platform) or re.match(pattern, osname):NEWLINE for ct in compiler_type:NEWLINE if ct not in matching_compiler_types:NEWLINE matching_compiler_types.append(ct)NEWLINE if not matching_compiler_types:NEWLINE matching_compiler_types.append('gnu')NEWLINE return matching_compiler_typesNEWLINENEWLINEdef get_default_fcompiler(osname=None, platform=None, requiref90=False,NEWLINE c_compiler=None):NEWLINE """Determine the default Fortran compiler to use for the givenNEWLINE platform."""NEWLINE matching_compiler_types = available_fcompilers_for_platform(osname,NEWLINE platform)NEWLINE compiler_type = _find_existing_fcompiler(matching_compiler_types,NEWLINE osname=osname,NEWLINE platform=platform,NEWLINE requiref90=requiref90,NEWLINE c_compiler=c_compiler)NEWLINE return compiler_typeNEWLINENEWLINE# Flag to avoid rechecking for Fortran compiler every timeNEWLINEfailed_fcompiler = FalseNEWLINENEWLINEdef new_fcompiler(plat=None,NEWLINE compiler=None,NEWLINE verbose=0,NEWLINE dry_run=0,NEWLINE force=0,NEWLINE requiref90=False,NEWLINE c_compiler = None):NEWLINE """Generate an instance of some FCompiler subclass for the suppliedNEWLINE platform/compiler combination.NEWLINE """NEWLINE global failed_fcompilerNEWLINE if failed_fcompiler:NEWLINE return NoneNEWLINENEWLINE load_all_fcompiler_classes()NEWLINE if plat is None:NEWLINE plat = os.nameNEWLINE if compiler is None:NEWLINE compiler = get_default_fcompiler(plat, requiref90=requiref90,NEWLINE c_compiler=c_compiler)NEWLINE if compiler in fcompiler_class:NEWLINE module_name, klass, long_description = fcompiler_class[compiler]NEWLINE elif compiler in fcompiler_aliases:NEWLINE module_name, klass, long_description = fcompiler_aliases[compiler]NEWLINE else:NEWLINE msg = "don't know how to compile Fortran code on platform '%s'" % platNEWLINE if compiler is not None:NEWLINE msg = msg + " with '%s' compiler." 
% compilerNEWLINE msg = msg + " Supported compilers are: %s)" \NEWLINE % (','.join(fcompiler_class.keys()))NEWLINE log.warn(msg)NEWLINE failed_fcompiler = TrueNEWLINE return NoneNEWLINENEWLINE compiler = klass(verbose=verbose, dry_run=dry_run, force=force)NEWLINE compiler.c_compiler = c_compilerNEWLINE return compilerNEWLINENEWLINEdef show_fcompilers(dist=None):NEWLINE """Print list of available compilers (used by the "--help-fcompiler"NEWLINE option to "config_fc").NEWLINE """NEWLINE if dist is None:NEWLINE from distutils.dist import DistributionNEWLINE from numpy.distutils.command.config_compiler import config_fcNEWLINE dist = Distribution()NEWLINE dist.script_name = os.path.basename(sys.argv[0])NEWLINE dist.script_args = ['config_fc'] + sys.argv[1:]NEWLINE try:NEWLINE dist.script_args.remove('--help-fcompiler')NEWLINE except ValueError:NEWLINE passNEWLINE dist.cmdclass['config_fc'] = config_fcNEWLINE dist.parse_config_files()NEWLINE dist.parse_command_line()NEWLINE compilers = []NEWLINE compilers_na = []NEWLINE compilers_ni = []NEWLINE if not fcompiler_class:NEWLINE load_all_fcompiler_classes()NEWLINE platform_compilers = available_fcompilers_for_platform()NEWLINE for compiler in platform_compilers:NEWLINE v = NoneNEWLINE log.set_verbosity(-2)NEWLINE try:NEWLINE c = new_fcompiler(compiler=compiler, verbose=dist.verbose)NEWLINE c.customize(dist)NEWLINE v = c.get_version()NEWLINE except (DistutilsModuleError, CompilerNotFound):NEWLINE e = get_exception()NEWLINE log.debug("show_fcompilers: %s not found" % (compiler,))NEWLINE log.debug(repr(e))NEWLINENEWLINE if v is None:NEWLINE compilers_na.append(("fcompiler="+compiler, None,NEWLINE fcompiler_class[compiler][2]))NEWLINE else:NEWLINE c.dump_properties()NEWLINE compilers.append(("fcompiler="+compiler, None,NEWLINE fcompiler_class[compiler][2] + ' (%s)' % v))NEWLINENEWLINE compilers_ni = list(set(fcompiler_class.keys()) - set(platform_compilers))NEWLINE compilers_ni = [("fcompiler="+fc, None, fcompiler_class[fc][2])NEWLINE for fc in compilers_ni]NEWLINENEWLINE compilers.sort()NEWLINE compilers_na.sort()NEWLINE compilers_ni.sort()NEWLINE pretty_printer = FancyGetopt(compilers)NEWLINE pretty_printer.print_help("Fortran compilers found:")NEWLINE pretty_printer = FancyGetopt(compilers_na)NEWLINE pretty_printer.print_help("Compilers available for this "NEWLINE "platform, but not found:")NEWLINE if compilers_ni:NEWLINE pretty_printer = FancyGetopt(compilers_ni)NEWLINE pretty_printer.print_help("Compilers not available on this platform:")NEWLINE print("For compiler details, run 'config_fc --verbose' setup command.")NEWLINENEWLINENEWLINEdef dummy_fortran_file():NEWLINE fo, name = make_temp_file(suffix='.f')NEWLINE fo.write(" subroutine dummy()\n end\n")NEWLINE fo.close()NEWLINE return name[:-2]NEWLINENEWLINENEWLINEis_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).matchNEWLINE_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).searchNEWLINE_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).searchNEWLINE_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).searchNEWLINE_free_f90_start = re.compile(r'[^c*!]\s*[^\s\d\t]', re.I).matchNEWLINENEWLINEdef is_free_format(file):NEWLINE """Check if file is in free format Fortran."""NEWLINE # f90 allows both fixed and free format, assuming fixed unlessNEWLINE # signs of free format are detected.NEWLINE result = 0NEWLINE f = open_latin1(file, 'r')NEWLINE line = f.readline()NEWLINE n = 10000 # the number of non-comment lines to scan for hintsNEWLINE if _has_f_header(line):NEWLINE n = 
0NEWLINE elif _has_f90_header(line):NEWLINE n = 0NEWLINE result = 1NEWLINE while n>0 and line:NEWLINE line = line.rstrip()NEWLINE if line and line[0]!='!':NEWLINE n -= 1NEWLINE if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':NEWLINE result = 1NEWLINE breakNEWLINE line = f.readline()NEWLINE f.close()NEWLINE return resultNEWLINENEWLINEdef has_f90_header(src):NEWLINE f = open_latin1(src, 'r')NEWLINE line = f.readline()NEWLINE f.close()NEWLINE return _has_f90_header(line) or _has_fix_header(line)NEWLINENEWLINE_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)NEWLINEdef get_f77flags(src):NEWLINE """NEWLINE Search the first 20 lines of fortran 77 code for line patternNEWLINE `CF77FLAGS(<fcompiler type>)=<f77 flags>`NEWLINE Return a dictionary {<fcompiler type>:<f77 flags>}.NEWLINE """NEWLINE flags = {}NEWLINE f = open_latin1(src, 'r')NEWLINE i = 0NEWLINE for line in f:NEWLINE i += 1NEWLINE if i>20: breakNEWLINE m = _f77flags_re.match(line)NEWLINE if not m: continueNEWLINE fcname = m.group('fcname').strip()NEWLINE fflags = m.group('fflags').strip()NEWLINE flags[fcname] = split_quoted(fflags)NEWLINE f.close()NEWLINE return flagsNEWLINENEWLINE# TODO: implement get_f90flags and use it in _compile similarly to get_f77flagsNEWLINENEWLINEif __name__ == '__main__':NEWLINE show_fcompilers()NEWLINE
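NEWLINENEWLINE# Hedged usage sketch (not part of numpy.distutils itself): it demonstrates theNEWLINE# CF77FLAGS(<fcompiler type>)=<flags> header convention that get_f77flags()NEWLINE# above scans for in the first 20 lines of a Fortran 77 source.  The fileNEWLINE# name and the flag values are invented for illustration.NEWLINEfrom numpy.distutils.fcompiler import get_f77flagsNEWLINENEWLINEif __name__ == '__main__':NEWLINE    with open('flagged_example.f', 'w') as fp:NEWLINE        fp.write("CF77FLAGS(gnu)= -O2 -fno-second-underscore\n")NEWLINE        fp.write("      subroutine dummy()\n      end\n")NEWLINE    # Expected output: {'gnu': ['-O2', '-fno-second-underscore']}NEWLINE    print(get_f77flags('flagged_example.f'))NEWLINE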
#!/usr/bin/python3NEWLINE# -*- coding: utf-8 -*-NEWLINE# #%LNEWLINE# %%NEWLINE# Copyright (C) 2021 BMW Car IT GmbHNEWLINE# %%NEWLINE# Licensed under the Apache License, Version 2.0 (the "License");NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINE# #L%NEWLINENEWLINE'''NEWLINERemove unused dependencies from the generated.pomNEWLINEAdd missing dependencies to the generated.pom.NEWLINENote that this process does not update existing dependency versions.NEWLINEA manual check/update is required if new direct dependencies areNEWLINEadded to the projects or their versions are updated.NEWLINE'''NEWLINENEWLINEimport argparseNEWLINEimport collectionsNEWLINEimport filecmpNEWLINEimport loggingNEWLINEimport osNEWLINEimport shutilNEWLINEimport subprocessNEWLINEimport xml.etree.ElementTree as xmlETNEWLINENEWLINEDEPENDENCY_LOCK_DIR = os.path.abspath(os.path.dirname(__file__))NEWLINENEWLINE# Scope not relevant for dependency management.NEWLINEDependency = collections.namedtuple(NEWLINE 'Dependency', ['groupId', 'artifactId', 'type', 'version'])NEWLINENEWLINENEWLINEclass PomGenerator(object):NEWLINE DEFAULT_WORKING_DIR = os.path.dirname(__file__)NEWLINENEWLINE def __init__(self, lockDir=os.path.abspath(DEFAULT_WORKING_DIR)):NEWLINE object.__init__(self)NEWLINE self.__log = logging.getLogger('PomGenerator')NEWLINE self.__pomFile = os.path.join(lockDir, 'pom.xml')NEWLINE self.__backupFile = os.path.join(lockDir, "pom-backup.%s" % id(self))NEWLINE self.__pomHeader = []NEWLINE self.__pomFooter = []NEWLINE try:NEWLINE if not os.path.isfile(self.__pomFile):NEWLINE raise RuntimeError(NEWLINE "POM file '%s' does not exist." % self.__pomFile)NEWLINENEWLINE pomSection = self.__pomHeaderNEWLINE with open(self.__pomFile) as fp:NEWLINE for line in fp:NEWLINE if line.strip() == '</dependencies>':NEWLINE pomSection = self.__pomFooterNEWLINE pomSection.append(line)NEWLINE if line.strip() == '<dependencies>':NEWLINE pomSection = [] # Drop dependenciesNEWLINENEWLINE if 0 == len(self.__pomFooter):NEWLINE raise RuntimeError(NEWLINE "POM file '%s' does not contain 'dependencies' section." 
% self.__pomFile)NEWLINENEWLINE shutil.copyfile(self.__pomFile, self.__backupFile)NEWLINE except Exception as e:NEWLINE self.__log.error(NEWLINE "PomGenerator initialization arguments incorrect: %s" % e)NEWLINE raiseNEWLINENEWLINE def __del__(self):NEWLINE if os.path.isfile(self.__backupFile):NEWLINE os.remove(self.__backupFile)NEWLINENEWLINE def restore(self):NEWLINE shutil.copyfile(self.__backupFile, self.__pomFile)NEWLINENEWLINE def reset(self):NEWLINE self.write([])NEWLINENEWLINE def modified(self):NEWLINE return not filecmp.cmp(self.__backupFile, self.__pomFile, shallow=False)NEWLINENEWLINE def write(self, dependencies):NEWLINE with open(self.__pomFile, 'w', encoding='utf-8') as fp:NEWLINE for line in self.__pomHeader:NEWLINE fp.write(line)NEWLINE for dependency in dependencies:NEWLINE fp.write(self.__toXml(dependency))NEWLINE for line in self.__pomFooter:NEWLINE fp.write(line)NEWLINENEWLINE def __toXml(self, dependency):NEWLINE nodeName = 'dependency'NEWLINE node = xmlET.Element(nodeName)NEWLINE for name, value in dependency._asdict().items():NEWLINE child = xmlET.SubElement(node, name)NEWLINE child.text = valueNEWLINENEWLINE nodeStr = xmlET.tostring(node, encoding='unicode', method='xml')NEWLINE nodeStrTags = nodeStr.split('<',)NEWLINE nodeStr = ''NEWLINE for nodeStrTag in nodeStrTags:NEWLINE if nodeName in nodeStrTag:NEWLINE nodeStr += "\t\t\t<%s\n" % nodeStrTagNEWLINE elif nodeStrTag:NEWLINE if nodeStrTag.startswith('/'):NEWLINE nodeStr += "<%s\n" % nodeStrTagNEWLINE else:NEWLINE nodeStr += "\t\t\t\t<%s" % nodeStrTagNEWLINE return nodeStrNEWLINENEWLINENEWLINEclass ProjectDependency(object):NEWLINE DEFAULT_POM_DIR = os.path.abspath(os.path.join(NEWLINE os.path.dirname(__file__), os.path.normpath('../')))NEWLINE DEFAULT_POM_FILENAME = 'pom.xml'NEWLINE MVN_EXEC = 'mvn'NEWLINE MVN_SCOPES = ['compile', 'provided', 'runtime', 'test', 'system', 'import']NEWLINE ADDITIONAL_PROFILES = ['javascript', 'android']NEWLINENEWLINE def __init__(self, pomDir=DEFAULT_POM_DIR, pomFile=DEFAULT_POM_FILENAME):NEWLINE object.__init__(self)NEWLINE pomDir = os.path.abspath(pomDir)NEWLINE self.__log = logging.getLogger('ProjectDependency')NEWLINE try:NEWLINE if not os.path.isdir(pomDir):NEWLINE raise RuntimeError(NEWLINE "POM directory '%s' does not exist." % pomDir)NEWLINENEWLINE pomFile = os.path.join(pomDir, pomFile)NEWLINE if not os.path.isfile(pomFile):NEWLINE raise RuntimeError("POM file '%s' does not exist." % pomFile)NEWLINE mvnCall = subprocess.run(NEWLINE [ProjectDependency.MVN_EXEC, '-version'], capture_output=True, text=True)NEWLINE if mvnCall.returncode:NEWLINE raise RuntimeError("'%s' execution failed:\n%s\n%s" % (NEWLINE ProjectDependency.MVN_EXEC, mvnCall.stdout, mvnCall.stderr))NEWLINENEWLINE self.__cmd = [ProjectDependency.MVN_EXEC, '-f', pomFile,NEWLINE '-pl', '"-io.joynr.examples:radio-app"',NEWLINE '-P', ','.join(ProjectDependency.ADDITIONAL_PROFILES)]NEWLINE except Exception as e:NEWLINE self.__log.error(NEWLINE "ProjectDependency initialization arguments incorrect: %s" % e)NEWLINE raiseNEWLINENEWLINE def listTransitives(self):NEWLINE allDependencies = self.__set()NEWLINE self.__log.debug("Found %d dependencies." % len(allDependencies))NEWLINENEWLINE directDependencies = self.__set(['excludeTransitive'])NEWLINE self.__log.debug("Found %d direct dependencies." 
%NEWLINE len(directDependencies))NEWLINENEWLINE transitiveDependencies = allDependencies - directDependenciesNEWLINE return sorted(transitiveDependencies)NEWLINENEWLINE def listExternal(self, internalGroupsStartWith='io.joynr'):NEWLINE allDependencies = self.__set()NEWLINE self.__log.debug("Found %d dependencies." % len(allDependencies))NEWLINENEWLINE externalDependencies = sorted(filter(NEWLINE lambda d: not d.groupId.startswith(internalGroupsStartWith), allDependencies))NEWLINE self.__log.debug("Found %d external dependencies." %NEWLINE len(externalDependencies))NEWLINE return externalDependenciesNEWLINENEWLINE def __set(self, userProperties=[]):NEWLINE cmd = self.__cmd.copy()NEWLINE for userProperty in userProperties:NEWLINE cmd += ["-D%s" % userProperty]NEWLINE cmd += ['dependency:list']NEWLINE mvnCall = subprocess.run(cmd, capture_output=True, text=True)NEWLINE if mvnCall.returncode:NEWLINE raise RuntimeError("'%s' execution failed:\n%s\n%s" % (NEWLINE " ".join(cmd), mvnCall.stdout, mvnCall.stderr))NEWLINENEWLINE return self.__parseOutput(mvnCall.stdout)NEWLINENEWLINE def __parseOutput(self, output):NEWLINE dependencies = set()NEWLINE for line in output.split("\n"):NEWLINE if not line:NEWLINE continueNEWLINE line = line.strip()NEWLINE endOfInfo = line.find(' ')NEWLINE line = line[endOfInfo:]NEWLINE line = line.strip()NEWLINE parts = line.split(':')NEWLINE if 5 == len(parts) and parts[4] in ProjectDependency.MVN_SCOPES:NEWLINE del parts[-1]NEWLINE dependencies.add(Dependency(*parts))NEWLINE return dependenciesNEWLINENEWLINENEWLINEdef init(parser):NEWLINE parser.add_argument('--loglevel', '-l', dest='loglevel', required=False,NEWLINE default=logging.INFO, type=int, help="Log level, default is %d (info)" % logging.INFO)NEWLINE args = parser.parse_args()NEWLINE logging.basicConfig(level=args.loglevel)NEWLINENEWLINENEWLINE'''Entry point'''NEWLINEif __name__ == '__main__':NEWLINE init(argparse.ArgumentParser(description="Update dependency lock"))NEWLINE generator = PomGenerator()NEWLINE try:NEWLINE projectDependencies = ProjectDependency()NEWLINE externalDependencies = projectDependencies.listExternal()NEWLINE generator.write(externalDependencies)NEWLINE logging.getLogger("main").info(NEWLINE "Updated dependency lock. %d external dependencies found." % len(externalDependencies))NEWLINE except Exception:NEWLINE generator.restore()NEWLINE raiseNEWLINE
"""NEWLINEtorch.multiprocessing is a wrapper around the native :mod:`multiprocessing`NEWLINEmodule. It registers custom reducers, that use shared memory to provide sharedNEWLINEviews on the same data in different processes. Once the tensor/storage is movedNEWLINEto shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possibleNEWLINEto send it to other processes without making any copies.NEWLINENEWLINEThe API is 100% compatible with the original module - it's enough to changeNEWLINE``import multiprocessing`` to ``import torch.multiprocessing`` to have all theNEWLINEtensors sent through the queues or shared via other mechanisms, moved to sharedNEWLINEmemory.NEWLINENEWLINEBecause of the similarity of APIs we do not document most of this packageNEWLINEcontents, and we recommend referring to very good docs of the original module.NEWLINE"""NEWLINEimport torchNEWLINEimport sysNEWLINEfrom .reductions import init_reductionsNEWLINEimport multiprocessingNEWLINENEWLINE__all__ = ['set_sharing_strategy', 'get_sharing_strategy',NEWLINE 'get_all_sharing_strategies']NEWLINENEWLINENEWLINEfrom multiprocessing import *NEWLINENEWLINENEWLINE__all__ += multiprocessing.__all__NEWLINENEWLINENEWLINE# This call adds a Linux specific prctl(2) wrapper function to this module.NEWLINE# See https://github.com/pytorch/pytorch/pull/14391 for more information.NEWLINEtorch._C._multiprocessing_init()NEWLINENEWLINENEWLINEif sys.version_info < (3, 3):NEWLINE """Override basic classes in Python 2.7 and Python 3.3 to use ForkingPicklerNEWLINE for serialization. Later versions of Python already use ForkingPickler."""NEWLINE from .queue import Queue, SimpleQueueNEWLINE from .pool import PoolNEWLINENEWLINENEWLINE"""Add helper function to spawn N processes and wait for completion of any ofNEWLINEthem. This depends `mp.get_context` which was added in Python 3.4."""NEWLINEfrom .spawn import spawn, SpawnContext, _supports_context, start_processes, ProcessContextNEWLINENEWLINENEWLINEif sys.platform == 'darwin' or sys.platform == 'win32':NEWLINE _sharing_strategy = 'file_system'NEWLINE _all_sharing_strategies = {'file_system'}NEWLINEelse:NEWLINE _sharing_strategy = 'file_descriptor'NEWLINE _all_sharing_strategies = {'file_descriptor', 'file_system'}NEWLINENEWLINENEWLINEdef set_sharing_strategy(new_strategy):NEWLINE """Sets the strategy for sharing CPU tensors.NEWLINENEWLINE Arguments:NEWLINE new_strategy (str): Name of the selected strategy. Should be one ofNEWLINE the values returned by :func:`get_all_sharing_strategies()`.NEWLINE """NEWLINE global _sharing_strategyNEWLINE assert new_strategy in _all_sharing_strategiesNEWLINE _sharing_strategy = new_strategyNEWLINENEWLINENEWLINEdef get_sharing_strategy():NEWLINE """Returns the current strategy for sharing CPU tensors."""NEWLINE return _sharing_strategyNEWLINENEWLINENEWLINEdef get_all_sharing_strategies():NEWLINE """Returns a set of sharing strategies supported on a current system."""NEWLINE return _all_sharing_strategiesNEWLINENEWLINENEWLINEinit_reductions()NEWLINE
print("Hello world")NEWLINENEWLINENEWLINE# ### IntegerNEWLINEinteger_variable = 1NEWLINEprint(integer_variable)NEWLINE# print('Integer')NEWLINE# print(integer_variable, type(integer_variable), id(integer_variable))NEWLINEfloat_variable = 3.4NEWLINEprint(float_variable)NEWLINE# ### StringNEWLINEstring_variable_5 = """NEWLINEThis is my Test multi liner string. NEWLINECan write anything for as a TEST code.NEWLINEAnyone can refer this TEST codeNEWLINEIf any changes are needed then need to informNEWLINEthen commit. NEWLINE"""NEWLINEprint(string_variable_5)NEWLINE# ### BooleanNEWLINETrue_Python = TrueNEWLINEFalse_Python = FalseNEWLINEprint(True_Python, type(True_Python), id(True_Python))NEWLINEprint(False_Python, type(False_Python), id(False_Python))NEWLINENEWLINE# ### NoneNEWLINENone_variable = NoneNEWLINEprint(None_variable, type(None_variable), id(None_variable))NEWLINENEWLINE# ### ListNEWLINElist_variable =['Kiran Kakde', 'home address', '7760675006']NEWLINEprint(list_variable)NEWLINENEWLINElist_variable =['home address','Kiran Kakde','7760675006']NEWLINEprint(list_variable)NEWLINENEWLINElist_variable =['Kiran Kakde','7760675006','home address']NEWLINEprint(list_variable)NEWLINENEWLINENEWLINElist_variable =['7760675006','Kiran Kakde','home address']NEWLINEprint(list_variable)NEWLINENEWLINElist_variable =['7760675006','home address','Kiran Kakde']NEWLINEprint(list_variable)NEWLINENEWLINE
from blocktopus.blocks.declarations import machine_declarationNEWLINENEWLINEclass machine_startech_powerremotecontrol (machine_declaration):NEWLINE def getMachineClass (self):NEWLINE from octopus.manufacturer import startechNEWLINE return startech.PowerRemoteControl
import os.pathNEWLINENEWLINEimport torchNEWLINEimport seaborn as snsNEWLINEfrom pandas import DataFrameNEWLINEfrom torch.utils.data import DataLoaderNEWLINEfrom transformers import RobertaTokenizerNEWLINENEWLINEfrom bond.data import DatasetName, DatasetType, SubTokenDataset, load_dataset, load_tags_dictNEWLINEfrom bond.utils import ner_scoresNEWLINENEWLINENEWLINEdef plot_distant_dataset_stats(dataset_name: DatasetName) -> None:NEWLINE    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')  # for loading datasets - doesn't really matter what tokenizer to useNEWLINENEWLINE    distant_dataset = load_dataset(dataset_name, DatasetType.DISTANT, tokenizer, 'roberta-base', 128)NEWLINE    gold_dataset = load_dataset(dataset_name, DatasetType.TRAIN, tokenizer, 'roberta-base', 128)NEWLINENEWLINE    distant_labels = []NEWLINE    for _, labels, mask, _ in DataLoader(distant_dataset, batch_size=1):NEWLINE        distant_labels.extend(labels.masked_select(mask > 0).tolist())NEWLINENEWLINE    gold_labels = []NEWLINE    for _, labels, mask, _ in DataLoader(gold_dataset, batch_size=1):NEWLINE        gold_labels.extend(labels.masked_select(mask > 0).tolist())NEWLINENEWLINE    stats = ner_scores(gold_labels, distant_labels, load_tags_dict(dataset_name))NEWLINE    print(stats)  # TODO: do actual stats visualizationNEWLINENEWLINENEWLINEdef score_cached_dataset(dataset_path: str) -> None:NEWLINE    cached_name = os.path.basename(dataset_path)NEWLINE    info = cached_name.split('_')NEWLINE    tokenizer = RobertaTokenizer.from_pretrained(info[-2])NEWLINE    dataset_name = DatasetName(info[0])NEWLINE    max_seq_len = int(info[-1][3:])NEWLINENEWLINE    distant_dataset: SubTokenDataset = torch.load(dataset_path)NEWLINE    gold_dataset = load_dataset(dataset_name, DatasetType.TRAIN, tokenizer, 'roberta-base', max_seq_len)NEWLINENEWLINE    distant_labels = []NEWLINE    for _, _, _, labels, mask, _, _ in DataLoader(distant_dataset, batch_size=1, collate_fn=distant_dataset.collate_fn):NEWLINE        distant_labels.extend(labels.masked_select(mask).tolist())NEWLINENEWLINE    gold_labels = []NEWLINE    for _, _, _, labels, mask, _, _ in DataLoader(gold_dataset, batch_size=1, collate_fn=gold_dataset.collate_fn):NEWLINE        gold_labels.extend(labels.masked_select(mask).tolist())NEWLINENEWLINE    assert len(gold_labels) == len(distant_labels)NEWLINE    stats = ner_scores(gold_labels, distant_labels, load_tags_dict(dataset_name))NEWLINE    print(stats)NEWLINE
# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.NEWLINE#NEWLINE# This work is made available under the Nvidia Source Code License-NC.NEWLINE# To view a copy of this license, check out LICENSE.mdNEWLINE# import torchNEWLINEimport mathNEWLINENEWLINEfrom torch.optim.optimizer import Optimizer, requiredNEWLINENEWLINENEWLINEclass Fromage(Optimizer):NEWLINE r"""Fromage optimizer implementation (https://arxiv.org/abs/2002.03432)"""NEWLINENEWLINE def __init__(self, params, lr=required, momentum=0):NEWLINE if lr is not required and lr < 0.0:NEWLINE raise ValueError("Invalid learning rate: {}".format(lr))NEWLINE defaults = dict(lr=lr, momentum=momentum)NEWLINE super(Fromage, self).__init__(params, defaults)NEWLINENEWLINE def step(self, closure=None):NEWLINE r"""Performs a single optimization step.NEWLINENEWLINE Args:NEWLINE closure (callable, optional): A closure that reevaluates the modelNEWLINE and returns the loss.NEWLINE """NEWLINE loss = NoneNEWLINE if closure is not None:NEWLINE loss = closure()NEWLINENEWLINE for group in self.param_groups:NEWLINE for p in group['params']:NEWLINE if p.grad is None:NEWLINE continueNEWLINE d_p = p.grad.dataNEWLINE d_p_norm = p.grad.norm()NEWLINE p_norm = p.norm()NEWLINE if p_norm > 0.0 and d_p_norm > 0.0:NEWLINE p.data.add_(-group['lr'], d_p * (p_norm / d_p_norm))NEWLINE else:NEWLINE p.data.add_(-group['lr'], d_p)NEWLINE p.data /= math.sqrt(1 + group['lr'] ** 2)NEWLINENEWLINE return lossNEWLINE
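NEWLINENEWLINE# Hedged usage sketch (not part of the original file): fitting a tiny linearNEWLINE# model with the Fromage optimizer defined above.  The model, data andNEWLINE# learning rate are invented for illustration.NEWLINEimport torchNEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE    torch.manual_seed(0)NEWLINE    model = torch.nn.Linear(3, 1)NEWLINE    optimizer = Fromage(model.parameters(), lr=0.01)NEWLINE    inputs, targets = torch.randn(16, 3), torch.randn(16, 1)NEWLINE    for _ in range(5):NEWLINE        optimizer.zero_grad()NEWLINE        loss = torch.nn.functional.mse_loss(model(inputs), targets)NEWLINE        loss.backward()NEWLINE        optimizer.step()NEWLINE        print(loss.item())NEWLINE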
"""NEWLINE OpenVINO DL WorkbenchNEWLINE Script to check internet connectionNEWLINENEWLINE Copyright (c) 2020 Intel CorporationNEWLINENEWLINE Licensed under the Apache License, Version 2.0 (the "License");NEWLINE you may not use this file except in compliance with the License.NEWLINE You may obtain a copy of the License atNEWLINE http://www.apache.org/licenses/LICENSE-2.0NEWLINE Unless required by applicable law or agreed to in writing, softwareNEWLINE distributed under the License is distributed on an "AS IS" BASIS,NEWLINE WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE See the License for the specific language governing permissions andNEWLINE limitations under the License.NEWLINE"""NEWLINEfrom cpuinfo import get_cpu_infoNEWLINEfrom psutil import cpu_count, cpu_freqNEWLINENEWLINEif __name__ == '__main__':NEWLINE FULL_CPU_NAME = get_cpu_info()['brand_raw']NEWLINE print(f'Full CPU name is {FULL_CPU_NAME}')NEWLINE print(f'CPU cores number: {cpu_count(logical=False)}')NEWLINE CPU_FREQUENCY_RANGE = cpu_freq(percpu=False)NEWLINE CPU_FREQUENCY_UNITS = 'GHz'NEWLINE MHZ_IN_GHZ = 1000NEWLINE if CPU_FREQUENCY_RANGE.min == CPU_FREQUENCY_RANGE.max:NEWLINE CPU_FREQUENCY = '{:.1f} {units}'.format(CPU_FREQUENCY_RANGE.min / MHZ_IN_GHZ, units=CPU_FREQUENCY_UNITS)NEWLINE else:NEWLINE CPU_FREQUENCY = '{:.1f}-{:.1f} {units}'.format(CPU_FREQUENCY_RANGE.min / MHZ_IN_GHZ,NEWLINE CPU_FREQUENCY_RANGE.max / MHZ_IN_GHZ, units=CPU_FREQUENCY_UNITS)NEWLINE print(f'CPU frequency range: {CPU_FREQUENCY}')NEWLINE
from fastapi import FastAPINEWLINEfrom fastapi.middleware.cors import CORSMiddlewareNEWLINEimport uvicornNEWLINENEWLINEfrom app.api import predict, viz, us_map, us_bar, us_demo_pie, us_pie_vic, us_non_lethal, us_non_lethat_line, top_x_listNEWLINENEWLINEapp = FastAPI(NEWLINE title='HRF-TEAM-D-Lab28 DS API',NEWLINE description='The ultimate api for Data Visualization',NEWLINE version='0.5',NEWLINE docs_url='/',NEWLINE)NEWLINEapp.include_router(us_demo_pie.router)NEWLINEapp.include_router(us_map.router)NEWLINEapp.include_router(us_bar.router)NEWLINEapp.include_router(us_pie_vic.router)NEWLINEapp.include_router(us_non_lethal.router)NEWLINEapp.include_router(us_non_lethat_line.router)NEWLINEapp.include_router(top_x_list.router)NEWLINEapp.add_middleware(NEWLINE CORSMiddleware,NEWLINE allow_origins=['*'],NEWLINE allow_credentials=True,NEWLINE allow_methods=['*'],NEWLINE allow_headers=['*'],NEWLINE)NEWLINENEWLINEif __name__ == '__main__':NEWLINE uvicorn.run(app)NEWLINE
"""netflixclone URL ConfigurationNEWLINENEWLINEThe `urlpatterns` list routes URLs to views. For more information please see:NEWLINE https://docs.djangoproject.com/en/1.11/topics/http/urls/NEWLINEExamples:NEWLINEFunction viewsNEWLINE 1. Add an import: from my_app import viewsNEWLINE 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')NEWLINEClass-based viewsNEWLINE 1. Add an import: from other_app.views import HomeNEWLINE 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')NEWLINEIncluding another URLconfNEWLINE 1. Import the include() function: from django.conf.urls import url, includeNEWLINE 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))NEWLINE"""NEWLINEfrom django.conf.urls import url, includeNEWLINEfrom django.contrib import adminNEWLINENEWLINEurlpatterns = [NEWLINE url(r'^admin/', admin.site.urls),NEWLINE url(r'', include('netflix.urls'))NEWLINE]NEWLINE
from py_asciimath import PROJECT_ROOTNEWLINEfrom py_asciimath.translator.translator import (NEWLINE ASCIIMath2MathML,NEWLINE ASCIIMath2Tex,NEWLINE MathML2Tex,NEWLINE)NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE print("ASCIIMath to MathML")NEWLINE asciimath2mathml = ASCIIMath2MathML(log=False, inplace=True)NEWLINE parsed = asciimath2mathml.translate(NEWLINE PROJECT_ROOT + "/../examples/asciimath_exp.txt",NEWLINE displaystyle=True,NEWLINE dtd="mathml2",NEWLINE dtd_validation=True,NEWLINE from_file=True,NEWLINE output="string",NEWLINE network=True,NEWLINE pprint=False,NEWLINE to_file=None,NEWLINE xml_declaration=True,NEWLINE xml_pprint=True,NEWLINE )NEWLINENEWLINE print(parsed, "\n\nASCIIMath to LaTeX")NEWLINE asciimath2tex = ASCIIMath2Tex(log=False, inplace=True)NEWLINE parsed = asciimath2tex.translate(NEWLINE PROJECT_ROOT + "/../examples/asciimath_exp.txt",NEWLINE displaystyle=True,NEWLINE from_file=True,NEWLINE pprint=False,NEWLINE to_file=None,NEWLINE )NEWLINENEWLINE print(parsed, "\n\nMathML to LaTeX")NEWLINE mathml2tex = MathML2Tex()NEWLINE parsed = mathml2tex.translate(NEWLINE PROJECT_ROOT + "/../examples/mathml_exp.xml",NEWLINE from_file=True,NEWLINE network=False,NEWLINE to_file=None,NEWLINE )NEWLINE print(parsed)NEWLINE
# TestSwiftUnknownReference.pyNEWLINE#NEWLINE# This source file is part of the Swift.org open source projectNEWLINE#NEWLINE# Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authorsNEWLINE# Licensed under Apache License v2.0 with Runtime Library ExceptionNEWLINE#NEWLINE# See https://swift.org/LICENSE.txt for license informationNEWLINE# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authorsNEWLINE#NEWLINE# ------------------------------------------------------------------------------NEWLINEimport lldbNEWLINEfrom lldbsuite.test.decorators import *NEWLINEimport lldbsuite.test.lldbtest as lldbtestNEWLINEimport lldbsuite.test.lldbutil as lldbutilNEWLINEimport osNEWLINEimport unittest2NEWLINENEWLINENEWLINEclass TestSwiftUnknownReference(lldbtest.TestBase):NEWLINENEWLINE mydir = lldbtest.TestBase.compute_mydir(__file__)NEWLINENEWLINE def check_class(self, var_self):NEWLINE lldbutil.check_variable(self, var_self, num_children=2)NEWLINE m_base_string = var_self.GetChildMemberWithName("base_string")NEWLINE m_string = var_self.GetChildMemberWithName("string")NEWLINE lldbutil.check_variable(self, m_base_string, summary='"hello"')NEWLINE lldbutil.check_variable(self, m_string, summary='"world"')NEWLINENEWLINE NEWLINE @swiftTestNEWLINE def test_unknown_objc_ref(self):NEWLINE """Test unknown references to Objective-C objects."""NEWLINE self.build()NEWLINE target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(NEWLINE self, 'break here', lldb.SBFileSpec('main.swift'))NEWLINENEWLINE frame = thread.frames[0]NEWLINE var_self = frame.FindVariable("self")NEWLINE m_pure_ref = var_self.GetChildMemberWithName("pure_ref")NEWLINE self.check_class(m_pure_ref)NEWLINE m_objc_ref = var_self.GetChildMemberWithName("objc_ref")NEWLINE self.check_class(m_objc_ref)NEWLINENEWLINE
import pytestNEWLINEimport uqbar.stringsNEWLINENEWLINEimport supriyaNEWLINENEWLINENEWLINEdef test_1():NEWLINE group_a = supriya.realtime.Group()NEWLINE group_b = supriya.realtime.Group()NEWLINE group_c = supriya.realtime.Group()NEWLINE assert group_a.node_id is NoneNEWLINE assert group_b.node_id is NoneNEWLINE assert group_c.node_id is NoneNEWLINE request = supriya.commands.GroupNewRequest(NEWLINE items=[NEWLINE supriya.commands.GroupNewRequest.Item(NEWLINE node_id=group_b, target_node_id=group_aNEWLINE ),NEWLINE supriya.commands.GroupNewRequest.Item(NEWLINE node_id=group_c, target_node_id=group_bNEWLINE ),NEWLINE ]NEWLINE )NEWLINE assert request.items[0].node_id is group_bNEWLINE assert request.items[0].target_node_id is group_aNEWLINE assert request.items[1].node_id is group_cNEWLINE assert request.items[1].target_node_id is group_bNEWLINE with pytest.raises(TypeError):NEWLINE request.to_osc()NEWLINENEWLINENEWLINEdef test_2(server):NEWLINE """NEWLINE Local application allocates the groups' IDs before we generate the OSCNEWLINE message.NEWLINE """NEWLINE group_a = supriya.realtime.Group().allocate()NEWLINE group_b = supriya.realtime.Group()NEWLINE group_c = supriya.realtime.Group()NEWLINE assert group_a.node_id == 1000NEWLINE assert group_b.node_id is NoneNEWLINE assert group_c.node_id is NoneNEWLINE server_state = str(server.query(False))NEWLINE assert server_state == uqbar.strings.normalize(NEWLINE """NEWLINE NODE TREE 0 groupNEWLINE 1 groupNEWLINE 1000 groupNEWLINE """NEWLINE )NEWLINE request = supriya.commands.GroupNewRequest(NEWLINE items=[NEWLINE supriya.commands.GroupNewRequest.Item(NEWLINE add_action="add_to_head", node_id=group_b, target_node_id=group_aNEWLINE ),NEWLINE supriya.commands.GroupNewRequest.Item(NEWLINE add_action="add_to_head", node_id=group_c, target_node_id=group_bNEWLINE ),NEWLINE ]NEWLINE )NEWLINE with server.osc_protocol.capture() as transcript:NEWLINE request.communicate()NEWLINE server.sync()NEWLINE assert [(_.label, _.message) for _ in transcript] == [NEWLINE ("S", supriya.osc.OscMessage("/g_new", 1001, 0, 1000, 1002, 0, 1001)),NEWLINE ("R", supriya.osc.OscMessage("/n_go", 1001, 1000, -1, -1, 1, -1, -1)),NEWLINE ("R", supriya.osc.OscMessage("/n_go", 1002, 1001, -1, -1, 1, -1, -1)),NEWLINE ("S", supriya.osc.OscMessage("/sync", 0)),NEWLINE ("R", supriya.osc.OscMessage("/synced", 0)),NEWLINE ]NEWLINE server_state = str(server.query(False))NEWLINE assert server_state == uqbar.strings.normalize(NEWLINE """NEWLINE NODE TREE 0 groupNEWLINE 1 groupNEWLINE 1000 groupNEWLINE 1001 groupNEWLINE 1002 groupNEWLINE """NEWLINE )NEWLINE assert group_b.node_id == 1001NEWLINE assert group_b.parent is group_aNEWLINE assert group_b.is_allocatedNEWLINE assert group_c.node_id == 1002NEWLINE assert group_c.parent is group_bNEWLINE assert group_c.is_allocatedNEWLINENEWLINENEWLINEdef test_3(server):NEWLINE """NEWLINE Communicating without a pre-existing group creates that group during localNEWLINE application.NEWLINE """NEWLINE group_a = supriya.realtime.Group().allocate()NEWLINE server_state = str(server.query(False))NEWLINE assert server_state == uqbar.strings.normalize(NEWLINE """NEWLINE NODE TREE 0 groupNEWLINE 1 groupNEWLINE 1000 groupNEWLINE """NEWLINE )NEWLINE request = supriya.commands.GroupNewRequest(NEWLINE items=[NEWLINE supriya.commands.GroupNewRequest.Item(NEWLINE add_action="add_to_head", node_id=1001, target_node_id=group_aNEWLINE ),NEWLINE supriya.commands.GroupNewRequest.Item(NEWLINE add_action="add_to_head", node_id=1002, target_node_id=1001NEWLINE ),NEWLINE ]NEWLINE 
)NEWLINE with server.osc_protocol.capture() as transcript:NEWLINE request.communicate()NEWLINE server.sync()NEWLINE assert [(_.label, _.message) for _ in transcript] == [NEWLINE ("S", supriya.osc.OscMessage("/g_new", 1001, 0, 1000, 1002, 0, 1001)),NEWLINE ("R", supriya.osc.OscMessage("/n_go", 1001, 1000, -1, -1, 1, -1, -1)),NEWLINE ("R", supriya.osc.OscMessage("/n_go", 1002, 1001, -1, -1, 1, -1, -1)),NEWLINE ("S", supriya.osc.OscMessage("/sync", 0)),NEWLINE ("R", supriya.osc.OscMessage("/synced", 0)),NEWLINE ]NEWLINE server_state = str(server.query(False))NEWLINE assert server_state == uqbar.strings.normalize(NEWLINE """NEWLINE NODE TREE 0 groupNEWLINE 1 groupNEWLINE 1000 groupNEWLINE 1001 groupNEWLINE 1002 groupNEWLINE """NEWLINE )NEWLINE group_b = server[1001]NEWLINE group_c = server[1002]NEWLINE assert group_b.parent is group_aNEWLINE assert group_c.parent is group_bNEWLINE
import rlkit.misc.hyperparameter as hypNEWLINEfrom rlkit.demos.source.dict_to_mdp_path_loader import EncoderDictToMDPPathLoaderNEWLINEfrom rlkit.launchers.experiments.ashvin.awac_rig import awac_rig_experimentNEWLINEfrom rlkit.launchers.launcher_util import run_experimentNEWLINEfrom rlkit.launchers.arglauncher import run_variantsNEWLINEfrom rlkit.torch.sac.policies import GaussianPolicy, GaussianMixturePolicyNEWLINEfrom rlkit.envs.encoder_wrappers import ConditionalEncoderWrappedEnvNEWLINEfrom sawyer_control.envs.sawyer_grip import SawyerGripEnvNEWLINE#from sawyer_control.envs.sawyer_grip_stub import SawyerGripEnvNEWLINEfrom rlkit.torch.networks import ClampNEWLINEfrom rlkit.torch.vae.vq_vae import VQ_VAENEWLINEfrom rlkit.torch.vae.vq_vae_trainer import VQ_VAETrainerNEWLINEfrom rlkit.torch.grill.common import train_vqvaeNEWLINENEWLINEpath_func = lambda name: '/media/ashvin/data2/data/baseline/'+ nameNEWLINENEWLINEall_demos = [NEWLINE dict(path=path_func('fixed_drawer_demos.npy'), obs_dict=True, is_demo=True, data_split=0.2),NEWLINE dict(path=path_func('fixed_pot_demos.npy'), obs_dict=True, is_demo=True, data_split=0.2),NEWLINE dict(path=path_func('fixed_pot_extra1_demos.npy'), obs_dict=True, is_demo=True, data_split=0.2),NEWLINE dict(path=path_func('fixed_pnp_demos.npy'), obs_dict=True, is_demo=True, data_split=0.2),NEWLINE dict(path=path_func('fixed_tray_demos.npy'), obs_dict=True, is_demo=True, data_split=0.2),NEWLINE]NEWLINENEWLINENEWLINEall_demos = [NEWLINE dict(path=path_func('fixed_drawer_demos.npy'), obs_dict=True, is_demo=True,),NEWLINE dict(path=path_func('fixed_pot_demos.npy'), obs_dict=True, is_demo=True,),NEWLINE dict(path=path_func('fixed_pot_extra1_demos.npy'), obs_dict=True, is_demo=True,),NEWLINE dict(path=path_func('fixed_pnp_demos.npy'), obs_dict=True, is_demo=True,),NEWLINE dict(path=path_func('fixed_tray_demos.npy'), obs_dict=True, is_demo=True,),NEWLINE]NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE variant = dict(NEWLINE imsize=48,NEWLINE env_class=SawyerGripEnv,NEWLINE env_kwargs=dict(NEWLINE action_mode='position',NEWLINE config_name='ashvin_config',NEWLINE reset_free=False,NEWLINE position_action_scale=0.05,NEWLINE max_speed=0.4,NEWLINE step_sleep_time=0.2,NEWLINE crop_version_str="crop_val_torch",NEWLINE ),NEWLINE policy_class=GaussianPolicy,NEWLINE policy_kwargs=dict(NEWLINE hidden_sizes=[256, 256, 256, 256, ],NEWLINE max_log_std=0,NEWLINE min_log_std=-6,NEWLINE std_architecture="values",NEWLINE ),NEWLINENEWLINE qf_kwargs=dict(NEWLINE hidden_sizes=[256, 256],NEWLINE ),NEWLINENEWLINE trainer_kwargs=dict(NEWLINE discount=0.99,NEWLINE soft_target_tau=5e-3,NEWLINE target_update_period=1,NEWLINE policy_lr=3e-4,NEWLINE qf_lr=3E-4,NEWLINE reward_scale=1,NEWLINE beta=1,NEWLINE use_automatic_entropy_tuning=False,NEWLINE alpha=0,NEWLINENEWLINE bc_num_pretrain_steps=0,NEWLINE q_num_pretrain1_steps=0,NEWLINE q_num_pretrain2_steps=25001, #25001 #HERENEWLINE policy_weight_decay=1e-4,NEWLINE q_weight_decay=0,NEWLINENEWLINE rl_weight=1.0,NEWLINE use_awr_update=True,NEWLINE use_reparam_update=False,NEWLINE compute_bc=True,NEWLINE reparam_weight=0.0,NEWLINE awr_weight=1.0,NEWLINE bc_weight=0.0,NEWLINENEWLINE reward_transform_kwargs=None,NEWLINE terminal_transform_kwargs=None,NEWLINE ),NEWLINENEWLINE max_path_length=75, #50NEWLINE algo_kwargs=dict(NEWLINE batch_size=1024, #1024NEWLINE num_epochs=101, #1001NEWLINE num_eval_steps_per_epoch=150, #500NEWLINE num_expl_steps_per_train_loop=600, #500NEWLINE num_trains_per_train_loop=600, #500NEWLINE 
min_num_steps_before_training=150, #4000NEWLINE ),NEWLINE replay_buffer_kwargs=dict(NEWLINE fraction_future_context=0.6,NEWLINE fraction_distribution_context=0.1, # TODO: Try less?NEWLINE max_size=int(5E5), # HERE# HERE# HERE# HERE# HERE# HERE# HERE# HERE# HERE (DOUBLE CHECK THAT DEMOS FIT!!!!)NEWLINE ),NEWLINE demo_replay_buffer_kwargs=dict(NEWLINE fraction_future_context=0.6,NEWLINE fraction_distribution_context=0.1, # TODO: Try less?NEWLINE ),NEWLINE reward_kwargs=dict(NEWLINE reward_type='sparse',NEWLINE epsilon=1.0,NEWLINE ),NEWLINENEWLINE observation_key='latent_observation',NEWLINE desired_goal_key='latent_desired_goal',NEWLINE save_video=True,NEWLINE save_video_kwargs=dict(NEWLINE save_video_period=1,NEWLINE pad_color=0,NEWLINE ),NEWLINE NEWLINE encoder_wrapper=ConditionalEncoderWrappedEnv,NEWLINE reset_keys_map=dict(NEWLINE image_observation="initial_latent_state"NEWLINE ),NEWLINENEWLINE path_loader_class=EncoderDictToMDPPathLoader,NEWLINE path_loader_kwargs=dict(NEWLINE recompute_reward=True,NEWLINE ),NEWLINENEWLINE renderer_kwargs=dict(NEWLINE create_image_format='HWC',NEWLINE output_image_format='CWH',NEWLINE flatten_image=True,NEWLINE width=48,NEWLINE height=48,NEWLINE ),NEWLINENEWLINE add_env_demos=False,NEWLINE add_env_offpolicy_data=False,NEWLINENEWLINE load_demos=True,NEWLINE pretrain_policy=True,NEWLINE pretrain_rl=True,NEWLINENEWLINE evaluation_goal_sampling_mode="presampled_images",NEWLINE exploration_goal_sampling_mode="conditional_vae_prior",NEWLINE train_vae_kwargs=dict(NEWLINE imsize=48,NEWLINE beta=1,NEWLINE beta_schedule_kwargs=dict(NEWLINE x_values=(0, 250),NEWLINE y_values=(0, 100),NEWLINE ),NEWLINE num_epochs=1501, #1501NEWLINE embedding_dim=5,NEWLINE dump_skew_debug_plots=False,NEWLINE decoder_activation='sigmoid',NEWLINE use_linear_dynamics=False,NEWLINE generate_vae_dataset_kwargs=dict(NEWLINE N=1000,NEWLINE n_random_steps=2,NEWLINE test_p=.9,NEWLINE dataset_path={NEWLINE 'train': 'demos/icra2021/dataset_v1_train.npy',NEWLINE 'test': 'demos/icra2021/dataset_v1_test.npy',NEWLINE },NEWLINE augment_data=False,NEWLINE use_cached=False,NEWLINE show=False,NEWLINE oracle_dataset=False,NEWLINE oracle_dataset_using_set_to_goal=False,NEWLINE non_presampled_goal_img_is_garbage=False,NEWLINE random_rollout_data=True,NEWLINE random_rollout_data_set_to_goal=True,NEWLINE conditional_vae_dataset=True,NEWLINE save_trajectories=False,NEWLINE enviorment_dataset=False,NEWLINE tag="ccrig_tuning_orig_network",NEWLINE ),NEWLINE vae_trainer_class=VQ_VAETrainer,NEWLINE vae_class=VQ_VAE,NEWLINE vae_kwargs=dict(NEWLINE input_channels=3,NEWLINE imsize=48,NEWLINE ),NEWLINE algo_kwargs=dict(NEWLINE key_to_reconstruct='x_t',NEWLINE start_skew_epoch=5000,NEWLINE is_auto_encoder=False,NEWLINE batch_size=128,NEWLINE lr=1e-3,NEWLINE skew_config=dict(NEWLINE method='vae_prob',NEWLINE power=0,NEWLINE ),NEWLINE weight_decay=0.0,NEWLINE skew_dataset=False,NEWLINE priority_function_kwargs=dict(NEWLINE decoder_distribution='gaussian_identity_variance',NEWLINE sampling_method='importance_sampling',NEWLINE num_latents_to_sample=10,NEWLINE ),NEWLINE use_parallel_dataloading=False,NEWLINE ),NEWLINENEWLINE save_period=10,NEWLINE ),NEWLINE train_model_func=train_vqvae,NEWLINENEWLINE presampled_goal_kwargs=dict(NEWLINE eval_goals='/media/ashvin/data2/data/val/v1/ccvae_pot1_eval_goals.pkl',NEWLINE expl_goals=None,NEWLINE ),NEWLINE launcher_config=dict(NEWLINE unpack_variant=True,NEWLINE region='us-west-1',NEWLINE ),NEWLINE logger_config=dict(NEWLINE snapshot_mode='gap',NEWLINE snapshot_gap=1,NEWLINE 
),NEWLINE        ccvae_or_cbigan_exp=True,NEWLINE        pickle_paths=True,NEWLINENEWLINE        pretrained_vae_path=path_func('vae.pt'),NEWLINE        pretrained_algo_path=path_func('agent_sparse_1.pt'), #agent_sparse_1.pt, agent_sparse_2.pt, agent_dense.ptNEWLINE    )NEWLINENEWLINE    search_space = {NEWLINE        "seed": range(1),NEWLINE        'path_loader_kwargs.demo_paths': [all_demos], #CHANGEDNEWLINENEWLINE        'reward_kwargs.reward_type': ['sparse',], # TRY SPARSE (EPS=1), SPARSE (EPS=2), DENSE (PROB NOT GONNA WORK)NEWLINE        'trainer_kwargs.beta': [0.3],NEWLINE        'num_pybullet_objects':[None],NEWLINENEWLINE        'policy_kwargs.min_log_std': [-6],NEWLINE        'trainer_kwargs.awr_weight': [1.0],NEWLINE        'trainer_kwargs.awr_use_mle_for_vf': [True],NEWLINE        'trainer_kwargs.awr_sample_actions': [False],NEWLINE        'trainer_kwargs.clip_score': [2],NEWLINE        'trainer_kwargs.awr_min_q': [True],NEWLINE        'trainer_kwargs.reward_transform_kwargs': [None, ],NEWLINE        'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0)],NEWLINE        #'qf_kwargs.output_activation': [Clamp(max=0)],NEWLINE    }NEWLINE    sweeper = hyp.DeterministicHyperparameterSweeper(NEWLINE        search_space, default_parameters=variant,NEWLINE    )NEWLINENEWLINE    variants = []NEWLINE    for variant in sweeper.iterate_hyperparameters():NEWLINE        if 'sparse' in variant['pretrained_algo_path']:NEWLINE            variant['qf_kwargs']['output_activation'] = Clamp(max=0)NEWLINENEWLINE        if variant['pretrained_algo_path'] == path_func('agent_sparse_1.pt'):NEWLINE            variant['reward_kwargs'] = dict(reward_type='sparse', epsilon=1.0)NEWLINE        if variant['pretrained_algo_path'] == path_func('agent_sparse_2.pt'):NEWLINE            variant['reward_kwargs'] = dict(reward_type='sparse', epsilon=2.0)NEWLINE        if variant['pretrained_algo_path'] == path_func('agent_dense.pt'):NEWLINE            variant['reward_kwargs'] = dict(reward_type='dense', epsilon=1.0)NEWLINENEWLINE        variants.append(variant)NEWLINENEWLINE    run_variants(awac_rig_experiment, variants, run_id=10) #HERENEWLINE
from typing import Any, DictNEWLINENEWLINEfrom flask import FlaskNEWLINEfrom flask_marshmallow import MarshmallowNEWLINEfrom marshmallow import fields, post_loadNEWLINENEWLINEfrom .model import UserNEWLINENEWLINE__all__ = ["org_schema", "user_schema", "workspace_schema", "new_user_schema",NEWLINE "orgs_schema", "ma", "link_workspace_schema",NEWLINE "new_workspace_schema", "workspaces_schema", "orgs_schema_tiny",NEWLINE "org_schema_short", "workspaces_schema_tiny", "my_orgs_schema",NEWLINE "workspace_schema_short", "my_workspaces_schema",NEWLINE "experiments_schema", "my_experiments_schema", "setup_schemas",NEWLINE "my_executions_schema", "my_schedules_schema",NEWLINE "user_profile_schema", "access_token_schema", "org_name_schema",NEWLINE "access_tokens_schema", "new_access_token_schema",NEWLINE "created_access_token_schema", "profile_new_org_schema",NEWLINE "profile_org_schema", "org_settings_schema",NEWLINE "org_info_schema", "link_org_schema",NEWLINE "profile_workspace_schema", "profile_workspaces_schema",NEWLINE "profile_new_workspace_schema",NEWLINE "workspace_collaborators_schema", "current_user_schema",NEWLINE "schedules_schema", "experiment_schema",NEWLINE "light_access_tokens_schema"]NEWLINENEWLINEma = Marshmallow()NEWLINENEWLINENEWLINEdef setup_schemas(app: Flask):NEWLINE return ma.init_app(app)NEWLINENEWLINENEWLINEclass WorkspaceSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE org_name = fields.String(required=True)NEWLINE name = fields.String(required=True)NEWLINE kind = fields.String(NEWLINE data_key="type",NEWLINE required=True,NEWLINE validate=lambda k: k in ("personal", "protected", "public")NEWLINE )NEWLINE created_on = fields.DateTime()NEWLINE owner = fields.Boolean(default=False)NEWLINE member = fields.Boolean(default=False)NEWLINE visibility = fields.Dict(NEWLINE keys=fields.Str(validate=lambda k: k in ("execution", "experiment")),NEWLINE values=fields.String(NEWLINE validate=lambda v: v in (NEWLINE "private", "protected", "public",NEWLINE "none", "status", "full"NEWLINE )NEWLINE )NEWLINE )NEWLINE url = ma.AbsoluteURLFor('workspace.get_one', workspace_id='<id>')NEWLINE links = ma.Hyperlinks({NEWLINE 'self': ma.URLFor('workspace.get_one', workspace_id='<id>')NEWLINE })NEWLINENEWLINENEWLINEclass NewWorkspaceSchema(ma.Schema):NEWLINE name = fields.String(required=True)NEWLINE org = fields.UUID(required=True)NEWLINE kind = fields.String(NEWLINE data_key="type",NEWLINE attribute="type",NEWLINE missing="public",NEWLINE validate=lambda k: k in ("personal", "protected", "public")NEWLINE )NEWLINE visibility = fields.Dict(NEWLINE keys=fields.Str(validate=lambda k: k in ("execution", "experiment")),NEWLINE values=fields.String(NEWLINE validate=lambda v: v in (NEWLINE "private", "protected", "public",NEWLINE "none", "status", "full"NEWLINE )NEWLINE )NEWLINE )NEWLINENEWLINENEWLINEclass LinkWorkpaceSchema(ma.Schema):NEWLINE owner = fields.Boolean(default=False)NEWLINENEWLINENEWLINEclass OrgSettingsSchema(ma.Schema):NEWLINE description = fields.String(allow_none=True)NEWLINE url = fields.URL(allow_none=True)NEWLINE logo = fields.URL(allow_none=True)NEWLINE email = fields.Email(allow_none=True)NEWLINE visibility = fields.Dict(NEWLINE allow_none=True,NEWLINE keys=fields.Str(validate=lambda k: k in ("execution", "experiment")),NEWLINE values=fields.Dict(NEWLINE keys=fields.Str(NEWLINE validate=lambda k: k in ("anonymous", "members")),NEWLINE values=fields.String(NEWLINE validate=lambda v: v 
in (NEWLINE "private", "protected", "public",NEWLINE "none", "status", "full"NEWLINE )NEWLINE )NEWLINE )NEWLINE )NEWLINENEWLINENEWLINEclass OrganizationSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE name = fields.String(required=True)NEWLINE owner = fields.Boolean(default=False)NEWLINE member = fields.Boolean(default=False)NEWLINE created_on = fields.DateTime()NEWLINE kind = fields.String(data_key="type", required=True)NEWLINE workspaces = fields.Nested(WorkspaceSchema, many=True)NEWLINE settings = fields.Raw(allow_none=True)NEWLINE url = ma.AbsoluteURLFor('org.get_one', org_id='<id>')NEWLINE links = ma.Hyperlinks({NEWLINE 'self': ma.URLFor('org.get_one', org_id='<id>')NEWLINE })NEWLINENEWLINENEWLINEclass NewOrgSchema(ma.Schema):NEWLINE name = fields.String(required=True)NEWLINE settings = fields.Nested(OrgSettingsSchema, allow_none=True)NEWLINE visibility = fields.Dict(NEWLINE keys=fields.Str(validate=lambda k: k in ("execution", "experiment")),NEWLINE values=fields.String(NEWLINE validate=lambda v: v in (NEWLINE "private", "protected", "public",NEWLINE "none", "status", "full"NEWLINE )NEWLINE )NEWLINE )NEWLINENEWLINENEWLINEclass LinkOrgSchema(ma.Schema):NEWLINE owner = fields.Boolean(default=False)NEWLINENEWLINENEWLINEclass WorkspaceCollaboratorSchema(ma.Schema):NEWLINE id = fields.UUID()NEWLINE username = fields.String()NEWLINE fullname = fields.String()NEWLINE owner = fields.Boolean()NEWLINE collaborator = fields.Boolean()NEWLINE workspace_id = fields.UUID()NEWLINE workspace_name = fields.String()NEWLINENEWLINENEWLINEclass OrgMemberSchema(ma.Schema):NEWLINE id = fields.UUID()NEWLINE username = fields.String()NEWLINE fullname = fields.String()NEWLINE owner = fields.Boolean()NEWLINE member = fields.Boolean()NEWLINE org_id = fields.UUID()NEWLINE org_name = fields.String()NEWLINENEWLINENEWLINEclass UserSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=False)NEWLINE username = fields.String(required=True)NEWLINE orgs = fields.Nested(OrganizationSchema, many=True, dump_only=True)NEWLINE workspaces = fields.Nested(WorkspaceSchema, many=True, dump_only=True)NEWLINE url = ma.AbsoluteURLFor('user.get', user_id='<id>')NEWLINE links = ma.Hyperlinks({NEWLINE 'self': ma.URLFor('user.get', user_id='<id>'),NEWLINE })NEWLINENEWLINE @post_loadNEWLINE def make_user(self, data: Dict[str, Any]):NEWLINE return User(**data)NEWLINENEWLINENEWLINEclass CurrentUserSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=False)NEWLINE username = fields.String(required=True)NEWLINE is_active = fields.Boolean(required=True, dump_only=True)NEWLINE is_anonymous = fields.Boolean(required=True, dump_only=True)NEWLINE orgs = fields.Nested(OrganizationSchema, many=True, dump_only=True)NEWLINE workspaces = fields.Nested(WorkspaceSchema, many=True, dump_only=True)NEWLINENEWLINENEWLINEclass NewUserSchema(ma.Schema):NEWLINE username = fields.String(required=True)NEWLINE name = fields.String(required=False, default=None)NEWLINE email = fields.Email(required=False, default=None)NEWLINENEWLINENEWLINEclass ExperimentSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE user_id = fields.UUID(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE workspace_id = fields.UUID(required=True)NEWLINE username = fields.String()NEWLINE user_org_name = fields.String()NEWLINE org_name = fields.String()NEWLINE workspace_name = 
fields.String()NEWLINE created_date = fields.DateTime()NEWLINE updated_date = fields.DateTime()NEWLINE payload = fields.Mapping(default=None)NEWLINENEWLINENEWLINEclass ExecutionSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE user_id = fields.UUID(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE workspace_id = fields.UUID(required=True)NEWLINE execution_id = fields.UUID(required=True)NEWLINENEWLINENEWLINEclass CurrentOrgSchema(ma.Schema):NEWLINE id = fields.UUID(required=True)NEWLINE name = fields.String(required=True)NEWLINE owner = fields.Boolean(default=False)NEWLINE kind = fields.String(data_key="type", required=True)NEWLINENEWLINENEWLINEclass CurrentWorkspaceSchema(ma.Schema):NEWLINE id = fields.UUID(required=True)NEWLINE name = fields.String(required=True)NEWLINE owner = fields.Boolean(default=False)NEWLINE kind = fields.String(data_key="type", required=True)NEWLINENEWLINENEWLINEclass UserProfileSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=False)NEWLINE username = fields.String(required=True)NEWLINE name = fields.String()NEWLINE email = fields.String()NEWLINE bio = fields.String()NEWLINE company = fields.String()NEWLINENEWLINENEWLINEclass AccessTokenSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=False)NEWLINE user_id = fields.UUID()NEWLINE name = fields.String()NEWLINE jti = fields.String()NEWLINE access_token = fields.String(load_only=True)NEWLINE refresh_token = fields.String(load_only=True)NEWLINE revoked = fields.Boolean()NEWLINE issued_on = fields.DateTime()NEWLINE last_used_on = fields.DateTime()NEWLINENEWLINENEWLINEclass LightAccessTokenSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=False)NEWLINE user_id = fields.UUID()NEWLINE name = fields.String()NEWLINE revoked = fields.Boolean()NEWLINE issued_on = fields.DateTime()NEWLINE last_used_on = fields.DateTime()NEWLINENEWLINENEWLINEclass CreatedAccessTokenSchema(AccessTokenSchema):NEWLINE access_token = fields.String(load_only=False)NEWLINENEWLINENEWLINEclass NewAccessTokenSchema(ma.Schema):NEWLINE name = fields.String(required=True)NEWLINE scopes = fields.String(many=True, default=None, allow_none=True)NEWLINENEWLINENEWLINEclass PagingSchema(ma.Schema):NEWLINE prev_item = fields.Integer(default=1, data_key="prev")NEWLINE next_item = fields.Integer(default=1, data_key="next")NEWLINENEWLINENEWLINEclass ProfileWorkspaceSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE org_name = fields.String(required=True)NEWLINE name = fields.String(required=True)NEWLINE owner = fields.Boolean(default=False)NEWLINE created_on = fields.DateTime()NEWLINE kind = fields.String(data_key="type", required=True)NEWLINENEWLINENEWLINEclass ProfileWorkspacesSchema(ma.Schema):NEWLINE workspaces = fields.Nested(ProfileWorkspaceSchema, many=True)NEWLINE paging = fields.Nested(PagingSchema, default={"prev": 1, "next": 1})NEWLINENEWLINENEWLINEclass ProfileOrganizationSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE name = fields.String(required=True)NEWLINE owner = fields.Boolean(default=False)NEWLINE created_on = fields.DateTime()NEWLINE kind = fields.String(data_key="type", required=True)NEWLINENEWLINENEWLINEclass ProfileOrganizationsSchema(ma.Schema):NEWLINE orgs = 
fields.Nested(ProfileOrganizationSchema, many=True)NEWLINE paging = fields.Nested(PagingSchema, default={"prev": 1, "next": 1})NEWLINENEWLINENEWLINEclass OrgNameSchema(ma.Schema):NEWLINE name = fields.String(required=True)NEWLINENEWLINENEWLINEclass ProfileNewOrganizationSchema(ma.Schema):NEWLINE name = fields.String(required=True)NEWLINE settings = fields.Nested(OrgSettingsSchema)NEWLINENEWLINENEWLINEclass WorkspaceInfoSchema(ma.Schema):NEWLINE id = fields.UUID(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE name = fields.String(required=True)NEWLINE kind = fields.String(NEWLINE data_key="type",NEWLINE required=True,NEWLINE validate=lambda k: k in ("personal", "protected", "public")NEWLINE )NEWLINE owner = fields.Boolean(default=False)NEWLINE visibility = fields.Dict(NEWLINE keys=fields.Str(validate=lambda k: k in ("execution", "experiment")),NEWLINE values=fields.String(NEWLINE validate=lambda v: v in (NEWLINE "private", "protected", "public",NEWLINE "none", "status", "full"NEWLINE )NEWLINE )NEWLINE )NEWLINENEWLINENEWLINEclass OrganizationInfoSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE name = fields.String(required=True)NEWLINE owner = fields.Boolean(default=False)NEWLINE member = fields.Boolean(default=False)NEWLINE kind = fields.String(data_key="type", required=True)NEWLINE created_on = fields.DateTime()NEWLINE workspaces = fields.Nested(WorkspaceInfoSchema, many=True, default=None)NEWLINENEWLINENEWLINEclass ProfileNewWorkspaceSchema(ma.Schema):NEWLINE name = fields.String(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE visibility = fields.Dict(NEWLINE keys=fields.Str(validate=lambda k: k in ("execution", "experiment")),NEWLINE values=fields.String(NEWLINE validate=lambda v: v in (NEWLINE "private", "protected", "public",NEWLINE "none", "status", "full"NEWLINE )NEWLINE )NEWLINE )NEWLINENEWLINENEWLINEclass WorkspaceInfoSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE name = fields.String(required=True)NEWLINE owner = fields.Boolean(default=False)NEWLINE collaborator = fields.Boolean(default=False)NEWLINE kind = fields.String(data_key="type", required=True)NEWLINE created_on = fields.DateTime()NEWLINENEWLINENEWLINEclass ActivitySchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE user_id = fields.UUID(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE workspace_id = fields.UUID(required=True)NEWLINE execution_id = fields.UUID(required=True)NEWLINENEWLINENEWLINEclass ScheduleSchema(ma.Schema):NEWLINE class Meta:NEWLINE ordered = TrueNEWLINE id = fields.UUID(required=True)NEWLINE user_id = fields.UUID(required=True)NEWLINE username = fields.String(required=True)NEWLINE user_org_name = fields.String(required=True)NEWLINE org_id = fields.UUID(required=True)NEWLINE workspace_id = fields.UUID(required=True)NEWLINE experiment_id = fields.UUID(required=True)NEWLINE experiment_name = fields.String(required=True)NEWLINE org_name = fields.String(required=True)NEWLINE workspace_name = fields.String(required=True)NEWLINE token_id = fields.UUID(required=True)NEWLINE job_id = fields.UUID(allow_none=True)NEWLINE created_on = fields.DateTime()NEWLINE active_from = fields.DateTime()NEWLINE active_until = fields.DateTime(allow_none=True)NEWLINE status = fields.String()NEWLINE repeat = fields.Integer(allow_none=True)NEWLINE interval = 
fields.Integer(allow_none=True)NEWLINE cron = fields.String(allow_none=True)NEWLINE plan = fields.List(fields.DateTime(), allow_none=True)NEWLINENEWLINENEWLINEnew_user_schema = NewUserSchema()NEWLINEuser_schema = UserSchema()NEWLINElink_workspace_schema = LinkWorkpaceSchema()NEWLINENEWLINEorg_schema = OrganizationSchema()NEWLINEorgs_schema = OrganizationSchema(many=True)NEWLINEorgs_schema_tiny = OrganizationSchema(NEWLINE many=True, exclude=('owner', 'workspaces'))NEWLINEmy_orgs_schema = OrganizationSchema(many=True, exclude=('workspaces', ))NEWLINEorg_schema_short = OrganizationSchema(exclude=('owner',))NEWLINEnew_org_schema = NewOrgSchema()NEWLINElink_org_schema = LinkOrgSchema()NEWLINENEWLINEworkspace_schema = WorkspaceSchema()NEWLINEworkspaces_schema = WorkspaceSchema(many=True)NEWLINEworkspaces_schema_tiny = WorkspaceSchema(NEWLINE many=True, exclude=('owner',))NEWLINEworkspace_schema_short = WorkspaceSchema(exclude=('owner',))NEWLINEmy_workspaces_schema = WorkspaceSchema(many=True)NEWLINEnew_workspace_schema = NewWorkspaceSchema()NEWLINENEWLINEexperiment_schema = ExperimentSchema()NEWLINEexperiments_schema = ExperimentSchema(many=True)NEWLINEmy_experiments_schema = ExperimentSchema(many=True)NEWLINENEWLINEmy_executions_schema = ExecutionSchema(many=True)NEWLINENEWLINEmy_schedules_schema = ScheduleSchema(many=True)NEWLINENEWLINENEWLINEuser_profile_schema = UserProfileSchema()NEWLINENEWLINEaccess_token_schema = AccessTokenSchema()NEWLINEaccess_tokens_schema = AccessTokenSchema(many=True)NEWLINEnew_access_token_schema = NewAccessTokenSchema()NEWLINEcreated_access_token_schema = CreatedAccessTokenSchema()NEWLINEprofile_org_schema = ProfileOrganizationSchema()NEWLINEprofile_orgs_schema = ProfileOrganizationsSchema()NEWLINEprofile_new_org_schema = ProfileNewOrganizationSchema()NEWLINEorg_info_schema = OrganizationInfoSchema()NEWLINEorg_settings_schema = OrgSettingsSchema()NEWLINEorg_name_schema = OrgNameSchema()NEWLINEprofile_workspace_schema = ProfileWorkspaceSchema()NEWLINEprofile_workspaces_schema = ProfileWorkspacesSchema()NEWLINEprofile_new_workspace_schema = ProfileNewWorkspaceSchema()NEWLINEworkspace_collaborator_schema = WorkspaceCollaboratorSchema()NEWLINEworkspace_collaborators_schema = WorkspaceCollaboratorSchema(many=True)NEWLINEworkspace_info_schema = WorkspaceInfoSchema()NEWLINEorg_members_schema = OrgMemberSchema(many=True)NEWLINEorg_member_schema = OrgMemberSchema()NEWLINEcurrent_user_schema = CurrentUserSchema()NEWLINEschedules_schema = ScheduleSchema(many=True)NEWLINElight_access_tokens_schema = LightAccessTokenSchema(many=True)NEWLINE
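For orientation, a minimal sketch of how schemas like the ones above behave; it uses plain marshmallow rather than the project's `ma` wrapper, and `OrgExample` plus the sample payload are made-up names for illustration only.

# Minimal sketch (plain marshmallow, not the project's `ma` wrapper) showing how
# data_key and load_only behave; OrgExample and the sample data are illustrative only.
import uuid
from marshmallow import Schema, fields

class OrgExample(Schema):
    id = fields.UUID(required=True)
    name = fields.String(required=True)
    kind = fields.String(data_key="type", required=True)   # serialized under the key "type"
    access_token = fields.String(load_only=True)            # accepted on load, hidden on dump

org = {"id": uuid.uuid4(), "name": "my-org", "kind": "personal", "access_token": "secret"}
print(OrgExample().dump(org))   # no "access_token"; "kind" appears as "type"
print(OrgExample().load({"id": str(uuid.uuid4()), "name": "my-org", "type": "personal"}))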
from typing import Any, Iterable, Mapping, MutableMappingNEWLINENEWLINEfrom ghaudit.query.sub_query import SubQuery, ValidValueTypeNEWLINEfrom ghaudit.query.utils import PageInfo, jinja_envNEWLINENEWLINENEWLINEclass SubQueryCommon(SubQuery):NEWLINE def __init__(NEWLINE self,NEWLINE fragments: Iterable[str],NEWLINE entry: str,NEWLINE params: MutableMapping[str, str],NEWLINE ) -> None:NEWLINE SubQuery.__init__(self)NEWLINE self._entry = entryNEWLINE self._params = paramsNEWLINE self._values = {} # type: MutableMapping[str, ValidValueType]NEWLINENEWLINE env = jinja_env()NEWLINE self._templates = [env.get_template(frag) for frag in fragments]NEWLINENEWLINE def entry(self) -> str:NEWLINE return self._entryNEWLINENEWLINE def params(self) -> Mapping[str, str]:NEWLINE return self._paramsNEWLINENEWLINE def render(self, args: Mapping[str, ValidValueType]) -> str:NEWLINE frags = [frag.render(args) for frag in self._templates]NEWLINE return "".join(frags)NEWLINENEWLINE def update_page_info(self, response: Mapping[str, Any]) -> None:NEWLINE raise NotImplementedError("abstract function call")NEWLINENEWLINE def params_values(self) -> Mapping[str, ValidValueType]:NEWLINE return self._valuesNEWLINENEWLINE def __repr__(self) -> str:NEWLINE return "{}({}): {}".format(NEWLINE self._entry, self._count, repr(self._page_info)NEWLINE )NEWLINENEWLINE def _iterate(self, page_info: PageInfo, cursor_name: str) -> None:NEWLINE self._page_info = page_infoNEWLINE self._values[cursor_name] = self._page_info["endCursor"]NEWLINE self._count += 1NEWLINE
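The `_iterate` helper above is the cursor-pagination step: remember the page's `endCursor` and feed it back as a query variable until `hasNextPage` is false. A rough, self-contained sketch of that loop with plain dicts; `fetch_page` and the canned pages are stand-ins, not ghaudit or GitHub API calls.

# Standalone sketch of the cursor-pagination loop that _iterate supports.
# fetch_page() is a stand-in for a real GraphQL call; it is not ghaudit API.
from typing import Optional

FAKE_PAGES = [
    {"nodes": [1, 2], "pageInfo": {"hasNextPage": True, "endCursor": "c2"}},
    {"nodes": [3], "pageInfo": {"hasNextPage": False, "endCursor": "c3"}},
]

def fetch_page(cursor: Optional[str]) -> dict:
    # Pretend the cursor selects the next canned page.
    return FAKE_PAGES[0] if cursor is None else FAKE_PAGES[1]

def collect_all() -> list:
    items, cursor = [], None
    while True:
        page = fetch_page(cursor)
        items.extend(page["nodes"])
        info = page["pageInfo"]          # same shape as ghaudit's PageInfo
        if not info["hasNextPage"]:
            return items
        cursor = info["endCursor"]       # what _iterate stores back into self._values

print(collect_all())  # [1, 2, 3]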
import numpy as npNEWLINENEWLINEfrom collections import deque, IterableNEWLINEimport sigvisa.infer.optimize.optim_utils as optim_utilsNEWLINENEWLINEclass CyclicGraphError(Exception):NEWLINE passNEWLINENEWLINENEWLINEclass DAG(object):NEWLINENEWLINE """NEWLINE Represents a directed acyclic graph.NEWLINENEWLINE """NEWLINENEWLINE def __init__(self, toplevel_nodes=None, leaf_nodes=None):NEWLINE self.toplevel_nodes = set(toplevel_nodes) if toplevel_nodes is not None else set()NEWLINE self.leaf_nodes = set(leaf_nodes) if leaf_nodes is not None else set()NEWLINENEWLINE # invariant: self._topo_sorted_list should always be a topologically sorted list of nodesNEWLINE self._topo_sort()NEWLINENEWLINE def __ts_visit(self, node):NEWLINE m = node.get_mark()NEWLINE if m == 2:NEWLINE raise CyclicGraphError("graph contains a cycle!")NEWLINE elif m == 0:NEWLINE node.set_mark(2) # visit node "temporarily"NEWLINE for pn in node.parents.values():NEWLINE self.__ts_visit(pn)NEWLINE node.set_mark(1)NEWLINE node._topo_sorted_list_index = len(self._topo_sorted_list)NEWLINE self._topo_sorted_list.append(node)NEWLINENEWLINE def _topo_sort(self):NEWLINE # check graph invariantsNEWLINE for tn in self.toplevel_nodes:NEWLINE assert(len(tn.parents) == 0)NEWLINE for ln in self.leaf_nodes:NEWLINE assert(len(ln.children) == 0)NEWLINENEWLINE self._topo_sorted_list = []NEWLINE for leaf in self.leaf_nodes:NEWLINE self.__ts_visit(leaf)NEWLINE self.clear_visited()NEWLINENEWLINENEWLINE # allow fast removing of nodes by setting their entries to NoneNEWLINE def _gc_topo_sorted_nodes(self):NEWLINE tsl = [n for n in self._topo_sorted_list if n is not None]NEWLINE for (i, n) in enumerate(tsl):NEWLINE n._topo_sorted_list_index = iNEWLINE self._topo_sorted_list = tslNEWLINENEWLINE def topo_sorted_nodes(self):NEWLINE self._gc_topo_sorted_nodes()NEWLINE return self._topo_sorted_listNEWLINENEWLINE def clear_visited(self):NEWLINE q = deque(self.toplevel_nodes)NEWLINE while len(q) > 0:NEWLINE node = q.pop()NEWLINE node.clear_mark()NEWLINE q.extendleft(node.children)NEWLINENEWLINENEWLINE def topo_sorted_nodes(self):NEWLINE self._gc_topo_sorted_nodes()NEWLINE assert(len(self._topo_sorted_list) == len(self.all_nodes))NEWLINE return self._topo_sorted_listNEWLINENEWLINE def recover_parents_from_children(self):NEWLINE for node in self.topo_sorted_nodes():NEWLINE for child in node.children:NEWLINE child.addParent(node, stealth=True)NEWLINENEWLINEdef get_relevant_nodes(node_list, exclude_nodes=None):NEWLINE # note, it's important that the nodes have a consistent order, sinceNEWLINE # we represent their joint values as a vector.NEWLINENEWLINE parents_of_deterministic = [node.parents[node.default_parent_key()] for node in node_list if node.deterministic()]NEWLINE node_list = [node for node in node_list if not node.deterministic()]NEWLINENEWLINE nlset = set(node_list + parents_of_deterministic)NEWLINE all_stochastic_children = [child for node in nlset for (child, intermediates) in node.get_stochastic_children()]NEWLINE relevant_nodes = set(node_list + all_stochastic_children + parents_of_deterministic)NEWLINE if exclude_nodes:NEWLINE for n in exclude_nodes:NEWLINE relevant_nodes.remove(n)NEWLINENEWLINE return node_list, relevant_nodesNEWLINENEWLINENEWLINEclass ParentConditionalNotDefined(Exception):NEWLINE passNEWLINENEWLINEclass DirectedGraphModel(DAG):NEWLINENEWLINE """NEWLINE A directed graphical probability model.NEWLINENEWLINE """NEWLINENEWLINE def __init__(self, **kwargs):NEWLINE super(DirectedGraphModel, self).__init__(**kwargs)NEWLINENEWLINE 
self.all_nodes = dict()NEWLINE self.nodes_by_key = dict()NEWLINENEWLINENEWLINE def add_children(n):NEWLINE if n.label not in self.all_nodes:NEWLINE self.add_node(n)NEWLINE for c in n.children:NEWLINE add_children(c)NEWLINENEWLINE if self.toplevel_nodes is not None:NEWLINE for n in self.toplevel_nodes:NEWLINE add_children(n)NEWLINENEWLINE def current_log_p(self, verbose=False):NEWLINE logp = 0NEWLINE for node in self.topo_sorted_nodes():NEWLINE if node.deterministic(): continueNEWLINENEWLINE try:NEWLINE lp = node.log_p()NEWLINE except ParentConditionalNotDefined:NEWLINE lp = node.upwards_message_normalizer()NEWLINENEWLINE if verbose:NEWLINE print "node %s has logp %.1f" % (node.label, lp)NEWLINE logp += lpNEWLINE return logpNEWLINENEWLINE def parent_predict_all(self):NEWLINE for node in self.topo_sorted_nodes():NEWLINE if not node._fixed:NEWLINE node.parent_predict()NEWLINENEWLINE def parent_sample_all(self):NEWLINE for node in self.topo_sorted_nodes():NEWLINE if not node._fixed:NEWLINE node.parent_sample()NEWLINENEWLINE def get_all(self, node_list):NEWLINE return np.concatenate([node.get_mutable_values() for node in node_list if not node.deterministic()])NEWLINENEWLINE def set_all(self, values, node_list):NEWLINE i = 0NEWLINE for node in node_list:NEWLINE if node.deterministic(): continueNEWLINE n = node.mutable_dimension()NEWLINE node.set_mutable_values(values[i:i+n])NEWLINE i += nNEWLINENEWLINE for dn in node.get_deterministic_children():NEWLINE dn.parent_predict()NEWLINENEWLINENEWLINE def joint_logprob(self, values, node_list, relevant_nodes, proxy_lps=None, c=1):NEWLINE # node_list: list of nodes whose values we are interested inNEWLINENEWLINE # relevant_nodes: all nodes whose log_p() depends on a valueNEWLINE # from a node in node_list.NEWLINENEWLINE #v = self.get_all(node_list = node_list)NEWLINE if values is not None:NEWLINE self.set_all(values=values, node_list=node_list)NEWLINENEWLINE joint_factors = set()NEWLINE if proxy_lps is not None:NEWLINE ll = np.sum([f() for (f, df) in proxy_lps.values()])NEWLINE ll += np.sum([node.log_p() for node in relevant_nodes if node.label not in proxy_lps.keys()])NEWLINE else:NEWLINE ll = 0NEWLINE for node in relevant_nodes:NEWLINE if len(node.params_modeled_jointly)==0:NEWLINE ll += node.log_p()NEWLINE else:NEWLINE ll += node.upwards_message_normalizer()NEWLINE joint_factors = joint_factors | node.params_modeled_jointlyNEWLINENEWLINE for jf in joint_factors:NEWLINE ll += jf.log_likelihood()NEWLINENEWLINENEWLINE return c * llNEWLINENEWLINE def joint_logprob_keys(self, relevant_nodes, keys=None, values=None, node_list=None, proxy_lps=None, c=1):NEWLINE # same as joint_logprob, but we specify values only for aNEWLINE # specific set of keys.NEWLINE # here, node_list contains one entry for each key (so willNEWLINE # have duplicates if we have multiple keys from the same node)NEWLINE if keys is not None:NEWLINE for (key, val, n) in zip(keys, values, node_list):NEWLINE n.set_value(key=key, value=val)NEWLINENEWLINENEWLINE joint_factors = set()NEWLINE if proxy_lps is not None:NEWLINE ll = np.sum([f() for (f, df) in proxy_lps.values()])NEWLINE ll += np.sum([node.log_p() for node in relevant_nodes if node.label not in proxy_lps.keys()])NEWLINE else:NEWLINE ll = 0NEWLINE for node in relevant_nodes:NEWLINE if len(node.params_modeled_jointly)==0:NEWLINE ll += node.log_p()NEWLINE else:NEWLINE ll += node.upwards_message_normalizer()NEWLINE joint_factors = joint_factors | node.params_modeled_jointlyNEWLINENEWLINE for jf in joint_factors:NEWLINE ll += 
jf.log_likelihood()NEWLINENEWLINE return c * llNEWLINENEWLINENEWLINE def log_p_grad(self, values, node_list, relevant_nodes, proxy_lps=None, eps=1e-4, c=1.0):NEWLINE try:NEWLINE eps0 = eps[0]NEWLINE except:NEWLINE eps = (eps,) * len(values)NEWLINENEWLINE proxy_lps = proxy_lps if proxy_lps is not None else {}NEWLINENEWLINE v = self.get_all(node_list = node_list)NEWLINE self.set_all(values=values, node_list=node_list)NEWLINE initial_lp = dict([(node.label, node.log_p() if node.label not in proxy_lps else proxy_lps[node.label][0]()) for node in relevant_nodes])NEWLINE grad = np.zeros((len(values),))NEWLINE i = 0NEWLINE for node in node_list:NEWLINE keys = node.mutable_keys()NEWLINE for (ni, key) in enumerate(keys):NEWLINE if node.label in proxy_lps:NEWLINE deriv = proxy_lps[node.label][1](key=key, eps=eps[i + ni], lp0=initial_lp[node.label])NEWLINE else:NEWLINE deriv = node.deriv_log_p(key=key, eps=eps[i + ni], lp0=initial_lp[node.label])NEWLINENEWLINE # sum the derivatives of all child nodes wrt to this value, includingNEWLINE # any deterministic nodes along the wayNEWLINE child_list = node.get_stochastic_children()NEWLINE for (child, intermediate_nodes) in child_list:NEWLINE current_key = keyNEWLINE d = 1.0NEWLINE for inode in intermediate_nodes:NEWLINE d *= inode.deriv_value_wrt_parent(parent_key = current_key)NEWLINE current_key = inode.labelNEWLINENEWLINE if child.label in proxy_lps:NEWLINE d *= proxy_lps[child.label][1](parent_key = current_key,NEWLINE eps=eps[i + ni],NEWLINE lp0=initial_lp[child.label])NEWLINE else:NEWLINE d *= child.deriv_log_p(parent_key = current_key,NEWLINE eps=eps[i + ni],NEWLINE lp0=initial_lp[child.label])NEWLINE deriv += dNEWLINENEWLINE grad[i + ni] = derivNEWLINE i += len(keys)NEWLINE self.set_all(values=v, node_list=node_list)NEWLINE return grad * cNEWLINENEWLINE def joint_optimize_nodes(self, node_list, optim_params, proxy_lps=None, use_grad=True):NEWLINE """NEWLINE Assume that the value at each node is a 1D array.NEWLINE """NEWLINE node_list, relevant_nodes = get_relevant_nodes(node_list)NEWLINENEWLINE start_values = self.get_all(node_list=node_list)NEWLINE low_bounds = np.concatenate([node.low_bounds() for node in node_list])NEWLINE high_bounds = np.concatenate([node.high_bounds() for node in node_list])NEWLINE #bounds = zip(low_bounds, high_bounds)NEWLINE bounds = NoneNEWLINENEWLINE jp = lambda v: self.joint_logprob(values=v, relevant_nodes=relevant_nodes, node_list=node_list, proxy_lps=proxy_lps, c=-1)NEWLINENEWLINE # this is included for profiling / debugging -- not real codeNEWLINE def time_joint_logprob():NEWLINE import timeNEWLINE st = time.time()NEWLINE for i in range(500):NEWLINE joint_logprob(start_values, relevant_nodes=relevant_nodes, proxy_lps=proxy_lps, c=-1)NEWLINE et = time.time()NEWLINE print "joint prob took %.3fs on average" % ((et-st)/500.0)NEWLINENEWLINE if use_grad:NEWLINE g = lambda v, eps=1e-4: self.log_p_grad(values=v, node_list=node_list, relevant_nodes=relevant_nodes, proxy_lps=proxy_lps, c=-1, eps=eps)NEWLINE else:NEWLINE g = NoneNEWLINENEWLINE result_vector, cost = optim_utils.minimize(f=jp, x0=start_values, fprime=g, optim_params=optim_params, bounds=bounds)NEWLINE self.set_all(values=result_vector, node_list=node_list)NEWLINE print "got optimized x", result_vectorNEWLINENEWLINENEWLINE def remove_node(self, node):NEWLINE del self.all_nodes[node.label]NEWLINE for key in node.keys():NEWLINE del self.nodes_by_key[key]NEWLINENEWLINE self.toplevel_nodes.discard(node)NEWLINE self.leaf_nodes.discard(node)NEWLINENEWLINE for child in 
node.children:NEWLINE child.removeParent(node)NEWLINE if len(child.parents) == 0:NEWLINE self.toplevel_nodes.add(child)NEWLINENEWLINE for parent in set(node.parents.values()):NEWLINE parent.removeChild(node)NEWLINE if len(parent.children) == 0:NEWLINE self.leaf_nodes.add(parent)NEWLINENEWLINENEWLINE def add_node(self, node):NEWLINE if node.label in self.all_nodes:NEWLINE raise ValueError("adding node '%s' to the graph, but a node with this label already exists!" % node.label)NEWLINE self.all_nodes[node.label] = nodeNEWLINE for key in node.keys():NEWLINE self.nodes_by_key[key] = nodeNEWLINE if len(node.children) == 0:NEWLINE self.leaf_nodes.add(node)NEWLINE if len(node.parents) == 0:NEWLINE self.toplevel_nodes.add(node)NEWLINE for child in node.children:NEWLINE self.toplevel_nodes.discard(child)NEWLINE for parent in node.parents.values():NEWLINE self.leaf_nodes.discard(parent)NEWLINENEWLINENEWLINE def get_node_from_key(self, key):NEWLINE return self.nodes_by_key[key]NEWLINENEWLINE def set_value(self, key, value, **kwargs):NEWLINE n = self.nodes_by_key[key]NEWLINE n.set_value(value=value, key=key, **kwargs)NEWLINENEWLINE def get_value(self, key):NEWLINE n = self.nodes_by_key[key]NEWLINE return n.get_value(key=key)NEWLINENEWLINE def save_graphviz(self, fname):NEWLINE f = open(fname, 'w')NEWLINE f.write("digraph G {\n")NEWLINE f.write("size =\"10,10\";")NEWLINENEWLINE for node in self.topo_sorted_nodes():NEWLINE for child in node.children:NEWLINE f.write("\"%s\" -> \"%s\";\n" % (node.label, child.label))NEWLINENEWLINE f.write("}\n")NEWLINE f.close()NEWLINE
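`__ts_visit` above is a depth-first topological sort that detects cycles with a temporary mark. A standalone sketch of the same marking scheme over a plain parents dict (not the sigvisa Node API):

# Standalone illustration of the mark-based DFS topological sort used by __ts_visit;
# operates on a plain dict mapping node -> parents, not on sigvisa Node objects.
def topo_sort(parents):
    UNMARKED, DONE, IN_PROGRESS = 0, 1, 2
    marks = {n: UNMARKED for n in parents}
    order = []

    def visit(node):
        if marks[node] == IN_PROGRESS:
            raise ValueError("graph contains a cycle!")
        if marks[node] == UNMARKED:
            marks[node] = IN_PROGRESS      # "temporary" mark, as in __ts_visit
            for parent in parents[node]:
                visit(parent)
            marks[node] = DONE
            order.append(node)             # parents always precede their children

    for node in parents:
        visit(node)
    return order

# 'c' depends on 'a' and 'b'; 'b' depends on 'a'
print(topo_sort({"a": [], "b": ["a"], "c": ["a", "b"]}))  # ['a', 'b', 'c']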
__author__ = 'Steven Summers'NEWLINE__version__ = ''NEWLINENEWLINEimport argparseNEWLINE# import builtinsNEWLINEimport difflibNEWLINEimport importlib.utilNEWLINEimport inspectNEWLINEimport ioNEWLINEimport jsonNEWLINEimport reNEWLINEimport sysNEWLINEimport textwrapNEWLINEimport threadingNEWLINEimport timeNEWLINEimport tracebackNEWLINEimport unittestNEWLINENEWLINEfrom bdb import BdbNEWLINEfrom collections import OrderedDictNEWLINEfrom enum import Enum, uniqueNEWLINEfrom functools import wrapsNEWLINEfrom types import FunctionType, ModuleType, TracebackTypeNEWLINEfrom typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, UnionNEWLINENEWLINENEWLINE# GLOBALS TO EXCLUDE FILES IN TRACEBACKNEWLINE__TEST_RUNNER = TrueNEWLINEsetattr(threading, '__TEST_RUNNER', True) # Don't like this but otherwise regexNEWLINENEWLINE__all__ = ['AttributeGuesser', 'OrderedTestCase', 'RedirectStdIO', 'TestCase', 'TestMaster',NEWLINE 'skipIfFailed', 'timeout']NEWLINENEWLINE# DEFAULTSNEWLINEDEFAULT_TIMEOUT = 0NEWLINE# MIN_PY_VERSION = (3, 7, 0)NEWLINENEWLINE# CONSTANTSNEWLINEDIFF_OMITTED = '\nDiff is {} characters long. Set TestMaster(max_diff=None) to see it.'NEWLINEDUPLICATE_MSG = 'AS ABOVE'NEWLINECLOSE_MATCH_CUTOFF = 0.8NEWLINETAB_SIZE = 4NEWLINEBLOCK_WIDTH = 80NEWLINEBLOCK_TEMPLATE = """\NEWLINE/{0}\\NEWLINE|{{:^{1}}}|NEWLINE\\{0}/\NEWLINE""".format('-' * (BLOCK_WIDTH - 2), BLOCK_WIDTH - 2)NEWLINENEWLINENEWLINE@uniqueNEWLINEclass TestOutcome(Enum):NEWLINE SUCCESS = '+'NEWLINE FAIL = '-'NEWLINE SKIP = '?'NEWLINENEWLINENEWLINEdef skipIfFailed(test_case: Type[unittest.TestCase] = None, test_name: str = None, tag: str = None):NEWLINE """NEWLINE skipIfFail decorator allows you to skip entire TestCases or specific testNEWLINE cases if not all tests pass for a TestCase, or if a specific test case failsNEWLINE (skipped counts as a fail).NEWLINENEWLINE At least one test method of TestCase1 needs to fail to skipNEWLINE @skipIfFailed(TestCase1)NEWLINENEWLINE Skip if 'test_method' of TestCase1 failedNEWLINE @skipIfFailed(TestCase1, 'test_method')NEWLINENEWLINE Skip if 'test_method' failedNEWLINE Can only be applied to method with class class containing a methodNEWLINE named 'test_method'NEWLINE @skipIfFailed(test_name='test_method')NEWLINE """NEWLINE if test_case is None and test_name is None:NEWLINE raise RuntimeError("test_case and test_name for skipIfFailed can't both be None")NEWLINENEWLINE if test_case is not None and test_name is not None and not hasattr(test_case, test_name):NEWLINE raise AttributeError(f'{test_case.__name__} has no method {test_name}')NEWLINENEWLINE if tag is not None and test_name is None:NEWLINE raise RuntimeError("test_name must be specified if tag is provided for skipIfFailed")NEWLINENEWLINE def decorator(obj: Union[Type[TestCase], Callable]):NEWLINE if hasattr(obj, '__skip_test__'):NEWLINE obj.__skip_test__ = obj.__skip_test__.copy()NEWLINE obj.__skip_test__.append((test_case, test_name, tag))NEWLINE else:NEWLINE obj.__skip_test__ = [(test_case, test_name, tag)]NEWLINE if not inspect.isfunction(obj):NEWLINE return objNEWLINENEWLINE @wraps(obj)NEWLINE def wrapper(*args, **kwargs):NEWLINE return obj(*args, **kwargs)NEWLINENEWLINE return wrapperNEWLINENEWLINE return decoratorNEWLINENEWLINENEWLINEdef import_module(name: str, path: str) -> Tuple[Optional[ModuleType], Optional[Tuple[Type, Exception, TracebackType]]]:NEWLINE """NEWLINE Dynamically import the Python file (.py) at 'path' theNEWLINE __name__ attribute will be set to 'name'NEWLINE """NEWLINE if not name:NEWLINE raise 
ValueError("'name' can not be empty")NEWLINENEWLINE spec = importlib.util.spec_from_file_location(name, path)NEWLINE if spec is None:NEWLINE raise ValueError(f'The path {path} is invalid. It should be a Python (.py) file path.')NEWLINENEWLINE module = importlib.util.module_from_spec(spec)NEWLINE with RedirectStdIO(stdin=True, stdout=True) as stdio:NEWLINE try:NEWLINE spec.loader.exec_module(module)NEWLINE setattr(module, '__TEST_RUNNER_CLEAN_IMPORT', stdio.stdout == '')NEWLINE return module, NoneNEWLINE except BaseException:NEWLINE return None, sys.exc_info()NEWLINENEWLINENEWLINEdef _timeout_wrapper(test_func):NEWLINE """NEWLINE Runs the test function in a killable thread, the seconds valueNEWLINE is obtained from the __timeout__ attribute which can be set globallyNEWLINE using TestMaster(timeout=value) or apply to specific classes or functionsNEWLINE using the timeout decorator, if seconds <= 0 the test is not threaded.NEWLINE """NEWLINENEWLINE @wraps(test_func)NEWLINE def thread_wrapper(self):NEWLINE secs = getattr(test_func, '__timeout__', 0) or \NEWLINE getattr(self.__class__, '__timeout__', 0) or \NEWLINE _TimeoutThread.timeoutNEWLINENEWLINE if secs <= 0:NEWLINE return test_func(self)NEWLINENEWLINE try:NEWLINE thread = _TimeoutThread(name=test_func.__qualname__,NEWLINE target=test_func, args=(self,))NEWLINE threading.settrace(thread.global_trace)NEWLINE thread.start()NEWLINE thread.join(secs)NEWLINE alive = thread.isAlive()NEWLINE thread.kill()NEWLINE # re-join to ensure thread completes any blocking operations. This isNEWLINE # really only required because long blocking calls may resultNEWLINE # in sequential tests using RedirectStdIO not setting back correctlyNEWLINE thread.join()NEWLINE finally:NEWLINE threading.settrace(None)NEWLINENEWLINE if alive:NEWLINE raise unittest.SkipTest(f'Function ran longer than {secs} second(s)')NEWLINENEWLINE if thread.exc_info is not None:NEWLINE raise thread.exc_info[1].with_traceback(thread.exc_info[2])NEWLINENEWLINE return NoneNEWLINENEWLINE return thread_wrapperNEWLINENEWLINENEWLINEdef timeout(seconds: float = 0):NEWLINE """NEWLINE Decorator to apply __timeout__ attribute to a test method or TestCaseNEWLINE """NEWLINENEWLINE def timeout_decorator(test_obj):NEWLINE test_obj.__timeout__ = secondsNEWLINE return test_objNEWLINENEWLINE return timeout_decoratorNEWLINENEWLINENEWLINEdef get_object_name(obj):NEWLINE return getattr(obj, '__qualname__', None) or getattr(obj, '__name__', None) or obj.__class__.__name__NEWLINENEWLINENEWLINEclass CachedIO(io.StringIO):NEWLINE """ Writes all read values and write values to stream """NEWLINE def __init__(self, stream):NEWLINE super().__init__()NEWLINE self._stream = streamNEWLINENEWLINE def set_value(self, string):NEWLINE """ Set value to self without writing to stream """NEWLINE self.seek(0)NEWLINE self.truncate()NEWLINE super().write(string)NEWLINE self.seek(0)NEWLINENEWLINE def write(self, s: str):NEWLINE res = super().write(s)NEWLINE self._stream.write(s)NEWLINE return resNEWLINENEWLINE def readline(self, size: int = None):NEWLINE res = super().readline(size)NEWLINE self._stream.write(res)NEWLINE return resNEWLINENEWLINENEWLINEclass RedirectStdIO:NEWLINE """NEWLINE Context manager to send stdin input and capture stdout and stderrNEWLINENEWLINE Usage:NEWLINE with RedirectStdIO(stdin=True, stdout=True) as stdio:NEWLINE stdio.set_stdin('World!\n')NEWLINE inp = input('Hello')NEWLINENEWLINE stdio.stdout == 'Hello'NEWLINE inp == 'World'NEWLINE """NEWLINE def __init__(self, *, stdin: bool = False, stdout: bool = 
False,NEWLINE stderr: bool = False, stdinout: bool = False):NEWLINE self._sys_stdin = sys.stdinNEWLINE self._sys_stdout = sys.stdoutNEWLINE self._sys_stderr = sys.stderrNEWLINENEWLINE if stdinout:NEWLINE self._stdinout_stream = io.StringIO()NEWLINE self._stdin_stream = CachedIO(self._stdinout_stream)NEWLINE self._stdout_stream = CachedIO(self._stdinout_stream)NEWLINENEWLINE else:NEWLINE self._stdinout_stream = NoneNEWLINE self._stdin_stream = io.StringIO() if stdin else NoneNEWLINE self._stdout_stream = io.StringIO() if stdout else NoneNEWLINENEWLINE self._stderr_stream = io.StringIO() if stderr else NoneNEWLINENEWLINE def __enter__(self):NEWLINE if self._stdin_stream is not None:NEWLINE sys.stdin = self._stdin_streamNEWLINENEWLINE if self._stdout_stream is not None:NEWLINE sys.stdout = self._stdout_streamNEWLINENEWLINE if self._stderr_stream is not None:NEWLINE sys.stderr = self._stderr_streamNEWLINENEWLINE return selfNEWLINENEWLINE def __exit__(self, exc_type, exc_val, exc_tb):NEWLINE sys.stdin = self._sys_stdinNEWLINE sys.stdout = self._sys_stdoutNEWLINE sys.stderr = self._sys_stderrNEWLINENEWLINE @staticmethodNEWLINE def _read_stream(stream: io.StringIO) -> str:NEWLINE if stream is None:NEWLINE raise RuntimeError(NEWLINE 'Attempt to read from a stream that has not been enabled')NEWLINE return stream.getvalue()NEWLINENEWLINE def set_stdin(self, string: str):NEWLINE if self._stdin_stream is None:NEWLINE raise RuntimeError(NEWLINE f'stdin has not been set in {self.__class__.__name__}.__init__')NEWLINENEWLINE if self._stdinout_stream is None:NEWLINE self._stdin_stream.seek(0)NEWLINE self._stdin_stream.truncate()NEWLINE self._stdin_stream.write(string)NEWLINE self._stdin_stream.seek(0)NEWLINE else:NEWLINE self._stdin_stream.set_value(string)NEWLINENEWLINE @propertyNEWLINE def stdin(self):NEWLINE if self._stdin_stream is None:NEWLINE raise RuntimeError(NEWLINE f'stdin has not been set in {self.__class__.__name__}.__init__')NEWLINE pos = self._stdin_stream.tell()NEWLINE value = self._stdin_stream.read()NEWLINE self._stdin_stream.seek(pos)NEWLINE return valueNEWLINENEWLINE @propertyNEWLINE def stdout(self) -> str:NEWLINE return self._read_stream(self._stdout_stream)NEWLINENEWLINE @propertyNEWLINE def stderr(self) -> str:NEWLINE return self._read_stream(self._stderr_stream)NEWLINENEWLINE @propertyNEWLINE def stdinout(self):NEWLINE return self._read_stream(self._stdinout_stream)NEWLINENEWLINENEWLINEclass RecursionDetector(Bdb):NEWLINE def __init__(self, *args):NEWLINE super().__init__(*args)NEWLINE self._stack = set()NEWLINENEWLINE def do_clear(self, arg):NEWLINE passNEWLINENEWLINE def user_call(self, frame, argument_list):NEWLINE code = frame.f_codeNEWLINE if code in self._stack:NEWLINE raise RecursionErrorNEWLINE self._stack.add(code)NEWLINENEWLINE def user_return(self, frame, return_value):NEWLINE self._stack.remove(frame.f_code)NEWLINENEWLINENEWLINEclass AttributeGuesser:NEWLINE """NEWLINE Wrapper class for objects to return the attribute with theNEWLINE closest matching name. 
If fail is True then a TestCase.failureExceptionNEWLINE is raised if no possible match is found.NEWLINE """NEWLINENEWLINE def __init__(self, obj: Any, fail: bool = True):NEWLINE """NEWLINE Parameters:NEWLINE obj: Object to wrap for guessing attributes ofNEWLINE fail: if attribute can't be foundNEWLINE raise exception iff True otherwise return NoneNEWLINE """NEWLINE if isinstance(obj, AttributeGuesser):NEWLINE obj = getattr(obj, '_AttributeGuesser__object')NEWLINE self.__object = objNEWLINE self.__cache = {}NEWLINE self.__fail = failNEWLINENEWLINE @classmethodNEWLINE def get_wrapped_object(cls, attr_guesser):NEWLINE if not isinstance(attr_guesser, AttributeGuesser):NEWLINE raise ValueError('attr_guesser must be an instance of AttributeGuesser')NEWLINE return object.__getattribute__(attr_guesser, '_AttributeGuesser__object')NEWLINENEWLINE def __guess_attribute(self, obj: Any, name: str):NEWLINE attributes = dict(inspect.getmembers(obj))NEWLINE matches = difflib.get_close_matches(name, attributes, n=1, cutoff=CLOSE_MATCH_CUTOFF)NEWLINE if not matches:NEWLINE if self._AttributeGuesser__fail:NEWLINE raise AttributeError(NEWLINE f"Found no close match for '{get_object_name(obj)}.{name}'")NEWLINE return NoneNEWLINE return attributes[matches[0]]NEWLINENEWLINE def __getattribute__(self, key: str):NEWLINE if key in ('_AttributeGuesser__object', '_AttributeGuesser__cache',NEWLINE '_AttributeGuesser__guess_attribute', '_AttributeGuesser__fail'):NEWLINE return object.__getattribute__(self, key)NEWLINE return getattr(object.__getattribute__(self, '_AttributeGuesser__object'), key)NEWLINENEWLINE def __getattr__(self, key: str):NEWLINE cache = self._AttributeGuesser__cacheNEWLINE if key in cache:NEWLINE return cache[key]NEWLINENEWLINE attr = self._AttributeGuesser__guess_attribute(self._AttributeGuesser__object, key)NEWLINE cache[key] = attrNEWLINE return attrNEWLINENEWLINE def __setattr__(self, key: str, value: Any):NEWLINE if key in ('_AttributeGuesser__object', '_AttributeGuesser__cache',NEWLINE '_AttributeGuesser__fail'):NEWLINE return object.__setattr__(self, key, value)NEWLINE return setattr(self._AttributeGuesser__object, key, value)NEWLINENEWLINE def __repr__(self):NEWLINE return f'AttributeGuesser({self._AttributeGuesser__object!r})'NEWLINENEWLINENEWLINEclass _TimeoutThread(threading.Thread):NEWLINE """NEWLINE Killable threadNEWLINE """NEWLINE timeout: float = DEFAULT_TIMEOUTNEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE super().__init__(*args, **kwargs)NEWLINE self.killed = FalseNEWLINE self.exc_info = NoneNEWLINENEWLINE def run(self):NEWLINE """NEWLINE Set the trace function and run the thread catching and storingNEWLINE any exceptions that occur.NEWLINE """NEWLINE try:NEWLINE super().run()NEWLINE except BaseException:NEWLINE self.exc_info = sys.exc_info()NEWLINENEWLINE def kill(self):NEWLINE """ Set the thread to terminate at the next trace event """NEWLINE self.killed = TrueNEWLINENEWLINE def global_trace(self, _frame, event, _arg):NEWLINE """NEWLINE Global trace function for threading.settrace which returns a localNEWLINE trace functionNEWLINE """NEWLINE if event == 'call':NEWLINE return self.local_traceNEWLINE return NoneNEWLINENEWLINE def local_trace(self, _frame, event, _arg):NEWLINE """NEWLINE Local trace function which kills the thread should it still be runningNEWLINE and the 'killed' attribute is set to True.NEWLINE """NEWLINE if self.killed:NEWLINE if event == 'line':NEWLINE raise SystemExitNEWLINE return self.local_traceNEWLINENEWLINENEWLINEclass 
TestLoader(unittest.TestLoader):NEWLINE """ Custom loader class to specify TestCase case order """NEWLINENEWLINE def getTestCaseNames(self, testCaseClass: Type['TestCase']):NEWLINE """NEWLINE Override for unittest.TestLoad.getTestCaseNamesNEWLINE Return a sorted sequence of method names found within testCaseClassNEWLINE """NEWLINE if issubclass(testCaseClass, OrderedTestCase):NEWLINE return testCaseClass.member_namesNEWLINE return super().getTestCaseNames(testCaseClass)NEWLINENEWLINE def loadTestCases(self, test_cases: List) -> unittest.TestSuite:NEWLINE """NEWLINE Params:NEWLINE test_cases List[Union[unittest.TestCase, Type[unittest.TestCase]]]NEWLINE """NEWLINE suite = unittest.TestSuite()NEWLINENEWLINE for test_case in test_cases:NEWLINE if isinstance(test_case, unittest.TestCase):NEWLINE suite.addTest(test_case)NEWLINE else:NEWLINE suite.addTests(self.loadTestsFromTestCase(test_case))NEWLINE return suiteNEWLINENEWLINENEWLINEclass _TestCaseMeta(type):NEWLINE """NEWLINE MetaClass to decorate all test methods with _timeout_wrapper andNEWLINE track test method definition order.NEWLINE """NEWLINENEWLINE def __new__(mcs, name, bases, namespace):NEWLINE member_names = []NEWLINE prefix = TestLoader.testMethodPrefixNEWLINE for key, value in namespace.items():NEWLINE if key.startswith(prefix) and callable(value):NEWLINE member_names.append(key)NEWLINE namespace[key] = _timeout_wrapper(value)NEWLINENEWLINE result = super().__new__(mcs, name, bases, namespace)NEWLINE result.member_names = member_namesNEWLINE return resultNEWLINENEWLINE # def __getattr__(cls, item):NEWLINE # if item not in cls._modules:NEWLINE # raise AttributeError(f"type object '{cls.__name__}'' has no attribute '{item}'")NEWLINE # return cls._modules[item]NEWLINENEWLINENEWLINEclass TestCase(unittest.TestCase, metaclass=_TestCaseMeta):NEWLINE """NEWLINE Extends the unittest.TestCase defining additional assert methods.NEWLINE """NEWLINE member_names: List[str]NEWLINE _modules: Dict[str, ModuleType] = {}NEWLINENEWLINE def __init__(self, *args, **kwargs):NEWLINE super().__init__(*args, **kwargs)NEWLINE self.aggregated_tests = []NEWLINENEWLINE def __getattr__(self, item):NEWLINE if item not in self._modules:NEWLINE raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{item}'")NEWLINE return self._modules[item]NEWLINENEWLINE @classmethodNEWLINE def register_module(cls, name: str, module: ModuleType):NEWLINE cls._modules[name] = moduleNEWLINENEWLINE def assertIsCleanImport(self, module, msg=None):NEWLINE self.assertIs(getattr(module, '__TEST_RUNNER_CLEAN_IMPORT'), True, msg=msg)NEWLINENEWLINE def assertMultiLineEqual(self, first: str, second: str, msg: Optional[str] = None, strip: bool = False):NEWLINE """NEWLINE unittest.TestCase.assertMultiLineEqual with strip keyword arg,NEWLINE if True then string is split on newlines with trailingNEWLINE whitespace striped and rejoined beforeNEWLINE """NEWLINE if strip:NEWLINE first = '\n'.join(s.rstrip() for s in first.splitlines()) + '\n'NEWLINE second = '\n'.join(s.rstrip() for s in second.splitlines()) + '\n'NEWLINENEWLINE super().assertMultiLineEqual(first, second, msg=msg)NEWLINENEWLINE def assertDefined(self, obj: Union[ModuleType, Type], name: str):NEWLINE if obj is None:NEWLINE self.fail(msg=f"Got 'None' when checking if '{name}' was defined for a type")NEWLINE obj_name = get_object_name(obj)NEWLINE if not hasattr(obj, name):NEWLINE self.fail(msg=f"'{obj_name}.{name}' is not defined correctly or not implemented")NEWLINENEWLINE def assertFunctionDefined(self, obj: 
Union[ModuleType, Type], function_name: str, params: int):NEWLINE self.assertDefined(obj, function_name)NEWLINE obj_name = get_object_name(obj)NEWLINE func = getattr(obj, function_name)NEWLINE if not inspect.isfunction(func):NEWLINE if inspect.ismethoddescriptor(func):NEWLINE self.fail(msg=f"{obj_name}.{function_name} needs to be implemented")NEWLINE self.fail(msg=f"{obj_name}.{function_name} should be a function")NEWLINE num_params = len(inspect.signature(func).parameters)NEWLINE self.assertEqual(num_params, params,NEWLINE msg=(f"'{function_name}' does not have the correct number of parameters, "NEWLINE f"expected {params} found {num_params}"))NEWLINENEWLINE def assertClassDefined(self, module: ModuleType, class_name: str):NEWLINE self.assertDefined(module, class_name)NEWLINE class_ = getattr(module, class_name)NEWLINE self.assertIs(inspect.isclass(class_), True, msg=f"{class_name} should be a class")NEWLINENEWLINE def assertIsSubclass(self, sub_class: Type, parent_class: Type):NEWLINE self.assertIs(issubclass(sub_class, parent_class), True,NEWLINE msg=f"'{sub_class}' is not a subclass of '{parent_class}'")NEWLINENEWLINE def assertDocString(self, obj: Union[Type, Callable], name: str = None):NEWLINE if name is not None:NEWLINE # self.assertDefined(obj, name)NEWLINE obj = getattr(obj, name)NEWLINENEWLINE if obj is None:NEWLINE self.fail(msg=f"Got 'None' when checking if docstring was defined for a type")NEWLINENEWLINE # used over inspect.getdoc to require a doc string rather than inheriting itNEWLINE doc = getattr(obj, '__doc__', None)NEWLINE if doc is None or doc.strip() == '':NEWLINE self.fail(msg=f"Documentation string is required for '{obj.__qualname__}'")NEWLINENEWLINE def assertListSimilar(self, actual: List, expected: List):NEWLINE # Try if sortableNEWLINE # try:NEWLINE # s1 = sorted(actual)NEWLINE # s2 = sorted(expected)NEWLINE # self.assertListEqual(s1, s2)NEWLINE # returnNEWLINE # except TypeError:NEWLINE # passNEWLINENEWLINE # FallbackNEWLINE unexpected = list(actual)NEWLINE missing = []NEWLINE for elem in expected:NEWLINE try:NEWLINE unexpected.remove(elem)NEWLINE except ValueError:NEWLINE missing.append(elem)NEWLINENEWLINE if unexpected or missing:NEWLINE msg = f'Lists are not similar\n\nActual: {actual}\nExpected: {expected}'NEWLINE if missing:NEWLINE msg += f"\nMissing: {missing}"NEWLINE if unexpected:NEWLINE msg += f"\nUnexpected: {unexpected}"NEWLINE self.fail(msg=msg)NEWLINENEWLINE def assertIsNotRecursive(self, func):NEWLINE detector = RecursionDetector()NEWLINE detector.set_trace()NEWLINE is_recursive = FalseNEWLINE try:NEWLINE func()NEWLINE except RecursionError:NEWLINE is_recursive = TrueNEWLINE finally:NEWLINE sys.settrace(None)NEWLINENEWLINE if is_recursive:NEWLINE self.fail(msg="function should not be recursive")NEWLINENEWLINE def aggregate(self, test_func: Callable, *args, tag: str = None, **kwargs):NEWLINE try:NEWLINE test_func(*args, **kwargs)NEWLINE except (self.failureException, unittest.SkipTest) as failure:NEWLINE self.aggregated_tests.append((failure, tag))NEWLINENEWLINE def aggregate_tests(self):NEWLINE """NEWLINE Must be called when done with the AggregateTestCase to propagateNEWLINE the failures. 
This is not in __exit__ due to hiding relevant tracebackNEWLINE levels the exception message ends up pointing to the last line.NEWLINE """NEWLINE msg = ''NEWLINE for error, tag, in self.aggregated_tests:NEWLINE msg += '\n' + textwrap.indent(str(error), ' ' * TAB_SIZE) + \NEWLINE (f' :: {tag}' if tag is not None else '')NEWLINENEWLINE if msg:NEWLINE self.fail(msg=msg)NEWLINENEWLINE def _truncateMessage(self, message, diff):NEWLINE """NEWLINE override unittest.TestCase._truncateMessage to use DIFF_OMITTED messageNEWLINE """NEWLINE max_diff = self.maxDiffNEWLINE if max_diff is None or len(diff) <= max_diff:NEWLINE return message + diffNEWLINE return message + DIFF_OMITTED.format(len(diff))NEWLINENEWLINE @propertyNEWLINE def name(self) -> str:NEWLINE return self._testMethodNameNEWLINENEWLINE @propertyNEWLINE def description(self) -> str:NEWLINE short_desc = self.shortDescription()NEWLINE return short_desc if short_desc else self.nameNEWLINENEWLINENEWLINEclass OrderedTestCase(TestCase):NEWLINE """ TestCase with the description property reflecting the test number """NEWLINENEWLINE @propertyNEWLINE def description(self):NEWLINE return f'{self.member_names.index(self.name) + 1}. {super().description}'NEWLINENEWLINENEWLINEclass TestResult(unittest.TestResult):NEWLINE """NEWLINE TestResult stores the result of each test in the order they were executedNEWLINE """NEWLINE def __init__(self, stream=None, descriptions=None, verbosity=None):NEWLINE super().__init__(stream, descriptions, verbosity)NEWLINE self._start = 0NEWLINE self._stop = 0NEWLINE # TestCaseClassName TestCaseNameNEWLINE self.results: Dict[str, Dict[str, Tuple[TestCase, TestOutcome]]] = OrderedDict()NEWLINENEWLINE def startTestRun(self):NEWLINE self._start = time.time()NEWLINE super().startTestRun()NEWLINENEWLINE def stopTestRun(self):NEWLINE self._stop = time.time()NEWLINE super().stopTestRun()NEWLINENEWLINE @propertyNEWLINE def run_time(self):NEWLINE return self._stop - self._startNEWLINENEWLINE def startTest(self, test: TestCase):NEWLINE test_cls_name = test.__class__.__name__NEWLINE if test_cls_name not in self.results:NEWLINE self.results[test_cls_name] = OrderedDict()NEWLINENEWLINE test_method = getattr(test.__class__, test.name)NEWLINE self._apply_skip(test, test.__class__)NEWLINE self._apply_skip(test, test_method)NEWLINENEWLINE super().startTest(test)NEWLINENEWLINE def _apply_skip(self, test: TestCase, test_item: Union[Type[TestCase], FunctionType]):NEWLINE """NEWLINE Applies the unittest attributes used for skipping tests if theNEWLINE __skip_test__ attribute has been applied to either the test class orNEWLINE method using the skipIfFailed decorator.NEWLINE """NEWLINE skip_test = getattr(test_item, '__skip_test__', None)NEWLINE if skip_test is None:NEWLINE returnNEWLINENEWLINE for test_cls, test_name, tag in skip_test:NEWLINE if test_cls is None: # if none then decorator was applied to current TestCaseNEWLINE # Set type of current TestCase and check if test method is definedNEWLINE test_cls = test.__class__NEWLINE if not hasattr(test_cls, test_name):NEWLINE raise AttributeError(f'{test_cls.__name__} has no method {test_name}')NEWLINENEWLINE test_cls_name = test_cls.__name__NEWLINENEWLINE # Check if TestCase has been runNEWLINE test_results = self.results.get(test_cls_name)NEWLINE if test_results is None:NEWLINE raise RuntimeError(NEWLINE f"Can't check to skip {test.__class__.__name__}.{test.name} if {test_cls_name} has not run")NEWLINENEWLINE # Check if test for TestCase has been runNEWLINE if test_name is not None and test_name 
not in test_results:NEWLINE raise RuntimeError(f"Can't check to skip {test.__class__.__name__}.{test.name} '"NEWLINE f"if {test_cls_name}.{test_name} has not run")NEWLINENEWLINE if test_name is not None:NEWLINE test_case, outcome = test_results[test_name]NEWLINE if outcome != TestOutcome.SUCCESS and \NEWLINE (tag is None or (tag is not None and any(t == tag for _, t in test_case.aggregated_tests))):NEWLINE # set attributes unittest looks for if a test is marked to skipNEWLINE test_item.__unittest_skip__ = TrueNEWLINE tag_msg = f" with tag '{tag}'" if tag is not None else ''NEWLINE test_item.__unittest_skip_why__ = f'Skipped due to failing/skipping {test_cls_name}.{test_name}{tag_msg}'NEWLINE breakNEWLINENEWLINE elif test_name is None and any(outcome != TestOutcome.SUCCESS for _, outcome in test_results.values()):NEWLINE test_item.__unittest_skip__ = TrueNEWLINE test_item.__unittest_skip_why__ = f'Skipped due to failing/skipping a test from {test_cls_name}'NEWLINE breakNEWLINE # set custom attribute to None since __unittest_skip__ has been appliedNEWLINE test_item.__skip_test = NoneNEWLINENEWLINE def addSubTest(self, test, subtest, err):NEWLINE raise NotImplementedError("TODO")NEWLINENEWLINE def add_outcome(self, test: TestCase, outcome: TestOutcome):NEWLINE self.results[test.__class__.__name__][test.name] = (test, outcome)NEWLINENEWLINE def addSuccess(self, test: TestCase):NEWLINE self.add_outcome(test, TestOutcome.SUCCESS)NEWLINE super().addSuccess(test)NEWLINENEWLINE @unittest.result.failfastNEWLINE def addFailure(self, test: TestCase, err: Tuple[Type[BaseException], BaseException, TracebackType]):NEWLINE self.add_outcome(test, TestOutcome.FAIL)NEWLINE super().addFailure(test, err)NEWLINENEWLINE @unittest.result.failfastNEWLINE def addError(self, test: TestCase, err: Tuple[Type[Exception], BaseException, TracebackType]):NEWLINE self.add_outcome(test, TestOutcome.FAIL)NEWLINE super().addError(test, err)NEWLINENEWLINE def addSkip(self, test: TestCase, reason: str):NEWLINE self.add_outcome(test, TestOutcome.SKIP)NEWLINE super().addSkip(test, reason)NEWLINENEWLINE def _is_relevant_tb_level(self, tb):NEWLINE """NEWLINE Override which is used with unittest.TestResult._exc_info_to_string toNEWLINE determine what levels of a traceback to skip when formatting the error.NEWLINE """NEWLINE return '__TEST_RUNNER' in tb.tb_frame.f_globals or super()._is_relevant_tb_level(tb)NEWLINENEWLINE def to_dict(self):NEWLINE return {NEWLINE test_cls:NEWLINE {name: outcome.value for name, (test, outcome) in res.items()}NEWLINE for test_cls, res in self.results.items()NEWLINE }NEWLINENEWLINENEWLINEclass TestNoPrint(TestCase):NEWLINE def __init__(self, stdio: RedirectStdIO):NEWLINE super().__init__()NEWLINE self._stdio = stdioNEWLINENEWLINE def runTest(self):NEWLINE """ check for no unexpected prints """NEWLINE self.assertEqual(self._stdio.stdout, '')NEWLINENEWLINENEWLINEclass TestMaster:NEWLINE """NEWLINE Core driving class which creates the TestSuite from the provided TestCasesNEWLINE """NEWLINE separator1 = '=' * BLOCK_WIDTHNEWLINE separator2 = '-' * BLOCK_WIDTHNEWLINE indent = ' ' * TAB_SIZENEWLINE _remove_path = re.compile(r'File ".*[\\/]([^\\/]+.py)"')NEWLINE # _remove_threading = re.compile(NEWLINE # r'(^\s*File \".*threading.py\".+?(?=\s*File \"))', flags=re.DOTALL | re.MULTILINE)NEWLINE _remove_importlib = re.compile(NEWLINE r'(^\s*File \".*importlib.*\".+?(?=\s{2}File \"))', flags=re.DOTALL | re.MULTILINE)NEWLINENEWLINE def __init__(self,NEWLINE max_diff: int = None,NEWLINE suppress_stdout: bool = 
True,NEWLINE timeout: float = DEFAULT_TIMEOUT,NEWLINE output_json: bool = False,NEWLINE hide_paths: bool = True,NEWLINE ignore_import_fails: bool = False,NEWLINE include_no_print: bool = False,NEWLINE scripts: List[Tuple[str, str]] = ()):NEWLINE """NEWLINE Parameters:NEWLINE max_diff: Determines the maximum length of diffs output by assertNEWLINE methods that report diffs on failure. Set to None for no maxNEWLINE suppress_stdout: If True all uncaught stdout output is suppressedNEWLINE timeout: global timeout value in seconds, if a timeout > 0 isNEWLINE specified then the tests are run in killable threads.NEWLINE output_json: outputs text summary if True else in json format.NEWLINE hide_paths: if True file paths in traceback messages for failuresNEWLINE are removed to only contain the filename.NEWLINE ignore_import_fails: If set to True not tests will run if any moduleNEWLINE being imported with 'scripts' fails to import correctly.NEWLINE Otherwise all tests will run.NEWLINE include_no_print: iff True adds a test for uncaught prints duringNEWLINE tests. Requires suppress_stdout to be set as well.NEWLINE scripts: list of tuples, these tuples are a pair of module name andNEWLINE module path that gets imported using 'path' with the __name__NEWLINE attribute of the module set to 'name'. On successful import aNEWLINE __TEST_RUNNER_CLEAN_IMPORT attribute is set on the module TrueNEWLINE if nothing was output to stdout otherwise False.NEWLINE """NEWLINE # argparse setupNEWLINE parser = argparse.ArgumentParser()NEWLINE parser.add_argument("-j", "--json",NEWLINE help="Whether or not to display output in JSON format.",NEWLINE action='store_true',NEWLINE default=output_json)NEWLINE parser.add_argument("-d", "--diff",NEWLINE help="The maximum number of characters in a diff",NEWLINE action="store",NEWLINE default=max_diff,NEWLINE type=int)NEWLINE parser.add_argument("-t", "--timeout",NEWLINE help="The maximum time a test is allowed to run before being killed",NEWLINE action="store",NEWLINE default=timeout,NEWLINE type=float)NEWLINE parser.add_argument('-p', '--paths', nargs="+")NEWLINE parser.add_argument('-s', '--scripts', nargs="+")NEWLINE parser.add_argument("--hide-tb-paths",NEWLINE help="Hide paths from traceback output.",NEWLINE action="store_true",NEWLINE default=hide_paths)NEWLINE parser.add_argument("--show-tb-duplicates",NEWLINE help="Remove duplicates from test output.",NEWLINE action="store_true",NEWLINE default=False)NEWLINE parser.add_argument("--ignore-import-fails",NEWLINE help="Continue tests even if an import fails",NEWLINE action="store_true",NEWLINE default=ignore_import_fails)NEWLINE parser.add_argument("--include-no-print",NEWLINE help="Adds test case for unexpected prints in functions",NEWLINE action="store_true",NEWLINE default=include_no_print)NEWLINE parser.add_argument("--suppress-stdout",NEWLINE help="Suppresses uncaught stdout output while running tests",NEWLINE action="store_true",NEWLINE default=suppress_stdout)NEWLINE self._args = args = parser.parse_args()NEWLINENEWLINE TestCase.maxDiff = args.diffNEWLINE _TimeoutThread.timeout = args.timeoutNEWLINENEWLINE if args.scripts or args.paths:NEWLINE if len(args.scripts or ()) != len(args.paths or ()):NEWLINE parser.error("must have equal number of values for 'imports' and 'paths'")NEWLINE scripts = zip(args.scripts, args.paths)NEWLINENEWLINE self.result = NoneNEWLINE self._import_errors = []NEWLINENEWLINE # import scriptsNEWLINE for name, path in scripts:NEWLINE name = name.strip()NEWLINE module, error = import_module(name, 
path)NEWLINE if module is not None:NEWLINE module: ModuleType = AttributeGuesser(module)NEWLINE TestCase.register_module(name, module)NEWLINE if error:NEWLINE self._import_errors.append(self.format_error(name, error))NEWLINE if not args.ignore_import_fails:NEWLINE breakNEWLINENEWLINE @staticmethodNEWLINE def _add_flavour(flavour: str, test_results: List[Tuple[TestCase, str]]):NEWLINE return [(flavour, test, msg) for test, msg in test_results]NEWLINENEWLINE def print_results(self, failed_tests: List[Tuple[str, TestCase, str]], result: TestResult):NEWLINE # print summaryNEWLINE print(BLOCK_TEMPLATE.format('Summary of Results'))NEWLINE for test_cls, test_cases in result.results.items():NEWLINE print(test_cls)NEWLINE for _test_name, (test, outcome) in test_cases.items():NEWLINE print(f'{self.indent}{outcome.value} {test.description}')NEWLINENEWLINE # failed importsNEWLINE if self._import_errors:NEWLINE print(self.separator2)NEWLINE print(BLOCK_TEMPLATE.format('Failed Imports'))NEWLINE for err_type, _, err_msg in self._import_errors:NEWLINE print(self.separator1)NEWLINE print(f'REASON: {err_type.upper()}')NEWLINE print(self.separator2)NEWLINE print(textwrap.indent(err_msg, self.indent))NEWLINENEWLINE # print failsNEWLINE if failed_tests:NEWLINE print(self.separator2)NEWLINE print(BLOCK_TEMPLATE.format('Failed/Skipped Tests'))NEWLINE prev = NoneNEWLINE for flavour, test, msg in failed_tests:NEWLINE if self._args.show_tb_duplicates:NEWLINE self.print_error(flavour, test, msg.strip())NEWLINE else:NEWLINE self.print_error(flavour, test, DUPLICATE_MSG if msg == prev else msg.strip())NEWLINE prev = msgNEWLINENEWLINE def print_error(self, flavour: str, test: TestCase, msg: str):NEWLINE print(self.separator1)NEWLINE print(f'{flavour}: {test.__class__.__name__} {test.description}')NEWLINE print(self.separator2)NEWLINE if self._args.hide_tb_paths:NEWLINE msg = self._remove_path.sub(r'File "\1"', msg)NEWLINE # msg = self._remove_threading.sub('', msg)NEWLINE print(textwrap.indent(msg, self.indent))NEWLINE print()NEWLINENEWLINE def format_error(self, name: str, exc_info) -> Tuple[str, str, str]:NEWLINE exc_type, exc_value, exc_traceback = exc_infoNEWLINE if exc_type is ImportError:NEWLINE msg = f"Tests not run due to {name} file not found"NEWLINE err_type = 'import'NEWLINE elif exc_type is SyntaxError:NEWLINE msg = "Tests not run due to syntax error"NEWLINE err_type = 'syntax'NEWLINE elif exc_type is EOFError:NEWLINE msg = "Tests not run due to unexpectedly waiting for input"NEWLINE err_type = 'eof'NEWLINE elif exc_type is IndentationError:NEWLINE msg = "Tests not run due to indentation error"NEWLINE err_type = 'indentation'NEWLINE else:NEWLINE msg = "Tests not run due to arbitrary exception"NEWLINE err_type = 'exception'NEWLINENEWLINE err_msg = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))NEWLINE err_msg = self._remove_importlib.sub('', err_msg)NEWLINE if self._args.hide_tb_paths:NEWLINE err_msg = self._remove_path.sub(r'File "\1"', err_msg)NEWLINENEWLINE return err_type, msg, err_msgNEWLINENEWLINE def output_results(self, all_tests: List[TestCase], result: TestResult):NEWLINE runtime = result.run_timeNEWLINE total = result.testsRunNEWLINE fails, skips = len(result.failures) + len(result.errors), len(result.skipped)NEWLINE passed = total - fails - skipsNEWLINENEWLINE if self._args.json:NEWLINE errors = []NEWLINE for err_type, msg, err_msg in self._import_errors:NEWLINE errors.append(dict(error=err_type, error_message=f'{msg}\n{err_msg}'))NEWLINE data = dict(total=total, 
failed=fails, skipped=skips, passed=passed,NEWLINE time=runtime, results=result.to_dict(), errors=errors)NEWLINE json.dump(data, sys.stdout, indent=4)NEWLINE else:NEWLINE # Join the lists sorted by the test orderNEWLINE failed_tests = sorted(NEWLINE self._add_flavour('FAIL', result.failures) +NEWLINE self._add_flavour('ERROR', result.errors) +NEWLINE self._add_flavour('SKIP', result.skipped),NEWLINE key=lambda t: all_tests.index(t[1]))NEWLINE self.print_results(failed_tests, result)NEWLINE print(self.separator2)NEWLINE print(f'Ran {total} tests in {runtime:.3f} seconds with 'NEWLINE f'{passed} passed/{skips} skipped/{fails} failed.')NEWLINENEWLINE def run(self, test_cases: List[Union[TestCase, Type[TestCase]]]) -> Optional[TestResult]:NEWLINE if not self._args.ignore_import_fails and self._import_errors:NEWLINE err_type, msg, err_msg = self._import_errors[0]NEWLINE if self._args.json:NEWLINE data = dict(error=err_type, error_message=f'{msg}\n{err_msg}')NEWLINE json.dump(data, sys.stdout, indent=4)NEWLINE else:NEWLINE print(BLOCK_TEMPLATE.format(msg))NEWLINE print(err_msg)NEWLINENEWLINE return NoneNEWLINENEWLINE suite = TestLoader().loadTestCases(test_cases)NEWLINENEWLINE # hide unittest outputNEWLINE with RedirectStdIO(stdout=self._args.suppress_stdout, stderr=True) as stdio:NEWLINE runner = unittest.TextTestRunner(stream=None,NEWLINE verbosity=0,NEWLINE resultclass=TestResult)NEWLINE if self._args.include_no_print:NEWLINE if not self._args.suppress_stdout:NEWLINE raise RuntimeError("Can't test for no print without suppressing stdout")NEWLINE suite.addTest(TestNoPrint(stdio))NEWLINENEWLINE all_tests = list(suite)NEWLINE result = runner.run(suite)NEWLINENEWLINE self.output_results(all_tests, result)NEWLINE return resultNEWLINE
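A possible usage sketch for the runner above, assuming the classes are in scope (for example, this file imported as a module or the snippet appended to it); `ExampleTests`, `sample` and `sample.py` are made-up names, not part of the runner.

# Hypothetical usage of the test runner defined above.
class ExampleTests(OrderedTestCase):
    def test_add_defined(self):
        """add function is defined with two parameters"""
        self.assertFunctionDefined(self.sample, 'add', 2)

    @timeout(1)
    @skipIfFailed(test_name='test_add_defined')
    def test_add_result(self):
        """add(2, 3) returns 5"""
        self.assertEqual(self.sample.add(2, 3), 5)


if __name__ == '__main__':
    master = TestMaster(max_diff=None,
                        timeout=2,
                        include_no_print=True,
                        scripts=[('sample', 'sample.py')])
    master.run([ExampleTests])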
# Abbreviate long words: a word longer than 10 characters is printed as its first
# letter, the number of middle letters, and its last letter (e.g. "localization" -> "l12n"
# has 12 middle letters? no: "localization" has 12 letters, so it becomes "l10n").
n = int(input())          # number of words to process
for _ in range(n):
    word = input()
    if len(word) <= 10:
        print(word)
    else:
        print(word[0] + str(len(word) - 2) + word[-1])
from flask import *NEWLINEimport hashlibNEWLINEfrom pymongo import *NEWLINEimport stringNEWLINEimport datetimeNEWLINEimport reNEWLINEfrom flask_cors import *NEWLINENEWLINEapp = Flask(__name__)NEWLINEcors = CORS(app)NEWLINENEWLINENEWLINE@app.errorhandler(404)NEWLINEdef page_not_found(e):NEWLINE return "error",404NEWLINENEWLINE@app.route('/')NEWLINE@cross_origin()NEWLINEdef index():NEWLINE return render_template('form.html')NEWLINENEWLINE@app.route('/upload')NEWLINEdef upload():NEWLINE return render_template('upload.html')NEWLINENEWLINE@app.route('/cat')NEWLINEdef cat():NEWLINE return render_template('cat.html')NEWLINENEWLINEclient = MongoClient(port=27017)NEWLINEdb=client.cc_assignment.usersNEWLINEcat = client.cc_assignment.categoriesNEWLINEact = client.cc_assignment.actNEWLINENEWLINEdef getNextSequence(collection,name):NEWLINE collection.update_one( { '_id': name },{ '$inc': {'seq': 1}})NEWLINE return int(collection.find_one({'_id':name})["seq"])NEWLINENEWLINE#api 1NEWLINE@app.route('/api/v1/users', methods=['POST'])NEWLINEdef process():NEWLINE j = request.get_json()NEWLINE name = j['name']NEWLINE password = j['password']NEWLINE if( len(password) != 40 or not all(c in string.hexdigits for c in password)):NEWLINE return jsonify({'code' : 600,"text":"not in sha1"}),400NEWLINENEWLINE if name and password and password != "da39a3ee5e6b4b0d3255bfef95601890afd80709":NEWLINE if(db.count_documents({"name":name})>0):NEWLINE print("username already exist")NEWLINE return jsonify({'code' : 405,"text":"username already exist"}),400NEWLINE result=db.insert_one({'userId': getNextSequence(client.cc_assignment.orgid_counter,"userId"), 'name': name, 'password' : password })NEWLINE return jsonify({'code' : 201}),201NEWLINE print("empty fields")NEWLINE return jsonify({'code' : 400,"text":"empty fields"}),400NEWLINENEWLINE#api 2NEWLINE@app.route('/api/v1/users/<username>', methods=['DELETE'])NEWLINEdef userdelete(username):NEWLINE if(db.count_documents({"name":username})>0):NEWLINE db.delete_one({"name":username})NEWLINE return jsonify({'code':200}),200NEWLINE else:NEWLINE print("api 2 user does not exist")NEWLINE return jsonify({'code':404,"text":"user does not exist" }),400NEWLINENEWLINE#api 3NEWLINE@app.route('/api/v1/categories', methods=['GET'])NEWLINEdef categorieAdd():NEWLINE j = cat.find()NEWLINE d = dict()NEWLINE for x in j:NEWLINE d[x['catName']]=x['size']NEWLINE return jsonify(d),200NEWLINENEWLINE#api 4NEWLINE@app.route('/api/v1/categories', methods=['POST'])NEWLINEdef categorieList():NEWLINE j = re.search("[A-Za-z0-9 _]+",(request.get_data().decode('utf-8')))NEWLINE if(j is None):NEWLINE print("empty input")NEWLINE return jsonify({'code':400}),400NEWLINE j = j.group(0)NEWLINE if(cat.count_documents({"catName":j})>0):NEWLINE print("categoryName already exist")NEWLINE return jsonify({'code':404}),400NEWLINE result=cat.insert_one({'catId': getNextSequence(client.cc_assignment.orgid_counter,"catId"), 'catName':j , 'size' : 0 })NEWLINE return jsonify({'code':200}),201NEWLINENEWLINE#api 5NEWLINE@app.route('/api/v1/categories/<categories>', methods=['DELETE'])NEWLINEdef catdelete(categories):NEWLINE if(cat.count_documents({"catName":categories})>0):NEWLINE cat.delete_one({"catName":categories})NEWLINE return jsonify({'code':200}),200NEWLINE else:NEWLINE print("categoryName does not exists")NEWLINE return jsonify({'code':404}),400NEWLINENEWLINE#api 6 and 8NEWLINE@app.route('/api/v1/categories/<categoryName>/acts', methods=['GET'])NEWLINEdef catactsizeprint(categoryName):NEWLINE start = 
request.args.get("start")NEWLINE end = request.args.get("end")NEWLINE if(not cat.count_documents({"catName":categoryName})>0):NEWLINE print("categoryName does not exists")NEWLINE return jsonify({"code": 400}),400NEWLINE if(start is None and end is None):NEWLINE d = dict()NEWLINE j = cat.find_one({"catName" : categoryName})NEWLINE if(j['size'] < 100):NEWLINE l = list()NEWLINE if(act.count_documents({"catName":categoryName}) == 0):NEWLINE print("empty category")NEWLINE return jsonify({'code':404}),204NEWLINE v = act.find({"catName" : categoryName},{"_id":0,"catName":0})NEWLINE for x in v:NEWLINE l.append(x)NEWLINE return jsonify(l),200NEWLINE else:NEWLINE print("more than 100 asked api6")NEWLINE return jsonify({"code":413}),413NEWLINE if(start is None or end is None):NEWLINE print("start or end missing")NEWLINE return jsonify({"code":1400}),400NEWLINE else :NEWLINE start = int(start)NEWLINE end = int(end)NEWLINE if(start > end or (start<0 or end <0)):NEWLINE print("start and end values and not correct")NEWLINE return jsonify({"code":1600}),400NEWLINE else :NEWLINE diff = end-start + 1NEWLINE k = 1NEWLINE ll = list()NEWLINE val = act.count_documents({"catName":categoryName})NEWLINE if(val < diff):NEWLINE print("start and end values and not correct")NEWLINE return jsonify({"code":1500}),400NEWLINE if(diff >100):NEWLINE print("more values than given or more than 100 values")NEWLINE return jsonify({"code" : 1400,"text":"more than 100 values "}),413NEWLINE if(val == 0):NEWLINE return jsonify({'code':1404}),204NEWLINE v = act.find({"catName" : categoryName},{"_id":0}).sort([("timestamp",-1)])NEWLINE for x in v:NEWLINE if(k <= diff):NEWLINE ll.append(x)NEWLINE k = k + 1NEWLINE return jsonify(ll),200NEWLINENEWLINE#api 7NEWLINE@app.route('/api/v1/categories/<categories>/acts/size', methods=['GET'])NEWLINEdef catactsize(categories):NEWLINE if(not cat.count_documents({"catName":categories})>0):NEWLINE return jsonify({"code": 400}),400NEWLINE else:NEWLINE j = cat.find({"catName" : categories})NEWLINE for x in j:NEWLINE l = x['size']NEWLINE if (l == 0 ):NEWLINE return jsonify({"code":411}),204NEWLINE return jsonify(x['size'])NEWLINENEWLINE#api 9NEWLINE@app.route('/api/v1/acts/upvote', methods=['POST'])NEWLINEdef upvote():NEWLINE if(request.get_data().decode('utf-8') == "[]"):NEWLINE return jsonify({"code": 410}),400 NEWLINE j = re.search("[0-9]+",(request.get_data().decode('utf-8')))NEWLINE j = j.group(0)NEWLINE print(j)NEWLINE if(not act.count_documents({"actId":int(j)})>0):NEWLINE return jsonify({"code": 400}),400NEWLINE else:NEWLINE act.update_one( { 'actId': int(j)},{ '$inc': {'upvote': 1}})NEWLINE return jsonify({"code": 200})NEWLINENEWLINE#api 10NEWLINE@app.route('/api/v1/acts/<actId>', methods=['DELETE'])NEWLINEdef actDelete(actId):NEWLINE if(not act.count_documents({"actId":int(actId)})>0):NEWLINE return jsonify({"code": 400}),400NEWLINE else:NEWLINE j = act.find({"actId":int(actId)},{"_id":0})NEWLINE for i in j:NEWLINE l=(i["catName"])NEWLINE print(l)NEWLINE cat.update_one({ 'catName':l },{ '$inc': {'size': -1}})NEWLINE act.delete_one({"actId":int(actId)})NEWLINE return jsonify({'code':200})NEWLINENEWLINENEWLINEdef validateDateTime(date_text):NEWLINE try:NEWLINE datetime.datetime.strptime(date_text, '%d-%m-%Y:%S-%M-%H')NEWLINE return TrueNEWLINE except ValueError:NEWLINE return FalseNEWLINEdef validateBase64(data_text):NEWLINE data_text = data_text.split(",")[1]NEWLINE if(re.search("[A-Za-z0-9+/=]", data_text) and len(data_text)%4==0):NEWLINE return TrueNEWLINE else:NEWLINE return 
FalseNEWLINE#api 11NEWLINE@app.route('/api/v1/acts', methods=['POST'])NEWLINEdef actUpload():NEWLINE j = request.get_json()NEWLINE #to validate unique IDNEWLINE if(act.count_documents({"actId":j['actId']})>0):NEWLINE return jsonify({"code":405}),400NEWLINE #to validate timestampNEWLINE if not validateDateTime(j['timestamp']):NEWLINE return jsonify({"code":406}),400NEWLINE #to validate user existsNEWLINE if(not db.count_documents({"name":j['username']})>0):NEWLINE return jsonify({"code":407}),400NEWLINE #to validate Base64 codeNEWLINE if(not validateBase64(j['imgB64'])):NEWLINE return jsonify({"code":408}),400NEWLINE #to validate upvoteNEWLINE if("upvote" in j):NEWLINE return jsonify({"code":409}),400NEWLINE #to validate that cat existsNEWLINE if(not cat.count_documents({"catName":j['categoryName']})>0):NEWLINE return jsonify({"code":410}),400NEWLINENEWLINE result=act.insert_one({'actId':j['actId'] , 'username': j['username'], 'timestamp' : j['timestamp'], 'caption':j['caption'], 'catName':j['categoryName'], 'imgB64':j['imgB64'], 'upvote':0 })NEWLINE cat.update_one({ 'catName':j['categoryName'] },{ '$inc': {'size': 1}})NEWLINE client.cc_assignment.orgid_counter.update_one( {'_id':"actId"},{'$inc': {'seq': 1}})NEWLINE return jsonify({'code':200}),201NEWLINENEWLINE# helper api'sNEWLINE# get act idNEWLINE@app.route('/api/get/actId')NEWLINEdef actid():NEWLINE f = client.cc_assignment.orgid_counter.find_one({"_id":"actId"})NEWLINE return jsonify(f['seq'])NEWLINENEWLINE#down voteNEWLINE@app.route('/api/v1/acts/downvote', methods=['POST'])NEWLINEdef downvote():NEWLINE j = request.get_json()NEWLINE if(not act.count_documents({"actId":j['actId']})>0):NEWLINE return jsonify({"code": 400}),400NEWLINE else:NEWLINE act.update_one( { 'actId': j['actId'] },{ '$inc': {'upvote': -1}})NEWLINE return jsonify({"code": 200})NEWLINENEWLINE#loginNEWLINE@app.route('/api/v1/users/login', methods=['POST'])NEWLINEdef processes():NEWLINE j = request.get_json()NEWLINE name = j['name']NEWLINE password = j['password']NEWLINE if( len(password) != 40 or not all(c in string.hexdigits for c in password) ):NEWLINE return jsonify({'code' : 600 ,"text" :"Sha1 error"}),200NEWLINENEWLINE if name and password and password != "da39a3ee5e6b4b0d3255bfef95601890afd80709":NEWLINE if(db.count_documents({"name":name})<=0):NEWLINE return jsonify({'code' : 405 ,"text" :"login fail"}),400NEWLINENEWLINE v = db.find_one({'name': name},{"_id":0})NEWLINE return jsonify({'code' : 201,"text" :"Successfull login","userId":v["userId"]}),201NEWLINE return jsonify({'code' : 400,"text" :"data missing"}),400NEWLINENEWLINE#get list of usersNEWLINE@app.route('/api/v1/userlist', methods=['GET'])NEWLINEdef listuser():NEWLINE j = db.find()NEWLINE d = dict()NEWLINE for x in j:NEWLINE d[x['name']]=x['userId']NEWLINE return jsonify(d)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE app.run(host='0.0.0.0',port=80,debug = True)NEWLINE
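A minimal client-side sketch of how the user and category endpoints above can be exercised; the base URL and the example credentials are illustrative assumptions only (the server, MongoDB instance, and port 80 from app.run() are assumed to be available).

import hashlib
import requests

BASE = 'http://localhost:80'  # assumed: matches app.run(host='0.0.0.0', port=80) above

# api 1: register a user (the password field must be a 40-character SHA1 hex digest)
password = hashlib.sha1(b'example-password').hexdigest()
r = requests.post(BASE + '/api/v1/users', json={'name': 'alice', 'password': password})
print(r.status_code, r.json())

# api 3: list all categories with their sizes
r = requests.get(BASE + '/api/v1/categories')
print(r.status_code, r.json())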
# -*- coding: utf-8 -*-
# Author:w k

import nonebot as rcnb
import asyncio
from gadget.untils.aiorequests import Aiorequests as requests

RCNBOT = rcnb.get_bot()


### Baidu

# Fetch the access token for Baidu speech synthesis (refreshed every 10 days)
@rcnb.scheduler.scheduled_job('interval', days=10)
async def get_bd_token():
    token_url = 'https://aip.baidubce.com/oauth/2.0/token'
    data = {
        'grant_type': 'client_credentials',
        'client_id': RCNBOT.config.BD_CLIENT_ID,
        'client_secret': RCNBOT.config.BD_CLIENT_SECRET,
    }
    headers = {
        'Content-Type': 'application/json; charset=UTF-8'
    }
    response = await requests.post(token_url, data=data, headers=headers)
    key = response['access_token']
    if key:
        RCNBOT.config.BD_TOKEN = key
    else:
        await asyncio.sleep(60)
        return await get_bd_token()


### Bilibili
LIVE_OPEN = []
SEND_QUEUE = []


async def get_live_status(room):
    status = await requests.get(f'http://api.live.bilibili.com/room/v1/Room/room_init?id={room}')
    if status['msg'] == '房间不存在':  # API answers "room does not exist"
        return '房间不存在'
    return status['data']['live_status']


# Send queued "stream started" notifications, throttled to one item every 3 seconds
@rcnb.scheduler.scheduled_job('interval', seconds=3)
async def send_message():
    if not len(SEND_QUEUE):
        return
    item = SEND_QUEUE.pop(0)
    msg = f'你订阅的 {item["roomid"]}号房间开播啦~'  # "Room {roomid} you subscribed to is now live~"
    for cid in item['userset']:
        target_type = cid.split('/')[1]
        target_id = cid.split('/')[2]
        if target_type == 'group':
            await RCNBOT.send_msg(message_type='group', group_id=target_id, message=msg)
        else:
            await RCNBOT.send_msg(message_type='private', user_id=target_id, message=msg)
        await asyncio.sleep(0.1)

# Poll subscribed Bilibili rooms and queue a notification when a stream opens
@rcnb.scheduler.scheduled_job('interval', seconds=RCNBOT.config.CHECK_OPEN_STATUS)
async def live_status_open():
    for item in RCNBOT.config.BILIBILI_SUBSCRIPTION_INFO:
        if item['roomid'] not in LIVE_OPEN:
            status = await get_live_status(item['roomid'])
            if status == 1:
                LIVE_OPEN.append(item['roomid'])
                SEND_QUEUE.append(item)
        await asyncio.sleep(0.2)


# Mark rooms as no longer live once their stream ends
#@rcnb.scheduler.scheduled_job('interval', minutes=RCNBOT.config.CHECK_CLOSE_STATUS)
@rcnb.scheduler.scheduled_job('interval', seconds=30)
async def live_status_close():
    for item in RCNBOT.config.BILIBILI_SUBSCRIPTION_INFO:
        if item['roomid'] in LIVE_OPEN:
            status = await get_live_status(item['roomid'])
            if status != 1 and len(LIVE_OPEN) > 0:
                LIVE_OPEN.remove(item['roomid'])
import warningsNEWLINEfrom unittest.mock import MagicMockNEWLINENEWLINEfrom joblibspark.backend import SparkDistributedBackendNEWLINENEWLINENEWLINEdef test_effective_n_jobs():NEWLINENEWLINE backend = SparkDistributedBackend()NEWLINE max_num_concurrent_tasks = 8NEWLINE backend._get_max_num_concurrent_tasks = MagicMock(return_value=max_num_concurrent_tasks)NEWLINENEWLINE assert backend.effective_n_jobs(n_jobs=None) == 1NEWLINE assert backend.effective_n_jobs(n_jobs=-1) == 8NEWLINE assert backend.effective_n_jobs(n_jobs=4) == 4NEWLINENEWLINE with warnings.catch_warnings(record=True) as w:NEWLINE warnings.simplefilter("always")NEWLINE assert backend.effective_n_jobs(n_jobs=16) == 16NEWLINE assert len(w) == 1NEWLINE
'''Remove "地" & "得" from output lable file.NEWLINE'''NEWLINENEWLINEimport argparseNEWLINEfrom os import pardirNEWLINEimport reNEWLINENEWLINENEWLINEdef remove_de(input_path, output_path):NEWLINE with open(input_path) as f:NEWLINE data = f.read()NEWLINENEWLINE data = re.sub(r'\d+, 地(, )?', '', data)NEWLINE data = re.sub(r'\d+, 得(, )?', '', data)NEWLINE data = re.sub(r', \n', '\n', data)NEWLINE data = re.sub(r'(\d{5})\n', r'\1, 0\n', data)NEWLINENEWLINE with open(output_path, 'w') as f:NEWLINE f.write(data)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE parser = argparse.ArgumentParser()NEWLINE parser.add_argument('--input_path', required=True)NEWLINE parser.add_argument('--output_path', required=True)NEWLINE args = parser.parse_args()NEWLINENEWLINE remove_de(NEWLINE input_path=args.input_path,NEWLINE output_path=args.output_path,NEWLINE )NEWLINENEWLINE # remove_de(NEWLINE # input_path='test13/bert-mlm-wwm_v1/lbl_test_42.txt',NEWLINE # output_path='test13/bert-mlm-wwm_v1/lbl_test_42_remove.txt',NEWLINE # )NEWLINE
class RadixSort:
    def __init__(self, arr):
        self.arr = arr

    def _count_sort(self, exp):
        # Stable counting sort of self.arr keyed on the digit at position `exp` (1, 10, 100, ...).
        n = len(self.arr)
        sorted_arr = [0] * n
        count_arr = [0] * 10
        for i in range(n):
            index = (self.arr[i] // exp) % 10
            count_arr[index] += 1
        for i in range(1, 10):
            count_arr[i] += count_arr[i - 1]
        for i in range(n - 1, -1, -1):
            index = (self.arr[i] // exp) % 10
            sorted_arr[count_arr[index] - 1] = self.arr[i]
            count_arr[index] -= 1
        self.arr = sorted_arr

    def _radix_sort(self):
        # Sort by each digit, least significant first, stopping once the largest value is exhausted.
        maximum = max(self.arr)
        exp = 1
        while maximum // exp > 0:
            self._count_sort(exp)
            exp *= 10

    def result(self):
        self._radix_sort()
        return self.arr
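A short usage sketch for the RadixSort class above; the sample list is an illustrative assumption, and the implementation expects non-negative integers.

if __name__ == '__main__':
    sorter = RadixSort([170, 45, 75, 90, 802, 24, 2, 66])
    print(sorter.result())  # [2, 24, 45, 66, 75, 90, 170, 802]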
# -*- coding: utf-8 -*-NEWLINE#NEWLINE# Copyright: (c) 2011 by the Serge S. Koval, see AUTHORS for more details.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License"); you mayNEWLINE# not use this file except in compliance with the License. You may obtainNEWLINE# a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS, WITHOUTNEWLINE# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See theNEWLINE# License for the specific language governing permissions and limitationsNEWLINE# under the License.NEWLINENEWLINE"""NEWLINE tornadio2.serverNEWLINE ~~~~~~~~~~~~~~~~NEWLINENEWLINE Implements handy wrapper to start FlashSocket server (if FlashSocketNEWLINE protocol is enabled). Shamesly borrowed from the SocketTornad.IO project.NEWLINE"""NEWLINENEWLINEimport loggingNEWLINENEWLINEfrom tornado import ioloopNEWLINEfrom tornado.httpserver import HTTPServerNEWLINENEWLINEfrom tornadio2.flashserver import FlashPolicyServerNEWLINENEWLINENEWLINEclass SocketServer(HTTPServer):NEWLINE """HTTP Server which does some configuration and automatic setupNEWLINE of Socket.IO based on configuration.NEWLINE Starts the IOLoop and listening automaticallyNEWLINE in contrast to the Tornado default behavior.NEWLINE If FlashSocket is enabled, starts up the policy server also."""NEWLINENEWLINE def __init__(self, application,NEWLINE no_keep_alive=False, io_loop=None,NEWLINE xheaders=False, ssl_options=None,NEWLINE auto_start=TrueNEWLINE ):NEWLINE """Initializes the server with the given request callback.NEWLINENEWLINE If you use pre-forking/start() instead of the listen() method toNEWLINE start your server, you should not pass an IOLoop instance to thisNEWLINE constructor. 
Each pre-forked child process will create its ownNEWLINE IOLoop instance after the forking process.NEWLINENEWLINE `application`NEWLINE Tornado applicationNEWLINE `no_keep_alive`NEWLINE Support keep alive for HTTP connections or notNEWLINE `io_loop`NEWLINE Optional io_loop instance.NEWLINE `xheaders`NEWLINE Extra headersNEWLINE `ssl_options`NEWLINE Tornado SSL optionsNEWLINE `auto_start`NEWLINE Set auto_start to False in order to have opportunitiesNEWLINE to work with server object and/or perform some actionsNEWLINE after server is already created but before ioloop will start.NEWLINE Attention: if you use auto_start param set to FalseNEWLINE you should start ioloop manuallyNEWLINE """NEWLINE settings = application.settingsNEWLINENEWLINE flash_policy_file = settings.get('flash_policy_file', None)NEWLINE flash_policy_port = settings.get('flash_policy_port', None)NEWLINE socket_io_port = settings.get('socket_io_port', 8001)NEWLINE socket_io_address = settings.get('socket_io_address', '')NEWLINENEWLINE io_loop = io_loop or ioloop.IOLoop.instance()NEWLINENEWLINE HTTPServer.__init__(self,NEWLINE application,NEWLINE no_keep_alive,NEWLINE io_loop,NEWLINE xheaders,NEWLINE ssl_options)NEWLINENEWLINE logging.info('Starting up tornadio server on port \'%s\'',NEWLINE socket_io_port)NEWLINENEWLINE self.listen(socket_io_port, socket_io_address)NEWLINENEWLINE if flash_policy_file is not None and flash_policy_port is not None:NEWLINE try:NEWLINE logging.info('Starting Flash policy server on port \'%d\'',NEWLINE flash_policy_port)NEWLINENEWLINE FlashPolicyServer(NEWLINE io_loop=io_loop,NEWLINE port=flash_policy_port,NEWLINE policy_file=flash_policy_file)NEWLINE except Exception, ex:NEWLINE logging.error('Failed to start Flash policy server: %s', ex)NEWLINENEWLINE if auto_start:NEWLINE logging.info('Entering IOLoop...')NEWLINE io_loop.start()NEWLINE
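A hedged usage sketch for SocketServer, assuming tornadio2's TornadioRouter and SocketConnection; the connection class, ports, and policy-file path are illustrative assumptions, not part of this module.

from tornado import web
from tornadio2 import TornadioRouter, SocketConnection


class EchoConnection(SocketConnection):
    def on_message(self, message):
        self.send(message)  # echo the message back to the client


router = TornadioRouter(EchoConnection)
application = web.Application(
    router.urls,
    socket_io_port=8001,                           # read from application.settings by SocketServer
    flash_policy_port=843,                         # optional: enables the Flash policy server
    flash_policy_file='/path/to/flashpolicy.xml',  # placeholder path
)

if __name__ == '__main__':
    SocketServer(application)  # listens and starts the IOLoop (auto_start=True)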
from django.contrib.sites.models import SiteNEWLINEfrom django.db import modelsNEWLINEfrom django.urls import NoReverseMatch, get_script_prefix, reverseNEWLINEfrom django.utils.encoding import iri_to_uriNEWLINEfrom django.utils.translation import gettext_lazy as _NEWLINENEWLINENEWLINEclass FlatPage(models.Model):NEWLINE url = models.CharField(_("URL"), max_length=100, db_index=True)NEWLINE title = models.CharField(_("title"), max_length=200)NEWLINE content = models.TextField(_("content"), blank=True)NEWLINE enable_comments = models.BooleanField(_("enable comments"), default=False)NEWLINE template_name = models.CharField(NEWLINE _("template name"),NEWLINE max_length=70,NEWLINE blank=True,NEWLINE help_text=_(NEWLINE "Example: “flatpages/contact_page.html”. If this isn’t provided, "NEWLINE "the system will use “flatpages/default.html”."NEWLINE ),NEWLINE )NEWLINE registration_required = models.BooleanField(NEWLINE _("registration required"),NEWLINE help_text=_(NEWLINE "If this is checked, only logged-in users will be able to view the page."NEWLINE ),NEWLINE default=False,NEWLINE )NEWLINE sites = models.ManyToManyField(Site, verbose_name=_("sites"))NEWLINENEWLINE class Meta:NEWLINE db_table = "django_flatpage"NEWLINE verbose_name = _("flat page")NEWLINE verbose_name_plural = _("flat pages")NEWLINE ordering = ["url"]NEWLINENEWLINE def __str__(self):NEWLINE return "%s -- %s" % (self.url, self.title)NEWLINENEWLINE def get_absolute_url(self):NEWLINE from .views import flatpageNEWLINENEWLINE for url in (self.url.lstrip("/"), self.url):NEWLINE try:NEWLINE return reverse(flatpage, kwargs={"url": url})NEWLINE except NoReverseMatch:NEWLINE passNEWLINE # Handle script prefix manually because we bypass reverse()NEWLINE return iri_to_uri(get_script_prefix().rstrip("/") + self.url)NEWLINE
# -*- coding: utf-8 -*-NEWLINE"""NEWLINECreated on Mon Jun 08 10:17:32 2015NEWLINENEWLINE@author: PacoNEWLINE"""NEWLINENEWLINEfrom api import APINEWLINENEWLINEclass Soundcloud(API):NEWLINENEWLINE _class_name = 'Soundcloud'NEWLINE _category = 'Music'NEWLINE _help_url = 'https://developers.soundcloud.com/docs/api/guide'NEWLINE _api_url = 'http://api.soundcloud.com/'NEWLINENEWLINE def __init__(self,apikey):NEWLINE self._api_key = apikeyNEWLINENEWLINE def _parsing_data(self,data):NEWLINE res = {'title':list(),'downloads':list(),'favorites':list(),'comments':list(),'genre':list(),'duration':list(),'tags':list(),'description':list(),'url':list()}NEWLINE for d in data:NEWLINE res['title'].append(self._tools.key_test('title',d))NEWLINE res['downloads'].append(self._tools.key_test('download_count',d,'int'))NEWLINE res['favorites'].append(self._tools.key_test('favoritings_count',d,'int'))NEWLINE res['comments'].append(self._tools.key_test('comment_count',d,'int'))NEWLINE res['genre'].append(self._tools.key_test('genre',d))NEWLINE res['duration'].append(self._tools.key_test('duration',d))NEWLINE res['tags'].append(self._tools.key_test('tag_list',d,'list'))NEWLINE res['description'].append(self._tools.key_test('description',d))NEWLINE res['url'].append(self._tools.key_test('permalink_url',d))NEWLINE return resNEWLINENEWLINE def _parsing_data2(self,data):NEWLINE res = {'username':list(),'country':list(),'name':list(),'description':list(),'city':list(),'website':list(),'tracks':list(),'followers':list()}NEWLINE for d in data:NEWLINE res['username'].append(self._tools.key_test('username',d))NEWLINE res['country'].append(self._tools.key_test('country',d))NEWLINE res['name'].append(self._tools.key_test('full_name',d))NEWLINE res['description'].append(self._tools.key_test('description',d))NEWLINE res['city'].append(self._tools.key_test('city',d))NEWLINE res['website'].append(self._tools.key_test('website',d))NEWLINE res['tracks'].append(self._tools.key_test('track_count',d,'int'))NEWLINE res['followers'].append(self._tools.key_test('followers_count',d,'int'))NEWLINE return resNEWLINENEWLINE def _parsing_data3(self,data):NEWLINE res = {'name':list(),'tracks':list(),'members':list(),'contributors':list(),'url':list(),'description':list()}NEWLINE for d in data:NEWLINE res['name'].append(self._tools.key_test('name',d))NEWLINE res['tracks'].append(self._tools.key_test('track_count',d,'int'))NEWLINE res['members'].append(self._tools.key_test('members_count',d,'int'))NEWLINE res['contributors'].append(self._tools.key_test('contributors_count',d,'int'))NEWLINE res['url'].append(self._tools.key_test('permalink_url',d))NEWLINE res['description'].append(self._tools.key_test('description',d))NEWLINE return resNEWLINENEWLINE def _parsing_data4(self,data):NEWLINE res = {'id':list(),'text':list()}NEWLINE for d in data:NEWLINE res['id'].append(self._tools.key_test('track_id',d,'int'))NEWLINE res['text'].append(self._tools.key_test('body',d))NEWLINE return resNEWLINENEWLINE def search_tracks(self,text='',limit=10):NEWLINE text = text.replace(' ','+')NEWLINE url = self._api_url+'tracks.json?client_id='+self._api_key+'&q='+text+'&limit='+str(limit)NEWLINE data = self._tools.data_from_url(url)NEWLINE self._increment_nb_call()NEWLINE return self._parsing_data(data)NEWLINENEWLINE def get_infos_track(self,idd=182242225):NEWLINE url = self._api_url+'tracks/'+str(idd)+'?client_id='+self._api_keyNEWLINE data = self._tools.data_from_url(url)NEWLINE self._increment_nb_call()NEWLINE return self._parsing_data(data)NEWLINENEWLINE def 
get_latest_tracks(self,limit=10):NEWLINE url = self._api_url+'tracks.json?client_id='+self._api_key+'&limit='+str(limit)NEWLINE data = self._tools.data_from_url(url)NEWLINE self._increment_nb_call()NEWLINE return self._parsing_data(data)NEWLINENEWLINE def search_users(self,text='',limit=10):NEWLINE text = text.replace(' ','+')NEWLINE url = self._api_url+'users?client_id='+self._api_key+'&q='+text+'&limit='+str(limit)NEWLINE data = self._tools.data_from_url(url)NEWLINE self._increment_nb_call()NEWLINE return self._parsing_data2(data)NEWLINENEWLINE def search_groups(self,text='',limit=10):NEWLINE text = text.replace(' ','+')NEWLINE url = self._api_url+'groups?client_id='+self._api_key+'&q='+text+'&limit='+str(limit)NEWLINE data = self._tools.data_from_url(url)NEWLINE self._increment_nb_call()NEWLINE return self._parsing_data3(data)NEWLINENEWLINE def get_latest_comments(self,limit=10):NEWLINE url = self._api_url+'comments?client_id='+self._api_key+'&limit='+str(limit)NEWLINE data = self._tools.data_from_url(url)NEWLINE self._increment_nb_call()NEWLINE return self._parsing_data4(data)NEWLINE
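A brief, hedged usage sketch for the Soundcloud wrapper above; the client id is a placeholder, and the `_tools` helpers and call counter are assumed to be supplied by the parent API class.

if __name__ == '__main__':
    sc = Soundcloud('YOUR_CLIENT_ID')              # placeholder API key
    tracks = sc.search_tracks('ambient', limit=5)  # dict of parallel lists, see _parsing_data
    for title, url in zip(tracks['title'], tracks['url']):
        print(title, url)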
"""remove post authorNEWLINENEWLINERevision ID: ba46e93ec9c9NEWLINERevises: 03414fac27d3NEWLINECreate Date: 2022-03-13 19:57:04.386858NEWLINENEWLINE"""NEWLINEfrom alembic import opNEWLINEimport sqlalchemy as saNEWLINENEWLINENEWLINE# revision identifiers, used by Alembic.NEWLINErevision = 'ba46e93ec9c9'NEWLINEdown_revision = '03414fac27d3'NEWLINEbranch_labels = NoneNEWLINEdepends_on = NoneNEWLINENEWLINENEWLINEdef upgrade():NEWLINE # ### commands auto generated by Alembic - please adjust! ###NEWLINE op.drop_column('posts', 'author')NEWLINE # ### end Alembic commands ###NEWLINENEWLINENEWLINEdef downgrade():NEWLINE # ### commands auto generated by Alembic - please adjust! ###NEWLINE op.add_column('posts', sa.Column('author', sa.VARCHAR(length=255), autoincrement=False, nullable=True))NEWLINE # ### end Alembic commands ###NEWLINE
from nose.tools import raisesNEWLINEfrom mhcnames import normalize_allele_name, AlleleParseErrorNEWLINENEWLINE@raises(AlleleParseError)NEWLINEdef test_extra_text_after_allele():NEWLINE normalize_allele_name("HLA-A*02:01 zipper")
from rest_framework import routersNEWLINENEWLINEfrom api.transaction.viewsets import TransactionViewSetNEWLINENEWLINErouter = routers.SimpleRouter(trailing_slash=False)NEWLINENEWLINErouter.register('', TransactionViewSet, basename='transactions')NEWLINENEWLINEurlpatterns = [NEWLINE *router.urls,NEWLINE]NEWLINE
#!/usr/bin/env pythonNEWLINENEWLINE#github:strike-afkNEWLINE#Youtube:STRİKENEWLINE#Codded by/StrikeNEWLINEimport os NEWLINEimport sysNEWLINEimport timeNEWLINE#STRİKE BURADA ☾★NEWLINENEWLINE#BANNERNEWLINEprint("Yükleniyor..")NEWLINEos.system("sleep 3")NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEprint()NEWLINEos.system("clear")NEWLINEprint("""NEWLINE\033[91mNEWLINE -`NEWLINE .o+`NEWLINE `ooo/NEWLINE `+oooo:NEWLINE `+oooooo:NEWLINE -+oooooo+:NEWLINE `/:-:++oooo+:NEWLINE `/++++/+++++++:NEWLINE `/++++++++++++++:NEWLINE `/+++ooooooooooooo/`NEWLINE ./ooosssso++osssssso+`NEWLINE .oossssso-ssoo/ossssss+`NEWLINE -osssssso+++++++:sssssso+`NEWLINE /sss/+\033[94mCodded by/Strike\033[91m+oss.NEWLINE /ossssssss/+ooooooox+ssssooo/-NEWLINE `/ossssso+/++++++++++-:/+osssso+-NEWLINE `+sso+osssssssssssssssxpo+++/+oso:NEWLINE `++:+os+oooooooooooosssxo+sooss`-/+/NEWLINE .\033[94m☾★ ☾★NEWLINE \033[94mInstagram:strikeofficialsNEWLINE ☾★ \033[92mYoutube :STRİKE\033[94m ☾★NEWLINE \033[94MGithub :strike-afk NEWLINE☾★ ☾★NEWLINE\033[93m[1]\033[94mXerXes \033[93m[12]\033[94mT-U-R-KNEWLINE\033[93m[2]\033[94mgrabcam \033[93m[13]\033[94mCamera-trNEWLINE\033[93m[3]\033[94mipmux \033[93m[14]\033[94mFinduserNEWLINE\033[93m[4]\033[94mIg-brute \033[93m[15]\033[94mTermux_Full_KurulumNEWLINE\033[93m[5]\033[94mAdmin Panel finder \033[93m[16]\033[94mipolusturNEWLINE\033[93m[6]\033[94malgosspammer \033[93m[17]\033[94mtorshammerNEWLINE\033[93m[7]\033[94mTBomb \033[93m[18]\033[94mOsi-igNEWLINE\033[93m[8]\033[94mStrike \033[93m[19]\033[94mphishingNEWLINE\033[93m[9]\033[94mimpulse \033[93m[20]\033[94mTermuxu güncelleNEWLINE\033[93m[10]\033[94mtc-kimlik \033[93m[21]\033[94msite-pingNEWLINE\033[93m[11]\033[94mOSIF \033[93m[22]\033[94mHack-ToolsNEWLINE \033[92m[0]Çıkış NEWLINE""")NEWLINEstrike=input("\033[93mişlem numarasını seçiniz: ")NEWLINENEWLINEif strike=="1":NEWLINE os.system("git clone " " https://github.com/CyberXCodder/XerXes")NEWLINEif strike=="2":NEWLINE os.system("git clone " " https://github.com/noob-hackers/grabcam")NEWLINENEWLINEif strike=="3":NEWLINE os.system("git clone " " https://github.com/Amriez/ipmux")NEWLINEif strike=="4":NEWLINE os.system("git clone " " https://github.com/keralahacker/Ig-brute")NEWLINEif strike=="5":NEWLINE os.system("git clone " " https://github.com/bdblackhat/admin-panel-finder")NEWLINEif strike=="6":NEWLINE os.system("git clone " " https://github.com/algospoines/algosspammer")NEWLINEif strike=="7":NEWLINE os.system("git clone " " https://github.com/Hackertrackersj/Tbomb")NEWLINEif strike=="8":NEWLINE os.system("git clone " " https://github.com/strike-afk/Strike")NEWLINEif strike=="9":NEWLINE os.system("git clone " " https://github.com/LimerBoy/Impulse")NEWLINEif strike=="10":NEWLINE os.system("git clone " " https://github.com/ibrahimirdem/tckimlik")NEWLINEif strike=="11":NEWLINE os.system("git clone " " https://github.com/CiKu370/OSIF")NEWLINEif strike=="12":NEWLINE os.system("git clone " " https://github.com/yamanefkar/T-U-R-K")NEWLINEif strike=="13":NEWLINE os.system("git clone " " https://github.com/yamanefkar/Camera-Tr")NEWLINEif strike=="14":NEWLINE os.system("git clone " " https://github.com/xHak9x/finduser")NEWLINEif strike=="15":NEWLINE os.system("git clone " " https://github.com/M49R0/Termux_Full_Kurulum")NEWLINEif strike=="16":NEWLINE os.system("git clone " " https://github.com/saepsh/ipolustur")NEWLINEif strike=="17":NEWLINE os.system("git clone " " 
https://github.com/dotfighter/torshammer")NEWLINEif strike=="18":NEWLINE os.system("git clone " " https://github.com/th3unkn0n/osi.ig")NEWLINEif strike=="19":NEWLINE os.system("git clone " " https://github.com/xHak9x/SocialPhish")NEWLINEif strike=="20":NEWLINE os.system("apt update -y")NEWLINE os.system("apt upgrade -y")NEWLINEif strike=="21":NEWLINE os.system("git clone " " https://github.com/strike-afk/site-ping")NEWLINEif strike=="22":NEWLINE os.system("git clone " " https://github.com/yamanefkar/Hack-Tools")NEWLINENEWLINEelse:NEWLINE input("\033[94mdevam etmek için enter'a bas ")NEWLINEif strike=="0":NEWLINE print("\033[94mÇıkış Yapılıyor...")NEWLINE os.system("sleep 3")NEWLINE print()NEWLINE print()NEWLINE print("\033[94mÇıkış Yapıldı√")NEWLINE os.system("sleep 2")NEWLINE quit()NEWLINEelse:NEWLINE os.system("python strike-tools.py")NEWLINE print()NEWLINENEWLINENEWLINENEWLINE
from lxml import etree
import requests
import pandas as pd
import time

'''
Collect the URLs of all papers from the AAAI main pages of recent years
'''
url_home17 = 'https://aaai.org/ocs/index.php/AAAI/AAAI17/schedConf/presentations'
url_home16 = 'https://aaai.org/ocs/index.php/AAAI/AAAI16/schedConf/presentations'
url_home15 = 'https://aaai.org/ocs/index.php/AAAI/AAAI15/schedConf/presentations'
url_home14 = 'https://aaai.org/ocs/index.php/AAAI/AAAI14/schedConf/presentations'
start = time.time()
r_home17 = requests.get(url_home17)
print('Time spent requesting and fetching the 2017 index page: ' + str(time.time()-start) + 's')
html17 = r_home17.content
url_sets17 = []
url_sets17 = etree.HTML(html17).xpath('//*[@id="content"]/table/tr[1]/td[1]/a/@href')
print(url_sets17)
print(len(url_sets17))
df = pd.DataFrame(url_sets17)
df.to_excel('C:\\Users\\Administrator\\Desktop\\2017url.xlsx',sheet_name='Sheet1',na_rep=0,startrow=0,startcol=0,index=False,header=False)  # na_rep fills missing values

# 2016
r_home16 = requests.get(url_home16)
print('Time spent requesting and fetching the 2016 index page: ' + str(time.time()-start) + 's')
html16 = r_home16.content
url_sets16 = []
url_sets16 = etree.HTML(html16).xpath('//*[@id="content"]/table/tr[1]/td[1]/a/@href')
print(url_sets16)
print(len(url_sets16))
df = pd.DataFrame(url_sets16)
df.to_excel('C:\\Users\\Administrator\\Desktop\\2016url.xlsx',sheet_name='Sheet1',na_rep=0,startrow=0,startcol=0,index=False,header=False)  # na_rep fills missing values

# 2015
r_home15 = requests.get(url_home15)
print('Time spent requesting and fetching the 2015 index page: ' + str(time.time()-start) + 's')
html15 = r_home15.content
url_sets15 = []
url_sets15 = etree.HTML(html15).xpath('//*[@id="content"]/table/tr[1]/td[1]/a/@href')
print(url_sets15)
print(len(url_sets15))
df = pd.DataFrame(url_sets15)
df.to_excel('C:\\Users\\Administrator\\Desktop\\2015url.xlsx',sheet_name='Sheet1',na_rep=0,startrow=0,startcol=0,index=False,header=False)  # na_rep fills missing values

# 2014
r_home14 = requests.get(url_home14)
print('Time spent requesting and fetching the 2014 index page: ' + str(time.time()-start) + 's')
html14 = r_home14.content
url_sets14 = []
url_sets14 = etree.HTML(html14).xpath('//*[@id="content"]/table/tr[1]/td[1]/a/@href')
print(url_sets14)
print(len(url_sets14))
df = pd.DataFrame(url_sets14)
df.to_excel('C:\\Users\\Administrator\\Desktop\\2014url.xlsx',sheet_name='Sheet1',na_rep=0,startrow=0,startcol=0,index=False,header=False)  # na_rep fills missing values
# Read a single real number from input and print it as a float
print(float(input()))
from xml.dom import minidomNEWLINEimport osNEWLINENEWLINENEWLINEdef parse_voc(xmls_folder):NEWLINE xmls_filename = os.listdir(xmls_folder)NEWLINENEWLINE annos = []NEWLINENEWLINE for xml_filename in xmls_filename:NEWLINENEWLINE anno_dict = {}NEWLINENEWLINE xml_fullpath = os.path.join(xmls_folder, xml_filename)NEWLINENEWLINE dom = minidom.parse(xml_fullpath)NEWLINE root = dom.documentElementNEWLINENEWLINE img_filename = root.getElementsByTagName('filename')[0].childNodes[0].dataNEWLINE anno_dict['filename'] = img_filenameNEWLINENEWLINE size = root.getElementsByTagName('size')[0]NEWLINENEWLINE width = size.getElementsByTagName('width')[0].childNodes[0].dataNEWLINE height = size.getElementsByTagName('height')[0].childNodes[0].dataNEWLINE depth = size.getElementsByTagName('depth')[0].childNodes[0].dataNEWLINENEWLINE anno_dict['size'] = {'depth': depth, 'height': height, 'width': width}NEWLINE anno_dict['samples'] = []NEWLINENEWLINE objects = root.getElementsByTagName('object')NEWLINENEWLINE for obj in objects:NEWLINE name = obj.getElementsByTagName('name')[0].childNodes[0].dataNEWLINE bndbox = obj.getElementsByTagName('bndbox')[0]NEWLINENEWLINE xmin = bndbox.getElementsByTagName('xmin')[0].childNodes[0].dataNEWLINE ymin = bndbox.getElementsByTagName('ymin')[0].childNodes[0].dataNEWLINE xmax = bndbox.getElementsByTagName('xmax')[0].childNodes[0].dataNEWLINE ymax = bndbox.getElementsByTagName('ymax')[0].childNodes[0].dataNEWLINENEWLINE anno_dict['samples'].append({'name': name, 'bbox': [xmin, ymin, xmax, ymax]})NEWLINENEWLINE annos.append(anno_dict)NEWLINENEWLINE return annos
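A brief usage sketch for parse_voc above; the annotations folder path is an illustrative assumption.

if __name__ == '__main__':
    annotations = parse_voc('VOCdevkit/VOC2007/Annotations')
    for anno in annotations[:3]:
        print(anno['filename'], anno['size'], len(anno['samples']), 'objects')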
import loggingNEWLINEfrom pathlib import PathNEWLINEfrom typing import DictNEWLINENEWLINEimport colorlogNEWLINEfrom concurrent_log_handler import ConcurrentRotatingFileHandlerNEWLINEfrom logging.handlers import SysLogHandlerNEWLINENEWLINEfrom mogua.util.path import mkdir, path_from_rootNEWLINENEWLINENEWLINEdef initialize_logging(service_name: str, logging_config: Dict, root_path: Path):NEWLINE log_path = path_from_root(root_path, logging_config.get("log_filename", "log/debug.log"))NEWLINE log_date_format = "%Y-%m-%dT%H:%M:%S"NEWLINENEWLINE mkdir(str(log_path.parent))NEWLINE file_name_length = 33 - len(service_name)NEWLINE if logging_config["log_stdout"]:NEWLINE handler = colorlog.StreamHandler()NEWLINE handler.setFormatter(NEWLINE colorlog.ColoredFormatter(NEWLINE f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: "NEWLINE f"%(log_color)s%(levelname)-8s%(reset)s %(message)s",NEWLINE datefmt=log_date_format,NEWLINE reset=True,NEWLINE )NEWLINE )NEWLINENEWLINE logger = colorlog.getLogger()NEWLINE logger.addHandler(handler)NEWLINE else:NEWLINE logger = logging.getLogger()NEWLINE maxrotation = logging_config.get("log_maxfilesrotation", 7)NEWLINE handler = ConcurrentRotatingFileHandler(log_path, "a", maxBytes=20 * 1024 * 1024, backupCount=maxrotation)NEWLINE handler.setFormatter(NEWLINE logging.Formatter(NEWLINE fmt=f"%(asctime)s.%(msecs)03d {service_name} %(name)-{file_name_length}s: %(levelname)-8s %(message)s",NEWLINE datefmt=log_date_format,NEWLINE )NEWLINE )NEWLINE logger.addHandler(handler)NEWLINENEWLINE if logging_config.get("log_syslog", False):NEWLINE log_syslog_host = logging_config.get("log_syslog_host", "localhost")NEWLINE log_syslog_port = logging_config.get("log_syslog_port", 514)NEWLINE log_syslog_handler = SysLogHandler(address=(log_syslog_host, log_syslog_port))NEWLINE log_syslog_handler.setFormatter(logging.Formatter(fmt=f"{service_name} %(message)s", datefmt=log_date_format))NEWLINE logger = logging.getLogger()NEWLINE logger.addHandler(log_syslog_handler)NEWLINENEWLINE if "log_level" in logging_config:NEWLINE if logging_config["log_level"] == "CRITICAL":NEWLINE logger.setLevel(logging.CRITICAL)NEWLINE elif logging_config["log_level"] == "ERROR":NEWLINE logger.setLevel(logging.ERROR)NEWLINE elif logging_config["log_level"] == "WARNING":NEWLINE logger.setLevel(logging.WARNING)NEWLINE elif logging_config["log_level"] == "INFO":NEWLINE logger.setLevel(logging.INFO)NEWLINE elif logging_config["log_level"] == "DEBUG":NEWLINE logger.setLevel(logging.DEBUG)NEWLINE logging.getLogger("aiosqlite").setLevel(logging.INFO) # Too much logging on debug levelNEWLINE logging.getLogger("websockets").setLevel(logging.INFO) # Too much logging on debug levelNEWLINE else:NEWLINE logger.setLevel(logging.INFO)NEWLINE else:NEWLINE logger.setLevel(logging.INFO)NEWLINE
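A hedged example of calling initialize_logging above; the service name, config values, and root path are illustrative assumptions.

if __name__ == '__main__':
    example_config = {
        "log_stdout": True,        # log to the console with colour output
        "log_level": "INFO",
        "log_filename": "log/debug.log",
    }
    initialize_logging("example_service", example_config, Path("/tmp/mogua-root"))
    logging.getLogger(__name__).info("logging initialized")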
#!/usr/bin/pythonNEWLINE# -*- coding: utf8 -*-NEWLINENEWLINE# This code is based on: T.Davidson, F.Kloosterman, M.Wilson "Hippocampal replay of extended experience",NEWLINE# in Neuron, vol. 63, pp. 497-507, 2009NEWLINE# difference: \tau_i(x) (rate parameters) are known (from poisson_proc.py and generate_spike_train.py)NEWLINENEWLINEimport numpy as npNEWLINEfrom scipy.misc import factorialNEWLINEimport matplotlib.pyplot as pltNEWLINEimport osNEWLINENEWLINEfInSpikes = 'spikes.npz'NEWLINEfInPF = 'PFstarts.npz'NEWLINEfOut = 'route_0.005.npz'NEWLINENEWLINEtempRes = 0.005 # [s]NEWLINEspaRes = 2*np.pi / 360.0 # [rad] ( == 1 degree)NEWLINEN = 4000NEWLINENEWLINESWBasePath = '/home/bandi/workspace/KOKI/SharpWaves' # os.path.split(os.path.split(__file__)[0])[0]NEWLINENEWLINEspatialPoints = np.linspace(0, 2*np.pi, int(2*np.pi / spaRes))NEWLINEsamplingTimes = np.linspace(0, 10, int(10.0 / tempRes)+1)NEWLINENEWLINE# (constants from poisson_proc.py:)NEWLINElRoute = 300 # circumference [cm]NEWLINElPlaceField = 30 # [cm]NEWLINEr = lRoute / (2 * np.pi) # radius [cm]NEWLINEphiPFRad = lPlaceField / r # (angle of) place field [rad]NEWLINEavgRateInField = 20.0 # avg. in-field firing rate [Hz]NEWLINENEWLINENEWLINE# list of overlapping place fieldsNEWLINEfName = os.path.join(SWBasePath, 'files', fInPF)NEWLINEnpzFile = np.load(fName)NEWLINEpfStarts = npzFile['pfStarts'].tolist()NEWLINENEWLINEoverlappingPFs = []NEWLINEfor pfStart in pfStarts:NEWLINE overlap = []NEWLINE pfEnd = np.mod(pfStart + phiPFRad, 2*np.pi)NEWLINE if pfStart < (2*np.pi - phiPFRad):NEWLINE overlap = [i for i, val in enumerate(pfStarts) if pfStart <= val and val < pfEnd]NEWLINE else:NEWLINE overlap = [i for i, val in enumerate(pfStarts) if pfStart <= val or val < pfEnd]NEWLINENEWLINE overlappingPFs.append(overlap)NEWLINENEWLINENEWLINE# calculate firing rates (\tau_i(x)) !!! calculate not estimateNEWLINErates = []NEWLINEfor i in range(0, N):NEWLINE tau = np.zeros((1, int(2*np.pi / spaRes)))NEWLINENEWLINE pfEnd = np.mod(pfStarts[i] + phiPFRad, 2*np.pi)NEWLINE mPF = pfStarts[i] + phiPFRad / 2NEWLINENEWLINE for ind, phi in enumerate(spatialPoints):NEWLINE if pfStarts[i] < pfEnd:NEWLINE if pfStarts[i] <= phi and phi < pfEnd:NEWLINE tau[0][ind] = np.cos((2*np.pi) / (2 * phiPFRad) * (phi - mPF)) * avgRateInFieldNEWLINE else:NEWLINE if pfStarts[i] <= phi or phi < pfEnd:NEWLINE tau[0][ind] = np.cos((2*np.pi) / (2 * phiPFRad) * (phi - mPF)) * avgRateInFieldNEWLINENEWLINE rates.append(tau)NEWLINENEWLINEprint 'rates calculated'NEWLINENEWLINENEWLINE# read spike timesNEWLINEfName = os.path.join(SWBasePath, 'files', fInSpikes)NEWLINEnpzFile = np.load(fName)NEWLINEspikes = npzFile['spikes'] # only for the populational firing rateNEWLINEspiketimes = npzFile['spiketimes']NEWLINENEWLINE# taking cells into account, whose have overlapping place fields with a cell, that fired in the binNEWLINEcellROI = []NEWLINEbinSpikes = []NEWLINENEWLINEfor t1, t2 in zip(samplingTimes[:-1], samplingTimes[1:]):NEWLINE count = 0NEWLINE tmp = [] # will be a list of list (cells that have to be taken into account)NEWLINE for i in range(0, N):NEWLINE n_i = ((t1 < spiketimes[i]) & (spiketimes[i] < t2)).sum() # #{spikes of the i-th cell in the bin}NEWLINE if n_i != 0:NEWLINE tmp.append(overlappingPFs[i])NEWLINE count += n_iNEWLINE tmp2 = list(set(sorted([item for sublist in tmp for item in sublist])))NEWLINE cellROI.append(tmp2)NEWLINE binSpikes.append(count)NEWLINENEWLINEprint 'average spikes/bin:', np.mean(binSpikes)NEWLINENEWLINE# calc. 
mean firing rates (to decide if there is a replay or not)NEWLINEpopre = {}NEWLINENEWLINEfor i in spikes:NEWLINE if np.floor(i[1] * 1000) not in popre:NEWLINE popre[np.floor(i[1] * 1000)] = 1NEWLINE elif np.floor(i[1] * 1000) in popre:NEWLINE popre[np.floor(i[1] * 1000)] += 1NEWLINENEWLINE# rate correctionNEWLINEfor i in range(0, 10000):NEWLINE if i not in popre:NEWLINE popre[i] = 0NEWLINENEWLINEexcRate = popre.values()NEWLINEmeanExcRate = np.mean(excRate)NEWLINENEWLINE# --------------------------------------------------------------------------------------------------------------------------NEWLINE# log(likelihood): log(Pr(spikes|x)) = \sum_{i=1}^N n_ilog(\frac{\Delta t \tau_i(x)}{n_i!}) - \Delta t \sum_{i=1}^N \tau_i(x)NEWLINE# --------------------------------------------------------------------------------------------------------------------------NEWLINENEWLINEdelta_t = tempRes # in sNEWLINEroute = []NEWLINEML = []NEWLINENEWLINEbin = 0NEWLINEfor t1, t2 in zip(samplingTimes[:-1], samplingTimes[1:]):NEWLINE likelihoods = []NEWLINE binAvgRate = np.mean(excRate[int(t1*1000):int(t2*1000)])NEWLINE if binAvgRate >= meanExcRate / 2: # if there is replayNEWLINE for indPhi in range(0, len(spatialPoints)):NEWLINE likelihood1 = 0NEWLINE likelihood2 = 0NEWLINENEWLINE for i in cellROI[bin]: # instead of "for i in range(0, N):"NEWLINE tmp = 0NEWLINENEWLINE n_i = ((t1 < spiketimes[i]) & (spiketimes[i] < t2)).sum() # #{spikes of the i-th cell in the bin}NEWLINE tau_i_phi = rates[i][0, indPhi] # firing rate of the i-th cell in a given position (on the circle)NEWLINE if tau_i_phi != 0 and n_i != 0: # because log() can't take 0NEWLINE tmp = n_i * np.log(delta_t * tau_i_phi / factorial(n_i).item())NEWLINE # .item() is needed because factorial gives 0-d arrayNEWLINENEWLINE likelihood1 += tmpNEWLINE likelihood2 += tau_i_phiNEWLINE likelihood = likelihood1 - delta_t * likelihood2NEWLINENEWLINE likelihoods.append(likelihood)NEWLINE likelihoods = [np.nan if x == 0 else x for x in likelihoods] # change 0s to np.nanNEWLINE if np.isnan(likelihoods).all(): # just to make sureNEWLINE likelihoods[0] = 0NEWLINENEWLINE # search for the maximum of the likelihoods in a given sampling timeNEWLINE id = np.nanargmax(likelihoods)NEWLINE maxLikelihood = likelihoods[id]NEWLINE place = spatialPoints[id]NEWLINE route.append(place)NEWLINE ML.append(maxLikelihood)NEWLINE print 'sampling time:', str(t2 * 1000), '[ms]:', str(place), '[rad] ML:', maxLikelihoodNEWLINE bin += 1NEWLINE else: # if there is no replayNEWLINE route.append(np.nan)NEWLINE ML.append(np.nan)NEWLINE print 'sampling time:', str(t2 * 1000), '[ms]: not replay'NEWLINE bin += 1NEWLINENEWLINENEWLINEfName = os.path.join(SWBasePath, 'files', fOut)NEWLINEnp.savez(fName, route=route, ML=ML)NEWLINE
N = int(input())NEWLINEp = [int(input()) for i in range(N)]NEWLINEprint(sum(p)-max(p)//2)NEWLINE
# Twitter AUTH:NEWLINEAPP_KEY = 'APP_KEY_HERE' NEWLINEAPP_SECRET = 'APP_SECRET_HERE' NEWLINEOAUTH_TOKEN = 'TOKEN_HERE'NEWLINEOAUTH_TOKEN_SECRET = 'TOKEN_SECRET_HERE'NEWLINENEWLINE# Telegram options:NEWLINETELEGRAM_CHANNEL = 'CHANNEL_NAME_HERE'NEWLINETELEGRAM_TOKEN = 'TOKEN_HERE'NEWLINENEWLINE# Misc:NEWLINETWITTER_USER_NAME = 'USER_NAME_HERE'NEWLINEMSG = '<b>{NAME}</b>:\n{TEXT}\n\n<a href="{URL}">Source</a>'NEWLINENEWLINE# Technical stuff:NEWLINETWEET_BASE_URL = 'https://twitter.com/i/web/status/'NEWLINESTATE_FILE = 'state.p'NEWLINESLEEP = 3NEWLINETG_LINK = 'https://api.telegram.org/bot{TOKEN}/sendMessage?chat_id=@{CHANNEL}&text={MESSAGE}&parse_mode=html'NEWLINEUNSUPPORTED_TAGS = ['<span class="twython-tweet-suffix">', '<span class="twython-tweet-prefix">', '</span>', 'class="twython-url"', 'class="twython-media"', 'class="twython-mention"', 'class="twython-hashtag"', 'class="twython-symbol"', ]NEWLINE
NEWLINEfrom .client import NetworkTableClientNEWLINEfrom .server import NetworkTableServerNEWLINEfrom .socketstream import SocketStreamFactory, SocketServerStreamProviderNEWLINEfrom .type import BooleanArray, NumberArray, StringArray, DefaultEntryTypesNEWLINE
from django.conf.urls import patterns, urlNEWLINEfrom . import viewsNEWLINENEWLINEurlpatterns = patterns('',NEWLINE url(r'^$', views.newsletter),NEWLINE url(r'^subscribe_default/$', views.subscribe_default),NEWLINE url(r'^subscribe_specific/$', views.subscribe_specific),NEWLINE)NEWLINE
# MIT LicenseNEWLINE#NEWLINE# Copyright (C) IBM Corporation 2018NEWLINE#NEWLINE# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associatedNEWLINE# documentation files (the "Software"), to deal in the Software without restriction, including without limitation theNEWLINE# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permitNEWLINE# persons to whom the Software is furnished to do so, subject to the following conditions:NEWLINE#NEWLINE# The above copyright notice and this permission notice shall be included in all copies or substantial portions of theNEWLINE# Software.NEWLINE#NEWLINE# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THENEWLINE# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THENEWLINE# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,NEWLINE# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THENEWLINE# SOFTWARE.NEWLINEfrom __future__ import absolute_import, division, print_function, unicode_literalsNEWLINENEWLINEimport loggingNEWLINENEWLINEimport numpy as npNEWLINENEWLINEfrom art.attacks.attack import AttackNEWLINENEWLINElogger = logging.getLogger(__name__)NEWLINENEWLINENEWLINEclass SaliencyMapMethod(Attack):NEWLINE """NEWLINE Implementation of the Jacobian-based Saliency Map Attack (Papernot et al. 2016).NEWLINE Paper link: https://arxiv.org/pdf/1511.07528.pdfNEWLINE """NEWLINE attack_params = Attack.attack_params + ['theta', 'gamma', 'batch_size', 'expectation']NEWLINENEWLINE def __init__(self, classifier, theta=0.1, gamma=1., batch_size=128, expectation=None):NEWLINE """NEWLINE Create a SaliencyMapMethod instance.NEWLINENEWLINE :param classifier: A trained model.NEWLINE :type classifier: :class:`Classifier`NEWLINE :param theta: Perturbation introduced to each modified feature per step (can be positive or negative).NEWLINE :type theta: `float`NEWLINE :param gamma: Maximum percentage of perturbed features (between 0 and 1).NEWLINE :type gamma: `float`NEWLINE :param batch_size: Batch sizeNEWLINE :type batch_size: `int`NEWLINE :param expectation: An expectation over transformations to be applied when computingNEWLINE classifier gradients and predictions.NEWLINE :type expectation: :class:`ExpectationOverTransformations`NEWLINE """NEWLINE super(SaliencyMapMethod, self).__init__(classifier)NEWLINE kwargs = {'theta': theta, 'gamma': gamma, 'batch_size': batch_size, 'expectation': expectation}NEWLINE self.set_params(**kwargs)NEWLINENEWLINE def generate(self, x, **kwargs):NEWLINE """NEWLINE Generate adversarial samples and return them in an array.NEWLINENEWLINE :param x: An array with the original inputs to be attacked.NEWLINE :type x: `np.ndarray`NEWLINE :param y: Target values if the attack is targetedNEWLINE :type y: `np.ndarray`NEWLINE :param theta: Perturbation introduced to each modified feature per step (can be positive or negative)NEWLINE :type theta: `float`NEWLINE :param gamma: Maximum percentage of perturbed features (between 0 and 1)NEWLINE :type gamma: `float`NEWLINE :param batch_size: Batch sizeNEWLINE :type batch_size: `int`NEWLINE :return: An array holding the adversarial examples.NEWLINE :rtype: `np.ndarray`NEWLINE """NEWLINE # Parse and save attack-specific parametersNEWLINE self.set_params(**kwargs)NEWLINE 
clip_min, clip_max = self.classifier.clip_valuesNEWLINENEWLINE # Initialize variablesNEWLINE dims = list(x.shape[1:])NEWLINE self._nb_features = np.product(dims)NEWLINE x_adv = np.reshape(np.copy(x), (-1, self._nb_features))NEWLINE preds = np.argmax(self._predict(x), axis=1)NEWLINENEWLINE # Determine target classes for attackNEWLINE if 'y' not in kwargs or kwargs[str('y')] is None:NEWLINE # Randomly choose target from the incorrect classes for each sampleNEWLINE from art.utils import random_targetsNEWLINE targets = np.argmax(random_targets(preds, self.classifier.nb_classes), axis=1)NEWLINE else:NEWLINE targets = np.argmax(kwargs[str('y')], axis=1)NEWLINENEWLINE # Compute perturbation with implicit batchingNEWLINE for batch_id in range(int(np.ceil(x_adv.shape[0] / float(self.batch_size)))):NEWLINE batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_sizeNEWLINE batch = x_adv[batch_index_1:batch_index_2]NEWLINENEWLINE # Main algorithm for each batchNEWLINE # Initialize the search space; optimize to remove features that can't be changedNEWLINE search_space = np.zeros_like(batch)NEWLINE if self.theta > 0:NEWLINE search_space[batch < clip_max] = 1NEWLINE else:NEWLINE search_space[batch > clip_min] = 1NEWLINENEWLINE # Get current predictionsNEWLINE current_pred = preds[batch_index_1:batch_index_2]NEWLINE target = targets[batch_index_1:batch_index_2]NEWLINE active_indices = np.where(current_pred != target)[0]NEWLINE all_feat = np.zeros_like(batch)NEWLINENEWLINE while len(active_indices) != 0:NEWLINE # Compute saliency mapNEWLINE feat_ind = self._saliency_map(np.reshape(batch, [batch.shape[0]] + dims)[active_indices],NEWLINE target[active_indices], search_space[active_indices])NEWLINENEWLINE # Update used featuresNEWLINE all_feat[active_indices][np.arange(len(active_indices)), feat_ind[:, 0]] = 1NEWLINE all_feat[active_indices][np.arange(len(active_indices)), feat_ind[:, 1]] = 1NEWLINENEWLINE # Prepare update depending of thetaNEWLINE if self.theta > 0:NEWLINE clip_func, clip_value = np.minimum, clip_maxNEWLINE else:NEWLINE clip_func, clip_value = np.maximum, clip_minNEWLINENEWLINE # Update adversarial examplesNEWLINE tmp_batch = batch[active_indices]NEWLINE tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] = clip_func(clip_value,NEWLINE tmp_batch[np.arange(len(active_indices)), feat_ind[:, 0]] + self.theta)NEWLINE tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] = clip_func(clip_value,NEWLINE tmp_batch[np.arange(len(active_indices)), feat_ind[:, 1]] + self.theta)NEWLINE batch[active_indices] = tmp_batchNEWLINENEWLINE # Remove indices from search space if max/min values were reachedNEWLINE search_space[batch == clip_value] = 0NEWLINENEWLINE # Recompute model predictionNEWLINE current_pred = np.argmax(self._predict(np.reshape(batch, [batch.shape[0]] + dims)), axis=1)NEWLINENEWLINE # Update active_indicesNEWLINE active_indices = np.where((current_pred != target) *NEWLINE (np.sum(all_feat, axis=1) / self._nb_features <= self.gamma) *NEWLINE (np.sum(search_space, axis=1) > 0))[0]NEWLINENEWLINE x_adv[batch_index_1:batch_index_2] = batchNEWLINENEWLINE x_adv = np.reshape(x_adv, x.shape)NEWLINE preds = np.argmax(self._predict(x), axis=1)NEWLINE preds_adv = np.argmax(self._predict(x_adv), axis=1)NEWLINE logger.info('Success rate of JSMA attack: %.2f%%', (np.sum(preds != preds_adv) / x.shape[0]))NEWLINENEWLINE return x_advNEWLINENEWLINE def set_params(self, **kwargs):NEWLINE """NEWLINE Take in a dictionary of parameters and applies attack-specific checks 
before saving them as attributes.NEWLINENEWLINE :param theta: Perturbation introduced to each modified feature per step (can be positive or negative)NEWLINE :type theta: `float`NEWLINE :param gamma: Maximum percentage of perturbed features (between 0 and 1)NEWLINE :type gamma: `float`NEWLINE :param batch_size: Internal size of batches on which adversarial samples are generated.NEWLINE :type batch_size: `int`NEWLINE """NEWLINE # Save attack-specific parametersNEWLINE super(SaliencyMapMethod, self).set_params(**kwargs)NEWLINENEWLINE if self.gamma <= 0 or self.gamma > 1:NEWLINE raise ValueError("The total perturbation percentage `gamma` must be between 0 and 1.")NEWLINENEWLINE if self.batch_size <= 0:NEWLINE raise ValueError('The batch size `batch_size` has to be positive.')NEWLINENEWLINE return TrueNEWLINENEWLINE def _saliency_map(self, x, target, search_space):NEWLINE """NEWLINE Compute the saliency map of `x`. Return the top 2 coefficients in `search_space` that maximize / minimizeNEWLINE the saliency map.NEWLINENEWLINE :param x: A batch of input samplesNEWLINE :type x: `np.ndarray`NEWLINE :param target: Target class for `x`NEWLINE :type target: `np.ndarray`NEWLINE :param search_space: The set of valid pairs of feature indices to searchNEWLINE :type search_space: `np.ndarray`NEWLINE :return: The top 2 coefficients in `search_space` that maximize / minimize the saliency mapNEWLINE :rtype: `np.ndarray`NEWLINE """NEWLINE grads = self._class_gradient(x, label=target, logits=False)NEWLINE grads = np.reshape(grads, (-1, self._nb_features))NEWLINENEWLINE # Remove gradients for already used featuresNEWLINE used_features = 1 - search_spaceNEWLINE coeff = 2 * int(self.theta > 0) - 1NEWLINE grads[used_features == 1] = -np.inf * coeffNEWLINENEWLINE if self.theta > 0:NEWLINE ind = np.argpartition(grads, -2, axis=1)[:, -2:]NEWLINE else:NEWLINE ind = np.argpartition(-grads, -2, axis=1)[:, -2:]NEWLINENEWLINE return indNEWLINE
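A short, hedged sketch of how the attack class above might be driven; `classifier` is assumed to be a trained ART Classifier with clip_values set, and `x_test` a batch of benign inputs prepared elsewhere.

def craft_jsma_examples(classifier, x_test, theta=0.1, gamma=1.0, batch_size=128):
    # Build the attack with the parameters documented above and return
    # adversarial counterparts of x_test.
    attack = SaliencyMapMethod(classifier, theta=theta, gamma=gamma, batch_size=batch_size)
    return attack.generate(x_test)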