Dataset schema (one record per repository; the sequence fields are aligned, one entry per file):
repo_name: string (length 8-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
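Below is a minimal sketch of consuming records with this schema. The JSON Lines layout and the file name code_apis.jsonl are assumptions made for illustration only; the dump above does not specify how the records are serialized.

```python
import json

# Minimal sketch: iterate records of the schema above.
# Assumption: records are serialized as JSON Lines in "code_apis.jsonl"
# (hypothetical file name; the actual storage format is not shown here).
with open("code_apis.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "AlexChrisF/udacity"
        # The sequence fields run in parallel: one entry per file.
        for sha, path, source, apis in zip(
                record["hexsha"], record["file_path"],
                record["code"], record["apis"]):
            print(f"{repo} {path} ({sha[:8]}): {len(apis)} API calls")
```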
AlexChrisF/udacity
[ "b7f85a74058fc63ccb7601c418450ab934ef5953", "b7f85a74058fc63ccb7601c418450ab934ef5953", "b7f85a74058fc63ccb7601c418450ab934ef5953" ]
[ "tensorflow/contrib/layers/python/kernel_tests/sparse_feature_cross_op_test.py", "tensorflow/python/platform/flags.py", "tensorflow/python/kernel_tests/softplus_op_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tf.contrib.layers.sparse_feature_cross.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.layers.python.ops import sparse_feature_cross_op\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.platform import test\n\n\nclass SparseCrossOpTest(test.TestCase):\n\n def test_simple(self):\n \"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([['batch1-FC1-F1'],\n ['batch2-FC1-F1', 'batch2-FC1-F2']]),\n self._sparse_tensor([['batch1-FC2-F1'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']])\n ])\n expected_out = self._sparse_tensor([['batch1-FC1-F1_X_batch1-FC2-F1'], [\n 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',\n 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_dense(self):\n \"\"\"Tests only dense inputs.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n constant_op.constant([['batch1-FC1-F1', 'batch1-FC1-F2'],\n ['batch2-FC1-F1', 'batch2-FC1-F2']],\n dtypes.string),\n constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']],\n dtypes.string),\n ])\n expected_out = self._sparse_tensor([[\n 'batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F1', 'batch1-FC1-F2_X_batch1-FC2-F2'\n ], [\n 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',\n 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_integer_mixed_string_sparse(self):\n \"\"\"Tests mixed type.\"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([[11], [333, 55555]]),\n self._sparse_tensor([['batch1-FC2-F1'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']])\n ])\n expected_out = self._sparse_tensor([['11_X_batch1-FC2-F1'], [\n '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2', '55555_X_batch2-FC2-F1',\n '55555_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_integer_mixed_string_dense(self):\n \"\"\"Tests mixed dense inputs.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n constant_op.constant([[11, 333], [55555, 999999]], dtypes.int64),\n constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],\n 
['batch2-FC2-F1', 'batch2-FC2-F2']],\n dtypes.string),\n ])\n expected_out = self._sparse_tensor([[\n '11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2', '333_X_batch1-FC2-F1',\n '333_X_batch1-FC2-F2'\n ], [\n '55555_X_batch2-FC2-F1', '55555_X_batch2-FC2-F2',\n '999999_X_batch2-FC2-F1', '999999_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_sparse_cross_dense(self):\n \"\"\"Tests sparse and dense inputs.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([['batch1-FC1-F1'],\n ['batch2-FC1-F1', 'batch2-FC1-F2']]),\n constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']],\n dtypes.string),\n ])\n expected_out = self._sparse_tensor(\n [['batch1-FC1-F1_X_batch1-FC2-F1', 'batch1-FC1-F1_X_batch1-FC2-F2'], [\n 'batch2-FC1-F1_X_batch2-FC2-F1', 'batch2-FC1-F1_X_batch2-FC2-F2',\n 'batch2-FC1-F2_X_batch2-FC2-F1', 'batch2-FC1-F2_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_integer_sparse_input(self):\n \"\"\"Tests mixed type sparse and dense inputs.\"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([[11], [333, 5555]]),\n constant_op.constant([['batch1-FC2-F1', 'batch1-FC2-F2'],\n ['batch2-FC2-F1', 'batch2-FC2-F2']],\n dtypes.string),\n ])\n expected_out = self._sparse_tensor(\n [['11_X_batch1-FC2-F1', '11_X_batch1-FC2-F2'], [\n '333_X_batch2-FC2-F1', '333_X_batch2-FC2-F2',\n '5555_X_batch2-FC2-F1', '5555_X_batch2-FC2-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_permutation_3x3x3(self):\n \"\"\"Tests 3x3x3 permutation.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor(\n [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),\n self._sparse_tensor(\n [['batch1-FC2-F1', 'batch1-FC2-F2', 'batch1-FC2-F3']]),\n self._sparse_tensor(\n [['batch1-FC3-F1', 'batch1-FC3-F2', 'batch1-FC3-F3']])\n ])\n expected_out = self._sparse_tensor([[\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F3',\n 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F2',\n 'batch1-FC1-F1_X_batch1-FC2-F2_X_batch1-FC3-F3',\n 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F2',\n 'batch1-FC1-F1_X_batch1-FC2-F3_X_batch1-FC3-F3',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F3',\n 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F2_X_batch1-FC3-F3',\n 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F3_X_batch1-FC3-F3',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F3',\n 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F1',\n 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F2',\n 'batch1-FC1-F3_X_batch1-FC2-F2_X_batch1-FC3-F3',\n 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F1',\n 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F2',\n 'batch1-FC1-F3_X_batch1-FC2-F3_X_batch1-FC3-F3'\n ]])\n with self.test_session() as 
sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_permutation_3x1x2(self):\n \"\"\"Tests 3x1x2 permutation.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor(\n [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])\n ])\n expected_out = self._sparse_tensor([[\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F3_X_batch1-FC2-F1_X_batch1-FC3-F2'\n ]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_large_batch(self):\n \"\"\"Tests with large batch size to force multithreding.\n \"\"\"\n batch_size = 5000\n col1 = []\n col2 = []\n col3 = []\n for b in range(batch_size):\n col1.append(\n ['batch%d-FC1-F1' % b, 'batch%d-FC1-F2' % b, 'batch%d-FC1-F3' % b])\n col2.append(['batch%d-FC2-F1' % b])\n col3.append(['batch%d-FC3-F1' % b, 'batch%d-FC3-F2' % b])\n\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor(col1), self._sparse_tensor(col2),\n self._sparse_tensor(col3)\n ])\n\n col_out = []\n for b in range(batch_size):\n col_out.append([\n 'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),\n 'batch%d-FC1-F1_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),\n 'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),\n 'batch%d-FC1-F2_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b),\n 'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F1' % (b, b, b),\n 'batch%d-FC1-F3_X_batch%d-FC2-F1_X_batch%d-FC3-F2' % (b, b, b)\n ])\n\n expected_out = self._sparse_tensor(col_out)\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_one_column_empty(self):\n \"\"\"Tests when one column is empty.\n\n The crossed tensor should be empty.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']]),\n self._sparse_tensor([], 1),\n self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])\n ])\n with self.test_session() as sess:\n self._assert_sparse_tensor_empty(sess.run(op))\n\n def test_some_columns_empty(self):\n \"\"\"Tests when more than one columns are empty.\n\n Cross for the corresponding batch should be empty.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([['batch1-FC1-F1', 'batch1-FC1-F2']], 2),\n self._sparse_tensor([['batch1-FC2-F1'], ['batch2-FC2-F1']], 2),\n self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']], 2)\n ])\n expected_out = self._sparse_tensor([[\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F1_X_batch1-FC2-F1_X_batch1-FC3-F2',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F1',\n 'batch1-FC1-F2_X_batch1-FC2-F1_X_batch1-FC3-F2'\n ]], 2)\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_all_columns_empty(self):\n \"\"\"Tests when all columns are empty.\n\n The crossed tensor should be empty.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross([\n self._sparse_tensor([]), self._sparse_tensor([]),\n self._sparse_tensor([])\n ])\n with self.test_session() as sess:\n self._assert_sparse_tensor_empty(sess.run(op))\n\n def test_hashed_output_zero_bucket(self):\n 
\"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor([['batch1-FC1-F1']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1']])\n ],\n hashed_output=True)\n # Check actual hashed output to prevent unintentional hashing changes.\n expected_out = self._sparse_tensor([[3735511728867393167]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_hashed_output_zero_bucket_v2(self):\n \"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor([['batch1-FC1-F1']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1']])\n ],\n hashed_output=True,\n hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)\n # Check actual hashed output to prevent unintentional hashing changes.\n expected_out = self._sparse_tensor([[1971693436396284976]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n # TODO(sibyl-Aix6ihai): Add benchmark to compare Hashed vs Non-hashed.\n def test_hashed_output(self):\n \"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor([['batch1-FC1-F1']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1']])\n ],\n hashed_output=True,\n num_buckets=100)\n # Check actual hashed output to prevent unintentional hashing changes.\n expected_out = self._sparse_tensor([[74]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_hashed_output_v2(self):\n \"\"\"Tests a simple scenario.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor([['batch1-FC1-F1']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1']])\n ],\n hashed_output=True,\n num_buckets=100,\n hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)\n # Check actual hashed output to prevent unintentional hashing changes.\n expected_out = self._sparse_tensor([[83]])\n with self.test_session() as sess:\n self._assert_sparse_tensor_equals(expected_out, sess.run(op))\n\n def test_hashed_output_v1_has_collision(self):\n \"\"\"Tests the old version of the fingerprint concatenation has collisions.\n \"\"\"\n # The last 10 bits of 359 and 1024+359 are identical.\n # As a result, all the crosses collide.\n t1 = constant_op.constant([[359], [359 + 1024]])\n t2 = constant_op.constant([list(range(10)), list(range(10))])\n cross = sparse_feature_cross_op.sparse_feature_cross(\n [t2, t1], hashed_output=True, num_buckets=1024)\n cross_dense = sparse_ops.sparse_tensor_to_dense(cross)\n with session.Session():\n values = cross_dense.eval()\n self.assertTrue(numpy.equal(values[0], values[1]).all())\n\n def test_hashed_output_v2_has_no_collision(self):\n \"\"\"Tests the new version of the fingerprint concatenation has no collisions.\n \"\"\"\n # Although the last 10 bits of 359 and 1024+359 are identical.\n # As a result, all the crosses shouldn't collide.\n t1 = constant_op.constant([[359], [359 + 1024]])\n t2 = constant_op.constant([list(range(10)), list(range(10))])\n cross = sparse_feature_cross_op.sparse_feature_cross(\n [t2, t1],\n hashed_output=True,\n num_buckets=1024,\n hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)\n cross_dense = sparse_ops.sparse_tensor_to_dense(cross)\n with session.Session():\n values = 
cross_dense.eval()\n self.assertTrue(numpy.not_equal(values[0], values[1]).all())\n\n def test_hashed_3x1x2(self):\n \"\"\"Tests 3x1x2 permutation with hashed output.\n \"\"\"\n op = sparse_feature_cross_op.sparse_feature_cross(\n [\n self._sparse_tensor(\n [['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']]),\n self._sparse_tensor([['batch1-FC2-F1']]),\n self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])\n ],\n hashed_output=True,\n num_buckets=1000)\n with self.test_session() as sess:\n out = sess.run(op)\n self.assertEqual(6, len(out.values))\n self.assertAllEqual([[0, i] for i in range(6)], out.indices)\n self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))\n all_values_are_different = len(out.values) == len(set(out.values))\n self.assertTrue(all_values_are_different)\n\n def _assert_sparse_tensor_empty(self, sp):\n self.assertEquals(0, sp.indices.size)\n self.assertEquals(0, sp.values.size)\n # TODO(zakaria): check if we can ignore the first dim of the shape.\n self.assertEquals(0, sp.dense_shape[1])\n\n def _assert_sparse_tensor_equals(self, sp1, sp2):\n self.assertAllEqual(sp1.indices.eval(), sp2.indices)\n self.assertAllEqual(sp1.values.eval(), sp2.values)\n self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)\n\n def _sparse_tensor(self, data, batch_size=-1):\n \"\"\"Generates a SparseTensor.\n\n Args:\n data: Should be a list of list of strings or int64. Each item of the outer\n list represents a batch. Each item of the batch is a feature of a\n specific feature column.\n batch_size: optional batch size, especially for cases when data has no\n entry for some batches.\n\n Returns:\n A SparseTensor.\n \"\"\"\n indices = []\n values = []\n max_col_count = 0\n for batch, batch_ix in zip(data, range(len(data))):\n for column, column_ix in zip(batch, range(len(batch))):\n indices.append([batch_ix, column_ix])\n values.append(column)\n max_col_count = max(max_col_count, column_ix + 1)\n shape = [batch_size if batch_size != -1 else len(data), max_col_count]\n value_type = (dtypes.string if not values or isinstance(values[0], str) else\n dtypes.int64)\n return sparse_tensor.SparseTensor(\n constant_op.constant(indices, dtypes.int64, [len(indices), 2]),\n constant_op.constant(values, value_type, [len(indices)]),\n constant_op.constant(shape, dtypes.int64))\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Implementation of the flags interface.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse as _argparse\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n_global_parser = _argparse.ArgumentParser()\n\n\n# pylint: disable=invalid-name\n\n\nclass _FlagValues(object):\n \"\"\"Global container and accessor for flags and their values.\"\"\"\n\n def __init__(self):\n self.__dict__['__flags'] = {}\n self.__dict__['__parsed'] = False\n\n def _parse_flags(self, args=None):\n result, unparsed = _global_parser.parse_known_args(args=args)\n for flag_name, val in vars(result).items():\n self.__dict__['__flags'][flag_name] = val\n self.__dict__['__parsed'] = True\n return unparsed\n\n def __getattr__(self, name):\n \"\"\"Retrieves the 'value' attribute of the flag --name.\"\"\"\n if not self.__dict__['__parsed']:\n self._parse_flags()\n if name not in self.__dict__['__flags']:\n raise AttributeError(name)\n return self.__dict__['__flags'][name]\n\n def __setattr__(self, name, value):\n \"\"\"Sets the 'value' attribute of the flag --name.\"\"\"\n if not self.__dict__['__parsed']:\n self._parse_flags()\n self.__dict__['__flags'][name] = value\n\n\ndef _define_helper(flag_name, default_value, docstring, flagtype):\n \"\"\"Registers 'flag_name' with 'default_value' and 'docstring'.\"\"\"\n _global_parser.add_argument('--' + flag_name,\n default=default_value,\n help=docstring,\n type=flagtype)\n\n\n# Provides the global object that can be used to access flags.\nFLAGS = _FlagValues()\n\n\ndef DEFINE_string(flag_name, default_value, docstring):\n \"\"\"Defines a flag of type 'string'.\n\n Args:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as a string.\n docstring: A helpful message explaining the use of the flag.\n \"\"\"\n _define_helper(flag_name, default_value, docstring, str)\n\n\ndef DEFINE_integer(flag_name, default_value, docstring):\n \"\"\"Defines a flag of type 'int'.\n\n Args:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as an int.\n docstring: A helpful message explaining the use of the flag.\n \"\"\"\n _define_helper(flag_name, default_value, docstring, int)\n\n\ndef DEFINE_boolean(flag_name, default_value, docstring):\n \"\"\"Defines a flag of type 'boolean'.\n\n Args:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as a boolean.\n docstring: A helpful message explaining the use of the flag.\n \"\"\"\n # Register a custom function for 'bool' so --flag=True works.\n def str2bool(v):\n return v.lower() in ('true', 't', '1')\n _global_parser.add_argument('--' + flag_name,\n nargs='?',\n const=True,\n help=docstring,\n default=default_value,\n type=str2bool)\n\n # Add 
negated version, stay consistent with argparse with regard to\n # dashes in flag names.\n _global_parser.add_argument('--no' + flag_name,\n action='store_false',\n dest=flag_name.replace('-', '_'))\n\n\n# The internal google library defines the following alias, so we match\n# the API for consistency.\nDEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name\n\n\ndef DEFINE_float(flag_name, default_value, docstring):\n \"\"\"Defines a flag of type 'float'.\n\n Args:\n flag_name: The name of the flag as a string.\n default_value: The default value the flag should take as a float.\n docstring: A helpful message explaining the use of the flag.\n \"\"\"\n _define_helper(flag_name, default_value, docstring, float)\n\n_allowed_symbols = [\n # We rely on gflags documentation.\n 'DEFINE_bool',\n 'DEFINE_boolean',\n 'DEFINE_float',\n 'DEFINE_integer',\n 'DEFINE_string',\n 'FLAGS',\n]\nremove_undocumented(__name__, _allowed_symbols)\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Softplus and SoftplusGrad.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import nn_ops\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\n\n\nclass SoftplusTest(test.TestCase):\n\n def _npSoftplus(self, np_features):\n np_features = np.asarray(np_features)\n zero = np.asarray(0).astype(np_features.dtype)\n return np.logaddexp(zero, np_features)\n\n def _testSoftplus(self, np_features, use_gpu=False):\n np_softplus = self._npSoftplus(np_features)\n with self.test_session(use_gpu=use_gpu):\n softplus = nn_ops.softplus(np_features)\n tf_softplus = softplus.eval()\n self.assertAllCloseAccordingToType(np_softplus, tf_softplus)\n self.assertTrue(np.all(tf_softplus > 0))\n self.assertShapeEqual(np_softplus, softplus)\n\n def testNumbers(self):\n for t in [np.float16, np.float32, np.float64]:\n self._testSoftplus(\n np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),\n use_gpu=False)\n self._testSoftplus(\n np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),\n use_gpu=True)\n log_eps = np.log(np.finfo(t).eps)\n one = t(1)\n ten = t(10)\n self._testSoftplus(\n [\n log_eps, log_eps - one, log_eps + one, log_eps - ten,\n log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,\n -log_eps - ten, -log_eps + ten\n ],\n use_gpu=False)\n self._testSoftplus(\n [\n log_eps, log_eps - one, log_eps + one, log_eps - ten,\n log_eps + ten - log_eps, -log_eps - one, -log_eps + one,\n -log_eps - ten, -log_eps + ten\n ],\n use_gpu=True)\n\n def testGradient(self):\n with self.test_session():\n x = constant_op.constant(\n [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 
0.9],\n shape=[2, 5],\n name=\"x\")\n y = nn_ops.softplus(x, name=\"softplus\")\n x_init = np.asarray(\n [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],\n dtype=np.float32,\n order=\"F\")\n err = gradient_checker.compute_gradient_error(\n x, [2, 5], y, [2, 5], x_init_value=x_init)\n print(\"softplus (float) gradient err = \", err)\n self.assertLess(err, 1e-4)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "numpy.equal", "numpy.not_equal", "tensorflow.python.platform.test.main", "tensorflow.python.client.session.Session", "tensorflow.contrib.layers.python.ops.sparse_feature_cross_op.sparse_feature_cross", "tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.ops.gradient_checker.compute_gradient_error", "numpy.asarray", "tensorflow.python.platform.test.main", "numpy.all", "numpy.array", "numpy.logaddexp", "numpy.finfo", "tensorflow.python.ops.nn_ops.softplus", "tensorflow.python.framework.constant_op.constant" ] ]
vulpicastor/advent-of-code-2021
[ "12aaf84091604caf88acf3b4f7a118d866c33f5f" ]
[ "src/11.py" ]
[ "#!/usr/bin/env python3\n\n# pylint: disable=unused-import\nimport collections\nimport functools\nimport io\nimport itertools\nimport operator as op\nimport re\nimport timeit\n\nimport numpy as np\nimport aocd\n\nYEAR = 2021\nDAY = 11\n\n\ndef step(grid):\n grid += 1\n flash = np.zeros_like(grid, dtype=bool)\n while np.any(grid[~flash] > 9):\n new_flash = (grid > 9) ^ flash\n grid[:-1, :-1] += new_flash[1:, 1:]\n grid[:-1, :] += new_flash[1:, :]\n grid[:-1, 1:] += new_flash[1:, :-1]\n grid[:, :-1] += new_flash[:, 1:]\n grid[:, 1:] += new_flash[:, :-1]\n grid[1:, :-1] += new_flash[:-1, 1:]\n grid[1:, :] += new_flash[:-1, :]\n grid[1:, 1:] += new_flash[:-1, :-1]\n flash |= new_flash\n grid[flash] = 0\n return flash\n\n\ndef main():\n data = \"\"\"5483143223\n2745854711\n5264556173\n6141336146\n6357385478\n4167524645\n2176841721\n6882881134\n4846848554\n5283751526\"\"\"\n data = aocd.get_data(day=DAY, year=YEAR)\n inlist = np.array([list(map(int, l)) for l in data.split('\\n')])\n print(inlist)\n\n grid = inlist.copy()\n num_flashes = 0\n for i in range(100):\n num_flashes += np.sum(step(grid))\n print(num_flashes)\n answer = num_flashes\n\n aocd.submit(answer, part='a', day=DAY, year=YEAR)\n\n grid = inlist.copy()\n for i in itertools.count(1):\n flash = step(grid)\n if np.all(flash):\n answer = i\n break\n print(answer)\n aocd.submit(answer, part='b', day=DAY, year=YEAR)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.zeros_like", "numpy.any", "numpy.all" ] ]
elyase/jack
[ "a4f43a4012a540d55d2e05d8a904e6f8cc3002f1" ]
[ "jack/train_reader.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport logging\nimport math\nimport os\nimport random\nimport shutil\n\nimport tensorflow as tf\n\nfrom jack import readers\nfrom jack.core.tensorflow import TFReader\nfrom jack.eval import evaluate_reader, pretty_print_results\nfrom jack.util.hooks import LossHook, ExamplesPerSecHook, ETAHook\n\nlogger = logging.getLogger(__name__)\n\n\ndef train(reader, train_data, test_data, dev_data, configuration: dict, debug=False):\n if isinstance(reader, TFReader):\n train_tensorflow(reader, train_data, test_data, dev_data, configuration, debug)\n else:\n train_pytorch(reader, train_data, test_data, dev_data, configuration, debug)\n\n\ndef train_tensorflow(reader, train_data, test_data, dev_data, configuration: dict, debug=False):\n import tensorflow as tf\n seed = configuration.get('seed', 0)\n\n # make everything deterministic\n random.seed(seed)\n tf.set_random_seed(seed)\n\n clip_value = configuration.get('clip_value')\n batch_size = configuration.get('batch_size')\n dev_batch_size = configuration.get('dev_batch_size') or batch_size\n epochs = configuration.get('epochs')\n l2 = configuration.get('l2')\n optimizer = configuration.get('optimizer')\n learning_rate = configuration.get('learning_rate')\n min_learning_rate = configuration.get('min_learning_rate')\n learning_rate_decay = configuration.get('learning_rate_decay')\n log_interval = configuration.get('log_interval')\n validation_interval = configuration.get('validation_interval')\n tensorboard_folder = configuration.get('tensorboard_folder')\n reader_type = configuration.get('reader')\n save_dir = configuration.get('save_dir')\n write_metrics_to = configuration.get('write_metrics_to')\n\n if clip_value != 0.0:\n clip_value = - abs(clip_value), abs(clip_value)\n\n learning_rate = tf.get_variable(\"learning_rate\", initializer=learning_rate, dtype=tf.float32, trainable=False)\n lr_decay_op = learning_rate.assign(tf.maximum(learning_rate_decay * learning_rate, min_learning_rate))\n\n name_to_optimizer = {\n 'gd': tf.train.GradientDescentOptimizer,\n 'adam': tf.train.AdamOptimizer,\n 'adagrad': tf.train.AdagradOptimizer,\n 'adadelta': tf.train.AdadeltaOptimizer,\n 'rmsprop': tf.train.RMSPropOptimizer\n }\n\n if optimizer not in name_to_optimizer:\n raise ValueError('Unknown optimizer: {}'.format(optimizer))\n\n tf_optimizer_class = name_to_optimizer[optimizer]\n tf_optimizer = tf_optimizer_class(learning_rate=learning_rate)\n\n sw = None\n if tensorboard_folder is not None:\n if os.path.exists(tensorboard_folder):\n shutil.rmtree(tensorboard_folder)\n sw = tf.summary.FileWriter(tensorboard_folder)\n\n # Hooks\n iter_interval = 1 if debug else log_interval\n hooks = [LossHook(reader, iter_interval, summary_writer=sw),\n ETAHook(reader, iter_interval, int(math.ceil(len(train_data) / batch_size)), epochs),\n ExamplesPerSecHook(reader, batch_size, iter_interval, sw)]\n\n preferred_metric, best_metric = readers.eval_hooks[reader_type].preferred_metric_and_initial_score()\n\n def side_effect(metrics, prev_metric):\n \"\"\"Returns: a state (in this case a metric) that is used as input for the next call\"\"\"\n if prev_metric is None: # store whole reader only at beginning of training\n reader.store(save_dir)\n m = metrics[preferred_metric]\n if prev_metric is not None and m < prev_metric:\n reader.session.run(lr_decay_op)\n logger.info(\"Decayed learning rate to: %.5f\" % reader.session.run(learning_rate))\n elif m > best_metric[0] and save_dir is not None:\n best_metric[0] = m\n 
reader.model_module.store(os.path.join(save_dir, \"model_module\"))\n logger.info(\"Saving reader to: %s\" % save_dir)\n return m\n\n # this is the standard hook for the reader\n hooks.append(readers.eval_hooks[reader_type](\n reader, dev_data, dev_batch_size, summary_writer=sw, side_effect=side_effect,\n iter_interval=validation_interval,\n epoch_interval=(1 if validation_interval is None else None),\n write_metrics_to=write_metrics_to))\n\n # Train\n reader.train(tf_optimizer, train_data, batch_size, max_epochs=epochs, hooks=hooks,\n l2=l2, clip=clip_value, clip_op=tf.clip_by_value, summary_writer=sw)\n\n # Test final reader\n if dev_data is not None and save_dir is not None:\n reader.load(save_dir)\n result_dict = evaluate_reader(reader, dev_data, batch_size)\n\n logger.info(\"############### Results on the Dev Set##############\")\n pretty_print_results(result_dict)\n\n if test_data is not None and save_dir is not None:\n reader.load(save_dir)\n result_dict = evaluate_reader(reader, test_data, batch_size)\n\n logger.info(\"############### Results on the Test Set##############\")\n pretty_print_results(result_dict)\n\n\ndef train_pytorch(reader, train_data, test_data, dev_data, configuration: dict, debug=False):\n import torch\n seed = configuration.get('seed')\n\n # make everything deterministic\n random.seed(seed)\n torch.manual_seed(seed)\n\n clip_value = configuration.get('clip_value')\n batch_size = configuration.get('batch_size')\n epochs = configuration.get('epochs')\n l2 = configuration.get('l2')\n optimizer = configuration.get('optimizer')\n learning_rate = configuration.get('learning_rate')\n learning_rate_decay = configuration.get('learning_rate_decay')\n log_interval = configuration.get('log_interval')\n validation_interval = configuration.get('validation_interval')\n tensorboard_folder = configuration.get('tensorboard_folder')\n model = configuration.get('reader')\n save_dir = configuration.get('save_dir')\n write_metrics_to = configuration.get('write_metrics_to')\n\n # need setup here already :(\n reader.setup_from_data(train_data, is_training=True)\n\n if clip_value != 0.0:\n clip_value = - abs(clip_value), abs(clip_value)\n\n name_to_optimizer = {\n 'gd': torch.optim.SGD,\n 'adam': torch.optim.Adam,\n 'adagrad': torch.optim.Adagrad,\n 'adadelta': torch.optim.Adadelta\n }\n\n if optimizer not in name_to_optimizer:\n raise ValueError('Unknown optimizer: {}'.format(optimizer))\n\n torch_optimizer_class = name_to_optimizer[optimizer]\n params = list(reader.model_module.prediction_module.parameters())\n params.extend(reader.model_module.loss_module.parameters())\n\n torch_optimizer = torch_optimizer_class(params, lr=learning_rate)\n\n sw = None\n if tensorboard_folder is not None:\n if os.path.exists(tensorboard_folder):\n shutil.rmtree(tensorboard_folder)\n sw = tf.summary.FileWriter(tensorboard_folder)\n\n # Hooks\n iter_interval = 1 if debug else log_interval\n hooks = [LossHook(reader, iter_interval, summary_writer=sw),\n ExamplesPerSecHook(reader, batch_size, iter_interval, sw)]\n\n preferred_metric, best_metric = readers.eval_hooks[model].preferred_metric_and_initial_score()\n\n def side_effect(metrics, prev_metric):\n \"\"\"Returns: a state (in this case a metric) that is used as input for the next call\"\"\"\n m = metrics[preferred_metric]\n if prev_metric is not None and m < prev_metric:\n for param_group in torch_optimizer.param_groups:\n param_group['lr'] *= learning_rate_decay\n logger.info(\"Decayed learning rate to: %.5f\" % param_group['lr'])\n elif m > 
best_metric[0] and save_dir is not None:\n best_metric[0] = m\n if prev_metric is None: # store whole model only at beginning of training\n reader.store(save_dir)\n else:\n reader.model_module.store(os.path.join(save_dir, \"model_module\"))\n logger.info(\"Saving model to: %s\" % save_dir)\n return m\n\n # this is the standard hook for the model\n hooks.append(readers.eval_hooks[model](\n reader, dev_data, batch_size, summary_writer=sw, side_effect=side_effect,\n iter_interval=validation_interval,\n epoch_interval=(1 if validation_interval is None else None),\n write_metrics_to=write_metrics_to))\n\n # Train\n reader.train(torch_optimizer, train_data, batch_size, max_epochs=epochs, hooks=hooks,\n l2=l2, clip=clip_value)\n\n # Test final model\n if dev_data is not None and save_dir is not None:\n reader.load(save_dir)\n result_dict = evaluate_reader(reader, dev_data, batch_size)\n\n logger.info(\"############### Results on the Dev Set##############\")\n pretty_print_results(result_dict)\n\n if test_data is not None and save_dir is not None:\n reader.load(save_dir)\n result_dict = evaluate_reader(reader, test_data, batch_size)\n\n logger.info(\"############### Results on the Test Set##############\")\n pretty_print_results(result_dict)\n" ]
[ [ "torch.manual_seed", "tensorflow.set_random_seed", "tensorflow.summary.FileWriter", "tensorflow.get_variable", "tensorflow.maximum" ] ]
limberc/hypercl
[ "ad098a3b18cf2a2ae6e3ecd28a2b7af698f7b807" ]
[ "utils/batchnorm_layer.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2019 Christian Henning\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n- **title** :utils/batchnorm_layer.py\n- **author** :ch\n- **contact** :henningc@ethz.ch\n- **created** :09/02/2019\n- **version** :1.0\n- **python_version** :3.6.8\n\nImplementation of a hypernet compatible batchnorm layer.\n\nThe joint use of batch-normalization and hypernetworks is not straight forward,\nmainly due to the statistics accumulated by the batch-norm operation which\nexpect the weights of the main network to only change slowly. If a hypernetwork\nreplaces the whole set of weights, the statistics previously estimated by the\nbatch-norm layer might be completely off.\n\nTo circumvent this problem, we provide multiple solutions:\n\n - In a continual learning setting with one set of weights per task, we can\n simply estimate and store statistics per task (hence, the batch-norm\n operation has to be conditioned on the task).\n - The statistics are distilled into the hypernetwork. This would require\n the addition of an extra loss term.\n - The statistics can be treated as parameters that are outputted by the\n hypernetwork. In this case, nothing enforces that these \"statistics\"\n behave similar to statistics that would result from a running estimate\n (hence, the resulting operation might have nothing in common with batch-\n norm).\n - Always use the statistics estimated on the current batch.\n\nNote, we also provide the option of turning off the statistics, in which case\nthe statistics will be set to zero mean and unit variance. This is helpful when\ninterpreting batch-normalization as a general form of gain modulation (i.e.,\njust applying a shift and scale to neural activities).\n\"\"\"\nfrom warnings import warn\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BatchNormLayer(nn.Module):\n r\"\"\"Hypernetwork-compatible batch-normalization layer.\n\n Note, batch normalization performs the following operation\n\n .. math::\n\n y = \\frac{x - \\mathrm{E}[x]}{\\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\\n \\gamma + \\beta\n\n This class allows to deviate from this standard implementation in order to\n provide the flexibility required when using hypernetworks. Therefore, we\n slightly change the notation to\n\n .. 
math::\n\n y = \\frac{x - m_{\\text{stats}}^{(t)}}{\\sqrt{v_{\\text{stats}}^{(t)} + \\\n \\epsilon}} * \\gamma^{(t)} + \\beta^{(t)}\n\n We use this notation to highlight that the running statistics\n :math:`m_{\\text{stats}}^{(t)}` and :math:`v_{\\text{stats}}^{(t)}` are not\n necessarily estimates resulting from mean and variance computation but might\n be learned parameters (e.g., the outputs of a hypernetwork).\n\n We additionally use the superscript :math:`(t)` to denote that the gain\n :math:`\\gamma`, offset :math:`\\beta` and statistics may be dynamically\n selected based on some external context information.\n\n This class provides the possibility to checkpoint statistics\n :math:`m_{\\text{stats}}^{(t)}` and :math:`v_{\\text{stats}}^{(t)}`, but\n **not** gains and offsets.\n\n .. note::\n If context-dependent gains :math:`\\gamma^{(t)}` and offsets\n :math:`\\beta^{(t)}` are required, then they have to be maintained\n externally, e.g., via a task-conditioned hypernetwork (see\n `this paper`_ for an example) and passed to the :meth:`forward` method.\n\n .. _this paper: https://arxiv.org/abs/1906.00695\n\n Attributes:\n weights: A list of all internal weights of this layer. If all\n weights are assumed to be generated externally, then this\n attribute will be ``None``.\n param_shapes: A list of list of integers. Each list represents the\n shape of a parameter tensor. Note, this attribute is\n independent of the attribute :attr:`weights`, it always comprises\n the shapes of all weight tensors as if the network would be stand-\n alone (i.e., no weights being passed to the :meth:`forward` method).\n Note, unless ``learnable_stats`` is enabled, the layer statistics\n are not considered here.\n hyper_shapes: A list of list of integers. Each list represents the\n shape of a weight tensor that can be passed to the :meth:`forward`\n method. If all weights are maintained internally, then this\n attribute will be ``None``.\n Specifically, this attribute is controlled by the argument\n ``affine``. If ``affine`` is ``True``, this attribute will be\n ``None``. Otherwise this attribute contains the shape of\n :math:`\\gamma` and :math:`\\beta`.\n num_stats: The number :math:`T` of internally managed statistics\n :math:`\\{(m_{\\text{stats}}^{(1)}, v_{\\text{stats}}^{(1)}), \\dots, \\\n (m_{\\text{stats}}^{(T)}, v_{\\text{stats}}^{(T)}) \\}`. This number is\n incremented everytime the method :meth:`checkpoint_stats` is called.\n \"\"\"\n\n def __init__(self, num_features, momentum=0.1, affine=True,\n track_running_stats=True, frozen_stats=False,\n learnable_stats=False):\n r\"\"\"\n Args:\n num_features: See argument ``num_features``, for instance, of class\n :class:`torch.nn.BatchNorm1d`.\n momentum: See argument ``momentum`` of class\n :class:`torch.nn.BatchNorm1d`.\n affine: See argument ``affine`` of class\n :class:`torch.nn.BatchNorm1d`. 
If set to :code:`False`, the\n input activity will simply be \"whitened\" according to the\n applied layer statistics (except if gain :math:`\\gamma` and\n offset :math:`\\beta` are passed to the :meth:`forward` method).\n\n Note, if ``learnable_stats`` is :code:`False`, then setting\n ``affine`` to :code:`False` results in no learnable weights for\n this layer (running stats might still be updated, but not via\n gradient descent).\n\n Note, even if this option is ``False``, one may still pass a\n gain :math:`\\gamma` and offset :math:`\\beta` to the\n :meth:`forward` method.\n track_running_stats: See argument ``track_running_stats`` of class\n :class:`torch.nn.BatchNorm1d`.\n frozen_stats: If ``True``, the layer statistics are frozen at their\n initial values of :math:`\\gamma = 1` and :math:`\\beta = 0`,\n i.e., layer activity will not be whitened.\n\n Note, this option requires ``track_running_stats`` to be set to\n ``False``.\n learnable_stats: If ``True``, the layer statistics are initialized\n as learnable parameters (:code:`requires_grad=True`).\n\n Note, these extra parameters will be maintained internally and\n not added to the :attr:`weights`. Statistics can always be\n maintained externally and passed to the :meth:`forward` method.\n\n Note, this option requires ``track_running_stats`` to be set to\n ``False``.\n \"\"\"\n super(BatchNormLayer, self).__init__()\n\n if learnable_stats:\n # FIXME We need our custom stats computation for this.\n # The running stats updated by `torch.nn.functional.batch_norm` do\n # not allow backpropagation.\n # See here on how they are computed:\n # https://github.com/pytorch/pytorch/blob/96fe2b4ecbbd02143d95f467655a2d697282ac32/aten/src/ATen/native/Normalization.cpp#L137\n raise NotImplementedError('Option \"learnable_stats\" has not been ' +\n 'implemented yet!')\n\n if momentum is None:\n # If one wants to implement this, then please note that the\n # attribute `num_batches_tracked` has to be added. 
Also, note the\n # extra code for computing the momentum value in the forward method\n # of class `_BatchNorm`:\n # https://pytorch.org/docs/stable/_modules/torch/nn/modules/batchnorm.html#_BatchNorm\n raise NotImplementedError('This reimplementation of PyTorch its ' +\n 'batchnorm layer does not support ' +\n 'setting \"momentum\" to None.')\n\n if learnable_stats and track_running_stats:\n raise ValueError('Option \"track_running_stats\" must be set to ' +\n 'False when enabling \"learnable_stats\".')\n\n if frozen_stats and track_running_stats:\n raise ValueError('Option \"track_running_stats\" must be set to ' +\n 'False when enabling \"frozen_stats\".')\n\n self._num_features = num_features\n self._momentum = momentum\n self._affine = affine\n self._track_running_stats = track_running_stats\n self._frozen_stats = frozen_stats\n self._learnable_stats = learnable_stats\n\n self.register_buffer('_num_stats', torch.tensor(0, dtype=torch.long))\n\n self._weights = nn.ParameterList()\n self._param_shapes = [[num_features], [num_features]]\n\n if affine:\n # Gamma\n self.register_parameter('scale', nn.Parameter( \\\n torch.Tensor(num_features), requires_grad=True))\n # Beta\n self.register_parameter('bias', nn.Parameter( \\\n torch.Tensor(num_features), requires_grad=True))\n\n self._weights.append(self.scale)\n self._weights.append(self.bias)\n\n nn.init.ones_(self.scale)\n nn.init.zeros_(self.bias)\n\n elif not learnable_stats:\n self._weights = None\n\n if learnable_stats:\n # Don't forget to add the new params to `self._weights`.\n # Don't forget to add shapes to `self._param_shapes`.\n raise NotImplementedError()\n\n elif track_running_stats or frozen_stats:\n # Note, in case of frozen stats, we just don't update the stats\n # initialized here later on.\n self.checkpoint_stats()\n else:\n mname, vname = self._stats_names(0)\n self.register_buffer(mname, None)\n self.register_buffer(vname, None)\n\n @property\n def weights(self):\n \"\"\"Getter for read-only attribute :attr:`weights`.\n\n Returns:\n A :class:`torch.nn.ParameterList` or ``None``, if no parameters are\n internally maintained.\n \"\"\"\n return self._weights\n\n @property\n def param_shapes(self):\n \"\"\"Getter for read-only attribute :attr:`param_shapes`.\n\n Returns:\n A list of lists of integers.\n \"\"\"\n return self._param_shapes\n\n @property\n def hyper_shapes(self):\n \"\"\"Getter for read-only attribute :attr:`hyper_shapes`.\n\n Returns:\n A list of lists of integers.\n \"\"\"\n # FIXME not implemented attribute. Do we even need the attribute, given\n # that all components are individually passed to the forward method?\n raise NotImplementedError('Not implemented yet!')\n return self._hyper_shapes\n\n @property\n def num_stats(self):\n \"\"\"Getter for read-only attribute :attr:`num_stats`.\n\n Returns:\n (int)\n \"\"\"\n return self._num_stats\n\n def forward(self, inputs, running_mean=None, running_var=None, weight=None,\n bias=None, stats_id=None):\n r\"\"\"Apply batch normalization to given layer activations.\n\n Based on the state if this module (attribute :attr:`training`), the\n configuration of this layer and the parameters currently passed, the\n behavior of this function will be different.\n\n The core of this method still relies on the function\n :func:`torch.nn.functional.batch_norm`. 
In the following we list the\n different behaviors of this method based on the context.\n\n **In training mode:**\n\n We first consider the case that this module is in training mode, i.e.,\n :meth:`torch.nn.Module.train` has been called.\n\n Usually, during training, the running statistics are not used when\n computing the output, instead the statistics computed on the current\n batch are used (denoted by *use batch stats* in the table below).\n However, the batch statistics are typically updated during training\n (denoted by *update running stats* in the table below).\n\n The above described scenario would correspond to passing batch\n statistics to the function :func:`torch.nn.functional.batch_norm` and\n setting the parameter ``training`` to ``True``.\n\n +----------------------+---------------------+-------------------------+\n | **training mode** | **use batch stats** | **update running stats**|\n +----------------------+---------------------+-------------------------+\n | given stats | Yes | Yes |\n +----------------------+---------------------+-------------------------+\n | track running stats | Yes | Yes |\n +----------------------+---------------------+-------------------------+\n | frozen stats | No | No |\n +----------------------+---------------------+-------------------------+\n | learnable stats | Yes | Yes [1]_ |\n +----------------------+---------------------+-------------------------+\n |no track running stats| Yes | No |\n +----------------------+---------------------+-------------------------+\n\n The meaning of each row in this table is as follows:\n\n - **given stats**: External stats are provided via the parameters\n ``running_mean`` and ``running_var``.\n - **track running stats**: If ``track_running_stats`` was set to\n ``True`` in the constructor and no stats were given.\n - **frozen stats**: If ``frozen_stats`` was set to ``True`` in the\n constructor and no stats were given.\n - **learnable stats**: If ``learnable_stats`` was set to ``True`` in\n the constructor and no stats were given.\n - **no track running stats**: If none of the above options apply,\n then the statistics will always be computed from the current batch\n (also in eval mode).\n\n .. note::\n If provided, running stats specified via ``running_mean`` and\n ``running_var`` always have priority.\n\n .. 
[1] We use a custom implementation to update the running statistics,\n that is compatible with backpropagation.\n\n **In evaluation mode:**\n\n We now consider the case that this module is in evaluation mode, i.e.,\n :meth:`torch.nn.Module.eval` has been called.\n\n Here is the same table as above just for the evaluation mode.\n\n +----------------------+---------------------+-------------------------+\n | **evaluation mode** | **use batch stats** | **update running stats**|\n +----------------------+---------------------+-------------------------+\n | track running stats | No | No |\n +----------------------+---------------------+-------------------------+\n | frozen stats | No | No |\n +----------------------+---------------------+-------------------------+\n | learnable stats | No | No |\n +----------------------+---------------------+-------------------------+\n | given stats | No | No |\n +----------------------+---------------------+-------------------------+\n |no track running stats| Yes | No |\n +----------------------+---------------------+-------------------------+\n\n Args:\n inputs: The inputs to the batchnorm layer.\n running_mean (optional): Running mean stats\n :math:`m_{\\text{stats}}`. This option has priority, i.e., any\n internally maintained statistics are ignored if given.\n\n .. note::\n If specified, then ``running_var`` also has to be specified.\n running_var (optional): Similar to option ``running_mean``, but for\n the running variance stats :math:`v_{\\text{stats}}`\n\n .. note::\n If specified, then ``running_mean`` also has to be\n specified.\n weight (optional): The gain factors :math:`\\gamma`. If given, any\n internal gains are ignored. If option ``affine`` was set to\n ``False`` in the constructor and this option remains ``None``,\n then no gains are multiplied to the \"whitened\" inputs.\n bias (optional): The behavior of this option is similar to option\n ``weight``, except that this option represents the offsets\n :math:`\\beta`.\n stats_id: This argument is optional except if multiple running\n stats checkpoints exist (i.e., attribute :attr:`num_stats` is\n greater than 1) and no running stats have been provided to this\n method.\n\n .. note::\n This argument is ignored if running stats have been passed.\n\n Returns:\n The layer activation ``inputs`` after batch-norm has been applied.\n \"\"\"\n assert (running_mean is None and running_var is None or \\\n running_mean is not None and running_var is not None)\n\n if not self._affine:\n if weight is None or bias is None:\n raise ValueError('Layer was generated in non-affine mode. 
' +\n 'Therefore, arguments \"weight\" and \"bias\" ' +\n 'may not be None.')\n\n # No gains given but we have internal gains.\n # Otherwise, if no gains are given we leave `weight` as None.\n if weight is None and self._affine:\n weight = self.scale\n if bias is None and self._affine:\n bias = self.bias\n\n stats_given = running_mean is not None\n\n if (running_mean is None or running_var is None):\n if stats_id is None and self.num_stats > 1:\n raise ValueError('Parameter \"stats_id\" is not defined but ' +\n 'multiple running stats are available.')\n elif self._track_running_stats:\n if stats_id is None:\n stats_id = 0\n assert (stats_id < self.num_stats)\n\n rm, rv = self.get_stats(stats_id)\n\n if running_mean is None:\n running_mean = rm\n if running_var is None:\n running_var = rv\n elif stats_id is not None:\n warn('Parameter \"stats_id\" is ignored since running stats have ' +\n 'been provided.')\n\n momentum = self._momentum\n\n if stats_given or self._track_running_stats:\n return F.batch_norm(inputs, running_mean, running_var,\n weight=weight, bias=bias,\n training=self.training, momentum=momentum)\n\n if self._learnable_stats:\n raise NotImplementedError()\n\n if self._frozen_stats:\n return F.batch_norm(inputs, running_mean, running_var,\n weight=weight, bias=bias, training=False)\n\n # TODO implement scale and shift here. Note, that `running_mean` and\n # `running_var` are always 0 and 1, resp. Therefore, the call to\n # `F.batch_norm` is a waste of computation.\n # ret = inputs\n # if weight is not None:\n # # Multiply `ret` with `weight` such that dimensions are\n # # respected.\n # pass\n # if bias is not None:\n # # Add `bias` to modified `ret` such that dimensions are\n # # respected.\n # pass\n # return ret\n\n else:\n assert (not self._track_running_stats)\n\n # Always compute statistics based on current batch.\n return F.batch_norm(inputs, None, None, weight=weight, bias=bias,\n training=True, momentum=momentum)\n\n def checkpoint_stats(self, device=None):\n \"\"\"Buffers for a new set of running stats will be registered.\n\n Calling this function will also increment the attribute\n :attr:`num_stats`.\n\n Args:\n device (optional): If not provided, the newly created statistics\n will either be moved to the device of the most recent statistics\n or to CPU if no prior statistics exist.\n \"\"\"\n assert (self._track_running_stats or \\\n self._frozen_stats and self._num_stats == 0)\n\n if device is None:\n if self.num_stats > 0:\n mname_old, _ = self._stats_names(self._num_stats - 1)\n device = getattr(self, mname_old).device\n\n if self._learnable_stats:\n raise NotImplementedError()\n\n mname, vname = self._stats_names(self._num_stats)\n self._num_stats += 1\n\n self.register_buffer(mname, torch.zeros(self._num_features,\n device=device))\n self.register_buffer(vname, torch.ones(self._num_features,\n device=device))\n\n def get_stats(self, stats_id=None):\n \"\"\"Get a set of running statistics (means and variances).\n\n Args:\n stats_id (optional): ID of stats. 
If not provided, the most recent\n stats are returned.\n\n Returns:\n (tuple): Tuple containing:\n\n - **running_mean**\n - **running_var**\n \"\"\"\n if stats_id is None:\n stats_id = self.num_stats - 1\n assert (stats_id < self.num_stats)\n\n mname, vname = self._stats_names(stats_id)\n\n running_mean = getattr(self, mname)\n running_var = getattr(self, vname)\n\n return running_mean, running_var\n\n def _stats_names(self, stats_id):\n \"\"\"Get the buffer names for mean and variance statistics depending on\n the ``stats_id``, i.e., the ID of the stats checkpoint.\n\n Args:\n stats_id: ID of stats.\n\n Returns:\n (tuple): Tuple containing:\n\n - **mean_name**\n - **var_name**\n \"\"\"\n mean_name = 'mean_%d' % stats_id\n var_name = 'var_%d' % stats_id\n\n return mean_name, var_name\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "torch.ones", "torch.tensor", "torch.nn.ParameterList", "torch.nn.functional.batch_norm", "torch.nn.init.ones_", "torch.nn.init.zeros_", "torch.zeros", "torch.Tensor" ] ]
calebchoo/modulabs
[ "10fbaf0581700641fc9b38b1bd722044bfb7c638" ]
[ "tensorflow/contrib/learn/python/learn/estimators/linear.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Linear Estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.learn.python.learn.estimators import _sklearn\nfrom tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined\nfrom tensorflow.contrib.learn.python.learn.estimators import sdca_optimizer\nfrom tensorflow.contrib.learn.python.learn.estimators.base import DeprecatedMixin\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.platform import tf_logging as logging\n\n\n# TODO(b/29580537): Replace with @changing decorator.\ndef _changing(feature_columns):\n if feature_columns is not None:\n return\n logging.warn(\n \"Change warning: `feature_columns` will be required after 2016-08-01.\\n\"\n \"Instructions for updating:\\n\"\n \"Pass `tf.contrib.learn.infer_real_valued_columns_from_input(x)` or\"\n \" `tf.contrib.learn.infer_real_valued_columns_from_input_fn(input_fn)`\"\n \" as `feature_columns`, where `x` or `input_fn` is your argument to\"\n \" `fit`, `evaluate`, or `predict`.\")\n\n\nclass LinearClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):\n \"\"\"Linear classifier model.\n\n Train a linear model to classify instances into one of multiple possible\n classes. 
When number of possible classes is 2, this is binary classification.\n\n Example:\n\n ```python\n education = sparse_column_with_hash_bucket(column_name=\"education\",\n hash_bucket_size=1000)\n occupation = sparse_column_with_hash_bucket(column_name=\"occupation\",\n hash_bucket_size=1000)\n\n education_x_occupation = crossed_column(columns=[education, occupation],\n hash_bucket_size=10000)\n\n # Estimator using the default optimizer.\n estimator = LinearClassifier(\n feature_columns=[occupation, education_x_occupation])\n\n # Or estimator using the FTRL optimizer with regularization.\n estimator = LinearClassifier(\n feature_columns=[occupation, education_x_occupation],\n optimizer=tf.train.FtrlOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Or estimator using the SDCAOptimizer.\n estimator = LinearClassifier(\n feature_columns=[occupation, education_x_occupation],\n optimizer=tf.contrib.learn.SDCAOptimizer(\n example_id_column='example_id',\n symmetric_l2_regularization=2.0\n ))\n\n # Input builders\n def input_fn_train: # returns x, y\n ...\n def input_fn_eval: # returns x, y\n ...\n estimator.fit(input_fn=input_fn_train)\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x)\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `RealValuedColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n - if `feature_columns` is `None`, then `input` must contains only real\n valued `Tensor`.\n \"\"\"\n\n def __init__(self,\n feature_columns=None,\n model_dir=None,\n n_classes=2,\n weight_column_name=None,\n optimizer=None,\n gradient_clip_norm=None,\n enable_centered_bias=True,\n config=None):\n \"\"\"Construct a `LinearClassifier` estimator object.\n\n Args:\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc.\n n_classes: number of target classes. Default is binary classification.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n optimizer: The optimizer used to train the model. If specified, it should\n be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,\n the Ftrl optimizer will be used.\n gradient_clip_norm: A `float` > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n `tf.clip_by_global_norm` for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. 
Rest of the model structure learns the\n residual after centered bias.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n A `LinearClassifier` estimator.\n \"\"\"\n _changing(feature_columns)\n super(LinearClassifier, self).__init__(\n model_dir=model_dir,\n n_classes=n_classes,\n weight_column_name=weight_column_name,\n linear_feature_columns=feature_columns,\n linear_optimizer=optimizer,\n gradient_clip_norm=gradient_clip_norm,\n enable_centered_bias=enable_centered_bias,\n config=config)\n self._feature_columns_inferred = False\n\n # TODO(b/29580537): Remove feature_columns inference.\n def _validate_linear_feature_columns(self, features):\n if self._linear_feature_columns is None:\n self._linear_feature_columns = layers.infer_real_valued_columns(features)\n self._feature_columns_inferred = True\n elif self._feature_columns_inferred:\n this_dict = {c.name: c for c in self._linear_feature_columns}\n that_dict = {\n c.name: c for c in layers.infer_real_valued_columns(features)\n }\n if this_dict != that_dict:\n raise ValueError(\n \"Feature columns, expected %s, got %s.\", (this_dict, that_dict))\n\n def _get_train_ops(self, features, targets):\n \"\"\"See base class.\"\"\"\n self._validate_linear_feature_columns(features)\n if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):\n return super(LinearClassifier, self)._get_train_ops(features, targets)\n\n # SDCA currently supports binary classification only.\n if self._target_column.num_label_columns > 2:\n raise ValueError(\n \"SDCA does not currently support multi-class classification.\")\n global_step = contrib_variables.get_global_step()\n assert global_step\n\n logits, columns_to_variables, _ = layers.weighted_sum_from_feature_columns(\n columns_to_tensors=features,\n feature_columns=self._linear_feature_columns,\n num_outputs=self._target_column.num_label_columns,\n weight_collections=[self._linear_weight_collection],\n scope=\"linear\")\n with ops.control_dependencies([self._centered_bias()]):\n loss = self._target_column.loss(logits, targets, features)\n logging_ops.scalar_summary(\"loss\", loss)\n\n train_ops = self._linear_optimizer.get_train_step(\n self._linear_feature_columns, self._target_column.weight_column_name,\n \"logistic_loss\", features, targets, columns_to_variables, global_step)\n\n return train_ops, loss\n\n def _get_eval_ops(self, features, targets, metrics=None):\n self._validate_linear_feature_columns(features)\n return super(LinearClassifier, self)._get_eval_ops(\n features, targets, metrics)\n\n def _get_predict_ops(self, features):\n \"\"\"See base class.\"\"\"\n self._validate_linear_feature_columns(features)\n return super(LinearClassifier, self)._get_predict_ops(features)\n\n @property\n def weights_(self):\n return self.linear_weights_\n\n @property\n def bias_(self):\n return self.linear_bias_\n\n\nclass LinearRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):\n \"\"\"Linear regressor model.\n\n Train a linear regression model to predict target variable value given\n observation of feature values.\n\n Example:\n\n ```python\n education = sparse_column_with_hash_bucket(column_name=\"education\",\n hash_bucket_size=1000)\n occupation = sparse_column_with_hash_bucket(column_name=\"occupation\",\n hash_bucket_size=1000)\n\n education_x_occupation = crossed_column(columns=[education, occupation],\n hash_bucket_size=10000)\n\n estimator = LinearRegressor(\n feature_columns=[occupation, education_x_occupation])\n\n # Input builders\n def input_fn_train: # 
returns x, y\n ...\n def input_fn_eval: # returns x, y\n ...\n estimator.fit(input_fn=input_fn_train)\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x)\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a KeyError:\n\n * if `weight_column_name` is not `None`:\n key=weight_column_name, value=a `Tensor`\n * for column in `feature_columns`:\n - if isinstance(column, `SparseColumn`):\n key=column.name, value=a `SparseTensor`\n - if isinstance(column, `RealValuedColumn`):\n key=column.name, value=a `Tensor`\n - if `feature_columns` is `None`:\n input must contains only real valued `Tensor`.\n \"\"\"\n\n def __init__(self,\n feature_columns=None,\n model_dir=None,\n weight_column_name=None,\n optimizer=None,\n gradient_clip_norm=None,\n enable_centered_bias=True,\n target_dimension=1,\n config=None):\n \"\"\"Construct a `LinearRegressor` estimator object.\n\n Args:\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n model_dir: Directory to save model parameters, graph, etc.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n optimizer: An instance of `tf.Optimizer` used to train the model. If\n `None`, will use an Ftrl optimizer.\n gradient_clip_norm: A `float` > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n `tf.clip_by_global_norm` for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n target_dimension: dimension of the target for multilabels.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n A `LinearRegressor` estimator.\n \"\"\"\n _changing(feature_columns)\n super(LinearRegressor, self).__init__(\n model_dir=model_dir,\n weight_column_name=weight_column_name,\n linear_feature_columns=feature_columns,\n linear_optimizer=optimizer,\n gradient_clip_norm=gradient_clip_norm,\n enable_centered_bias=enable_centered_bias,\n target_dimension=target_dimension,\n config=config)\n self._feature_columns_inferred = False\n\n # TODO(b/29580537): Remove feature_columns inference.\n def _validate_linear_feature_columns(self, features):\n if self._linear_feature_columns is None:\n self._linear_feature_columns = layers.infer_real_valued_columns(features)\n self._feature_columns_inferred = True\n elif self._feature_columns_inferred:\n this_dict = {c.name: c for c in self._linear_feature_columns}\n that_dict = {\n c.name: c for c in layers.infer_real_valued_columns(features)\n }\n if this_dict != that_dict:\n raise ValueError(\n \"Feature columns, expected %s, got %s.\", (this_dict, that_dict))\n\n def _get_train_ops(self, features, targets):\n \"\"\"See base class.\"\"\"\n if isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):\n raise ValueError(\"SDCAOptimizer does not currently support regression.\")\n self._validate_linear_feature_columns(features)\n return super(LinearRegressor, self)._get_train_ops(features, targets)\n\n def _get_eval_ops(self, features, targets, metrics=None):\n self._validate_linear_feature_columns(features)\n return super(LinearRegressor, self)._get_eval_ops(\n features, targets, metrics)\n\n def _get_predict_ops(self, 
features):\n \"\"\"See base class.\"\"\"\n self._validate_linear_feature_columns(features)\n return super(LinearRegressor, self)._get_predict_ops(features)\n\n @property\n def weights_(self):\n return self.linear_weights_\n\n @property\n def bias_(self):\n return self.linear_bias_\n\n\n# TensorFlowLinearRegressor and TensorFlowLinearClassifier are deprecated.\nclass TensorFlowLinearRegressor(DeprecatedMixin, LinearRegressor,\n _sklearn.RegressorMixin):\n pass\n\n\nclass TensorFlowLinearClassifier(DeprecatedMixin, LinearClassifier,\n _sklearn.ClassifierMixin):\n pass\n\n\nTensorFlowRegressor = TensorFlowLinearRegressor\nTensorFlowClassifier = TensorFlowLinearClassifier\n" ]
[ [ "tensorflow.python.ops.logging_ops.scalar_summary", "tensorflow.python.platform.tf_logging.warn", "tensorflow.contrib.layers.infer_real_valued_columns", "tensorflow.contrib.layers.weighted_sum_from_feature_columns", "tensorflow.contrib.framework.python.ops.variables.get_global_step" ] ]
peri044/TRTorch
[ "62c9830b24552651abbff611515114cbcaca8b7b" ]
[ "py/trtorch/_compile_spec.py" ]
[ "from typing import List, Dict, Any\nimport torch\nimport trtorch._C\nfrom trtorch import _types\n\n\ndef _supported_input_size_type(input_size: Any) -> bool:\n if isinstance(input_size, torch.Size):\n return True\n elif isinstance(input_size, tuple):\n return True\n elif isinstance(input_size, list):\n return True\n else:\n raise TypeError(\n \"Input sizes for inputs are required to be a List, tuple or torch.Size or a Dict of three sizes (min, opt, max), found type: \"\n + str(type(input_size)))\n\n\ndef _parse_input_ranges(input_sizes: List) -> List:\n\n if any(not isinstance(i, dict) and not _supported_input_size_type(i) for i in input_sizes):\n raise KeyError(\"An input size must either be a static size or a range of three sizes (min, opt, max) as Dict\")\n\n parsed_input_sizes = []\n for i in input_sizes:\n if isinstance(i, dict):\n if all(k in i for k in [\"min\", \"opt\", \"min\"]):\n in_range = trtorch._C.InputRange()\n in_range.min = i[\"min\"]\n in_range.opt = i[\"opt\"]\n in_range.max = i[\"max\"]\n parsed_input_sizes.append(in_range)\n\n elif \"opt\" in i:\n in_range = trtorch._C.InputRange()\n in_range.min = i[\"opt\"]\n in_range.opt = i[\"opt\"]\n in_range.max = i[\"opt\"]\n parsed_input_sizes.append(in_range)\n\n else:\n raise KeyError(\n \"An input size must either be a static size or a range of three sizes (min, opt, max) as Dict\")\n\n elif isinstance(i, list):\n in_range = trtorch._C.InputRange()\n in_range.min = i\n in_range.opt = i\n in_range.max = i\n parsed_input_sizes.append(in_range)\n\n elif isinstance(i, tuple):\n in_range = trtorch._C.InputRange()\n in_range.min = list(i)\n in_range.opt = list(i)\n in_range.max = list(i)\n parsed_input_sizes.append(in_range)\n\n return parsed_input_sizes\n\n\ndef _parse_op_precision(precision: Any) -> _types.dtype:\n if isinstance(precision, torch.dtype):\n if precision == torch.int8:\n return _types.dtype.int8\n elif precision == torch.half:\n return _types.dtype.half\n elif precision == torch.float:\n return _types.dtype.float\n else:\n raise TypeError(\"Provided an unsupported dtype as operating precision (support: int8, half, float), got: \" +\n str(precision))\n\n elif isinstance(precision, _types.DataTypes):\n return precision\n\n else:\n raise TypeError(\"Op precision type needs to be specified with a torch.dtype or a trtorch.dtype, got: \" +\n str(type(precision)))\n\n\ndef _parse_device_type(device: Any) -> _types.DeviceType:\n if isinstance(device, torch.device):\n if device.type == 'cuda':\n return _types.DeviceType.gpu\n else:\n ValueError(\"Got a device type other than GPU or DLA (type: \" + str(device.type) + \")\")\n elif isinstance(device, _types.DeviceType):\n return device\n elif isinstance(device, str):\n if device == \"gpu\" or device == \"GPU\":\n return _types.DeviceType.gpu\n elif device == \"dla\" or device == \"DLA\":\n return _types.DeviceType.dla\n else:\n ValueError(\"Got a device type other than GPU or DLA (type: \" + str(device) + \")\")\n else:\n raise TypeError(\"Device specification must be of type torch.device, string or trtorch.DeviceType, but got: \" +\n str(type(device)))\n\n\ndef _parse_compile_spec(compile_spec: Dict[str, Any]) -> trtorch._C.CompileSpec:\n info = trtorch._C.CompileSpec()\n if \"input_shapes\" not in compile_spec:\n raise KeyError(\n \"Input shapes for inputs are required as a List, provided as either a static sizes or a range of three sizes (min, opt, max) as Dict\"\n )\n\n info.input_ranges = _parse_input_ranges(compile_spec[\"input_shapes\"])\n\n if \"op_precision\" in 
compile_spec:\n info.op_precision = _parse_op_precision(compile_spec[\"op_precision\"])\n\n if \"refit\" in compile_spec:\n assert isinstance(compile_spec[\"refit\"], bool)\n info.refit = compile_spec[\"refit\"]\n\n if \"debug\" in compile_spec:\n assert isinstance(compile_spec[\"debug\"], bool)\n info.debug = compile_spec[\"debug\"]\n\n if \"strict_types\" in compile_spec:\n assert isinstance(compile_spec[\"strict_types\"], bool)\n info.strict_types = compile_spec[\"strict_types\"]\n\n if \"allow_gpu_fallback\" in compile_spec:\n assert isinstance(compile_spec[\"allow_gpu_fallback\"], bool)\n info.allow_gpu_fallback = compile_spec[\"allow_gpu_fallback\"]\n\n if \"device_type\" in compile_spec:\n info.device = _parse_device_type(compile_spec[\"device_type\"])\n\n if \"capability\" in compile_spec:\n assert isinstance(compile_spec[\"capability\"], _types.EngineCapability)\n info.capability = compile_spec[\"capability\"]\n\n if \"num_min_timing_iters\" in compile_spec:\n assert type(compile_spec[\"num_min_timing_iters\"]) is int\n info.num_min_timing_iters = compile_spec[\"num_min_timing_iters\"]\n\n if \"num_avg_timing_iters\" in compile_spec:\n assert type(compile_spec[\"num_avg_timing_iters\"]) is int\n info.num_avg_timing_iters = compile_spec[\"num_avg_timing_iters\"]\n\n if \"workspace_size\" in compile_spec:\n assert type(compile_spec[\"workspace_size\"]) is int\n info.workspace_size = compile_spec[\"workspace_size\"]\n\n if \"max_batch_size\" in compile_spec:\n assert type(compile_spec[\"max_batch_size\"]) is int\n info.max_batch_size = compile_spec[\"max_batch_size\"]\n\n return info\n\n\ndef TensorRTCompileSpec(compile_spec: Dict[str, Any]):\n \"\"\"\n Utility to create a formated spec dictionary for using the PyTorch TensorRT backend\n\n Args:\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional. Entries for each method to be compiled.\n\n .. code-block:: py\n\n CompileSpec = {\n \"forward\" : trtorch.TensorRTCompileSpec({\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"refit\": False, # enable refit\n \"debug\": False, # enable debuggable engine\n \"strict_types\": False, # kernels should strictly run in operating precision\n \"allow_gpu_fallback\": True, # (DLA only) Allow layers unsupported on DLA to run on GPU\n \"device\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n })\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. 
Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n torch.classes.tensorrt.CompileSpec: List of methods and formatted spec objects to be provided to ``torch._C._jit_to_tensorrt``\n \"\"\"\n\n parsed_spec = _parse_compile_spec(compile_spec)\n\n backend_spec = torch.classes.tensorrt.CompileSpec()\n\n for i in parsed_spec.input_ranges:\n ir = torch.classes.tensorrt.InputRange()\n ir.set_min(i.min)\n ir.set_opt(i.opt)\n ir.set_max(i.max)\n backend_spec.append_input_range(ir)\n\n backend_spec.set_op_precision(int(parsed_spec.op_precision))\n backend_spec.set_refit(parsed_spec.refit)\n backend_spec.set_debug(parsed_spec.debug)\n backend_spec.set_strict_types(parsed_spec.strict_types)\n backend_spec.set_allow_gpu_fallback(parsed_spec.allow_gpu_fallback)\n backend_spec.set_device(int(parsed_spec.device))\n backend_spec.set_capability(int(parsed_spec.capability))\n backend_spec.set_num_min_timing_iters(parsed_spec.num_min_timing_iters)\n backend_spec.set_num_avg_timing_iters(parsed_spec.num_avg_timing_iters)\n backend_spec.set_workspace_size(parsed_spec.workspace_size)\n backend_spec.set_max_batch_size(parsed_spec.max_batch_size)\n\n return backend_spec\n" ]
[ [ "torch.classes.tensorrt.CompileSpec", "torch.classes.tensorrt.InputRange" ] ]
brnor/dipl
[ "db516610aecffb10825e899fb5aa9f2902093b6e" ]
[ "gym_puyopuyo/test-feedforward-smallenv.py" ]
[ "from __future__ import print_function\n\nimport os\nimport pickle\nimport time\n\nfrom gym_puyopuyo import register\nimport gym\nimport numpy as np\n\nimport neat\nimport visualize\n\npiece_shape = (3, 2)\nDRAW_NETS = False\nNUM_COLORS = 3.0 # 3 colors in the small env mode\n# TODO: could probably read color number from observation data\nfn_results = \"feedforward-small\"\n\ndef multiplyMatrices(pieces, field, norm = True):\n pieces = pieces.astype(np.float64)\n field = field.astype(np.float64)\n pieces_sum = np.zeros(piece_shape)\n field_sum = np.zeros(field[0].shape)\n for i in range(0, len(pieces)):\n pieces[i] = np.multiply(pieces[i], i + 1)\n if(norm):\n pieces[i] /= NUM_COLORS\n pieces_sum += pieces[i]\n for i in range(0, len(field)):\n field[i] = np.multiply(field[i], i + 1)\n if(norm):\n field[i] /= NUM_COLORS\n field_sum += field[i]\n \n return pieces_sum, field_sum\n\ndef run():\n with open(\"results/winner-pickle-\"+fn_results, 'rb') as f:\n c = pickle.load(f)\n \n print('loaded genome:')\n print(c)\n\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config-feedforward-small')\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_path)\n\n net = neat.nn.FeedForwardNetwork.create(c, config)\n register()\n env = gym.make(\"PuyoPuyoEndlessSmall-v2\")\n done = False\n ob = env.reset()\n count = 0\n total_reward = 0\n\n while True:\n env.render()\n #input()\n time.sleep(0.5)\n pieces_sum, field_sum = multiplyMatrices(ob[0], ob[1])\n next_piece = pieces_sum[0]\n \n inp_piece = np.ndarray.flatten(next_piece)\n inp_field = np.ndarray.flatten(field_sum)\n inputs = np.hstack([inp_piece, inp_field])\n \n nn_output = net.activate(inputs)\n action = np.argmax(nn_output)\n #print(nn_output)\n #nn_output = int(round(nn_output[0] * NUM_ACTIONS))\n #print(nn_output)\n #input()\n \n ob, rew, done, info = env.step(action)\n \n total_reward += rew\n count += 1\n \n if done:\n break\n\n print(\"Game played for \", count, \" turns.\")\n print(\"Total score: \", total_reward)\n\n if DRAW_NETS:\n visualize.draw_net(config, c, view=True, \n filename=\"results/winner-\"+fn_results+\".net\")\n \n visualize.draw_net(config, c, view=True, \n filename=\"results/winner-\"+fn_results+\"-enabled.net\",\n show_disabled=False)\n \n visualize.draw_net(config, c, view=True, \n filename=\"results/winner-\"+fn_results+\"-pruned.net\",\n show_disabled=False, prune_unused=True)\n\nif __name__ == '__main__':\n run()\n" ]
[ [ "numpy.multiply", "numpy.ndarray.flatten", "numpy.zeros", "numpy.argmax", "numpy.hstack" ] ]
kumi123/pytorch-learning
[ "29f5b4d53f4e72b95b3fab979b1bc496ef23674c" ]
[ "chapter9_Computer-Vision/Deep-Dream/util.py" ]
[ "import PIL.Image\nfrom io import BytesIO\nfrom IPython.display import clear_output, Image, display\nimport numpy as np\n\n\ndef showarray(a, fmt='jpeg'):\n a = np.uint8(np.clip(a, 0, 255))\n f = BytesIO()\n PIL.Image.fromarray(a).save(f, fmt)\n display(Image(data=f.getvalue()))\n\n\ndef showtensor(a):\n mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])\n std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])\n inp = a[0, :, :, :]\n inp = inp.transpose(1, 2, 0)\n inp = std * inp + mean\n inp *= 255\n showarray(inp)\n clear_output(wait=True)\n" ]
[ [ "numpy.array", "numpy.clip" ] ]
TopCoder2K/mdetr
[ "aedfd63f550ae36d1477484c489a2aa438d10aa3" ]
[ "datasets/vqa_v2.py" ]
[ "# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved\n\"\"\"\nCOCO dataset which returns image_id for evaluation.\n\nMostly copy-paste from https://github.com/ashkamath/mdetr/blob/main/datasets/gqa.py\n\"\"\"\nimport json\nfrom pathlib import Path\n\nimport torch\nimport torchvision\nfrom transformers import RobertaTokenizerFast\n\nfrom .coco import ConvertCocoPolysToMask, ModulatedDetection, make_coco_transforms\n\nclass VQAv2Detection(ModulatedDetection):\n pass\n\nclass VQAv2QuestionAnswering(torchvision.datasets.CocoDetection):\n def __init__(self, img_folder, ann_file, transforms, return_masks, return_tokens, tokenizer, ann_folder):\n super(VQAv2QuestionAnswering, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n self.prepare = ConvertCocoPolysToMask(return_masks, return_tokens, tokenizer=tokenizer)\n with open(ann_folder / \"vqa2_answer2id.json\", \"r\") as f:\n self.answer2id = json.load(f)\n with open(ann_folder / \"vqa2_answer2id_by_type.json\", \"r\") as f:\n self.answer2id_by_type = json.load(f)\n self.type2id = {\"yes/no\": 0, \"number\": 1, \"other\": 2}\n\n def __getitem__(self, idx):\n img, target = super(VQAv2QuestionAnswering, self).__getitem__(idx)\n image_id = self.ids[idx]\n coco_img = self.coco.loadImgs(image_id)[0]\n caption = coco_img[\"caption\"]\n dataset_name = coco_img[\"dataset_name\"]\n questionId = coco_img[\"questionId\"]\n target = {\"image_id\": image_id, \"annotations\": target, \"caption\": caption}\n img, target = self.prepare(img, target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n target[\"dataset_name\"] = dataset_name\n target[\"questionId\"] = questionId\n\n if coco_img[\"answer\"] not in self.answer2id:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n\n target[\"answer\"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long)\n target[\"answer_type\"] = torch.as_tensor(self.type2id[coco_img[\"answer_type\"]], dtype=torch.long)\n\n # util.misc.collate_fn requires to put 'answer' before every type of answer in target\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"yes/no\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_yes/no\"] = torch.as_tensor(\n self.answer2id_by_type[\"yes/no\"][answer] if coco_img[\"answer_type\"] == \"yes/no\" else -100,\n dtype=torch.long,\n )\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"number\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_number\"] = torch.as_tensor(\n self.answer2id_by_type[\"number\"][answer] if coco_img[\"answer_type\"] == \"number\" else -100,\n dtype=torch.long,\n )\n\n if coco_img[\"answer\"] not in self.answer2id_by_type[\"other\"]:\n answer = \"unknown\"\n else:\n answer = coco_img[\"answer\"]\n target[\"answer_other\"] = torch.as_tensor(\n self.answer2id_by_type[\"other\"][answer] if coco_img[\"answer_type\"] == \"other\" else -100,\n dtype=torch.long,\n )\n\n return img, target\n\n\ndef build(image_set, args):\n # TODO: img or all?\n img_dir = Path(args.coco_img_path)\n assert img_dir.exists(), f\"provided COCO img path {img_dir} does not exist\"\n\n tokenizer = RobertaTokenizerFast.from_pretrained(args.text_encoder_type)\n\n if args.do_qa:\n # Для vqa2 это не нужно:\n # assert args.vqa2_split_type is not None\n\n if image_set == \"train\":\n datasets = []\n for imset in [\"train\", \"minival\"]:\n ann_file = Path(args.vqa2_ann_path) / 
f\"finetune_vqa2_{imset}.json\"\n\n datasets.append(\n VQAv2QuestionAnswering(\n img_dir / \"train2014\" if imset == \"train\" else img_dir / \"val2014\",\n ann_file,\n transforms=make_coco_transforms(image_set, cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.vqa2_ann_path),\n )\n )\n\n return torch.utils.data.ConcatDataset(datasets)\n elif image_set == \"val\":\n # TODO: правильный ли ann_file?\n ann_file = Path(args.vqa2_ann_path) / f\"finetune_vqa2_minival.json\"\n\n return VQAv2QuestionAnswering(\n img_dir / \"val2014\",\n ann_file,\n transforms=make_coco_transforms(image_set, cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.vqa2_ann_path),\n )\n elif image_set in [\"test\", \"testdev\", \"trainval\"]:\n ann_file = Path(args.vqa2_ann_path) / f\"finetune_vqa2_{image_set}.json\"\n\n return VQAv2QuestionAnswering(\n img_dir / \"test2015\",\n ann_file,\n transforms=make_coco_transforms(\"val\", cautious=True),\n return_masks=args.masks,\n return_tokens=True,\n tokenizer=tokenizer,\n ann_folder=Path(args.vqa2_ann_path),\n )\n\n else:\n assert False, f\"Unknown image set {image_set}\"\n" ]
[ [ "torch.as_tensor", "torch.utils.data.ConcatDataset" ] ]
imanolperez/optimal-double-execution
[ "b380087765925043b01fe2f1066e5e2d1d850cf9" ]
[ "src/data/gbm.py" ]
[ "import numpy as np\nfrom .base import Price\n\nclass GBM(Price):\n \"\"\"Brownian motion.\"\"\"\n\n def __init__(self, T=1., sigma1=0.02, sigma2=0.01, s1=1., s2=1.,\n drift1=0., drift2=0., n=100):\n self.sigma1 = sigma1\n self.sigma2 = sigma2\n self.drift1 = drift1\n self.drift2 = drift2\n self.n = n\n self.s1 = s1\n self.s2 = s2\n self.T = T\n\n def generate(self):\n dt1 = self.sigma1 ** 2 * self.T / self.n\n dt2 = self.sigma2 ** 2 * self.T / self.n\n\n bm1 = np.r_[[0.], np.sqrt(dt1) * np.random.randn(self.n - 1).cumsum()]\n bm2 = np.r_[[0.], np.sqrt(dt2) * np.random.randn(self.n - 1).cumsum()]\n\n path = np.c_[np.linspace(0, self.T, self.n), bm1, bm2]\n path[:, 1] = np.exp((self.drift1 - self.sigma1 ** 2 / 2.) * path[:, 0] + self.sigma1 * path[:, 1])\n path[:, 2] = np.exp((self.drift2 - self.sigma2 ** 2 / 2.) * path[:, 0] + self.sigma2 * path[:, 2])\n\n path[:, 1] *= self.s1\n path[:, 2] *= self.s2\n\n\n return path\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.random.randn", "numpy.exp" ] ]
zarppy/MUREIL_2014
[ "25ba16554ce8f614b9337e0fffce75da3fa259a4" ]
[ "generator/txmultigeneratormultisite.py" ]
[ "#\r\n#\r\n# Copyright (C) University of Melbourne 2013\r\n#\r\n#\r\n#\r\n#Permission is hereby granted, free of charge, to any person obtaining a copy\r\n#of this software and associated documentation files (the \"Software\"), to deal\r\n#in the Software without restriction, including without limitation the rights\r\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n#copies of the Software, and to permit persons to whom the Software is\r\n#furnished to do so, subject to the following conditions:\r\n#\r\n#The above copyright notice and this permission notice shall be included in all\r\n#copies or substantial portions of the Software.\r\n#\r\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n#SOFTWARE.\r\n#\r\n#\r\n\r\n\"\"\"Module subclassing TxMultiGeneratorBase that provides an implementation for\r\nmulti-site generators. \r\n\"\"\"\r\n\r\nfrom tools import mureilexception, mureilbuilder\r\nimport copy\r\nimport numpy\r\nfrom generator import txmultigeneratorbase\r\n\r\nimport logging\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass TxMultiGeneratorMultiSite(txmultigeneratorbase.TxMultiGeneratorBase):\r\n \"\"\"Module subclassing TxMultiGeneratorBase that provides an implementation of\r\n state_handle and related handling functions for multi-site generators. \r\n \r\n The 'capacity' term in state_handle is implemented as a dict with one item per site. \r\n Each site item is a list of tuples containing (site_index,build_period,decommissioning_period),\r\n describing the set of installed capacity. \r\n \"\"\"\r\n \r\n def __init__(self):\r\n \"\"\"Initialise as for the base class, and also initialise the params_to_site map.\r\n \"\"\"\r\n \r\n txmultigeneratorbase.TxMultiGeneratorBase.__init__(self)\r\n\r\n # params_to_site maps the index in the params list to the site indices.\r\n self.params_to_site = []\r\n \r\n\r\n def get_config_spec(self):\r\n \"\"\"Return a list of tuples of format (name, conversion function, default),\r\n e.g. ('capex', float, 2.0). Put None if no conversion required, or if no\r\n default value, e.g. 
('name', None, None)\r\n\r\n Configuration:\r\n time_period_yrs: float - the length of the time period in years\r\n time_scale_up_mult: float - the value to multiply non-discounted items,\r\n such as carbon emissions, by to account for a shorter dataset than the\r\n calculation period length.\r\n variable_cost_mult: as for time_scale_up_mult, but may include a factor for\r\n cost discounting.\r\n\r\n size: float, optional - relates param to new capacity\r\n\r\n carbon_price_m: float - carbon price in $M/tonne\r\n \r\n startup_data_name: string, optional - the name of the data array that contains\r\n data on startup capacities.\r\n startup_data_string: string, optional - a python format data array suitable for \r\n input into set_startup_state, all on a single line.\r\n\r\n params_to_site_data_name: string, optional - the name of the data array that\r\n contains a list of how the input params list maps to site indices.\r\n params_to_site_data_string: list of integers, optional - the site indices, \r\n listed separated by spaces, defining the site index corresponding to \r\n each optimisation param, in order.\r\n\r\n vom: float, default 0 - variable operating and maintenance cost, in $/MWh, same for all sites\r\n\r\n capital_cost: float, default 0 - cost in $M per MW for new capacity.\r\n install_cost: float, default 0 - cost in $M per site, when site has an\r\n installation from this generator for the first time.\r\n\r\n decommissioning_cost: float, optional (default 0) - cost in $M per MW for \r\n decommissioning.\r\n lifetime_yrs: float, default 20 - the time in years that new capacity lasts\r\n \"\"\"\r\n return txmultigeneratorbase.TxMultiGeneratorBase.get_config_spec(self) + [\r\n ('variable_cost_mult', float, 1.0),\r\n ('time_scale_up_mult', float, 1.0),\r\n ('carbon_price_m', float, 0.0),\r\n ('startup_data_name', None, ''),\r\n ('startup_data_string', mureilbuilder.python_eval, 'None'),\r\n ('params_to_site_data_name', None, ''),\r\n ('params_to_site_data_string', mureilbuilder.make_int_list, ''),\r\n ('decommissioning_cost', float, 0),\r\n ('vom', float, 0),\r\n ('capital_cost', float, 0),\r\n ('install_cost', float, 0),\r\n ('time_period_yrs', float, None),\r\n ('lifetime_yrs', float, 20),\r\n ('size', float, 1.0),\r\n ('start_min_param', int, 1e20),\r\n ('start_max_param', int, 1e20),\r\n ('timestep_hrs', float, None)\r\n ]\r\n\r\n\r\n def complete_configuration_pre_expand(self):\r\n \"\"\"Complete the configuration prior to expanding the\r\n period configs. 
\r\n \r\n This implementation checks that the lifetime_yrs is a multiple\r\n of time_period_yrs, and sets the startup state and params_to_site from the\r\n configuration strings.\r\n \"\"\"\r\n \r\n time_period_yrs = self.config['time_period_yrs']\r\n lifetime_yrs = self.config['lifetime_yrs']\r\n error = None\r\n if isinstance(lifetime_yrs, dict):\r\n for value in lifetime_yrs.itervalues():\r\n div = value / time_period_yrs\r\n if not (float(int(div)) == div):\r\n error = value\r\n else:\r\n div = lifetime_yrs / time_period_yrs\r\n if not (float(int(div)) == div):\r\n error = lifetime_yrs\r\n \r\n if error is not None:\r\n msg = ('In section ' + self.config['section'] + ', lifetime_yrs = ' +\r\n str(error) + ' which is required to be a multiple of time_period_yrs of ' +\r\n str(time_period_yrs))\r\n raise mureilexception.ConfigException(msg, {})\r\n\r\n # Set the startup state and the params to site from the configuration strings.\r\n if self.config['startup_data_string'] is not None:\r\n self.set_startup_state(self.config['startup_data_string'])\r\n \r\n if len(self.config['params_to_site_data_string']) > 0:\r\n self.params_to_site = self.config['params_to_site_data_string']\r\n \r\n\r\n def get_data_types(self):\r\n \"\"\"Return a list of keys for each type of\r\n data required, for example ts_wind, ts_demand.\r\n \r\n Outputs:\r\n data_type: list of strings - each a key name \r\n describing the data required for this generator.\r\n \"\"\"\r\n \r\n data_types = []\r\n \r\n if len(self.config['startup_data_name']) > 0:\r\n data_types.append(self.config['startup_data_name'])\r\n\r\n if len(self.config['params_to_site_data_name']) > 0:\r\n data_types.append(self.config['params_to_site_data_name'])\r\n \r\n return data_types\r\n \r\n \r\n def set_data(self, data):\r\n \"\"\"Set the data dict with the data series required\r\n for the generator.\r\n\r\n This implementation looks for the data types:\r\n self.config['startup_data_name']: Interpets this into\r\n the startup state, using the set_startup_state function.\r\n\r\n self.config['params_to_site_data_name']: Sets self.params_to_site\r\n to this.\r\n \r\n Inputs:\r\n data: dict - with keys matching those requested by\r\n get_data_types. \r\n \"\"\"\r\n startup_data_name = self.config['startup_data_name']\r\n if (len(startup_data_name) > 0) and (startup_data_name in data):\r\n self.set_startup_state(data[startup_data_name])\r\n\r\n params_to_site_name = self.config['params_to_site_data_name']\r\n if (len(params_to_site_name) > 0) and (params_to_site_name in data):\r\n self.params_to_site = data[params_to_site_name]\r\n\r\n \r\n def set_startup_state(self, startup_data):\r\n \"\"\"Set the startup state from the data provided. Sets \r\n self.startup_state from this.\r\n \r\n Inputs:\r\n startup_data: An array of generators * 4:\r\n [[site_index, capacity, build_date, decommissioning_period],\r\n ...]\r\n \"\"\"\r\n\r\n # Check if the startup data is empty. 
If so, just return.\r\n if len(startup_data) == 0:\r\n return\r\n\r\n # Find out which build periods are covered.\r\n startup_data = numpy.array(startup_data)\r\n if not (len(startup_data.shape) == 2):\r\n raise mureilexception.ConfigException('startup data array for module ' +\r\n self.config['section'] + ' is not rectangular.', {})\r\n \r\n if not (startup_data.shape[1] == 4):\r\n raise mureilexception.ConfigException('startup data array for module ' +\r\n self.config['section'] + ' shape ' + str(startup_data.shape) + \r\n ' but (n, 4) is required.', {})\r\n\r\n self.extra_periods = map(int, \r\n (list(set(startup_data[:,2].tolist() + self.extra_periods))))\r\n self.extra_periods.sort()\r\n\r\n # And insert each existing generator into the starting state.\r\n cap_list = self.startup_state['capacity']\r\n hist_list = self.startup_state['history']\r\n\r\n for i in range(startup_data.shape[0]):\r\n site_index = int(startup_data[i, 0])\r\n new_cap = startup_data[i, 1]\r\n period = int(startup_data[i, 2])\r\n decomm_date = int(startup_data[i, 3])\r\n\r\n new_entry = (new_cap, period, decomm_date)\r\n if decomm_date < self.run_periods[0]:\r\n logger.warning('Model in section ' + self.config['section'] +\r\n ' adds startup capacity decommissioned at end of ' + decomm_date +\r\n ' but the first run period is ' + self.run_periods[0] + \r\n ' so it has been removed from the startup state.')\r\n if site_index not in hist_list:\r\n hist_list[site_index] = []\r\n hist_list[site_index].append(new_entry)\r\n else:\r\n new_entry = (new_cap, period, decomm_date)\r\n\r\n if site_index not in cap_list:\r\n cap_list[site_index] = []\r\n cap_list[site_index].append(new_entry)\r\n\r\n\r\n def get_param_count(self):\r\n \"\"\"Return the number of parameters that this generator,\r\n as configured, requires to be optimised, per time period.\r\n \r\n Outputs:\r\n param_count: non-negative integer - the number of\r\n parameters required per time period.\r\n \"\"\"\r\n\r\n return len(self.params_to_site)\r\n \r\n \r\n def get_param_starts(self):\r\n \"\"\"Return two nested lists - one for min, one max, for starting values for the\r\n params. 
Must be either [[]] or [len(run_periods),param_count].\r\n \r\n Outputs:\r\n min_start_list: list of param integers, or [[]]\r\n max_start_list: list of param integers, or [[]]\r\n \"\"\"\r\n \r\n param_count = self.get_param_count()\r\n period_count = len(self.run_periods)\r\n \r\n if param_count > 0:\r\n if (self.config['start_min_param'] == 1e20):\r\n start_mins = [[]]\r\n else:\r\n start_mins = (numpy.ones((period_count, param_count)) * self.config['start_min_param']).tolist() \r\n\r\n if (self.config['start_max_param'] == 1e20):\r\n start_maxs = [[]]\r\n else:\r\n start_maxs = (numpy.ones((period_count, param_count)) * self.config['start_max_param']).tolist() \r\n else:\r\n start_mins = [[]]\r\n start_maxs = [[]]\r\n \r\n return start_mins, start_maxs\r\n \r\n \r\n def update_state_new_period_list(self, state_handle, period, new_capacity):\r\n \"\"\"Implements update_state_new_period_list as defined in txmultigeneratorbase,\r\n for the state_handle format for this multi-site implementation.\r\n \"\"\"\r\n\r\n state_handle['curr_period'] = period\r\n\r\n cap_list = state_handle['capacity'] \r\n\r\n for site_index, new_cap, decomm_date in new_capacity:\r\n site_index = int(site_index)\r\n \r\n new_entry = (new_cap, period, int(decomm_date))\r\n\r\n if site_index not in cap_list:\r\n cap_list[site_index] = []\r\n\r\n cap_list[site_index].append(new_entry)\r\n\r\n return None\r\n\r\n\r\n def update_state_new_period_params(self, state_handle, period, new_params):\r\n \"\"\"Implements update_state_new_period_params as defined in txmultigeneratorbase,\r\n for the state_handle format for this multi-site implementation.\r\n \r\n Filters any negative new_params values to 0.\r\n \"\"\"\r\n \r\n state_handle['curr_period'] = period\r\n curr_conf = self.period_configs[period]\r\n decomm_date = int(curr_conf['lifetime_yrs'] - curr_conf['time_period_yrs'] + period)\r\n \r\n cap_list = state_handle['capacity'] \r\n\r\n new_cap = numpy.array(new_params).clip(0) * curr_conf['size']\r\n\r\n for i in (numpy.nonzero(new_cap)[0]):\r\n site_index = self.params_to_site[i]\r\n new_entry = (new_cap[i], period, decomm_date)\r\n\r\n if site_index not in cap_list:\r\n cap_list[site_index] = []\r\n\r\n cap_list[site_index].append(new_entry)\r\n\r\n return None\r\n \r\n \r\n def calculate_update_decommission(self, state_handle):\r\n \"\"\"Implements update_decommission as defined in txmultigeneratorbase,\r\n for the state_handle format for this multi-site implementation.\r\n \"\"\"\r\n period = state_handle['curr_period']\r\n cap_list = state_handle['capacity']\r\n hist_list = state_handle['history']\r\n \r\n total_cost = 0.0\r\n sites = []\r\n cost = []\r\n decommissioned = []\r\n fully_decommissioned = []\r\n \r\n decomm_cost = self.period_configs[period]['decommissioning_cost']\r\n\r\n for site, site_caps in cap_list.iteritems():\r\n \r\n decomm = [tup for tup in site_caps if (tup[2] == period)]\r\n\r\n if len(decomm) > 0:\r\n sites.append(site)\r\n decom_cap = sum([tup[0] for tup in decomm])\r\n decommissioned.append(decom_cap)\r\n this_cost = decom_cap * decomm_cost\r\n cost.append(this_cost)\r\n total_cost += this_cost\r\n\r\n # add the decommissioned capacity to the 'history' list\r\n if not site in hist_list:\r\n hist_list[site] = []\r\n hist_list[site] += decomm\r\n \r\n # and rebuild the list of what's left\r\n # note that the expression in here is the complement of that to compute\r\n # decomm above.\r\n new_list = [tup for tup in site_caps if not (tup[2] == period)]\r\n \r\n # if all capacity is gone 
from this site\r\n if len(new_list) == 0:\r\n fully_decommissioned.append(site)\r\n else:\r\n cap_list[site] = new_list\r\n \r\n for site in fully_decommissioned:\r\n del cap_list[site]\r\n \r\n return total_cost, zip(sites, decommissioned, cost)\r\n \r\n \r\n def calculate_new_capacity_cost(self, state_handle):\r\n \"\"\"Implements calculate_new_capacity_cost as defined in TxMultiGeneratorBase,\r\n for the state_handle format for this multi-site implementation. Calculates\r\n the cost as a simple multiple of the new capacity size.\r\n \"\"\"\r\n \r\n period = state_handle['curr_period']\r\n cap_list = state_handle['capacity']\r\n hist_list = state_handle['history']\r\n \r\n total_cost = 0.0\r\n sites = []\r\n cost = []\r\n new_capacity = []\r\n \r\n for site, value in cap_list.iteritems():\r\n try:\r\n hist = hist_list[site]\r\n except KeyError:\r\n hist = []\r\n\r\n this_cost, new_cap = self.calculate_capital_cost_site(\r\n (value, hist), period, site)\r\n\r\n if new_cap > 0:\r\n sites.append(site)\r\n new_capacity.append(new_cap)\r\n cost.append(this_cost)\r\n total_cost += this_cost\r\n \r\n return total_cost, zip(sites, new_capacity, cost)\r\n\r\n \r\n def calculate_capital_cost_site(self, site_data, period, site):\r\n \"\"\"\"Calculate the incremental capital cost incurred in this \r\n period by the new capacity, for this site.\r\n \r\n This is a useful function for generators to override to implement\r\n cost functions that depend on the existing installed capacity. \r\n\r\n This function charges a per-MW cost plus an install figure if all\r\n the current capacity is new, and the site has not been used before\r\n for this type of generator.\r\n \r\n Inputs: \r\n site_data: a pair of lists - (current_capacity, history), each \r\n a list of tuples of (capacity, build, decom) from the\r\n state_handle.\r\n period: the current period, an integer\r\n site: the site index\r\n \r\n Outputs:\r\n cost: the cost in $M of this new capacity\r\n new_capacity: the total new capacity installed at this site\r\n \"\"\"\r\n \r\n new_cap_list = [tup[0] for tup in site_data[0] if (tup[1] == period)] \r\n new_cap = sum(new_cap_list)\r\n\r\n capacity_cost = self.period_configs[period]['capital_cost']\r\n this_cost = new_cap * capacity_cost\r\n\r\n install_cost = self.period_configs[period]['install_cost']\r\n if install_cost > 0:\r\n # check if all the current capacity is new\r\n if len(new_cap_list) == len(site_data[0]):\r\n # and check if the site has been used before, ever\r\n if len(site_data[1]) == 0:\r\n # the site is new, so charge the 'install' as well\r\n this_cost += install_cost\r\n \r\n return this_cost, new_cap \r\n \r\n \r\n def get_capacity(self, state_handle):\r\n \"\"\"Implement the get_capacity function as defined in TxMultiGeneratorBase, for this\r\n multi-site implementation.\r\n \"\"\"\r\n\r\n index_list = self.get_site_indices(state_handle)\r\n cap_list = state_handle['capacity']\r\n \r\n capacity = []\r\n\r\n for site in index_list:\r\n capacity.append(sum([tup[0] for tup in cap_list[site]]))\r\n \r\n return capacity\r\n\r\n \r\n def get_site_indices(self, state_handle):\r\n \"\"\"Implement the get_site_indices function as defined in TxMultiGeneratorBase, for this\r\n multi-site implementation.\r\n \"\"\"\r\n \r\n site_indices = state_handle['capacity'].keys()\r\n site_indices.sort()\r\n \r\n return site_indices\r\n\r\n\r\n def calculate_time_period_simple(self, state_handle, period, new_params, \r\n supply_request, full_results=False):\r\n \"\"\"Implement 
calculate_time_period_simple as defined in TxMultiGeneratorBase for\r\n the multi-site generator model.\r\n \"\"\"\r\n \r\n curr_config = self.period_configs[period]\r\n\r\n # Update the state and get the calculations for each site\r\n self.update_state_new_period_params(state_handle, period, new_params)\r\n site_indices = self.get_site_indices(state_handle)\r\n capital_cost, new_capacity = self.calculate_new_capacity_cost(state_handle)\r\n supply_list, variable_cost_list, carbon_emissions_list, other_list = ( \r\n self.calculate_outputs_and_costs(state_handle, supply_request))\r\n\r\n if full_results:\r\n capacity = self.get_capacity(state_handle)\r\n\r\n # Compute the total supply\r\n supply = numpy.sum(supply_list, axis=0)\r\n \r\n # Compute the total variable costs, including carbon cost, for the timeseries, scaled up\r\n cost = ((numpy.sum(variable_cost_list, axis=0) + \r\n (numpy.sum(carbon_emissions_list, axis=0) * curr_config['carbon_price_m'])) * (\r\n curr_config['variable_cost_mult']))\r\n \r\n # Do the decommissioning\r\n decomm_cost, decommissioned = self.calculate_update_decommission(state_handle)\r\n\r\n # Add the capital and decommissioning costs\r\n cost += decomm_cost\r\n cost += capital_cost\r\n\r\n if not full_results:\r\n return site_indices, cost, supply\r\n\r\n if full_results:\r\n results = {}\r\n results['site_indices'] = site_indices\r\n results['cost'] = cost\r\n results['aggregate_supply'] = supply\r\n results['capacity'] = capacity\r\n results['decommissioned'] = decommissioned\r\n results['new_capacity'] = new_capacity\r\n results['supply'] = supply_list\r\n results['variable_cost_period'] = variable_cost_list * curr_config['variable_cost_mult']\r\n results['carbon_emissions_period'] = (carbon_emissions_list * \r\n curr_config['time_scale_up_mult'])\r\n results['total_supply_period'] = (curr_config['time_scale_up_mult'] * numpy.sum(supply) *\r\n curr_config['timestep_hrs'])\r\n results['other'] = other_list\r\n results['desc_string'] = self.get_simple_desc_string(results, state_handle)\r\n\r\n return site_indices, cost, supply, results\r\n \r\n\r\n def calculate_time_period_full(self, state_handle, period, new_params, supply_request, \r\n max_supply=[], price=[], make_string=False, do_decommissioning=True):\r\n \"\"\"Implement calculate_time_period_full as defined in TxMultiGeneratorBase for\r\n the multi-site generator model.\r\n \"\"\"\r\n \r\n results = {}\r\n self.update_state_new_period_params(state_handle, period, new_params)\r\n results['site_indices'] = self.get_site_indices(state_handle)\r\n results['capacity'] = self.get_capacity(state_handle)\r\n dummy, results['new_capacity'] = self.calculate_new_capacity_cost(state_handle)\r\n results['supply'], results['variable_cost_ts'], results['carbon_emissions_ts'], results['other'] = (\r\n self.calculate_outputs_and_costs(state_handle, supply_request, max_supply, price))\r\n if do_decommissioning:\r\n dummy, results['decommissioned'] = (\r\n self.calculate_update_decommissioning(state_handle))\r\n else:\r\n results['decommissioned'] = []\r\n\r\n if make_string:\r\n results['desc_string'] = self.get_full_desc_string(results, state_handle)\r\n \r\n return results\r\n\r\n\r\n def recalculate_time_period_full(self, state_handle, results, supply_request, max_supply=[], price=[], make_string=False):\r\n \"\"\"Implement recalculate_time_period_full as defined in TxMultiGeneratorBase for\r\n the multi-site generator model.\r\n \"\"\"\r\n\r\n results['supply'], results['variable_cost_ts'], 
results['carbon_emissions_ts'], results['other'] = (\r\n self.calculate_outputs_and_costs(state_handle, supply_request, max_supply, price))\r\n\r\n if make_string:\r\n results['desc_string'] = self.get_full_desc_string(results, state_handle)\r\n return results\r\n else:\r\n return results \r\n\r\n\r\n def calculate_costs_from_schedule_and_finalise(self, state_handle, schedule, make_string=False): \r\n \"\"\"Calculate the costs, given the schedule from the dispatcher.\r\n Finalise the decommissioning for that period.\r\n This assumes that update_state_new_period_params has been called previously,\r\n and the offer quantities have been determined for the active sites.\r\n \r\n Inputs:\r\n state_handle: \r\n as for calculate_time_period_full in txmultigeneratorbase.py\r\n schedule: a set of timeseries for each active site, as previously\r\n listed in the call to get_offers_* \r\n \r\n Outputs:\r\n as for calculate_time_period_full in txmultigeneratorbase.py\r\n \"\"\"\r\n results = {}\r\n site_indices = self.get_site_indices(state_handle)\r\n results['site_indices'] = site_indices\r\n results['capacity'] = self.get_capacity(state_handle)\r\n results['new_capacity_total_cost'], results['new_capacity'] = self.calculate_new_capacity_cost(state_handle)\r\n results['supply'] = schedule\r\n results['variable_cost_ts'], results['carbon_emissions_ts'], results['other'] = (\r\n self.calculate_variable_costs(state_handle, site_indices, schedule))\r\n results['decomm_total_cost'], results['decommissioned'] = (\r\n self.calculate_update_decommission(state_handle))\r\n\r\n if make_string:\r\n results['desc_string'] = self.get_full_desc_string(results, state_handle)\r\n \r\n return results\r\n " ]
[ [ "numpy.array", "numpy.sum", "numpy.nonzero", "numpy.ones" ] ]
CartoDB/cartoframes
[ "7c7392be5d15d0472ff428546c4791ed1a3842b0" ]
[ "cartoframes/data/observatory/catalog/variable.py" ]
[ "import pandas as pd\n\nfrom .entity import CatalogEntity\nfrom .repository.dataset_repo import get_dataset_repo\nfrom .repository.variable_repo import get_variable_repo\nfrom .repository.constants import VARIABLE_FILTER\nfrom .summary import variable_describe, head, tail, counts, quantiles, top_values, histogram\n\n\n_DESCRIPTION_LENGTH_LIMIT = 50\n\n\nclass Variable(CatalogEntity):\n \"\"\"This class represents a :py:class:`Variable <cartoframes.data.observatory.Variable>`\n of datasets in the :py:class:`Catalog <cartoframes.data.observatory.Catalog>`.\n\n Variables contain column names, description, data type, aggregation method, and some other metadata that is\n useful to understand the underlying data inside a :obj:`Dataset`\n\n Examples:\n List the variables of a :py:class:`Dataset <cartoframes.data.observatory.Dataset>`\n in combination with nested filters (categories, countries, etc.)\n\n >>> dataset = Dataset.get('mbi_retail_turn_705247a')\n >>> dataset.variables\n [<Variable.get('RT_CI_95050c10')> #'Retail Turnover: index (country eq.100)', ...]\n\n \"\"\"\n _entity_repo = get_variable_repo()\n\n @property\n def datasets(self):\n \"\"\"Get the list of datasets related to this variable.\n\n Returns:\n :py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>` List of Dataset instances.\n\n Raises:\n CatalogError: if there's a problem when connecting to the catalog or no datasets are found.\n\n \"\"\"\n return get_dataset_repo().get_all({VARIABLE_FILTER: self.id})\n\n @property\n def name(self):\n \"\"\"Name of this variable.\"\"\"\n return self.data['name']\n\n @property\n def description(self):\n \"\"\"Description of this variable.\"\"\"\n return self.data['description']\n\n @property\n def column_name(self):\n \"\"\"Column name of the actual table related to the variable in the :obj:`Dataset`.\"\"\"\n return self.data['column_name']\n\n @property\n def db_type(self):\n \"\"\"Type in the database.\n\n Returns:\n str\n\n Examples: INTEGER, STRING, FLOAT, GEOGRAPHY, JSON, BOOL, etc.\n\n \"\"\"\n return self.data['db_type']\n\n @property\n def dataset(self):\n \"\"\"ID of the :obj:`Dataset` to which this variable belongs.\"\"\"\n return self.data['dataset_id']\n\n @property\n def agg_method(self):\n \"\"\"Text representing a description of the aggregation method used to compute the values in this `Variable`\"\"\"\n return self.data['agg_method']\n\n @property\n def variable_group(self):\n \"\"\"If any, ID of the variable group to which this variable belongs.\"\"\"\n return self.data['variable_group_id']\n\n @property\n def summary(self):\n \"\"\"JSON object with extra metadata that summarizes different properties of this variable.\"\"\"\n return self.data['summary_json']\n\n @property\n def project_name(self):\n project, _, _, _ = self.id.split('.')\n return project\n\n @property\n def schema_name(self):\n _, schema, _, _ = self.id.split('.')\n return schema\n\n @property\n def dataset_name(self):\n _, _, dataset, _ = self.id.split('.')\n return dataset\n\n def describe(self, autoformat=True):\n \"\"\"Shows a summary of the actual stats of the variable (column) of the dataset.\n Some of the stats provided per variable are: avg, max, min, sum, range,\n stdev, q1, q3, median and interquartile_range\n\n Args:\n autoformat (boolean): set automatic format for values. Default is True.\n\n Example:\n\n .. 
code::\n\n # avg average value\n # max max value\n # min min value\n # sum sum of all values\n # range\n # stdev standard deviation\n # q1 first quantile\n # q3 third quantile\n # median median value\n # interquartile_range\n\n \"\"\"\n FLOAT_FORMAT = 'display.float_format'\n\n if autoformat:\n pd.set_option(FLOAT_FORMAT, lambda x: '%.3f' % x)\n\n data = self.data['summary_json']\n return variable_describe(data)\n\n def head(self):\n \"\"\"Returns a sample of the 10 first values of the variable data.\n\n For the cases of datasets with a content fewer than 10 rows\n (i.e. zip codes of small countries), this method won't return anything\n\n \"\"\"\n data = self.data['summary_json']\n return head(self.__class__, data)\n\n def tail(self):\n \"\"\"Returns a sample of the 10 last values of the variable data.\n\n For the cases of datasets with a content fewer than 10 rows\n (i.e. zip codes of small countries), this method won't return anything\n\n \"\"\"\n data = self.data['summary_json']\n return tail(self.__class__, data)\n\n def counts(self):\n \"\"\"Returns a summary of different counts over the actual variable values.\n\n Example:\n\n .. code::\n\n # all total number of values\n # null total number of null values\n # zero number of zero-valued entries\n # extreme number of values 3stdev outside the interquartile range\n # distinct number of distinct (unique) entries\n # outliers number of outliers (outside 1.5stdev the interquartile range\n # zero_percent percent of values that are zero\n # distinct_percent percent of values that are distinct\n\n \"\"\"\n data = self.data['summary_json']\n return counts(data)\n\n def quantiles(self):\n \"\"\"Returns the quantiles of the variable data.\"\"\"\n data = self.data['summary_json']\n return quantiles(data)\n\n def top_values(self):\n \"\"\"Returns information about the top values of the variable data.\"\"\"\n data = self.data['summary_json']\n return top_values(data)\n\n def histogram(self):\n \"\"\"Plots an histogram with the variable data.\"\"\"\n data = self.data['summary_json']\n return histogram(data)\n\n def __repr__(self):\n descr = self.description\n\n if descr and len(descr) > _DESCRIPTION_LENGTH_LIMIT:\n descr = descr[0:_DESCRIPTION_LENGTH_LIMIT] + '...'\n\n return \"<{classname}.get('{entity_id}')> #'{descr}'\" \\\n .format(classname=self.__class__.__name__, entity_id=self._get_print_id(), descr=descr)\n" ]
[ [ "pandas.set_option" ] ]
PepSalehi/algorithms
[ "1c20f57185e6324aa840ccff98e69764b4213131" ]
[ "ML/tf-cifar-10/cifar10_input.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Routine for decoding the CIFAR-10 binary file format.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n# Process images of this size. Note that this differs from the original CIFAR\n# image size of 32 x 32. If one alters this number, then the entire model\n# architecture will change and any model would need to be retrained.\nIMAGE_SIZE = 24\n\n# Global constants describing the CIFAR-10 data set.\nNUM_CLASSES = 10\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000\n\n\ndef read_cifar10(filename_queue):\n \"\"\"Reads and parses examples from CIFAR10 data files.\n\n Recommendation: if you want N-way read parallelism, call this function\n N times. This will give you N independent Readers reading different\n files & positions within those files, which will give better mixing of\n examples.\n\n Args:\n filename_queue: A queue of strings with the filenames to read from.\n\n Returns:\n An object representing a single example, with the following fields:\n height: number of rows in the result (32)\n width: number of columns in the result (32)\n depth: number of color channels in the result (3)\n key: a scalar string Tensor describing the filename & record number\n for this example.\n label: an int32 Tensor with the label in the range 0..9.\n uint8image: a [height, width, depth] uint8 Tensor with the image data\n \"\"\"\n\n class CIFAR10Record(object):\n pass\n result = CIFAR10Record()\n\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n label_bytes = 1 # 2 for CIFAR-100\n result.height = 32\n result.width = 32\n result.depth = 3\n image_bytes = result.height * result.width * result.depth\n # Every record consists of a label followed by the image, with a\n # fixed number of bytes for each.\n record_bytes = label_bytes + image_bytes\n\n # Read a record, getting filenames from the filename_queue. 
No\n # header or footer in the CIFAR-10 format, so we leave header_bytes\n # and footer_bytes at their default of 0.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8 that is record_bytes long.\n record_bytes = tf.decode_raw(value, tf.uint8)\n\n # The first bytes represent the label, which we convert from uint8->int32.\n result.label = tf.cast(\n tf.strided_slice(record_bytes, [0], [label_bytes], [1]), tf.int32)\n\n # The remaining bytes after the label represent the image, which we reshape\n # from [depth * height * width] to [depth, height, width].\n depth_major = tf.reshape(\n tf.strided_slice(record_bytes, [label_bytes],\n [label_bytes + image_bytes], [1]),\n [result.depth, result.height, result.width])\n # Convert from [depth, height, width] to [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n\n return result\n\n\ndef _generate_image_and_label_batch(image, label, min_queue_examples,\n batch_size, shuffle):\n \"\"\"Construct a queued batch of images and labels.\n\n Args:\n image: 3-D Tensor of [height, width, 3] of type.float32.\n label: 1-D Tensor of type.int32\n min_queue_examples: int32, minimum number of samples to retain\n in the queue that provides of batches of examples.\n batch_size: Number of images per batch.\n shuffle: boolean indicating whether to use a shuffling queue.\n\n Returns:\n images: Images. 4D tensor of [batch_size, height, width, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n if shuffle:\n images, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n else:\n images, label_batch = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n\n # Display the training images in the visualizer.\n tf.summary.image('images', images)\n\n return images, tf.reshape(label_batch, [batch_size])\n\n\ndef distorted_inputs(data_dir, batch_size):\n \"\"\"Construct distorted input for CIFAR training using the Reader ops.\n\n Args:\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in xrange(1, 6)]\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for training the network. 
Note the many random\n # distortions applied to the image.\n\n # Randomly crop a [height, width] section of the image.\n distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # Because these operations are not commutative, consider randomizing\n # the order their operation.\n distorted_image = tf.image.random_brightness(distorted_image,\n max_delta=63)\n distorted_image = tf.image.random_contrast(distorted_image,\n lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(distorted_image)\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n min_fraction_of_examples_in_queue)\n print ('Filling queue with %d CIFAR images before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=True)\n\n\ndef inputs(eval_data, data_dir, batch_size):\n \"\"\"Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n if not eval_data:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in xrange(1, 6)]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,\n width, height)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(resized_image)\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(num_examples_per_epoch *\n min_fraction_of_examples_in_queue)\n\n # Generate a batch of images and labels by building up a queue of examples.\n if eval_data:\n read_input.label.set_shape((1,))\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=False)\n" ]
[ [ "tensorflow.FixedLengthRecordReader", "tensorflow.image.random_brightness", "tensorflow.random_crop", "tensorflow.summary.image", "tensorflow.image.random_flip_left_right", "tensorflow.image.random_contrast", "tensorflow.decode_raw", "tensorflow.image.resize_image_with_crop_or_pad", "tensorflow.reshape", "tensorflow.train.string_input_producer", "tensorflow.strided_slice", "tensorflow.cast", "tensorflow.train.batch", "tensorflow.train.shuffle_batch", "tensorflow.gfile.Exists", "tensorflow.transpose", "tensorflow.image.per_image_standardization" ] ]
hypergravity/bopy
[ "90cf5bf695c4ae4f53d9a9bec7cdc9ba16994267" ]
[ "bopy/spec/lamost.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\nAuthor\n------\nBo Zhang\n\nEmail\n-----\nbozhang@nao.cas.cn\n\nCreated on\n----------\n- Fri Jul 3 13:13:06 2015 read_spectrum\n\nModifications\n-------------\n- Fri Nov 20 10:16:59 2015 reformatting code\n- Sun Feb 28 14:39:16 2016 migrated to bopy.spec.lamost\n- Fri Jul 15 16:08:00 2016 migrate read_spectrum to read_spectrum.py\n\n\nAims\n----\n- generate LAMOST spectra file name/path\n\n\"\"\"\n\n# from __future__ import print_function\nimport os\nimport numpy as np\n# from astropy.io import fits\n# from astropy.table import Table, Column\n\n\ndef lamost_filepath(planid, mjd, spid, fiberid, dirpath=\"\", extname=\".fits\"):\n \"\"\" generate file path of a LAMOST spectrum\n\n Parameters\n ----------\n planid: string\n planid\n\n mjd: 5-digit integer\n mjd (use lmjd rather than mjd for DR3 and after!)\n\n spid: 2-digit integer\n spid, the number of the spectrogragh\n\n fiberid: 3-digit integer\n fiberid\n\n dirpath: string\n the root directory for storing spectra.\n\n Returns\n --------\n filepath: string\n the path of root dir of directory (prefix).\n if un-specified, return file name.\n\n \"\"\"\n\n # pre-processing: strip\n if np.isscalar(planid):\n planid = planid.strip()\n else:\n planid = [_.strip() for _ in planid]\n\n if dirpath == \"\" or dirpath is None:\n # return file name\n if np.isscalar(mjd):\n # if only input one item\n return \"spec-%05d-%s_sp%02d-%03d%s\" \\\n % (mjd, planid, spid, fiberid, extname)\n else:\n # if input a list of items\n return np.array([\"spec-%05d-%s_sp%02d-%03d%s\" %\n (mjd[i], planid[i], spid[i], fiberid[i], extname)\n for i in range(len(mjd))])\n else:\n # return file path\n if not dirpath[-1] == os.path.sep:\n dirpath += os.path.sep\n\n if np.isscalar(mjd):\n # if only input one item\n return \"%s%s%sspec-%05d-%s_sp%02d-%03d%s\" \\\n % (dirpath, planid, os.path.sep,\n mjd, planid, spid, fiberid, extname)\n else:\n # if input a list of items\n return np.array([\"%s%s%sspec-%05d-%s_sp%02d-%03d%s\" %\n (dirpath, planid[i], os.path.sep, mjd[i],\n planid[i], spid[i], fiberid[i], extname)\n for i in range(len(mjd))])\n\n\ndef _test_lamost_filepath():\n \"\"\"test function **lamost_filepath**\n \"\"\"\n print(lamost_filepath(\"GAC_061N46_V3\", 55939, 7, 78))\n print(lamost_filepath(\"GAC_061N46_V3\", 55939, 7, 78, \"/\"))\n print(lamost_filepath(\"GAC_061N46_V3\", 55939, 7, 78, \"/pool\"))\n print(lamost_filepath(\"GAC_061N46_V3\", 55939, 7, 78, \"/pool/\"))\n\n\ndef sdss_filepath(plate, mjd, fiberid, dirpath=\"\", extname=\".fits\"):\n \"\"\" generate file path of a LAMOST spectrum\n\n Parameters\n ----------\n plate: string\n plate\n\n mjd: 5-digit integer\n mjd (use lmjd rather than mjd for DR3 and after!)\n\n fiberid: 4-digit integer\n fiberid\n\n dirpath: string\n the root directory for storing spectra.\n\n extname: string\n in case that users want to synthesize other data format\n\n Returns\n --------\n filepath: string\n the path of root dir of directory (prefix).\n if un-specified, return file name.\n\n \"\"\"\n\n if dirpath == \"\" or dirpath is None:\n # return file name\n if np.isscalar(mjd):\n # if only input one item\n return \"spec-%04d-%05d-%04d%s\" % (plate, mjd, fiberid, extname)\n else:\n # if input a list of items\n return np.array([\"spec-%04d-%05d-%04d%s\" %\n (plate[i], mjd[i], fiberid[i], extname)\n for i in range(len(mjd))])\n else:\n # return file path\n if not dirpath[-1] == os.path.sep:\n dirpath += os.path.sep\n\n if np.isscalar(mjd):\n # if only input one item\n return 
\"%s%04d%sspec-%04d-%05d-%04d%s\" \\\n % (dirpath, plate, os.path.sep,\n plate, mjd, fiberid, extname)\n else:\n # if input a list of items\n return np.array([\"%s%04d%sspec-%04d-%05d-%04d%s\" %\n (dirpath, plate[i], os.path.sep, plate[i],\n mjd[i], fiberid[i], extname)\n for i in range(len(mjd))])\n\n\ndef _test_sdss_filepath():\n print(sdss_filepath(2238, 52059, 1, \"/\"))\n\n\nif __name__ == \"__main__\":\n print(\"\")\n print(\"@Cham: start to test the module ...\")\n print(\"\")\n print(\"@Cham: testing \"\"lamost_filepath\"\" ...\")\n _test_lamost_filepath()\n _test_sdss_filepath()\n print(\"@Cham: OK\")\n" ]
[ [ "numpy.isscalar" ] ]
aleaf/flopy
[ "a5777a4d4a745e473110a167c69603ac4ad3106c" ]
[ "flopy/export/netcdf.py" ]
[ "import os\nimport platform\nimport socket\nimport copy\nimport json\nimport numpy as np\nfrom datetime import datetime\nimport time\nfrom .metadata import acdd\nimport flopy\n\n# globals\nFILLVALUE = -99999.9\nITMUNI = {\n 0: \"undefined\",\n 1: \"seconds\",\n 2: \"minutes\",\n 3: \"hours\",\n 4: \"days\",\n 5: \"years\",\n}\nPRECISION_STRS = [\"f4\", \"f8\", \"i4\"]\n\nSTANDARD_VARS = [\"longitude\", \"latitude\", \"layer\", \"elevation\", \"time\"]\n\npath = os.path.split(__file__)[0]\nwith open(path + \"/longnames.json\") as f:\n NC_LONG_NAMES = json.load(f)\n\n\nclass Logger(object):\n \"\"\"\n Basic class for logging events during the linear analysis calculations\n if filename is passed, then an file handle is opened\n\n Parameters\n ----------\n filename : bool or string\n if string, it is the log file to write. If a bool, then log is\n written to the screen. echo (bool): a flag to force screen output\n\n Attributes\n ----------\n items : dict\n tracks when something is started. If a log entry is\n not in items, then it is treated as a new entry with the string\n being the key and the datetime as the value. If a log entry is\n in items, then the end time and delta time are written and\n the item is popped from the keys\n\n \"\"\"\n\n def __init__(self, filename, echo=False):\n self.items = {}\n self.echo = bool(echo)\n if filename == True:\n self.echo = True\n self.filename = None\n elif filename:\n self.f = open(filename, \"w\", 0) # unbuffered\n self.t = datetime.now()\n self.log(\"opening \" + str(filename) + \" for logging\")\n else:\n self.filename = None\n\n def log(self, phrase):\n \"\"\"\n log something that happened\n\n Parameters\n ----------\n phrase : str\n the thing that happened\n\n \"\"\"\n pass\n t = datetime.now()\n if phrase in self.items.keys():\n s = (\n str(t)\n + \" finished: \"\n + str(phrase)\n + \", took: \"\n + str(t - self.items[phrase])\n + \"\\n\"\n )\n if self.echo:\n print(s,)\n if self.filename:\n self.f.write(s)\n self.items.pop(phrase)\n else:\n s = str(t) + \" starting: \" + str(phrase) + \"\\n\"\n if self.echo:\n print(s,)\n if self.filename:\n self.f.write(s)\n self.items[phrase] = copy.deepcopy(t)\n\n def warn(self, message):\n \"\"\"\n Write a warning to the log file\n\n Parameters\n ----------\n message : str\n the warning text\n\n \"\"\"\n s = str(datetime.now()) + \" WARNING: \" + message + \"\\n\"\n if self.echo:\n print(s,)\n if self.filename:\n self.f.write(s)\n return\n\n\nclass NetCdf(object):\n \"\"\"\n Support for writing a netCDF4 compliant file from a flopy model\n\n Parameters\n ----------\n output_filename : str\n Name of the .nc file to write\n model : flopy model instance\n time_values : the entries for the time dimension\n if not None, the constructor will initialize\n the file. If None, the perlen array of ModflowDis\n will be used\n z_positive : str ('up' or 'down')\n Positive direction of vertical coordinates written to NetCDF file.\n (default 'down')\n verbose : if True, stdout is verbose. If str, then a log file\n is written to the verbose file\n forgive : what to do if a duplicate variable name is being created. If\n True, then the newly requested var is skipped. 
If False, then\n an exception is raised.\n **kwargs : keyword arguments\n modelgrid : flopy.discretization.Grid instance\n user supplied model grid which will be used in lieu of the model\n object modelgrid for netcdf production\n\n Notes\n -----\n This class relies heavily on the grid and modeltime objects,\n including these attributes: lenuni, itmuni, start_datetime, and proj4.\n Make sure these attributes have meaningful values.\n\n \"\"\"\n\n def __init__(\n self,\n output_filename,\n model,\n time_values=None,\n z_positive=\"up\",\n verbose=None,\n prj=None,\n logger=None,\n forgive=False,\n **kwargs\n ):\n\n assert output_filename.lower().endswith(\".nc\")\n if verbose is None:\n verbose = model.verbose\n if logger is not None:\n self.logger = logger\n else:\n self.logger = Logger(verbose)\n self.var_attr_dict = {}\n self.log = self.logger.log\n if os.path.exists(output_filename):\n self.logger.warn(\"removing existing nc file: \" + output_filename)\n os.remove(output_filename)\n self.output_filename = output_filename\n\n self.forgive = bool(forgive)\n\n self.model = model\n self.model_grid = model.modelgrid\n if \"modelgrid\" in kwargs:\n self.model_grid = kwargs.pop(\"modelgrid\")\n self.model_time = model.modeltime\n if prj is not None:\n self.model_grid.proj4 = prj\n if self.model_grid.grid_type == \"structured\":\n self.dimension_names = (\"layer\", \"y\", \"x\")\n STANDARD_VARS.extend([\"delc\", \"delr\"])\n # elif self.model_grid.grid_type == 'vertex':\n # self.dimension_names = ('layer', 'ncpl')\n else:\n raise Exception(\n \"Grid type {} not supported.\".format(self.model_grid.grid_type)\n )\n self.shape = self.model_grid.shape\n\n try:\n import dateutil.parser\n except:\n print(\n \"python-dateutil is not installed\\n\"\n + \"try pip install python-dateutil\"\n )\n return\n\n self.start_datetime = self._dt_str(\n dateutil.parser.parse(self.model_time.start_datetime)\n )\n self.logger.warn(\"start datetime:{0}\".format(str(self.start_datetime)))\n\n proj4_str = self.model_grid.proj4\n if proj4_str is None:\n proj4_str = \"epsg:4326\"\n self.log(\n \"Warning: model has no coordinate reference system specified. 
\"\n \"Using default proj4 string: {}\".format(proj4_str)\n )\n self.proj4_str = proj4_str\n self.grid_units = self.model_grid.units\n self.z_positive = z_positive\n if self.grid_units is None:\n self.grid_units = \"undefined\"\n assert self.grid_units in [\"feet\", \"meters\", \"undefined\"], (\n \"unsupported length units: \" + self.grid_units\n )\n\n self.time_units = self.model_time.time_units\n\n # this gives us confidence that every NetCdf instance\n # has the same attributes\n self.log(\"initializing attributes\")\n self._initialize_attributes()\n self.log(\"initializing attributes\")\n\n self.time_values_arg = time_values\n\n self.log(\"initializing file\")\n self.initialize_file(time_values=self.time_values_arg)\n self.log(\"initializing file\")\n\n def __add__(self, other):\n new_net = NetCdf.zeros_like(self)\n if np.isscalar(other) or isinstance(other, np.ndarray):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] + other\n )\n elif isinstance(other, NetCdf):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] + other.nc.variables[vname][:]\n )\n else:\n raise Exception(\n \"NetCdf.__add__(): unrecognized other:{0}\".format(\n str(type(other))\n )\n )\n return new_net\n\n def __sub__(self, other):\n new_net = NetCdf.zeros_like(self)\n if np.isscalar(other) or isinstance(other, np.ndarray):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] - other\n )\n elif isinstance(other, NetCdf):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] - other.nc.variables[vname][:]\n )\n else:\n raise Exception(\n \"NetCdf.__sub__(): unrecognized other:{0}\".format(\n str(type(other))\n )\n )\n return new_net\n\n def __mul__(self, other):\n new_net = NetCdf.zeros_like(self)\n if np.isscalar(other) or isinstance(other, np.ndarray):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] * other\n )\n elif isinstance(other, NetCdf):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] * other.nc.variables[vname][:]\n )\n else:\n raise Exception(\n \"NetCdf.__mul__(): unrecognized other:{0}\".format(\n str(type(other))\n )\n )\n return new_net\n\n def __div__(self, other):\n return self.__truediv__(other)\n\n def __truediv__(self, other):\n new_net = NetCdf.zeros_like(self)\n with np.errstate(invalid=\"ignore\"):\n if np.isscalar(other) or isinstance(other, np.ndarray):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:] / other\n )\n elif isinstance(other, NetCdf):\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = (\n self.nc.variables[vname][:]\n / other.nc.variables[vname][:]\n )\n else:\n raise Exception(\n \"NetCdf.__sub__(): unrecognized other:{0}\".format(\n str(type(other))\n )\n )\n return new_net\n\n def append(self, other, suffix=\"_1\"):\n assert isinstance(other, NetCdf) or isinstance(other, dict)\n if isinstance(other, NetCdf):\n for vname in other.var_attr_dict.keys():\n attrs = other.var_attr_dict[vname].copy()\n var = other.nc.variables[vname]\n new_vname = vname\n\n if vname in self.nc.variables.keys():\n if vname not in STANDARD_VARS:\n new_vname = vname + suffix\n if \"long_name\" in attrs:\n attrs[\"long_name\"] += \" \" + suffix\n else:\n continue\n assert (\n 
new_vname not in self.nc.variables.keys()\n ), \"var already exists:{0} in {1}\".format(\n new_vname, \",\".join(self.nc.variables.keys())\n )\n attrs[\"max\"] = var[:].max()\n attrs[\"min\"] = var[:].min()\n new_var = self.create_variable(\n new_vname, attrs, var.dtype, dimensions=var.dimensions\n )\n new_var[:] = var[:]\n else:\n for vname, array in other.items():\n vname_norm = self.normalize_name(vname)\n assert (\n vname_norm in self.nc.variables.keys()\n ), \"dict var not in \" \"self.vars:{0}-->\".format(\n vname\n ) + \",\".join(\n self.nc.variables.keys()\n )\n\n new_vname = vname_norm + suffix\n assert new_vname not in self.nc.variables.keys()\n attrs = self.var_attr_dict[vname_norm].copy()\n attrs[\"max\"] = np.nanmax(array)\n attrs[\"min\"] = np.nanmin(array)\n attrs[\"name\"] = new_vname\n attrs[\"long_name\"] = attrs[\"long_name\"] + \" \" + suffix\n var = self.nc.variables[vname_norm]\n # assert var.shape == array.shape,\\\n # \"{0} shape ({1}) doesn't make array shape ({2})\".\\\n # format(new_vname,str(var.shape),str(array.shape))\n new_var = self.create_variable(\n new_vname, attrs, var.dtype, dimensions=var.dimensions\n )\n try:\n new_var[:] = array\n except:\n new_var[:, 0] = array\n\n return\n\n def copy(self, output_filename):\n new_net = NetCdf.zeros_like(self, output_filename=output_filename)\n for vname in self.var_attr_dict.keys():\n new_net.nc.variables[vname][:] = self.nc.variables[vname][:]\n return new_net\n\n @classmethod\n def zeros_like(\n cls, other, output_filename=None, verbose=None, logger=None\n ):\n new_net = NetCdf.empty_like(\n other,\n output_filename=output_filename,\n verbose=verbose,\n logger=logger,\n )\n # add the vars to the instance\n for vname in other.var_attr_dict.keys():\n if new_net.nc.variables.get(vname) is not None:\n new_net.logger.warn(\n \"variable {0} already defined, skipping\".format(vname)\n )\n continue\n new_net.log(\"adding variable {0}\".format(vname))\n var = other.nc.variables[vname]\n data = var[:]\n try:\n mask = data.mask\n data = np.array(data)\n except:\n mask = None\n new_data = np.zeros_like(data)\n new_data[mask] = FILLVALUE\n new_var = new_net.create_variable(\n vname,\n other.var_attr_dict[vname],\n var.dtype,\n dimensions=var.dimensions,\n )\n new_var[:] = new_data\n new_net.log(\"adding variable {0}\".format(vname))\n global_attrs = {}\n for attr in other.nc.ncattrs():\n if attr not in new_net.nc.ncattrs():\n global_attrs[attr] = other.nc[attr]\n new_net.add_global_attributes(global_attrs)\n return new_net\n\n @classmethod\n def empty_like(\n cls, other, output_filename=None, verbose=None, logger=None\n ):\n if output_filename is None:\n output_filename = (\n str(time.mktime(datetime.now().timetuple())) + \".nc\"\n )\n\n while os.path.exists(output_filename):\n print(\"{}...already exists\".format(output_filename))\n output_filename = (\n str(time.mktime(datetime.now().timetuple())) + \".nc\"\n )\n print(\n \"creating temporary netcdf file...\"\n + \"{}\".format(output_filename)\n )\n\n new_net = cls(\n output_filename,\n other.model,\n time_values=other.time_values_arg,\n verbose=verbose,\n logger=logger,\n )\n return new_net\n\n def difference(\n self, other, minuend=\"self\", mask_zero_diff=True, onlydiff=True\n ):\n \"\"\"\n make a new NetCDF instance that is the difference with another\n netcdf file\n\n Parameters\n ----------\n other : either an str filename of a netcdf file or\n a netCDF4 instance\n\n minuend : (optional) the order of the difference operation.\n Default is self (e.g. self - other). 
Can be \"self\" or \"other\"\n\n mask_zero_diff : bool flag to mask differences that are zero. If\n True, positions in the difference array that are zero will be set\n to self.fillvalue\n\n only_diff : bool flag to only add non-zero diffs to output file\n\n Returns\n -------\n net NetCDF instance\n\n Notes\n -----\n assumes the current NetCDF instance has been populated. The\n variable names and dimensions between the two files must match\n exactly. The name of the new .nc file is\n <self.output_filename>.diff.nc. The masks from both self and\n other are carried through to the new instance\n\n \"\"\"\n\n assert self.nc is not None, (\n \"can't call difference() if nc \" + \"hasn't been populated\"\n )\n try:\n import netCDF4\n except Exception as e:\n mess = \"error import netCDF4: {0}\".format(str(e))\n self.logger.warn(mess)\n raise Exception(mess)\n\n if isinstance(other, str):\n assert os.path.exists(\n other\n ), \"filename 'other' not found:\" + \"{0}\".format(other)\n other = netCDF4.Dataset(other, \"r\")\n\n assert isinstance(other, netCDF4.Dataset)\n\n # check for similar variables\n self_vars = set(self.nc.variables.keys())\n other_vars = set(other.variables)\n diff = self_vars.symmetric_difference(other_vars)\n if len(diff) > 0:\n self.logger.warn(\n \"variables are not the same between the two \"\n + \"nc files: \"\n + \",\".join(diff)\n )\n return\n\n # check for similar dimensions\n self_dimens = self.nc.dimensions\n other_dimens = other.dimensions\n for d in self_dimens.keys():\n if d not in other_dimens:\n self.logger.warn(\"missing dimension in other:{0}\".format(d))\n return\n if len(self_dimens[d]) != len(other_dimens[d]):\n self.logger.warn(\n \"dimension not consistent: \"\n + \"{0}:{1}\".format(self_dimens[d], other_dimens[d])\n )\n return\n # should be good to go\n time_values = self.nc.variables.get(\"time\")[:]\n new_net = NetCdf(\n self.output_filename.replace(\".nc\", \".diff.nc\"),\n self.model,\n time_values=time_values,\n )\n # add the vars to the instance\n for vname in self_vars:\n if (\n vname not in self.var_attr_dict\n or new_net.nc.variables.get(vname) is not None\n ):\n self.logger.warn(\"skipping variable: {0}\".format(vname))\n continue\n self.log(\"processing variable {0}\".format(vname))\n s_var = self.nc.variables[vname]\n o_var = other.variables[vname]\n s_data = s_var[:]\n o_data = o_var[:]\n o_mask, s_mask = None, None\n\n # keep the masks to apply later\n if isinstance(s_data, np.ma.MaskedArray):\n self.logger.warn(\"masked array for {0}\".format(vname))\n s_mask = s_data.mask\n s_data = np.array(s_data)\n s_data[s_mask] = 0.0\n else:\n np.nan_to_num(s_data)\n\n if isinstance(o_data, np.ma.MaskedArray):\n o_mask = o_data.mask\n o_data = np.array(o_data)\n o_data[o_mask] = 0.0\n else:\n np.nan_to_num(o_data)\n\n # difference with self\n if minuend.lower() == \"self\":\n d_data = s_data - o_data\n elif minuend.lower() == \"other\":\n d_data = o_data - s_data\n else:\n mess = \"unrecognized minuend {0}\".format(minuend)\n self.logger.warn(mess)\n raise Exception(mess)\n\n # check for non-zero diffs\n if onlydiff and d_data.sum() == 0.0:\n self.logger.warn(\n \"var {0} has zero differences, skipping...\".format(vname)\n )\n continue\n\n self.logger.warn(\n \"resetting diff attrs max,min:{0},{1}\".format(\n d_data.min(), d_data.max()\n )\n )\n attrs = self.var_attr_dict[vname].copy()\n attrs[\"max\"] = np.nanmax(d_data)\n attrs[\"min\"] = np.nanmin(d_data)\n # reapply masks\n if s_mask is not None:\n self.log(\"applying self mask\")\n s_mask[d_data 
!= 0.0] = False\n d_data[s_mask] = FILLVALUE\n self.log(\"applying self mask\")\n if o_mask is not None:\n self.log(\"applying other mask\")\n o_mask[d_data != 0.0] = False\n d_data[o_mask] = FILLVALUE\n self.log(\"applying other mask\")\n\n d_data[np.isnan(d_data)] = FILLVALUE\n if mask_zero_diff:\n d_data[np.where(d_data == 0.0)] = FILLVALUE\n\n var = new_net.create_variable(\n vname, attrs, s_var.dtype, dimensions=s_var.dimensions\n )\n\n var[:] = d_data\n self.log(\"processing variable {0}\".format(vname))\n\n def _dt_str(self, dt):\n \"\"\" for datetime to string for year < 1900\n \"\"\"\n dt_str = \"{0:04d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02}Z\".format(\n dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second\n )\n return dt_str\n\n def write(self):\n \"\"\"write the nc object to disk\"\"\"\n self.log(\"writing nc file\")\n assert (\n self.nc is not None\n ), \"netcdf.write() error: nc file not initialized\"\n\n # write any new attributes that have been set since\n # initializing the file\n for k, v in self.global_attributes.items():\n try:\n if self.nc.attributes.get(k) is not None:\n self.nc.setncattr(k, v)\n except Exception:\n self.logger.warn(\n \"error setting global attribute {0}\".format(k)\n )\n\n self.nc.sync()\n self.nc.close()\n self.log(\"writing nc file\")\n\n def _initialize_attributes(self):\n \"\"\"private method to initial the attributes\n of the NetCdf instance\n \"\"\"\n assert (\n \"nc\" not in self.__dict__.keys()\n ), \"NetCdf._initialize_attributes() error: nc attribute already set\"\n\n self.nc_epsg_str = \"epsg:4326\"\n self.nc_crs_longname = \"http://www.opengis.net/def/crs/EPSG/0/4326\"\n self.nc_semi_major = float(6378137.0)\n self.nc_inverse_flat = float(298.257223563)\n\n self.global_attributes = {}\n self.global_attributes[\"namefile\"] = self.model.namefile\n self.global_attributes[\"model_ws\"] = self.model.model_ws\n self.global_attributes[\"exe_name\"] = self.model.exe_name\n self.global_attributes[\"modflow_version\"] = self.model.version\n\n self.global_attributes[\"create_hostname\"] = socket.gethostname()\n self.global_attributes[\"create_platform\"] = platform.system()\n self.global_attributes[\"create_directory\"] = os.getcwd()\n\n htol, rtol = -999, -999\n try:\n htol, rtol = self.model.solver_tols()\n except Exception as e:\n self.logger.warn(\n \"unable to get solver tolerances:\" + \"{0}\".format(str(e))\n )\n self.global_attributes[\"solver_head_tolerance\"] = htol\n self.global_attributes[\"solver_flux_tolerance\"] = rtol\n spatial_attribs = {\n \"xll\": self.model_grid.xoffset,\n \"yll\": self.model_grid.yoffset,\n \"rotation\": self.model_grid.angrot,\n \"proj4_str\": self.model_grid.proj4,\n }\n for n, v in spatial_attribs.items():\n self.global_attributes[\"flopy_sr_\" + n] = v\n self.global_attributes[\n \"start_datetime\"\n ] = self.model_time.start_datetime\n\n self.fillvalue = FILLVALUE\n\n # initialize attributes\n self.grid_crs = None\n self.zs = None\n self.ys = None\n self.xs = None\n self.nc = None\n\n def initialize_geometry(self):\n \"\"\" initialize the geometric information\n needed for the netcdf file\n \"\"\"\n try:\n import pyproj\n except ImportError as e:\n raise ImportError(\n \"NetCdf error importing pyproj module:\\n\" + str(e)\n )\n from distutils.version import LooseVersion\n\n # Check if using newer pyproj version conventions\n pyproj220 = LooseVersion(pyproj.__version__) >= LooseVersion(\"2.2.0\")\n\n proj4_str = self.proj4_str\n print(\"initialize_geometry::proj4_str = {}\".format(proj4_str))\n\n 
self.log(\"building grid crs using proj4 string: {}\".format(proj4_str))\n if pyproj220:\n self.grid_crs = pyproj.CRS(proj4_str)\n else:\n self.grid_crs = pyproj.Proj(proj4_str, preserve_units=True)\n\n print(\"initialize_geometry::self.grid_crs = {}\".format(self.grid_crs))\n\n vmin, vmax = self.model_grid.botm.min(), self.model_grid.top.max()\n if self.z_positive == \"down\":\n vmin, vmax = vmax, vmin\n else:\n self.zs = self.model_grid.xyzcellcenters[2].copy()\n\n ys = self.model_grid.xyzcellcenters[1].copy()\n xs = self.model_grid.xyzcellcenters[0].copy()\n\n # Transform to a known CRS\n if pyproj220:\n nc_crs = pyproj.CRS(self.nc_epsg_str)\n self.transformer = pyproj.Transformer.from_crs(\n self.grid_crs, nc_crs, always_xy=True\n )\n else:\n nc_crs = pyproj.Proj(self.nc_epsg_str)\n self.transformer = None\n\n print(\"initialize_geometry::nc_crs = {}\".format(nc_crs))\n\n if pyproj220:\n print(\n \"transforming coordinates using = {}\".format(self.transformer)\n )\n\n self.log(\"projecting grid cell center arrays\")\n if pyproj220:\n self.xs, self.ys = self.transformer.transform(xs, ys)\n else:\n self.xs, self.ys = pyproj.transform(self.grid_crs, nc_crs, xs, ys)\n\n # get transformed bounds and record to check against ScienceBase later\n xmin, xmax, ymin, ymax = self.model_grid.extent\n bbox = np.array(\n [[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]]\n )\n if pyproj220:\n x, y = self.transformer.transform(*bbox.transpose())\n else:\n x, y = pyproj.transform(self.grid_crs, nc_crs, *bbox.transpose())\n self.bounds = x.min(), y.min(), x.max(), y.max()\n self.vbounds = vmin, vmax\n\n def initialize_file(self, time_values=None):\n \"\"\"\n initialize the netcdf instance, including global attributes,\n dimensions, and grid information\n\n Parameters\n ----------\n\n time_values : list of times to use as time dimension\n entries. 
If none, then use the times in\n self.model.dis.perlen and self.start_datetime\n\n \"\"\"\n if self.nc is not None:\n raise Exception(\"nc file already initialized\")\n\n if self.grid_crs is None:\n self.log(\"initializing geometry\")\n self.initialize_geometry()\n self.log(\"initializing geometry\")\n try:\n import netCDF4\n except Exception as e:\n self.logger.warn(\"error importing netCDF module\")\n msg = \"NetCdf error importing netCDF4 module:\\n\" + str(e)\n raise Exception(msg)\n\n # open the file for writing\n try:\n self.nc = netCDF4.Dataset(self.output_filename, \"w\")\n except Exception as e:\n msg = \"error creating netcdf dataset:\\n{}\".format(str(e))\n raise Exception(msg)\n\n # write some attributes\n self.log(\"setting standard attributes\")\n\n self.nc.setncattr(\n \"Conventions\",\n \"CF-1.6, ACDD-1.3, flopy {}\".format(flopy.__version__),\n )\n self.nc.setncattr(\n \"date_created\", datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:00Z\")\n )\n self.nc.setncattr(\n \"geospatial_vertical_positive\", \"{}\".format(self.z_positive)\n )\n min_vertical = np.min(self.zs)\n max_vertical = np.max(self.zs)\n self.nc.setncattr(\"geospatial_vertical_min\", min_vertical)\n self.nc.setncattr(\"geospatial_vertical_max\", max_vertical)\n self.nc.setncattr(\"geospatial_vertical_resolution\", \"variable\")\n self.nc.setncattr(\"featureType\", \"Grid\")\n for k, v in self.global_attributes.items():\n try:\n self.nc.setncattr(k, v)\n except:\n self.logger.warn(\n \"error setting global attribute {0}\".format(k)\n )\n self.global_attributes = {}\n self.log(\"setting standard attributes\")\n\n # spatial dimensions\n self.log(\"creating dimensions\")\n # time\n if time_values is None:\n time_values = np.cumsum(self.model_time.perlen)\n self.nc.createDimension(\"time\", len(time_values))\n for name, length in zip(self.dimension_names, self.shape):\n self.nc.createDimension(name, length)\n self.log(\"creating dimensions\")\n\n self.log(\"setting CRS info\")\n # Metadata variables\n crs = self.nc.createVariable(\"crs\", \"i4\")\n crs.long_name = self.nc_crs_longname\n crs.epsg_code = self.nc_epsg_str\n crs.semi_major_axis = self.nc_semi_major\n crs.inverse_flattening = self.nc_inverse_flat\n self.log(\"setting CRS info\")\n\n attribs = {\n \"units\": \"{} since {}\".format(\n self.time_units, self.start_datetime\n ),\n \"standard_name\": \"time\",\n \"long_name\": NC_LONG_NAMES.get(\"time\", \"time\"),\n \"calendar\": \"gregorian\",\n \"_CoordinateAxisType\": \"Time\",\n }\n time = self.create_variable(\n \"time\", attribs, precision_str=\"f8\", dimensions=(\"time\",)\n )\n self.logger.warn(\"time_values:{0}\".format(str(time_values)))\n time[:] = np.asarray(time_values)\n\n # Elevation\n attribs = {\n \"units\": self.model_grid.units,\n \"standard_name\": \"elevation\",\n \"long_name\": NC_LONG_NAMES.get(\"elevation\", \"elevation\"),\n \"axis\": \"Z\",\n \"valid_min\": min_vertical,\n \"valid_max\": max_vertical,\n \"positive\": self.z_positive,\n }\n elev = self.create_variable(\n \"elevation\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names,\n )\n elev[:] = self.zs\n\n # Longitude\n attribs = {\n \"units\": \"degrees_east\",\n \"standard_name\": \"longitude\",\n \"long_name\": NC_LONG_NAMES.get(\"longitude\", \"longitude\"),\n \"axis\": \"X\",\n \"_CoordinateAxisType\": \"Lon\",\n }\n lon = self.create_variable(\n \"longitude\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names[1:],\n )\n lon[:] = self.xs\n self.log(\"creating longitude var\")\n\n # 
Latitude\n self.log(\"creating latitude var\")\n attribs = {\n \"units\": \"degrees_north\",\n \"standard_name\": \"latitude\",\n \"long_name\": NC_LONG_NAMES.get(\"latitude\", \"latitude\"),\n \"axis\": \"Y\",\n \"_CoordinateAxisType\": \"Lat\",\n }\n lat = self.create_variable(\n \"latitude\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names[1:],\n )\n lat[:] = self.ys\n\n # x\n self.log(\"creating x var\")\n attribs = {\n \"units\": self.model_grid.units,\n \"standard_name\": \"projection_x_coordinate\",\n \"long_name\": NC_LONG_NAMES.get(\"x\", \"x coordinate of projection\"),\n \"axis\": \"X\",\n }\n x = self.create_variable(\n \"x_proj\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names[1:],\n )\n x[:] = self.model_grid.xyzcellcenters[0]\n\n # y\n self.log(\"creating y var\")\n attribs = {\n \"units\": self.model_grid.units,\n \"standard_name\": \"projection_y_coordinate\",\n \"long_name\": NC_LONG_NAMES.get(\"y\", \"y coordinate of projection\"),\n \"axis\": \"Y\",\n }\n y = self.create_variable(\n \"y_proj\",\n attribs,\n precision_str=\"f8\",\n dimensions=self.dimension_names[1:],\n )\n y[:] = self.model_grid.xyzcellcenters[1]\n\n # grid mapping variable\n crs = flopy.utils.reference.crs(\n prj=self.model_grid.prj, epsg=self.model_grid.epsg\n )\n attribs = crs.grid_mapping_attribs\n if attribs is not None:\n self.log(\"creating grid mapping variable\")\n self.create_variable(\n attribs[\"grid_mapping_name\"], attribs, precision_str=\"f8\"\n )\n\n # layer\n self.log(\"creating layer var\")\n attribs = {\n \"units\": \"\",\n \"standard_name\": \"layer\",\n \"long_name\": NC_LONG_NAMES.get(\"layer\", \"layer\"),\n \"positive\": \"down\",\n \"axis\": \"Z\",\n }\n lay = self.create_variable(\"layer\", attribs, dimensions=(\"layer\",))\n lay[:] = np.arange(0, self.shape[0])\n self.log(\"creating layer var\")\n\n if self.model_grid.grid_type == \"structured\":\n # delc\n attribs = {\n \"units\": self.model_grid.units.strip(\"s\"),\n \"long_name\": NC_LONG_NAMES.get(\n \"delc\", \"Model grid cell spacing along a column\"\n ),\n }\n delc = self.create_variable(\"delc\", attribs, dimensions=(\"y\",))\n delc[:] = self.model_grid.delc[::-1]\n if self.model_grid.angrot != 0:\n delc.comments = (\n \"This is the row spacing that applied to the UNROTATED grid. \"\n + \"This grid HAS been rotated before being saved to NetCDF. \"\n + \"To compute the unrotated grid, use the origin point and this array.\"\n )\n\n # delr\n attribs = {\n \"units\": self.model_grid.units.strip(\"s\"),\n \"long_name\": NC_LONG_NAMES.get(\n \"delr\", \"Model grid cell spacing along a row\"\n ),\n }\n delr = self.create_variable(\"delr\", attribs, dimensions=(\"x\",))\n delr[:] = self.model_grid.delr[::-1]\n if self.model_grid.angrot != 0:\n delr.comments = (\n \"This is the col spacing that applied to the UNROTATED grid. \"\n + \"This grid HAS been rotated before being saved to NetCDF. 
\"\n + \"To compute the unrotated grid, use the origin point and this array.\"\n )\n # else:\n # vertices\n # attribs = {\"units\": self.model_grid.lenuni.strip('s'),\n # \"long_name\": NC_LONG_NAMES.get(\"vertices\",\n # \"List of vertices used in the model by cell\"),\n # }\n # vertices = self.create_variable('vertices', attribs, dimensions=('ncpl',))\n # vertices[:] = self.model_grid.vertices\n\n # Workaround for CF/CDM.\n # http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/\n # reference/StandardCoordinateTransforms.html\n # \"explicit_field\"\n exp = self.nc.createVariable(\"VerticalTransform\", \"S1\")\n exp.transform_name = \"explicit_field\"\n exp.existingDataField = \"elevation\"\n exp._CoordinateTransformType = \"vertical\"\n exp._CoordinateAxes = \"layer\"\n return\n\n def initialize_group(\n self,\n group=\"timeseries\",\n dimensions=(\"time\",),\n attributes=None,\n dimension_data=None,\n ):\n \"\"\"\n Method to initialize a new group within a netcdf file. This group\n can have independent dimensions from the global dimensions\n\n Parameters:\n ----------\n name : str\n name of the netcdf group\n dimensions : tuple\n data dimension names for group\n dimension_shape : tuple\n tuple of data dimension lengths\n attributes : dict\n nested dictionary of {dimension : {attributes}} for each netcdf\n group dimension\n dimension_data : dict\n dictionary of {dimension : [data]} for each netcdf group dimension\n\n \"\"\"\n if attributes is None:\n attributes = {}\n\n if dimension_data is None:\n dimension_data = {}\n\n if self.nc is None:\n self.initialize_file()\n\n if group in self.nc.groups:\n raise AttributeError(\"{} group already initialized\".format(group))\n\n self.log(\"creating netcdf group {}\".format(group))\n self.nc.createGroup(group)\n self.log(\"{} group created\".format(group))\n\n self.log(\"creating {} group dimensions\".format(group))\n for dim in dimensions:\n if dim == \"time\":\n if \"time\" not in dimension_data:\n time_values = np.cumsum(self.model_time.perlen)\n else:\n time_values = dimension_data[\"time\"]\n\n self.nc.groups[group].createDimension(dim, len(time_values))\n\n else:\n if dim not in dimension_data:\n raise AssertionError(\n \"{} information must be supplied \"\n \"to dimension data\".format(dim)\n )\n else:\n\n self.nc.groups[group].createDimension(\n dim, len(dimension_data[dim])\n )\n\n self.log(\"created {} group dimensions\".format(group))\n\n dim_names = tuple([i for i in dimensions if i != \"time\"])\n for dim in dimensions:\n if dim.lower() == \"time\":\n if \"time\" not in attributes:\n unit_value = \"{} since {}\".format(\n self.time_units, self.start_datetime\n )\n attribs = {\n \"units\": unit_value,\n \"standard_name\": \"time\",\n \"long_name\": NC_LONG_NAMES.get(\"time\", \"time\"),\n \"calendar\": \"gregorian\",\n \"Axis\": \"Y\",\n \"_CoordinateAxisType\": \"Time\",\n }\n else:\n attribs = attributes[\"time\"]\n\n time = self.create_group_variable(\n group,\n \"time\",\n attribs,\n precision_str=\"f8\",\n dimensions=(\"time\",),\n )\n\n time[:] = np.asarray(time_values)\n\n elif dim.lower() == \"zone\":\n if \"zone\" not in attributes:\n attribs = {\n \"units\": \"N/A\",\n \"standard_name\": \"zone\",\n \"long_name\": \"zonebudget zone\",\n \"Axis\": \"X\",\n \"_CoordinateAxisType\": \"Zone\",\n }\n\n else:\n attribs = attributes[\"zone\"]\n\n zone = self.create_group_variable(\n group,\n \"zone\",\n attribs,\n precision_str=\"i4\",\n dimensions=(\"zone\",),\n )\n zone[:] = np.asarray(dimension_data[\"zone\"])\n\n 
else:\n attribs = attributes[dim]\n var = self.create_group_variable(\n group,\n dim,\n attribs,\n precision_str=\"f8\",\n dimensions=dim_names,\n )\n var[:] = np.asarray(dimension_data[dim])\n\n @staticmethod\n def normalize_name(name):\n return name.replace(\".\", \"_\").replace(\" \", \"_\").replace(\"-\", \"_\")\n\n def create_group_variable(\n self, group, name, attributes, precision_str, dimensions=(\"time\",)\n ):\n \"\"\"\n Create a new group variable in the netcdf object\n\n Parameters\n ----------\n name : str\n the name of the variable\n attributes : dict\n attributes to add to the new variable\n precision_str : str\n netcdf-compliant string. e.g. f4\n dimensions : tuple\n which dimensions the variable applies to\n default : (\"time\",\"layer\",\"x\",\"y\")\n group : str\n which netcdf group the variable goes in\n default : None which creates the variable in root\n\n Returns\n -------\n nc variable\n\n Raises\n ------\n AssertionError if precision_str not right\n AssertionError if variable name already in netcdf object\n AssertionError if one of more dimensions do not exist\n\n \"\"\"\n name = self.normalize_name(name)\n\n if (\n name in STANDARD_VARS\n and name in self.nc.groups[group].variables.keys()\n ):\n return\n\n if name in self.nc.groups[group].variables.keys():\n if self.forgive:\n self.logger.warn(\n \"skipping duplicate {} group variable: {}\".format(\n group, name\n )\n )\n return\n else:\n raise Exception(\n \"duplicate {} group variable name: {}\".format(group, name)\n )\n\n self.log(\"creating group {} variable: {}\".format(group, name))\n\n if precision_str not in PRECISION_STRS:\n raise AssertionError(\n \"netcdf.create_variable() error: precision \"\n \"string {} not in {}\".format(precision_str, PRECISION_STRS)\n )\n\n if group not in self.nc.groups:\n raise AssertionError(\n \"netcdf group `{}` must be created before \"\n \"variables can be added to it\".format(group)\n )\n\n self.var_attr_dict[\"{}/{}\".format(group, name)] = attributes\n\n var = self.nc.groups[group].createVariable(\n name,\n precision_str,\n dimensions,\n fill_value=self.fillvalue,\n zlib=True,\n )\n\n for k, v in attributes.items():\n try:\n var.setncattr(k, v)\n except:\n self.logger.warn(\n \"error setting attribute\"\n + \"{} for group {} variable {}\".format(k, group, name)\n )\n self.log(\"creating group {} variable: {}\".format(group, name))\n\n return var\n\n def create_variable(\n self,\n name,\n attributes,\n precision_str=\"f4\",\n dimensions=(\"time\", \"layer\"),\n group=None,\n ):\n \"\"\"\n Create a new variable in the netcdf object\n\n Parameters\n ----------\n name : str\n the name of the variable\n attributes : dict\n attributes to add to the new variable\n precision_str : str\n netcdf-compliant string. e.g. 
f4\n dimensions : tuple\n which dimensions the variable applies to\n default : (\"time\",\"layer\",\"x\",\"y\")\n group : str\n which netcdf group the variable goes in\n default : None which creates the variable in root\n\n Returns\n -------\n nc variable\n\n Raises\n ------\n AssertionError if precision_str not right\n AssertionError if variable name already in netcdf object\n AssertionError if one of more dimensions do not exist\n\n \"\"\"\n # Normalize variable name\n name = self.normalize_name(name)\n # if this is a core var like a dimension...\n # long_name = attributes.pop(\"long_name\",name)\n if name in STANDARD_VARS and name in self.nc.variables.keys():\n return\n if (\n name not in self.var_attr_dict.keys()\n and name in self.nc.variables.keys()\n ):\n if self.forgive:\n self.logger.warn(\n \"skipping duplicate variable: {0}\".format(name)\n )\n return\n else:\n raise Exception(\"duplicate variable name: {0}\".format(name))\n if name in self.nc.variables.keys():\n raise Exception(\"duplicate variable name: {0}\".format(name))\n\n self.log(\"creating variable: \" + str(name))\n assert (\n precision_str in PRECISION_STRS\n ), \"netcdf.create_variable() error: precision string {0} not in {1}\".format(\n precision_str, PRECISION_STRS\n )\n\n if self.nc is None:\n self.initialize_file()\n\n # check that the requested dimension exists and\n # build up the chuck sizes\n # chunks = []\n # for dimension in dimensions:\n # assert self.nc.dimensions.get(dimension) is not None, \\\n # \"netcdf.create_variable() dimension not found:\" + dimension\n # chunk = self.chunks[dimension]\n # assert chunk is not None, \\\n # \"netcdf.create_variable() chunk size of {0} is None in self.chunks\". \\\n # format(dimension)\n # chunks.append(chunk)\n\n self.var_attr_dict[name] = attributes\n\n var = self.nc.createVariable(\n name,\n precision_str,\n dimensions,\n fill_value=self.fillvalue,\n zlib=True,\n ) # ,\n # chunksizes=tuple(chunks))\n for k, v in attributes.items():\n try:\n var.setncattr(k, v)\n except:\n self.logger.warn(\n \"error setting attribute\"\n + \"{0} for variable {1}\".format(k, name)\n )\n self.log(\"creating variable: \" + str(name))\n return var\n\n def add_global_attributes(self, attr_dict):\n \"\"\" add global attribute to an initialized file\n\n Parameters\n ----------\n attr_dict : dict(attribute name, attribute value)\n\n Returns\n -------\n None\n\n Raises\n ------\n Exception of self.nc is None (initialize_file()\n has not been called)\n\n \"\"\"\n if self.nc is None:\n # self.initialize_file()\n mess = (\n \"NetCDF.add_global_attributes() should only \"\n + \"be called after the file has been initialized\"\n )\n self.logger.warn(mess)\n raise Exception(mess)\n\n self.log(\"setting global attributes\")\n self.nc.setncatts(attr_dict)\n self.log(\"setting global attributes\")\n\n def add_sciencebase_metadata(self, id, check=True):\n \"\"\"Add metadata from ScienceBase using the\n flopy.export.metadata.acdd class.\n\n Returns\n -------\n metadata : flopy.export.metadata.acdd object\n \"\"\"\n md = acdd(id, model=self.model)\n if md.sb is not None:\n if check:\n self._check_vs_sciencebase(md)\n # get set of public attributes\n attr = {n for n in dir(md) if \"_\" not in n[0]}\n # skip some convenience attributes\n skip = {\n \"bounds\",\n \"creator\",\n \"sb\",\n \"xmlroot\",\n \"time_coverage\",\n \"get_sciencebase_xml_metadata\",\n \"get_sciencebase_metadata\",\n }\n towrite = sorted(list(attr.difference(skip)))\n for k in towrite:\n v = md.__getattribute__(k)\n if v is not 
None:\n # convert everything to strings\n if not isinstance(v, str):\n if isinstance(v, list):\n v = \",\".join(v)\n else:\n v = str(v)\n self.global_attributes[k] = v\n self.nc.setncattr(k, v)\n self.write()\n return md\n\n def _check_vs_sciencebase(self, md):\n \"\"\"Check that model bounds read from flopy are consistent with those in ScienceBase.\"\"\"\n xmin, ymin, xmax, ymax = self.bounds\n tol = 1e-5\n assert md.geospatial_lon_min - xmin < tol\n assert md.geospatial_lon_max - xmax < tol\n assert md.geospatial_lat_min - ymin < tol\n assert md.geospatial_lat_max - ymax < tol\n assert md.geospatial_vertical_min - self.vbounds[0] < tol\n assert md.geospatial_vertical_max - self.vbounds[1] < tol\n\n def get_longnames_from_docstrings(self, outfile=\"longnames.json\"):\n \"\"\"\n This is experimental.\n\n Scrape Flopy module docstrings and return docstrings for parameters\n included in the list of variables added to NetCdf object. Create\n a dictionary of longnames keyed by the NetCdf variable names; make each\n longname from the first sentence of the docstring for that parameter.\n\n One major limitation is that variables from mflists often aren't described\n in the docstrings.\n \"\"\"\n\n def startstop(ds):\n \"\"\"Get just the Parameters section of the docstring.\"\"\"\n start, stop = 0, -1\n for i, l in enumerate(ds):\n if \"Parameters\" in l and \"----\" in ds[i + 1]:\n start = i + 2\n if l.strip() in [\"Attributes\", \"Methods\", \"Returns\", \"Notes\"]:\n stop = i - 1\n break\n if i >= start and \"----\" in l:\n stop = i - 2\n break\n return start, stop\n\n def get_entries(ds):\n \"\"\"Parse docstring entries into dictionary.\"\"\"\n stuff = {}\n k = None\n for line in ds:\n if (\n len(line) >= 5\n and line[:4] == \" \" * 4\n and line[4] != \" \"\n and \":\" in line\n ):\n k = line.split(\":\")[0].strip()\n stuff[k] = \"\"\n # lines with parameter descriptions\n elif k is not None and len(line) > 10: # avoid orphans\n stuff[k] += line.strip() + \" \"\n return stuff\n\n # get a list of the flopy classes\n # packages = inspect.getmembers(flopy.modflow, inspect.isclass)\n packages = [(pp.name[0], pp) for pp in self.model.packagelist]\n # get a list of the NetCDF variables\n attr = [v.split(\"_\")[-1] for v in self.nc.variables]\n\n # parse docstrings to get long names\n longnames = {}\n for pkg in packages:\n # parse the docstring\n obj = pkg[-1]\n ds = obj.__doc__.split(\"\\n\")\n start, stop = startstop(ds)\n txt = ds[start:stop]\n if stop - start > 0:\n params = get_entries(txt)\n for k, v in params.items():\n if k in attr:\n longnames[k] = v.split(\". \")[0]\n\n # add in any variables that weren't found\n for var in attr:\n if var not in longnames.keys():\n longnames[var] = \"\"\n with open(outfile, \"w\") as output:\n json.dump(longnames, output, sort_keys=True, indent=2)\n return longnames\n" ]
[ [ "numpy.zeros_like", "numpy.cumsum", "numpy.nanmax", "numpy.nan_to_num", "numpy.asarray", "numpy.errstate", "numpy.arange", "numpy.nanmin", "numpy.max", "numpy.min", "numpy.isnan", "numpy.array", "numpy.where", "numpy.isscalar" ] ]
Karol-G/nnUNet
[ "a30bdbd64254c94c515ee03617173eb217eea505" ]
[ "i3Deep/merge_labels.py" ]
[ "import numpy as np\r\nfrom i3Deep import utils\r\nfrom tqdm import tqdm\r\nimport os\r\n\r\n\r\n# name = \"KGU-53317EB91645\"\r\n# load_mask = \"D:/Datasets/medical_data/ExportKGU/3D Slicer 2/\" + name + \"/mask.nii.gz\"\r\n# load_label_table = \"D:/Datasets/medical_data/ExportKGU/3D Slicer 2/\" + name + \"/label_table.txt\"\r\n# save_mask = \"D:/Datasets/medical_data/ExportKGU/3D Slicer 2/\" + name + \"/mask2.nii.gz\"\r\nload_path = \"D:/Datasets/medical_data/ExportKGU/3D Slicer 2/\"\r\n\r\n\r\ndef rename(case_path):\r\n filenames = utils.load_filenames(case_path + \"/\", extensions=None)\r\n for filename in filenames:\r\n name = os.path.basename(filename)\r\n if \"label\" in name and \".nii.gz\" in name:\r\n os.rename(filename, case_path + \"/mask.nii.gz\")\r\n elif \".txt\" in name:\r\n os.rename(filename, case_path + \"/label_table.txt\")\r\n elif \".nii.gz\" in name:\r\n os.rename(filename, case_path + \"/image.nii.gz\")\r\n\r\n\r\ndef get_labels(load_label_table):\r\n with open(load_label_table) as f:\r\n label_table = f.readlines()\r\n label_table = np.asarray(label_table)\r\n\r\n ggo = []\r\n cons = []\r\n pe = []\r\n for line in label_table:\r\n label = line.split()[0]\r\n if label.isnumeric():\r\n if \"Background\" in line or \"background\" in line:\r\n continue\r\n infection = line.split(\"_\")[1]\r\n keywords = [\"ggo\", \"gg\"]\r\n if any(x in infection.lower() for x in keywords):\r\n ggo.append(int(label))\r\n keywords = [\"cons\", \"cns\", \"con\", \"cos\", \"co\"]\r\n if any(x in infection.lower() for x in keywords):\r\n cons.append(int(label))\r\n keywords = [\"pe\", \"pes\"]\r\n if any(x in infection.lower() for x in keywords):\r\n pe.append(int(label))\r\n return ggo, cons, pe\r\n\r\n\r\ndef merge_labels(load_mask, save_mask, load_label_table):\r\n mask, affine, spacing, header = utils.load_nifty(load_mask)\r\n mask = mask.astype(int)\r\n ggo, cons, pe = get_labels(load_label_table)\r\n\r\n for label in tqdm(np.concatenate((ggo, cons, pe), axis=0), disable=True):\r\n mask[mask == label] = -label\r\n\r\n for label in tqdm(ggo, disable=True):\r\n mask[mask == -label] = 1\r\n\r\n for label in tqdm(cons, disable=True):\r\n mask[mask == -label] = 2\r\n\r\n for label in tqdm(pe, disable=True):\r\n mask[mask == -label] = 3\r\n\r\n mask = np.rint(mask)\r\n mask = mask.astype(int)\r\n\r\n utils.save_nifty(save_mask, mask, affine, spacing, header)\r\n\r\ndef round_mask(filename):\r\n mask, affine, spacing, header = utils.load_nifty(filename)\r\n mask = np.rint(mask)\r\n mask = mask.astype(int)\r\n utils.save_nifty(filename, mask, affine, spacing, header)\r\n\r\ndef tmp2(filename):\r\n mask, affine, spacing, header = utils.load_nifty(filename)\r\n print(mask[46-1][155-1][116-1])\r\n\r\n\r\nif __name__ == '__main__':\r\n # filenames = utils.load_filenames(load_path, extensions=None)\r\n # for filename in tqdm(filenames):\r\n # if os.path.isfile(filename + \"/mask2.nii.gz\"):\r\n # continue\r\n # rename(filename)\r\n # load_mask = filename + \"/mask.nii.gz\"\r\n # save_mask = filename + \"/mask2.nii.gz\"\r\n # load_label_table = filename + \"/label_table.txt\"\r\n # merge_labels(load_mask, save_mask, load_label_table)\r\n\r\n # for filename in tqdm(filenames):\r\n # old_mask = filename + \"/mask.nii.gz\"\r\n # new_mask = filename + \"/mask2.nii.gz\"\r\n # label_table = filename + \"/label_table.txt\"\r\n # if os.path.exists(new_mask):\r\n # os.remove(old_mask)\r\n # os.rename(new_mask, old_mask)\r\n # os.remove(label_table)\r\n\r\n # filenames = 
utils.load_filenames(\"/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/Task79_frankfurt3/labelsTr/\", extensions=None)\r\n # for filename in tqdm(filenames):\r\n # mask, affine, spacing, header = utils.load_nifty(filename)\r\n # mask = np.rint(mask)\r\n # mask = mask.astype(np.uint8)\r\n # utils.save_nifty(filename, mask, affine, spacing, header)\r\n\r\n # filename = \"/gris/gris-f/homelv/kgotkows/datasets/covid19/UK_Frankfurt3/KGU-E9EC0F06F1D6/mask.nii.gz\"\r\n # mask, affine, spacing, header = utils.load_nifty(filename)\r\n # mask[mask == 5] = 2\r\n # mask[mask == 6] = 2\r\n # utils.save_nifty(filename, mask, affine, spacing, header)\r\n #tmp(\"/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/nnUNet_raw_data/Task077_frankfurt3Guided/imagesTr/0001_0001.nii.gz\")\r\n tmp2(\"/gris/gris-f/homelv/kgotkows/datasets/nnUnet_datasets/nnUNet_raw_data/Task77_frankfurt3Guided/tmp/900.nii.gz\")" ]
[ [ "numpy.rint", "numpy.concatenate", "numpy.asarray" ] ]
pnkraemer/differentiable_likelihoods
[ "a07876dbf8fcd4aa14bf36bd3e98e06ea10d2a94" ]
[ "difflikelihoods/sampling.py" ]
[ "\"\"\"\nsampling.py\n\nWe sample Metropolis-Hastings:\n * Random walk proposals\n * Langevin proposals\n * Langevin proposals with preconditioning\n * Hamiltonian MC\n * Hamiltonian MC with preconditioning\n\nNOTE:\n The functionality of this module is restricted to log-densities,\n i.e. densities of the form p(s) = exp(-E(s)). We work with E(s) only.\n The reason is that in Bayesian inference, evaluations of exp(-E(s))\n are too instable in a numerical sense. \n\"\"\"\n\nimport collections\nfrom abc import ABC, abstractmethod\nimport numpy as np\nfrom difflikelihoods import logdensity\n\n\ndef metropolishastings_rw(logpdf, nsamps, initstate, pwidth, ninits):\n \"\"\"\n Convenience function for Metropolis-Hastings sampling with\n random walk proposal kernel.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf)\n rwmh = RandomWalkMH(logdens)\n return rwmh.sample_nd(nsamps, initstate, pwidth, ninits)\n\n\ndef metropolishastings_lang(logpdf, loggrad, nsamps, initstate, pwidth, ninits):\n \"\"\"\n Convenience function for Metropolis-Hastings sampling with\n Langevin dynamics proposal kernel.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf, loggrad)\n langmh = LangevinMH(logdens)\n return langmh.sample_nd(nsamps, initstate, pwidth, ninits)\n\n\ndef metropolishastings_plang(\n logpdf, loggrad, loghess, nsamps, initstate, pwidth, ninits\n):\n \"\"\"\n Convenience function for Metropolis-Hastings sampling with\n Riemannian (preconditioned) Langevin dynamics proposal kernel.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf, loggrad, loghess)\n plangmh = PrecondLangevinMH(logdens)\n return plangmh.sample_nd(nsamps, initstate, pwidth, ninits)\n\n\ndef metropolishastings_ham(\n logpdf, loggrad, nsamps, initstate, stepsize, nsteps, ninits\n):\n \"\"\"\n Convenience function for Hamiltonian MCMC.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf, loggrad)\n hmc = HamiltonianMC(logdens, nsteps)\n return hmc.sample_nd(nsamps, initstate, stepsize, ninits)\n\n\ndef metropolishastings_pham(\n logpdf, loggrad, loghess, nsamps, initstate, stepsize, nsteps, ninits\n):\n \"\"\"\n Convenience function for preconditioned Hamiltonian MCMC.\n \"\"\"\n logdens = logdensity.LogDensity(logpdf, loggrad, loghess)\n phmc = PrecondHamiltonianMC(logdens, nsteps)\n return phmc.sample_nd(nsamps, initstate, stepsize, ninits)\n\n\n# Convenience data structure.\nMCMCState = collections.namedtuple(\"MCMCState\", \"state logdens loggrad loghess\")\n\n\nclass MetropolisHastings(ABC):\n \"\"\"\n Abstract Metropolis-Hastings class. 
Contains everything but the\n proposal kernels.\n \"\"\"\n\n def __init__(self, logdens):\n \"\"\"\n Initialise MH sampler with a log-density function.\n\n Args:\n logdens: LogDensity object, evaluations of a negative log-\n density and derivatives\n \"\"\"\n self.logdens = logdens\n\n def sample_nd(self, nsamps, init_state, pwidth, ninits=None, *optional):\n \"\"\"\n \"\"\"\n assert init_state_is_array(\n init_state\n ), \"Please enter a (d,) dimensional initial state\"\n states, logprobs = np.zeros((nsamps, len(init_state))), np.zeros(nsamps)\n accepted = 0\n if ninits is None:\n ninits = 0\n currstate = self.evaluate_logdens(init_state)\n states[0], logprobs[0] = currstate.state, currstate.logdens\n for idx in range(1, nsamps):\n if idx < ninits:\n proposal, corrfact = self.generate_proposal(currstate, pwidth)\n else:\n proposal, corrfact = self.generate_proposal(currstate, 0.2 * pwidth)\n currstate, is_accept = self.accept_or_reject(\n currstate, proposal, corrfact, idx, ninits\n )\n states[idx], logprobs[idx] = (\n currstate.state.copy(),\n currstate.logdens.copy(),\n )\n if idx >= ninits:\n accepted = accepted + int(is_accept)\n ratio = accepted / nsamps\n return states, logprobs, ratio\n\n def evaluate_logdens(self, loc):\n \"\"\"\n \"\"\"\n logdenseval = self.logdens.eval(loc)\n if self.logdens.has_gradient:\n gradeval = self.logdens.gradeval(loc)\n else:\n gradeval = 0\n if self.logdens.has_hessian:\n hesseval = self.logdens.hesseval(loc)\n else:\n hesseval = 0\n return MCMCState(\n state=loc, logdens=logdenseval, loggrad=gradeval, loghess=hesseval\n )\n\n def accept_or_reject(self, currstate, proposal, corrfact, idx, ninits):\n \"\"\"\n \"\"\"\n logaccprob = self.get_logaccprob(currstate, proposal, corrfact, idx, ninits)\n if logaccprob < 0 or logaccprob < -np.log(np.random.rand()):\n state = proposal\n is_accept = True\n else:\n state = currstate\n is_accept = False\n return state, is_accept\n\n def get_logaccprob(self, currstate, proposal, corrfact, idx, ninits):\n \"\"\"\n Returns NEGATIVE log acceptance probability, i.e.\n corrected proposal - corrected currstate\n \"\"\"\n if idx < ninits:\n corrfact = -corrfact\n return (corrfact) + (proposal.logdens - currstate.logdens)\n\n @abstractmethod\n def generate_proposal(self, *args):\n \"\"\"\n \"\"\"\n pass\n\n\ndef init_state_is_array(init_state):\n \"\"\"\n Checks whether init_state is compliant with an Nd algorithm.\n That is, whether init_state is an (d,) np.ndarray.\n \"\"\"\n assert isinstance(init_state, np.ndarray), \"Please enter init_state of shape (d,)\"\n assert len(init_state.shape) == 1, \"Please enter init_state of shape (d,)\"\n return True\n\n\nclass RandomWalkMH(MetropolisHastings):\n \"\"\"\n \"\"\"\n\n def __init__(self, logdens):\n \"\"\"\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n newloc = self.sample_randomwalk(currstate.state, pwidth)\n proposal = self.evaluate_logdens(newloc)\n corrfact = 0\n return proposal, corrfact\n\n def sample_randomwalk(self, mean, var):\n \"\"\"\n \"\"\"\n return mean + np.sqrt(var) * np.random.randn(len(mean))\n\n\nclass LangevinMH(MetropolisHastings):\n \"\"\"\n \"\"\"\n\n def __init__(self, logdens):\n \"\"\"\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n newloc = self.sample_langevin(currstate, pwidth)\n proposal = self.evaluate_logdens(newloc)\n corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth)\n 
return proposal, corrfact\n\n def sample_langevin(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n noise = np.random.randn(len(currstate.state))\n return (\n currstate.state - pwidth * currstate.loggrad + np.sqrt(2 * pwidth) * noise\n )\n\n def compute_corrfact_langevin(self, currstate, proposal, pwidth):\n \"\"\"\n \"\"\"\n lognomin = self.kernel_langevin(currstate, proposal, pwidth)\n logdenom = self.kernel_langevin(proposal, currstate, pwidth)\n return lognomin - logdenom\n\n def kernel_langevin(self, state1, state2, pwidth):\n \"\"\"\n \"\"\"\n state2_dyn = state2.state - pwidth * state2.loggrad\n dist = np.linalg.norm(state1.state - state2_dyn) ** 2\n return 0.5 * dist / (2 * pwidth)\n\n\nclass PrecondLangevinMH(MetropolisHastings):\n \"\"\"\n Preconditioning with (inverse) Hessian.\n \"\"\"\n\n def __init__(self, logdens):\n \"\"\"\n precondeval returns M (and not M^{-1}) as used in Cald&Gir\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n newloc = self.sample_langevin(currstate, pwidth)\n proposal = self.evaluate_logdens(newloc)\n corrfact = self.compute_corrfact_langevin(currstate, proposal, pwidth)\n return proposal, corrfact\n\n def sample_langevin(self, currstate, pwidth):\n \"\"\"\n \"\"\"\n noise = np.random.multivariate_normal(\n np.zeros(len(currstate.loghess)), np.linalg.inv(currstate.loghess)\n )\n prec_dyn = np.linalg.solve(currstate.loghess, currstate.loggrad)\n return currstate.state - pwidth * prec_dyn + np.sqrt(2 * pwidth) * noise\n\n def compute_corrfact_langevin(self, currstate, proposal, pwidth):\n \"\"\"\n \"\"\"\n lognomin = self.kernel_langevin(currstate, proposal, pwidth)\n logdenom = self.kernel_langevin(proposal, currstate, pwidth)\n return lognomin - logdenom\n\n def kernel_langevin(self, state1, state2, pwidth):\n \"\"\"\n \"\"\"\n prec_dyn = np.linalg.solve(state2.loghess, state2.loggrad)\n state2_dyn = state2.state - pwidth * prec_dyn\n difference = state1.state - state2_dyn\n return 0.5 * difference.dot(np.dot(state2.loghess, difference)) / (2 * pwidth)\n\n\nclass HamiltonianMC(MetropolisHastings):\n \"\"\"\n \"\"\"\n\n def __init__(self, logdens, nsteps):\n \"\"\"\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n self.nsteps = nsteps\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n pwidth is used as stepsize for self.nsteps leapfrog steps.\n\n The correction factor is the quotient of the hamiltonian terms.\n \"\"\"\n momentum = np.random.multivariate_normal(\n np.zeros(len(currstate.state)), np.eye(len(currstate.state))\n )\n # hamilt = self.evaluate_hamiltonian(momentum, currstate)\n momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth)\n # prop_hamilt = self.evaluate_hamiltonian(momentum_new, proposal)\n corrfact = self.get_corrfact(momentum, momentum_new)\n return proposal, corrfact\n\n def leapfrog_dynamics(self, momentum, currstate, pwidth):\n \"\"\"\n \"\"\"\n proposal = currstate\n for idx in range(self.nsteps):\n momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth)\n return momentum, proposal\n\n def compute_next_lfstep(self, momentum, proposal, pwidth):\n \"\"\"\n \"\"\"\n momentum = momentum - 0.5 * pwidth * proposal.loggrad\n pstate = proposal.state + pwidth * momentum\n proposal = self.evaluate_logdens(pstate)\n momentum = momentum - 0.5 * pwidth * proposal.loggrad\n return momentum, proposal\n\n def get_corrfact(self, mom_new, mom):\n \"\"\"\n \"\"\"\n return 0.5 * (mom_new.T @ mom_new - mom.T @ 
mom)\n\n\nclass PrecondHamiltonianMC(MetropolisHastings):\n \"\"\"\n In fact, the true name would be either\n * Riemannian-Gaussian HMC: if the preconditioner depends on the state\n * Euclidean-Gaussian HMC: if the preconditioner is constant\n [Girolami and Calderhead, 2011; Betancourt, 2018]\n \"\"\"\n\n def __init__(self, logdens, nsteps):\n \"\"\"\n evalprecond returns M (and not M^{-1}) as used in Cald&Gir.\n M is the Hessian\n \"\"\"\n MetropolisHastings.__init__(self, logdens)\n self.nsteps = nsteps\n\n def generate_proposal(self, currstate, pwidth):\n \"\"\"\n pwidth is used as stepsize for self.nsteps leapfrog steps.\n\n The correction factor is the quotient of the hamiltonian terms.\n \"\"\"\n momentum = np.random.multivariate_normal(\n np.zeros(len(currstate.state)), currstate.loghess\n )\n momentum_new, proposal = self.leapfrog_dynamics(momentum, currstate, pwidth)\n corrfact = self.get_corrfact(momentum, momentum_new, currstate, proposal)\n return proposal, corrfact\n\n def leapfrog_dynamics(self, momentum, currstate, pwidth):\n \"\"\"\n \"\"\"\n proposal = currstate\n for idx in range(self.nsteps):\n momentum, proposal = self.compute_next_lfstep(momentum, proposal, pwidth)\n return momentum, proposal\n\n def compute_next_lfstep(self, momentum, proposal, pwidth):\n \"\"\"\n \"\"\"\n momentum = momentum - 0.5 * pwidth * proposal.loggrad\n pstate = proposal.state + pwidth * np.linalg.solve(proposal.loghess, momentum)\n proposal = self.evaluate_logdens(pstate)\n momentum = momentum - 0.5 * pwidth * proposal.loggrad\n return momentum, proposal\n\n def get_corrfact(self, mom, mom_new, currstate, proposal):\n \"\"\"\n \"\"\"\n return 0.5 * (\n mom_new.T @ np.linalg.solve(proposal.loghess, mom_new)\n + np.log(np.linalg.det(proposal.loghess))\n - mom.T @ np.linalg.solve(currstate.loghess, mom)\n - np.log(np.linalg.det(currstate.loghess))\n )\n" ]
[ [ "numpy.linalg.solve", "numpy.zeros", "numpy.linalg.inv", "numpy.linalg.det", "numpy.random.rand", "numpy.sqrt", "numpy.dot", "numpy.linalg.norm" ] ]
ChristophReich1996/kornia
[ "35f955b46e8015da1cb9faa28c6943ec2b09cc2a" ]
[ "test/augmentation/test_random_generator.py" ]
[ "import pytest\nimport torch\nfrom torch.testing import assert_allclose\n\nfrom kornia.augmentation.random_generator import (\n random_prob_generator,\n random_color_jitter_generator,\n random_perspective_generator,\n random_affine_generator,\n random_rotation_generator,\n random_crop_generator,\n random_crop_size_generator,\n random_rectangles_params_generator,\n center_crop_generator,\n random_motion_blur_generator,\n random_solarize_generator,\n random_posterize_generator,\n random_sharpness_generator,\n random_mixup_generator,\n random_cutmix_generator,\n)\n\n\nclass RandomGeneratorBaseTests():\n\n def test_valid_param_combinations(self, device, dtype):\n raise NotImplementedError\n\n def test_invalid_param_combinations(self, device, dtype):\n raise NotImplementedError\n\n def test_random_gen(self, device, dtype):\n raise NotImplementedError\n\n def test_same_on_batch(self, device, dtype):\n raise NotImplementedError\n\n\nclass TestRandomProbGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('p', [0., 0.5, 1.])\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, p, batch_size, same_on_batch, device, dtype):\n random_prob_generator(batch_size=batch_size, p=p, same_on_batch=same_on_batch)\n\n @pytest.mark.parametrize(\n 'p',\n [\n # Should be failed if p > 1. or p < 0.\n (-1.),\n (2.)\n ]\n )\n def test_invalid_param_combinations(self, p, device, dtype):\n with pytest.raises(Exception):\n random_prob_generator(batch_size=8, p=p)\n\n @pytest.mark.parametrize(\n 'p,expected',\n [(0., [False] * 8), (0.5, [False, False, True, False, True, False, True, False]), (1., [True] * 8)]\n )\n def test_random_gen(self, p, expected, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_prob_generator(batch_size=batch_size, p=p)\n assert (res == torch.tensor(expected)).long().sum() == batch_size\n\n @pytest.mark.parametrize(\"seed,expected\", [\n (42, [False] * 8),\n (0, [True] * 8),\n ])\n def test_same_on_batch(self, seed, expected, device, dtype):\n torch.manual_seed(seed)\n batch_size = 8\n res = random_prob_generator(batch_size=batch_size, p=.5, same_on_batch=True)\n assert (res == torch.tensor(expected)).long().sum() == batch_size\n\n\nclass TestColorJitterGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('brightness', [None, torch.tensor([0.8, 1.2])])\n @pytest.mark.parametrize('contrast', [None, torch.tensor([0.8, 1.2])])\n @pytest.mark.parametrize('saturation', [None, torch.tensor([0.8, 1.2])])\n @pytest.mark.parametrize('hue', [None, torch.tensor([-0.1, 0.1])])\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(\n self, brightness, contrast, saturation, hue, batch_size, same_on_batch, device, dtype\n ):\n random_color_jitter_generator(\n batch_size,\n brightness.to(device=device, dtype=dtype) if brightness is not None else None,\n contrast.to(device=device, dtype=dtype) if contrast is not None else None,\n saturation.to(device=device, dtype=dtype) if saturation is not None else None,\n hue.to(device=device, dtype=dtype) if hue is not None else None, same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'brightness,contrast,saturation,hue',\n [\n # Should be failed if value out of bounds or tensor.shape != [1, 2]\n (torch.tensor([-1., 2.]), None, None, None),\n (torch.tensor([0., 3.]), None, None, None),\n (torch.tensor(0.), None, None, None),\n (torch.tensor([0.]), 
None, None, None),\n (torch.tensor([0., 1., 2.]), None, None, None),\n (None, torch.tensor([-1., 2.]), None, None),\n (None, torch.tensor(0.), None, None),\n (None, torch.tensor([0.]), None, None),\n (None, torch.tensor([0., 1., 2.]), None, None),\n (None, None, torch.tensor([-1., 2.]), None),\n (None, None, torch.tensor(0.), None),\n (None, None, torch.tensor([0.]), None),\n (None, None, torch.tensor([0., 1., 2.]), None),\n (None, None, None, torch.tensor([-1., 0.])),\n (None, None, None, torch.tensor([0, 1.])),\n (None, None, None, torch.tensor(0.)),\n (None, None, None, torch.tensor([0.])),\n (None, None, None, torch.tensor([0., 1., 2.])),\n ]\n )\n def test_invalid_param_combinations(self, brightness, contrast, saturation, hue, device, dtype):\n with pytest.raises(Exception):\n random_color_jitter_generator(\n 8,\n brightness.to(device=device, dtype=dtype) if brightness is not None else None,\n contrast.to(device=device, dtype=dtype) if contrast is not None else None,\n saturation.to(device=device, dtype=dtype) if saturation is not None else None,\n hue.to(device=device, dtype=dtype) if hue is not None else None\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n jitter_params = random_color_jitter_generator(\n batch_size,\n brightness=torch.tensor([0.8, 1.2], device=device, dtype=dtype),\n contrast=torch.tensor([0.7, 1.3], device=device, dtype=dtype),\n saturation=torch.tensor([0.6, 1.4], device=device, dtype=dtype),\n hue=torch.tensor([-0.1, 0.1], device=device, dtype=dtype)\n )\n\n expected_jitter_params = {\n 'brightness_factor': torch.tensor(\n [1.1529, 1.1660, 0.9531, 1.1837, 0.9562, 1.0404, 0.9026, 1.1175], device=device, dtype=dtype\n ),\n 'contrast_factor': torch.tensor(\n [1.2645, 0.7799, 1.2608, 1.0561, 1.2216, 1.0406, 1.1447, 0.9576], device=device, dtype=dtype\n ),\n 'hue_factor': torch.tensor(\n [0.0771, 0.0148, -0.0467, 0.0255, -0.0461, -0.0117, -0.0406, 0.0663], device=device, dtype=dtype\n ),\n 'saturation_factor': torch.tensor(\n [0.6843, 0.8156, 0.8871, 0.7595, 1.0378, 0.6049, 1.3612, 0.6602], device=device, dtype=dtype\n ),\n 'order': torch.tensor([3, 2, 0, 1], device=device, dtype=dtype)\n }\n\n assert set(list(jitter_params.keys())) == set([\n 'brightness_factor', 'contrast_factor', 'hue_factor', 'saturation_factor', 'order']), \\\n \"Redundant keys found apart from \\\n 'brightness_factor', 'contrast_factor', 'hue_factor', 'saturation_factor', 'order'\"\n\n assert_allclose(\n jitter_params['brightness_factor'], expected_jitter_params['brightness_factor'], rtol=1e-4, atol=1e-4\n )\n assert_allclose(\n jitter_params['contrast_factor'], expected_jitter_params['contrast_factor'], rtol=1e-4, atol=1e-4\n )\n assert_allclose(jitter_params['hue_factor'], expected_jitter_params['hue_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(\n jitter_params['saturation_factor'], expected_jitter_params['saturation_factor'], rtol=1e-4, atol=1e-4\n )\n assert_allclose(jitter_params['order'].to(dtype), expected_jitter_params['order'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n jitter_params = random_color_jitter_generator(\n batch_size,\n brightness=torch.tensor([0.8, 1.2], device=device, dtype=dtype),\n contrast=torch.tensor([0.7, 1.3], device=device, dtype=dtype),\n saturation=torch.tensor([0.6, 1.4], device=device, dtype=dtype),\n hue=torch.tensor([-0.1, 0.1], device=device, dtype=dtype),\n same_on_batch=True\n )\n\n expected_res = {\n 'brightness_factor': 
torch.tensor([1.1529] * batch_size, device=device, dtype=dtype),\n 'contrast_factor': torch.tensor([1.2490] * batch_size, device=device, dtype=dtype),\n 'hue_factor': torch.tensor([-0.0234] * batch_size, device=device, dtype=dtype),\n 'saturation_factor': torch.tensor([1.3674] * batch_size, device=device, dtype=dtype),\n 'order': torch.tensor([2, 3, 0, 1], device=device, dtype=dtype)\n }\n\n assert_allclose(jitter_params['brightness_factor'], expected_res['brightness_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(jitter_params['contrast_factor'], expected_res['contrast_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(jitter_params['hue_factor'], expected_res['hue_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(jitter_params['saturation_factor'], expected_res['saturation_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(jitter_params['order'].to(dtype), expected_res['order'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomPerspectiveGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('height,width', [(200, 200)])\n @pytest.mark.parametrize('distortion_scale', [torch.tensor(0.), torch.tensor(0.5), torch.tensor(1.)])\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, height, width, distortion_scale, batch_size, same_on_batch, device, dtype):\n random_perspective_generator(\n batch_size=8,\n height=height,\n width=width,\n distortion_scale=distortion_scale.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'height,width,distortion_scale',\n [\n # Should be failed if distortion_scale > 1. or distortion_scale < 0.\n (-100, 100, torch.tensor(0.5)),\n (100, -100, torch.tensor(0.5)),\n (100, 100, torch.tensor(-0.5)),\n (100, 100, torch.tensor(1.5)),\n (100, 100, torch.tensor([0., 0.5])),\n ]\n )\n def test_invalid_param_combinations(self, height, width, distortion_scale, device, dtype):\n with pytest.raises(Exception):\n random_perspective_generator(\n batch_size=8,\n height=height,\n width=width,\n distortion_scale=distortion_scale.to(device=device, dtype=dtype)\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 2\n res = random_perspective_generator(batch_size, 200, 200, torch.tensor(0.5, device=device, dtype=dtype))\n expected = dict(\n start_points=torch.tensor(\n [[[0., 0.], [199., 0.], [199., 199.], [0., 199.]], [[0., 0.], [199., 0.], [199., 199.], [0., 199.]]],\n device=device,\n dtype=dtype\n ),\n end_points=torch.tensor(\n [\n [[44.1135, 45.7502], [179.8568, 47.9653], [179.4776, 168.9552], [12.8286, 159.3179]],\n [[47.0386, 6.6593], [152.2701, 29.6790], [155.5298, 170.6142], [37.0547, 177.5298]]\n ],\n device=device,\n dtype=dtype\n ),\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['start_points'], expected['start_points'])\n assert_allclose(res['end_points'], expected['end_points'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 2\n res = random_perspective_generator(\n batch_size, 200, 200, torch.tensor(0.5, device=device, dtype=dtype), same_on_batch=True\n )\n expected = dict(\n start_points=torch.tensor([[[0., 0.], [199., 0.], [199., 199.], [0., 199.]]], device=device,\n dtype=dtype).repeat(2, 1, 1),\n end_points=torch.tensor(\n [[[44.1135, 45.7502], [179.8568, 47.9653], [179.4776, 168.9552], [12.8286, 159.3179]]],\n device=device,\n dtype=dtype\n ).repeat(2, 1, 1),\n )\n assert res.keys() == expected.keys()\n 
assert_allclose(res['start_points'], expected['start_points'])\n assert_allclose(res['end_points'], expected['end_points'])\n\n\nclass TestRandomAffineGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 4])\n @pytest.mark.parametrize('height', [200])\n @pytest.mark.parametrize('width', [300])\n @pytest.mark.parametrize('degrees', [torch.tensor([0, 30])])\n @pytest.mark.parametrize('translate', [None, torch.tensor([0.1, 0.1])])\n @pytest.mark.parametrize('scale', [None, torch.tensor([0.7, 1.2])])\n @pytest.mark.parametrize('shear', [None, torch.tensor([[0, 20], [0, 20]])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(\n self, batch_size, height, width, degrees, translate, scale, shear, same_on_batch, device, dtype\n ):\n random_affine_generator(\n batch_size=batch_size,\n height=height,\n width=width,\n degrees=degrees.to(device=device, dtype=dtype),\n translate=translate.to(device=device, dtype=dtype) if translate is not None else None,\n scale=scale.to(device=device, dtype=dtype) if scale is not None else None,\n shear=shear.to(device=device, dtype=dtype) if shear is not None else None,\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'height,width,degrees,translate,scale,shear', [\n (-100, 100, torch.tensor([10, 20]), None, None, None),\n (100, -100, torch.tensor([10, 20]), None, None, None),\n (100, 100, 0.5, None, None, None),\n (100, 100, torch.tensor([10, 20, 30]), None, None, None),\n (100, 100, torch.tensor([10, 20]), torch.tensor([0.1]), None, None),\n (10, 10, torch.tensor([1, 2]), torch.tensor([0.1, 0.2, 0.3]), None, None),\n (100, 100, torch.tensor([10, 20]), None, torch.tensor([1]), None),\n (100, 100, torch.tensor([10, 20]), None, torch.tensor([1, 2, 3]), None),\n (100, 100, torch.tensor([10, 20]), None, None, torch.tensor([1])),\n (100, 100, torch.tensor([10, 20]), None, None, torch.tensor([1, 2])),\n (10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3])),\n (10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3, 4])),\n (10, 10, torch.tensor([1, 2]), None, None, torch.tensor([1, 2, 3, 4, 5])),\n ]\n )\n def test_invalid_param_combinations(self, height, width, degrees, translate, scale, shear, device, dtype):\n with pytest.raises(Exception):\n random_affine_generator(\n batch_size=8,\n height=height,\n width=width,\n degrees=degrees.to(device=device, dtype=dtype),\n translate=translate.to(device=device, dtype=dtype) if translate is not None else None,\n scale=scale.to(device=device, dtype=dtype) if scale is not None else None,\n shear=shear.to(device=device, dtype=dtype) if shear is not None else None\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20], device=device, dtype=dtype)\n translate = torch.tensor([0.1, 0.1], device=device, dtype=dtype)\n scale = torch.tensor([0.7, 1.2], device=device, dtype=dtype)\n shear = torch.tensor([[10, 20], [10, 20]], device=device, dtype=dtype)\n res = random_affine_generator(\n batch_size=2,\n height=200,\n width=200,\n degrees=degrees,\n translate=translate,\n scale=scale,\n shear=shear,\n same_on_batch=False\n )\n expected = dict(\n translations=torch.tensor([[-4.3821, -9.7371], [4.0358, 11.7457]], device=device, dtype=dtype),\n center=torch.tensor([[99.5000, 99.5000], [99.5000, 99.5000]], device=device, dtype=dtype),\n scale=torch.tensor([[0.8914, 0.8914], [1.1797, 1.1797]], device=device, dtype=dtype),\n angle=torch.tensor([18.8227, 19.1500], device=device, 
dtype=dtype),\n sx=torch.tensor([19.4077, 11.3319], device=device, dtype=dtype),\n sy=torch.tensor([19.3460, 15.9358], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['translations'], expected['translations'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['center'], expected['center'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['scale'], expected['scale'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['angle'], expected['angle'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['sx'], expected['sx'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['sy'], expected['sy'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20], device=device, dtype=dtype)\n translate = torch.tensor([0.1, 0.1], device=device, dtype=dtype)\n scale = torch.tensor([0.7, 1.2], device=device, dtype=dtype)\n shear = torch.tensor([[10, 20], [10, 20]], device=device, dtype=dtype)\n res = random_affine_generator(\n batch_size=2,\n height=200,\n width=200,\n degrees=degrees,\n translate=translate,\n scale=scale,\n shear=shear,\n same_on_batch=True\n )\n expected = dict(\n translations=torch.tensor([[-4.6854, 18.3722], [-4.6854, 18.3722]], device=device, dtype=dtype),\n center=torch.tensor([[99.5000, 99.5000], [99.5000, 99.5000]], device=device, dtype=dtype),\n scale=torch.tensor([[1.1575, 1.1575], [1.1575, 1.1575]], device=device, dtype=dtype),\n angle=torch.tensor([18.8227, 18.8227], device=device, dtype=dtype),\n sx=torch.tensor([13.9045, 13.9045], device=device, dtype=dtype),\n sy=torch.tensor([16.0090, 16.0090], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['translations'], expected['translations'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['center'], expected['center'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['scale'], expected['scale'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['angle'], expected['angle'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['sx'], expected['sx'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['sy'], expected['sy'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomRotationGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('degrees', [torch.tensor([0, 30])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, degrees, same_on_batch, device, dtype):\n random_rotation_generator(\n batch_size=batch_size, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize('degrees', [(torch.tensor(10)), (torch.tensor([10])), (torch.tensor([10, 20, 30]))])\n def test_invalid_param_combinations(self, degrees, device, dtype):\n batch_size = 8\n with pytest.raises(Exception):\n random_rotation_generator(batch_size=batch_size, degrees=degrees.to(device=device, dtype=dtype))\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20])\n res = random_rotation_generator(\n batch_size=2, degrees=degrees.to(device=device, dtype=dtype), same_on_batch=False\n )\n expected = dict(degrees=torch.tensor([18.8227, 19.1500], device=device, dtype=dtype))\n assert res.keys() == expected.keys()\n assert_allclose(res['degrees'], expected['degrees'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20])\n res = random_rotation_generator(\n batch_size=2, degrees=degrees.to(device=device, dtype=dtype), 
same_on_batch=True\n )\n expected = dict(degrees=torch.tensor([18.8227, 18.8227], device=device, dtype=dtype))\n assert res.keys() == expected.keys()\n assert_allclose(res['degrees'], expected['degrees'])\n\n\nclass TestRandomCropGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 2])\n @pytest.mark.parametrize('input_size', [(200, 200)])\n @pytest.mark.parametrize('size', [(100, 100), torch.tensor([50, 50])])\n @pytest.mark.parametrize('resize_to', [None, (100, 100)])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, input_size, size, resize_to, same_on_batch, device, dtype):\n if isinstance(size, torch.Tensor):\n size = size.repeat(batch_size, 1).to(device=device, dtype=dtype)\n random_crop_generator(\n batch_size=batch_size,\n input_size=input_size,\n size=size,\n resize_to=resize_to,\n same_on_batch=same_on_batch,\n device=device,\n dtype=dtype\n )\n\n @pytest.mark.parametrize(\n 'input_size,size,resize_to', [\n ((-300, 300), (200, 200), (100, 100)),\n ((200, 200), torch.tensor([50, 50]), (100, 100)),\n ]\n )\n def test_invalid_param_combinations(self, input_size, size, resize_to, device, dtype):\n batch_size = 2\n with pytest.raises(Exception):\n random_crop_generator(\n batch_size=batch_size,\n input_size=input_size,\n size=size.to(device=device, dtype=dtype) if isinstance(size, torch.Tensor) else size,\n resize_to=resize_to\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20], device=device, dtype=dtype)\n res = random_crop_generator(\n batch_size=2,\n input_size=(100, 100),\n size=torch.tensor([[50, 60], [70, 80]], device=device, dtype=dtype),\n resize_to=(200, 200)\n )\n expected = dict(\n src=torch.tensor(\n [[[36, 19], [95, 19], [95, 68], [36, 68]], [[19, 29], [98, 29], [98, 98], [19, 98]]],\n device=device,\n dtype=dtype\n ),\n dst=torch.tensor(\n [[[0, 0], [199, 0], [199, 199], [0, 199]], [[0, 0], [199, 0], [199, 199], [0, 199]]],\n device=device,\n dtype=dtype\n ),\n input_size=torch.tensor([[100, 100], [100, 100]], device=device, dtype=torch.long)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['src'], expected['src'])\n assert_allclose(res['dst'], expected['dst'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20], device=device, dtype=dtype)\n res = random_crop_generator(\n batch_size=2,\n input_size=(100, 100),\n size=torch.tensor([[50, 60], [70, 80]], device=device, dtype=dtype),\n resize_to=(200, 200),\n same_on_batch=True\n )\n expected = dict(\n src=torch.tensor(\n [[[36, 46], [95, 46], [95, 95], [36, 95]], [[36, 46], [115, 46], [115, 115], [36, 115]]],\n device=device,\n dtype=dtype\n ),\n dst=torch.tensor(\n [[[0, 0], [199, 0], [199, 199], [0, 199]], [[0, 0], [199, 0], [199, 199], [0, 199]]],\n device=device,\n dtype=dtype\n ),\n input_size=torch.tensor([[100, 100], [100, 100]], device=device, dtype=torch.long)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['src'], expected['src'])\n assert_allclose(res['dst'], expected['dst'])\n\n\nclass TestRandomCropSizeGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('size', [(200, 200)])\n @pytest.mark.parametrize('scale', [torch.tensor([.7, 1.3])])\n @pytest.mark.parametrize('ratio', [torch.tensor([.9, 1.1])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, size, scale, 
ratio, same_on_batch, device, dtype):\n random_crop_size_generator(\n batch_size=batch_size,\n size=size,\n scale=scale.to(device=device, dtype=dtype),\n ratio=ratio.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'size,scale,ratio', [\n ((100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1])),\n ((100, 100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1])),\n ((100, 100), torch.tensor([.7]), torch.tensor([.9, 1.1])),\n ((100, 100), torch.tensor([.7, 1.3, 1.5]), torch.tensor([.9, 1.1])),\n ((100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9])),\n ((100, 100), torch.tensor([.7, 1.3]), torch.tensor([.9, 1.1, 1.3])),\n ]\n )\n def test_invalid_param_combinations(self, size, scale, ratio, device, dtype):\n batch_size = 2\n with pytest.raises(Exception):\n random_crop_size_generator(\n batch_size=batch_size,\n size=size,\n scale=scale.to(device=device, dtype=dtype),\n ratio=ratio.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n res = random_crop_size_generator(\n batch_size=8,\n size=(100, 100),\n scale=torch.tensor([0.7, 1.3], device=device, dtype=dtype),\n ratio=torch.tensor([0.9, 1.1], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n size=torch.tensor(\n [[99, 94], [91, 95], [90, 96], [87, 86], [94, 98], [87, 81], [85, 93], [83, 90]],\n device=device,\n dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['size'], expected['size'])\n\n res = random_crop_size_generator(\n batch_size=100,\n size=(100, 100),\n scale=torch.tensor([0.999, 1.], device=device, dtype=dtype),\n ratio=torch.tensor([1., 1.], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(size=torch.tensor([[100, 100]], device=device, dtype=dtype).repeat(100, 1))\n assert res.keys() == expected.keys()\n assert_allclose(res['size'], expected['size'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n degrees = torch.tensor([10, 20])\n res = random_crop_size_generator(\n batch_size=8,\n size=(100, 100),\n scale=torch.tensor([0.7, 1.3], device=device, dtype=dtype),\n ratio=torch.tensor([0.9, 1.1], device=device, dtype=dtype),\n same_on_batch=True\n )\n expected = dict(\n size=torch.tensor(\n [[99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95], [99, 95]],\n device=device,\n dtype=dtype\n ),\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['size'], expected['size'])\n\n\nclass TestRandomRectangleGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('height', [200])\n @pytest.mark.parametrize('width', [300])\n @pytest.mark.parametrize('scale', [torch.tensor([.7, 1.1])])\n @pytest.mark.parametrize('ratio', [torch.tensor([.7, 1.1])])\n @pytest.mark.parametrize('value', [0])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(\n self, batch_size, height, width, scale, ratio, value, same_on_batch, device, dtype\n ):\n random_rectangles_params_generator(\n batch_size=batch_size,\n height=height,\n width=width,\n scale=scale.to(device=device, dtype=dtype),\n ratio=ratio.to(device=device, dtype=dtype),\n value=value,\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'height,width,scale,ratio,value', [\n (-100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 0),\n (100, -100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 0),\n (100, -100, 
torch.tensor([0.7]), torch.tensor([0.7, 1.3]), 0),\n (100, 100, torch.tensor([0.7, 1.3, 1.5]), torch.tensor([0.7, 1.3]), 0),\n (100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7]), 0),\n (100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3, 1.5]), 0),\n (100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), -1),\n (100, 100, torch.tensor([0.7, 1.3]), torch.tensor([0.7, 1.3]), 2),\n (100, 100, torch.tensor([.5, .7]), torch.tensor([.7, .9]), torch.tensor(0.5)),\n ]\n )\n def test_invalid_param_combinations(self, height, width, scale, ratio, value, device, dtype):\n batch_size = 8\n with pytest.raises(Exception):\n random_rectangles_params_generator(\n batch_size=batch_size,\n height=height,\n width=width,\n scale=scale.to(device=device, dtype=dtype),\n ratio=ratio.to(device=device, dtype=dtype),\n value=value,\n same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n width, height = 100, 150\n scale = torch.tensor([0.7, 1.3], device=device, dtype=dtype)\n ratio = torch.tensor([0.7, 1.3], device=device, dtype=dtype)\n value = 0.5\n res = random_rectangles_params_generator(\n batch_size=2, height=height, width=width, scale=scale, ratio=ratio, value=value, same_on_batch=False\n )\n expected = dict(\n widths=torch.tensor([100, 100], device=device, dtype=dtype),\n heights=torch.tensor([0, 0], device=device, dtype=dtype),\n xs=torch.tensor([0, 0], device=device, dtype=dtype),\n ys=torch.tensor([6, 8], device=device, dtype=dtype),\n values=torch.tensor([0.5000, 0.5000], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['widths'], expected['widths'])\n assert_allclose(res['widths'], expected['widths'])\n assert_allclose(res['xs'], expected['xs'])\n assert_allclose(res['ys'], expected['ys'])\n assert_allclose(res['values'], expected['values'])\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n width, height = 100, 150\n scale = torch.tensor([0.7, 1.3], device=device, dtype=dtype)\n ratio = torch.tensor([0.7, 1.3], device=device, dtype=dtype)\n value = 0.5\n res = random_rectangles_params_generator(\n batch_size=2, height=height, width=width, scale=scale, ratio=ratio, value=value, same_on_batch=True\n )\n expected = dict(\n widths=torch.tensor([100, 100], device=device, dtype=dtype),\n heights=torch.tensor([0, 0], device=device, dtype=dtype),\n xs=torch.tensor([0, 0], device=device, dtype=dtype),\n ys=torch.tensor([10, 10], device=device, dtype=dtype),\n values=torch.tensor([0.5000, 0.5000], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['widths'], expected['widths'])\n assert_allclose(res['widths'], expected['widths'])\n assert_allclose(res['xs'], expected['xs'])\n assert_allclose(res['ys'], expected['ys'])\n assert_allclose(res['values'], expected['values'])\n\n\nclass TestCenterCropGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 2])\n @pytest.mark.parametrize('height', [200])\n @pytest.mark.parametrize('width', [200])\n @pytest.mark.parametrize('size', [(100, 100)])\n def test_valid_param_combinations(self, batch_size, height, width, size, device, dtype):\n center_crop_generator(batch_size=batch_size, height=height, width=width, size=size)\n\n @pytest.mark.parametrize(\n 'height,width,size', [\n (200, -200, (100, 100)),\n (-200, 200, (100, 100)),\n (100, 100, (120, 120)),\n (150, 100, (120, 120)),\n (100, 150, (120, 120)),\n ]\n )\n def test_invalid_param_combinations(self, height, width, 
size, device, dtype):\n batch_size = 2\n with pytest.raises(Exception):\n center_crop_generator(batch_size=batch_size, height=height, width=width, size=size)\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n res = center_crop_generator(batch_size=2, height=200, width=200, size=(120, 150))\n expected = dict(\n src=torch.tensor(\n [[[25, 40], [174, 40], [174, 159], [25, 159]], [[25, 40], [174, 40], [174, 159], [25, 159]]],\n device=device,\n dtype=torch.long\n ),\n dst=torch.tensor(\n [[[0, 0], [149, 0], [149, 119], [0, 119]], [[0, 0], [149, 0], [149, 119], [0, 119]]],\n device=device,\n dtype=torch.long\n ),\n input_size=torch.tensor([[200, 200], [200, 200]], device=device, dtype=torch.long)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['src'].to(device=device), expected['src'])\n assert_allclose(res['dst'].to(device=device), expected['dst'])\n\n def test_same_on_batch(self, device, dtype):\n pass\n\n\nclass TestRandomMotionBlur(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('kernel_size', [3, (3, 5)])\n @pytest.mark.parametrize('angle', [torch.tensor([10, 30])])\n @pytest.mark.parametrize('direction', [torch.tensor([-1, -1]), torch.tensor([1, 1])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, kernel_size, angle, direction, same_on_batch, device, dtype):\n random_motion_blur_generator(\n batch_size=batch_size,\n kernel_size=kernel_size,\n angle=angle.to(device=device, dtype=dtype),\n direction=direction.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'kernel_size,angle,direction', [\n (4, torch.tensor([30, 100]), torch.tensor([-1, 1])),\n (1, torch.tensor([30, 100]), torch.tensor([-1, 1])),\n ((1, 2, 3), torch.tensor([30, 100]), torch.tensor([-1, 1])),\n (3, torch.tensor([30, 100]), torch.tensor([-2, 1])),\n (3, torch.tensor([30, 100]), torch.tensor([-1, 2])),\n ]\n )\n def test_invalid_param_combinations(self, kernel_size, angle, direction, device, dtype):\n with pytest.raises(Exception):\n random_motion_blur_generator(\n batch_size=8,\n kernel_size=kernel_size,\n angle=angle.to(device=device, dtype=dtype),\n direction=direction.to(device=device, dtype=dtype)\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n angle = torch.tensor([30, 90])\n direction = torch.tensor([-1, 1])\n res = random_motion_blur_generator(\n batch_size=2,\n kernel_size=3,\n angle=angle.to(device=device, dtype=dtype),\n direction=direction.to(device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n ksize_factor=torch.tensor([3, 3], device=device, dtype=torch.int32),\n angle_factor=torch.tensor([82.9362, 84.9002], device=device, dtype=dtype),\n direction_factor=torch.tensor([-0.2343, 0.9186], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['ksize_factor'], expected['ksize_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['angle_factor'], expected['angle_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['direction_factor'], expected['direction_factor'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n angle = torch.tensor([30, 90])\n direction = torch.tensor([-1, 1])\n res = random_motion_blur_generator(\n batch_size=2,\n kernel_size=3,\n angle=angle.to(device=device, dtype=dtype),\n direction=direction.to(device=device, dtype=dtype),\n same_on_batch=True\n )\n 
expected = dict(\n ksize_factor=torch.tensor([3, 3], device=device, dtype=torch.int32),\n angle_factor=torch.tensor([82.9362, 82.9362], device=device, dtype=dtype),\n direction_factor=torch.tensor([0.8300, 0.8300], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['ksize_factor'], expected['ksize_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['angle_factor'], expected['angle_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['direction_factor'], expected['direction_factor'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomSolarizeGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('thresholds', [torch.tensor([0, 1]), torch.tensor([0.4, 0.6])])\n @pytest.mark.parametrize('additions', [torch.tensor([-0.5, 0.5])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, thresholds, additions, same_on_batch, device, dtype):\n random_solarize_generator(\n batch_size=batch_size,\n thresholds=thresholds.to(device=device, dtype=dtype),\n additions=additions.to(device=device, dtype=dtype),\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'thresholds,additions', [\n (torch.tensor([0, 2]), torch.tensor([-0.5, 0.5])),\n (torch.tensor([-1, 1]), torch.tensor([-0.5, 0.5])),\n ([0, 1], torch.tensor([-0.5, 0.5])),\n (torch.tensor([0, 1]), torch.tensor([-0.5, 1])),\n (torch.tensor([0, 1]), torch.tensor([-1, 0.5])),\n (torch.tensor([0, 1]), [-0.5, 0.5]),\n ]\n )\n def test_invalid_param_combinations(self, thresholds, additions, device, dtype):\n with pytest.raises(Exception):\n random_solarize_generator(\n batch_size=batch_size,\n thresholds=thresholds.to(device=device, dtype=dtype),\n additions=additions.to(device=device, dtype=dtype)\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_solarize_generator(\n batch_size=batch_size,\n thresholds=torch.tensor([0, 1], device=device, dtype=dtype),\n additions=torch.tensor([-0.5, 0.5], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n thresholds_factor=torch.tensor(\n [0.8823, 0.9150, 0.3829, 0.9593, 0.3904, 0.6009, 0.2566, 0.7936], device=device, dtype=dtype\n ),\n additions_factor=torch.tensor(\n [0.4408, -0.3668, 0.4346, 0.0936, 0.3694, 0.0677, 0.2411, -0.0706], device=device, dtype=dtype\n ),\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['thresholds_factor'], expected['thresholds_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['additions_factor'], expected['additions_factor'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_solarize_generator(\n batch_size=batch_size,\n thresholds=torch.tensor([0, 1], device=device, dtype=dtype),\n additions=torch.tensor([-0.5, 0.5], device=device, dtype=dtype),\n same_on_batch=True\n )\n expected = dict(\n thresholds_factor=torch.tensor(\n [0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823], device=device, dtype=dtype\n ),\n additions_factor=torch.tensor(\n [0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150, 0.4150], device=device, dtype=dtype\n ),\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['thresholds_factor'], expected['thresholds_factor'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['additions_factor'], expected['additions_factor'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomPosterizeGen(RandomGeneratorBaseTests):\n\n 
@pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('bits', [torch.tensor([0, 8])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, bits, same_on_batch, device, dtype):\n random_posterize_generator(\n batch_size=batch_size, bits=bits.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'bits', [\n (torch.tensor([-1, 1])),\n (torch.tensor([0, 9])),\n (torch.tensor([3])),\n ([0, 8]),\n ]\n )\n def test_invalid_param_combinations(self, bits, device, dtype):\n with pytest.raises(Exception):\n random_posterize_generator(\n batch_size=batch_size, bits=bits.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(9)\n batch_size = 8\n res = random_posterize_generator(\n batch_size=batch_size, bits=torch.tensor([0, 8], device=device, dtype=dtype), same_on_batch=False\n )\n expected = dict(bits_factor=torch.tensor([5, 2, 3, 6, 7, 7, 2, 7], device=device, dtype=torch.int32))\n assert res.keys() == expected.keys()\n assert_allclose(res['bits_factor'], expected['bits_factor'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(9)\n batch_size = 8\n res = random_posterize_generator(\n batch_size=batch_size, bits=torch.tensor([0, 8], device=device, dtype=dtype), same_on_batch=True\n )\n expected = dict(bits_factor=torch.tensor([5, 5, 5, 5, 5, 5, 5, 5], device=device, dtype=torch.int32))\n assert res.keys() == expected.keys()\n assert_allclose(res['bits_factor'], expected['bits_factor'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomSharpnessGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('sharpness', [torch.tensor([0., 1.])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, sharpness, same_on_batch, device, dtype):\n random_sharpness_generator(\n batch_size=batch_size, sharpness=sharpness.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize('sharpness', [\n (torch.tensor([-1, 5])),\n (torch.tensor([3])),\n ([0, 1.]),\n ])\n def test_invalid_param_combinations(self, sharpness, device, dtype):\n with pytest.raises(Exception):\n random_sharpness_generator(\n batch_size=batch_size, sharpness=sharpness.to(device=device, dtype=dtype), same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_sharpness_generator(\n batch_size=batch_size, sharpness=torch.tensor([0., 1.], device=device, dtype=dtype), same_on_batch=False\n )\n expected = dict(\n sharpness_factor=torch.\n tensor([0.8823, 0.9150, 0.3829, 0.9593, 0.3904, 0.6009, 0.2566, 0.7936], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['sharpness_factor'], expected['sharpness_factor'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_sharpness_generator(\n batch_size=batch_size, sharpness=torch.tensor([0., 1.], device=device, dtype=dtype), same_on_batch=True\n )\n expected = dict(\n sharpness_factor=torch.\n tensor([0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823, 0.8823], device=device, dtype=dtype)\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['sharpness_factor'], expected['sharpness_factor'], rtol=1e-4, atol=1e-4)\n\n\nclass 
TestRandomMixUpGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('p', [0., 0.5, 1.])\n @pytest.mark.parametrize('lambda_val', [None, torch.tensor([0., 1.])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(self, batch_size, p, lambda_val, same_on_batch, device, dtype):\n random_mixup_generator(\n batch_size=batch_size,\n p=p,\n lambda_val=lambda_val.to(device=device, dtype=dtype) if isinstance(lambda_val,\n (torch.Tensor)) else lambda_val,\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'lambda_val', [\n (torch.tensor([-1, 1])),\n (torch.tensor([0, 2])),\n (torch.tensor([0, 0.5, 1])),\n ([0., 1.]),\n ]\n )\n def test_invalid_param_combinations(self, lambda_val, device, dtype):\n with pytest.raises(Exception):\n random_mixup_generator(batch_size=8, lambda_val=lambda_val.to(device=device, dtype=dtype))\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 8\n res = random_mixup_generator(\n batch_size=batch_size,\n p=0.5,\n lambda_val=torch.tensor([0., 1.], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n mixup_pairs=torch.tensor([6, 1, 0, 7, 2, 5, 3, 4], device=device, dtype=torch.long),\n mixup_lambdas=torch.tensor(\n [0.0000, 0.0000, 0.5739, 0.0000, 0.6274, 0.0000, 0.4414, 0.0000], device=device, dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['mixup_pairs'], expected['mixup_pairs'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['mixup_lambdas'], expected['mixup_lambdas'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(9)\n batch_size = 8\n res = random_mixup_generator(\n batch_size=batch_size,\n p=.9999,\n lambda_val=torch.tensor([0., 1.], device=device, dtype=dtype),\n same_on_batch=True\n )\n expected = dict(\n mixup_pairs=torch.tensor([4, 6, 7, 5, 0, 1, 3, 2], device=device, dtype=torch.long),\n mixup_lambdas=torch.tensor(\n [0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804, 0.3804], device=device, dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['mixup_pairs'], expected['mixup_pairs'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['mixup_lambdas'], expected['mixup_lambdas'], rtol=1e-4, atol=1e-4)\n\n\nclass TestRandomCutMixGen(RandomGeneratorBaseTests):\n\n @pytest.mark.parametrize('batch_size', [0, 1, 8])\n @pytest.mark.parametrize('p', [0, 0.5, 1.])\n @pytest.mark.parametrize('width,height', [(200, 200)])\n @pytest.mark.parametrize('num_mix', [1, 3])\n @pytest.mark.parametrize('beta', [None, torch.tensor(1e-15), torch.tensor(1.)])\n @pytest.mark.parametrize('cut_size', [None, torch.tensor([0., 1.]), torch.tensor([0.3, 0.6])])\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_valid_param_combinations(\n self, batch_size, p, width, height, num_mix, beta, cut_size, same_on_batch, device, dtype\n ):\n random_cutmix_generator(\n batch_size=batch_size,\n p=p,\n width=width,\n height=height,\n num_mix=num_mix,\n beta=beta.to(device=device, dtype=dtype) if isinstance(beta, (torch.Tensor)) else beta,\n cut_size=cut_size.to(device=device, dtype=dtype) if isinstance(cut_size, (torch.Tensor)) else cut_size,\n same_on_batch=same_on_batch\n )\n\n @pytest.mark.parametrize(\n 'width,height,num_mix,beta,cut_size', [\n (200, -200, 1, None, None),\n (-200, 200, 1, None, None),\n (200, 200, 0, None, None),\n (200, 200, 1.5, None, None),\n (200, 200, 1, torch.tensor([0., 1.]), None),\n (200, 
200, 1, None, torch.tensor([-1., 1.])),\n (200, 200, 1, None, torch.tensor([0., 2.])),\n ]\n )\n @pytest.mark.parametrize('same_on_batch', [True, False])\n def test_invalid_param_combinations(self, width, height, num_mix, beta, cut_size, same_on_batch, device, dtype):\n with pytest.raises(Exception):\n random_cutmix_generator(\n batch_size=8,\n p=0.5,\n width=width,\n height=height,\n num_mix=num_mix,\n beta=beta.to(device=device, dtype=dtype) if isinstance(beta, (torch.Tensor)) else beta,\n cut_size=beta.to(device=device, dtype=dtype) if isinstance(cut_size, (torch.Tensor)) else cut_size,\n same_on_batch=same_on_batch\n )\n\n def test_random_gen(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 2\n res = random_cutmix_generator(\n batch_size=batch_size,\n width=200,\n height=200,\n p=0.5,\n num_mix=1,\n beta=torch.tensor(1., device=device, dtype=dtype),\n cut_size=torch.tensor([0., 1.], device=device, dtype=dtype),\n same_on_batch=False\n )\n expected = dict(\n mix_pairs=torch.tensor([[0, 1]], device=device, dtype=torch.long),\n crop_src=torch.tensor(\n [[[[71, 108], [70, 108], [70, 107], [71, 107]], [[39, 1], [38, 1], [38, 0], [39, 0]]]],\n device=device,\n dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['mix_pairs'], expected['mix_pairs'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['crop_src'], expected['crop_src'], rtol=1e-4, atol=1e-4)\n\n def test_same_on_batch(self, device, dtype):\n torch.manual_seed(42)\n batch_size = 2\n res = random_cutmix_generator(\n batch_size=batch_size,\n width=200,\n height=200,\n p=0.5,\n num_mix=1,\n beta=torch.tensor(1., device=device, dtype=dtype),\n cut_size=torch.tensor([0., 1.], device=device, dtype=dtype),\n same_on_batch=True\n )\n expected = dict(\n mix_pairs=torch.tensor([[1, 0]], device=device, dtype=torch.long),\n crop_src=torch.tensor(\n [[[[114, 53], [113, 53], [113, 52], [114, 52]], [[114, 53], [113, 53], [113, 52], [114, 52]]]],\n device=device,\n dtype=dtype\n )\n )\n assert res.keys() == expected.keys()\n assert_allclose(res['mix_pairs'], expected['mix_pairs'], rtol=1e-4, atol=1e-4)\n assert_allclose(res['crop_src'], expected['crop_src'], rtol=1e-4, atol=1e-4)\n" ]
[ [ "torch.manual_seed", "torch.tensor", "torch.testing.assert_allclose" ] ]
vermaakarsh/Code-Vulnerability
[ "38791e2f2bc970bed4c4e8af397ac1f4ac4d7363" ]
[ "tpot_output _pipeline.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\n\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\ntpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\n train_test_split(features, tpot_data['target'], random_state=42)\n\n# Average CV score on the training set was: 0.9996457287206185\nexported_pipeline = KNeighborsClassifier(n_neighbors=2, p=1, weights=\"distance\")\n# Fix random state in exported estimator\nif hasattr(exported_pipeline, 'random_state'):\n setattr(exported_pipeline, 'random_state', 42)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n" ]
[ [ "pandas.read_csv", "sklearn.neighbors.KNeighborsClassifier", "sklearn.model_selection.train_test_split" ] ]
DMIRLAB-Group/Dassl.pytorch
[ "79052448cc0b0622f14e9768dbd6e6c0598fe6d1" ]
[ "dassl/engine/dg/crossgrad.py" ]
[ "import torch\nfrom torch.nn import functional as F\n\nfrom dassl.optim import build_optimizer, build_lr_scheduler\nfrom dassl.utils import count_num_param\nfrom dassl.engine import TRAINER_REGISTRY, TrainerX\nfrom dassl.engine.trainer import SimpleNet\n\n\n@TRAINER_REGISTRY.register()\nclass CrossGrad(TrainerX):\n \"\"\"Cross-gradient training.\n\n https://arxiv.org/abs/1804.10745.\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.eps_f = cfg.TRAINER.CG.EPS_F\n self.eps_d = cfg.TRAINER.CG.EPS_D\n self.alpha_f = cfg.TRAINER.CG.ALPHA_F\n self.alpha_d = cfg.TRAINER.CG.ALPHA_D\n\n def build_model(self):\n cfg = self.cfg\n\n print('Building F')\n self.F = SimpleNet(cfg, cfg.MODEL, self.num_classes)\n self.F.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.F)))\n self.optim_F = build_optimizer(self.F, cfg.OPTIM)\n self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)\n self.register_model('F', self.F, self.optim_F, self.sched_F)\n\n print('Building D')\n self.D = SimpleNet(cfg, cfg.MODEL, self.dm.num_source_domains)\n self.D.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.D)))\n self.optim_D = build_optimizer(self.D, cfg.OPTIM)\n self.sched_D = build_lr_scheduler(self.optim_D, cfg.OPTIM)\n self.register_model('D', self.D, self.optim_D, self.sched_D)\n\n def forward_backward(self, batch):\n input, label, domain = self.parse_batch_train(batch)\n\n input.requires_grad = True\n\n # Compute domain perturbation\n loss_d = F.cross_entropy(self.D(input), domain)\n loss_d.backward()\n grad_d = torch.clamp(input.grad.data, min=-0.1, max=0.1)\n input_d = input.data + self.eps_f * grad_d\n\n # Compute label perturbation\n input.grad.data.zero_()\n loss_f = F.cross_entropy(self.F(input), label)\n loss_f.backward()\n grad_f = torch.clamp(input.grad.data, min=-0.1, max=0.1)\n input_f = input.data + self.eps_d * grad_f\n\n input = input.detach()\n\n # Update label net\n loss_f1 = F.cross_entropy(self.F(input), label)\n loss_f2 = F.cross_entropy(self.F(input_d), label)\n loss_f = (1 - self.alpha_f) * loss_f1 + self.alpha_f * loss_f2\n self.model_backward_and_update(loss_f, 'F')\n\n # Update domain net\n loss_d1 = F.cross_entropy(self.D(input), domain)\n loss_d2 = F.cross_entropy(self.D(input_f), domain)\n loss_d = (1 - self.alpha_d) * loss_d1 + self.alpha_d * loss_d2\n self.model_backward_and_update(loss_d, 'D')\n\n output_dict = {\n 'loss_f': loss_f.item(),\n 'loss_d': loss_d.item(),\n 'lr': self.optim_F.param_groups[0]['lr']\n }\n\n if (self.batch_idx + 1) == self.num_batches:\n self.update_lr()\n\n return output_dict\n\n def model_inference(self, input):\n return self.F(input)\n" ]
[ [ "torch.clamp" ] ]
0HenryH/ai2021s
[ "1cadc3f963f7f2fba99441607e62c8da88183327" ]
[ "AI-lec8-rnn/classify.py" ]
[ "import time\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\nimport torch.nn as nn\r\nimport itertools\r\nimport collections\r\nimport matplotlib.pyplot as plt\r\n\r\n# Read in data\r\ndf = pd.read_csv(\"Chinese_Names_Corpus_Gender(120W).txt\", header=2)\r\ndf = df[df.sex != \"未知\"]\r\nnames = df[\"dict\"].values\r\n\r\n# Compute character frequency\r\nchars = [list(name) for name in names]\r\nchars_flatten = list(itertools.chain(*chars))\r\nfreq = collections.Counter(chars_flatten)\r\nfreq = pd.DataFrame(freq.items(), columns=[\"char\", \"freq\"])\r\nfreq = freq.sort_values(by=\"freq\", ascending=False)\r\n\r\n# Power law (?)\r\nchar_rank = np.arange(freq.shape[0])\r\nchar_freq = freq[\"freq\"].values\r\nplt.plot(char_rank, char_freq)\r\nplt.plot(np.log(1.0 + char_rank), np.log(char_freq))\r\n\r\n# Prepare data\r\ndict_size = 500\r\ndict = list(freq[\"char\"].values[:dict_size])\r\ndict_set = set(dict)\r\nfiltered = list(filter(lambda item: set(item[1]).issubset(dict_set), enumerate(names)))\r\nind = [idx for idx, name in filtered]\r\ndat = df.iloc[ind]\r\ndat[\"y\"] = np.where(dat[\"sex\"] == \"男\", 0, 1)\r\n\r\n# Split training set and test set\r\n# train = dat.sample(frac=0.8, random_state=123)\r\n# test = dat.drop(train.index)\r\ntrain = dat.sample(n=10000, random_state=123)\r\ntest = dat.sample(n=1000, random_state=321)\r\n\r\n# One-hot encoding\r\ndef char2index(char):\r\n return dict.index(char)\r\n\r\ndef name2index(name):\r\n return [char2index(char) for char in name]\r\n\r\ndef name2tensor(name):\r\n tensor = torch.zeros(len(name), 1, dict_size)\r\n for i, char in enumerate(name):\r\n tensor[i, 0, char2index(char)] = 1\r\n return tensor\r\n\r\nchar2index(\"李\")\r\nname2index(\"李兴\")\r\nname2tensor(\"李兴\")\r\n\r\n\r\n\r\n# Build model\r\nclass RNN(nn.Module):\r\n def __init__(self, input_size, hidden_size):\r\n super(RNN, self).__init__()\r\n self.hidden_size = hidden_size\r\n self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\r\n self.h2o = nn.Linear(hidden_size, 1)\r\n\r\n def forward(self, input, hidden):\r\n combined = torch.cat((input, hidden), dim=1)\r\n hidden = torch.tanh(self.i2h(combined))\r\n output = torch.sigmoid(self.h2o(hidden))\r\n return output, hidden\r\n\r\n def init_hidden(self):\r\n return torch.zeros(1, self.hidden_size)\r\n\r\n# n_hidden = 128\r\n# rnn = RNN(dict_size, n_hidden)\r\n# input = name2tensor(\"李兴\")\r\n# hidden = rnn.init_hidden()\r\n# output, next_hidden = rnn(input[0], hidden)\r\n\r\n\r\n\r\nnp.random.seed(123)\r\ntorch.random.manual_seed(123)\r\n\r\nn = train.shape[0]\r\nn_hidden = 64\r\nnepoch = 5\r\nbs = 100\r\n\r\nrnn = RNN(dict_size, n_hidden)\r\nopt = torch.optim.Adam(rnn.parameters(), lr=0.001)\r\ntrain_ind = np.arange(n)\r\nlosses = []\r\n\r\nt1 = time.time()\r\nfor k in range(nepoch):\r\n np.random.shuffle(train_ind)\r\n # Update on mini-batches\r\n for j in range(0, n, bs):\r\n # Create mini-batch\r\n mb = train.iloc[train_ind[j:(j + bs)]]\r\n mb_size = mb.shape[0]\r\n loss = 0.0\r\n # Loop over each name in the mini-batch\r\n for i in range(mb_size):\r\n name = mb[\"dict\"].values[i]\r\n input = name2tensor(name)\r\n hidden = rnn.init_hidden()\r\n y = mb[\"y\"].values[i]\r\n for s in range(input.shape[0]):\r\n output, hidden = rnn(input[s], hidden)\r\n loss = loss - y * torch.log(output) - (1.0 - y) * torch.log(1.0 - output)\r\n\r\n loss = loss / mb_size\r\n opt.zero_grad()\r\n loss.backward()\r\n opt.step()\r\n\r\n losses.append(loss.item())\r\n if j // bs % 10 == 0:\r\n print(f\"epoch {k}, batch {j // 
bs}, loss = {loss.item()}\")\r\nt2 = time.time()\r\nprint(t2 - t1)\r\n\r\nplt.plot(losses)\r\n\r\n# Prediction on test set\r\nntest = test.shape[0]\r\ntrue_label = test[\"y\"].values\r\npred = np.zeros(ntest)\r\nrnn.eval()\r\nfor i in range(ntest):\r\n input = name2tensor(test[\"dict\"].values[i])\r\n hidden = rnn.init_hidden()\r\n with torch.no_grad():\r\n for s in range(input.shape[0]):\r\n output, hidden = rnn(input[s], hidden)\r\n pred[i] = output.item()\r\n if i % 100 == 0:\r\n print(f\"processed {i}\")\r\nloss = -np.mean(true_label * np.log(pred) + (1.0 - true_label) * np.log(1.0 - pred))\r\nprint(loss)\r\npred_label = (pred > 0.5).astype(int)\r\nprint(np.mean(pred_label == true_label))\r\n\r\n# Random cases\r\nnp.random.seed(123)\r\ntorch.random.manual_seed(123)\r\nind = np.random.choice(ntest, 10)\r\nypred = 1 * (pred[ind] > 0.5)\r\nprint(test.iloc[ind])\r\nprint(test[\"y\"].values[ind])\r\nprint(ypred)\r\n\r\n\r\n\r\nnames = [\"李\", \"李雪\", \"李雪峰\"]\r\nfor name in names:\r\n input = name2tensor(name)\r\n hidden = rnn.init_hidden()\r\n with torch.no_grad():\r\n for s in range(input.shape[0]):\r\n output, hidden = rnn(input[s], hidden)\r\n pred = output.item()\r\n print(f\"namae: {name}, P(female) = {pred}\")\r\n" ]
[ [ "numpy.random.shuffle", "torch.nn.Linear", "numpy.zeros", "pandas.read_csv", "torch.no_grad", "numpy.random.seed", "numpy.random.choice", "numpy.arange", "numpy.log", "torch.log", "torch.zeros", "matplotlib.pyplot.plot", "numpy.where", "torch.random.manual_seed", "torch.cat", "numpy.mean" ] ]
Jerry2001Qu/pennylane-qiskit
[ "7ba24ac6ab695d83508cd0f5064f7dfb1670a79b" ]
[ "tests/test_integration.py" ]
[ "import sys\n\nimport numpy as np\nimport pennylane as qml\nimport pytest\nimport qiskit\n\nfrom pennylane_qiskit import AerDevice, BasicAerDevice\n\nfrom conftest import state_backends\n\npldevices = [(\"qiskit.aer\", qiskit.Aer), (\"qiskit.basicaer\", qiskit.BasicAer)]\n\n\nclass TestDeviceIntegration:\n \"\"\"Test the devices work correctly from the PennyLane frontend.\"\"\"\n\n @pytest.mark.parametrize(\"d\", pldevices)\n def test_load_device(self, d, backend):\n \"\"\"Test that the qiskit device loads correctly\"\"\"\n dev = qml.device(d[0], wires=2, backend=backend, shots=1024)\n assert dev.num_wires == 2\n assert dev.shots == 1024\n assert dev.short_name == d[0]\n assert dev.provider == d[1]\n\n def test_incorrect_backend(self):\n \"\"\"Test that exception is raised if name is incorrect\"\"\"\n with pytest.raises(ValueError, match=\"Backend 'none' does not exist\"):\n qml.device(\"qiskit.aer\", wires=2, backend=\"none\")\n\n def test_incorrect_backend_wires(self):\n \"\"\"Test that exception is raised if number of wires is too large\"\"\"\n with pytest.raises(ValueError, match=r\"Backend 'statevector\\_simulator' supports maximum\"):\n qml.device(\"qiskit.aer\", wires=100, backend=\"statevector_simulator\")\n\n def test_args(self):\n \"\"\"Test that the device requires correct arguments\"\"\"\n with pytest.raises(TypeError, match=\"missing 1 required positional argument\"):\n qml.device(\"qiskit.aer\")\n\n with pytest.raises(qml.DeviceError, match=\"specified number of shots needs to be at least 1\"):\n qml.device(\"qiskit.aer\", backend=\"qasm_simulator\", wires=1, shots=0)\n\n @pytest.mark.parametrize(\"d\", pldevices)\n @pytest.mark.parametrize(\"analytic\", [True, False])\n @pytest.mark.parametrize(\"shots\", [8192])\n def test_one_qubit_circuit(self, shots, analytic, d, backend, tol):\n \"\"\"Test that devices provide correct result for a simple circuit\"\"\"\n if backend not in state_backends and analytic:\n pytest.skip(\"Hardware simulators do not support analytic mode\")\n\n dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)\n\n a = 0.543\n b = 0.123\n c = 0.987\n\n @qml.qnode(dev)\n def circuit(x, y, z):\n \"\"\"Reference QNode\"\"\"\n qml.BasisState(np.array([1]), wires=0)\n qml.Hadamard(wires=0)\n qml.Rot(x, y, z, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n assert np.allclose(circuit(a, b, c), np.cos(a) * np.sin(b), **tol)\n\n @pytest.mark.parametrize(\"d\", pldevices)\n @pytest.mark.parametrize(\"analytic\", [False])\n @pytest.mark.parametrize(\"shots\", [8192])\n def test_one_qubit_circuit(self, shots, analytic, d, backend, tol):\n \"\"\"Integration test for the Basisstate and Rot operations for when analytic\n is False\"\"\"\n dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)\n\n a = 0\n b = 0\n c = np.pi\n expected = 1\n\n @qml.qnode(dev)\n def circuit(x, y, z):\n \"\"\"Reference QNode\"\"\"\n qml.BasisState(np.array([0]), wires=0)\n qml.Rot(x, y, z, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n assert np.allclose(circuit(a, b, c), expected, **tol)\n\n def test_gradient_for_tensor_product(self):\n \"\"\"Test that the gradient of a circuit containing a tensor product is\n computed without any errors.\"\"\"\n n_qubits = 2\n depth = 2\n\n def ansatz(weights):\n weights = weights.reshape(depth, n_qubits)\n qml.RX(weights[0][0], wires=[0])\n qml.RZ(weights[0][1], wires=[0])\n qml.RX(weights[1][0], wires=[0])\n qml.RZ(weights[1][1], wires=[0])\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n dev_qsk 
= qml.device(\n \"qiskit.aer\",\n wires=n_qubits,\n shots=1000,\n backend=\"qasm_simulator\",\n )\n\n weights = np.random.random((depth, n_qubits)).flatten()\n\n # Want to get expectation value and gradient\n exp_sampled = qml.QNode(ansatz, dev_qsk, diff_method=\"parameter-shift\")\n grad_shift = qml.grad(exp_sampled, argnum=0)\n exp_sampled(weights)\n grad_shift(weights)\n\nclass TestKeywordArguments:\n \"\"\"Test keyword argument logic is correct\"\"\"\n\n @pytest.mark.parametrize(\"d\", pldevices)\n def test_compile_backend(self, d):\n \"\"\"Test that the compile backend argument is properly\n extracted\"\"\"\n dev = qml.device(d[0], wires=2, compile_backend=\"test value\")\n assert dev.compile_backend == \"test value\"\n\n def test_noise_model(self):\n \"\"\"Test that the noise model argument is properly\n extracted if the backend supports it\"\"\"\n dev = qml.device(\"qiskit.aer\", wires=2, noise_model=\"test value\")\n assert dev.noise_model == \"test value\"\n\n def test_invalid_noise_model(self):\n \"\"\"Test that the noise model argument causes an exception to be raised\n if the backend does not support it\"\"\"\n with pytest.raises(ValueError, match=\"does not support noisy simulations\"):\n dev = qml.device(\"qiskit.basicaer\", wires=2, noise_model=\"test value\")\n\n def test_overflow_kwargs(self):\n \"\"\"Test all overflow kwargs are extracted for the AerDevice\"\"\"\n dev = qml.device('qiskit.aer', wires=2, k1=\"v1\", k2=\"v2\")\n assert dev.run_args[\"k1\"] == \"v1\"\n assert dev.run_args[\"k2\"] == \"v2\"\n\n\nclass TestLoadIntegration:\n \"\"\"Integration tests for the PennyLane load function. This test ensures that the PennyLane-Qiskit\n specific load functions integrate properly with the PennyLane-Qiskit plugin.\"\"\"\n\n hadamard_qasm = 'OPENQASM 2.0;' \\\n 'include \"qelib1.inc\";' \\\n 'qreg q[1];' \\\n 'h q[0];'\n\n def test_load_qiskit_circuit(self):\n \"\"\"Test that the default load function works correctly.\"\"\"\n theta = qiskit.circuit.Parameter('θ')\n\n qc = qiskit.QuantumCircuit(2)\n qc.rx(theta, 0)\n\n my_template = qml.load(qc, format='qiskit')\n\n dev = qml.device('default.qubit', wires=2)\n\n angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])\n\n @qml.qnode(dev)\n def loaded_quantum_circuit(angle):\n my_template({theta: angle})\n return qml.expval(qml.PauliZ(0))\n\n @qml.qnode(dev)\n def quantum_circuit(angle):\n qml.RX(angle, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n for x in angles:\n assert np.allclose(loaded_quantum_circuit(x), quantum_circuit(x))\n\n def test_load_from_qasm_string(self):\n \"\"\"Test that quantum circuits can be loaded from a qasm string.\"\"\"\n\n dev = qml.device('default.qubit', wires=2)\n\n @qml.qnode(dev)\n def loaded_quantum_circuit():\n qml.from_qasm(TestLoadIntegration.hadamard_qasm)(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n @qml.qnode(dev)\n def quantum_circuit():\n qml.Hadamard(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n assert np.allclose(loaded_quantum_circuit(), quantum_circuit())\n\n @pytest.mark.skipif(sys.version_info < (3, 6), reason=\"tmpdir fixture requires Python >=3.6\")\n def test_load_qasm_from_file(self, tmpdir):\n \"\"\"Test that quantum circuits can be loaded from a qasm file.\"\"\"\n apply_hadamard = tmpdir.join(\"hadamard.qasm\")\n\n with open(apply_hadamard, \"w\") as f:\n f.write(TestLoadIntegration.hadamard_qasm)\n\n hadamard = qml.from_qasm_file(apply_hadamard)\n\n dev = qml.device('default.qubit', wires=2)\n\n @qml.qnode(dev)\n def loaded_quantum_circuit():\n 
hadamard(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n @qml.qnode(dev)\n def quantum_circuit():\n qml.Hadamard(wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n assert np.allclose(loaded_quantum_circuit(), quantum_circuit())\n\n\nclass TestPLOperations:\n \"\"\"Integration tests for checking certain PennyLane specific operations.\"\"\"\n\n @pytest.mark.parametrize(\"shots\", [1000])\n @pytest.mark.parametrize(\"analytic\", [True, False])\n def test_rotation(self, init_state, state_vector_device, shots, analytic, tol):\n \"\"\"Test that the QubitStateVector and Rot operations are decomposed using a\n Qiskit device with statevector backend\"\"\"\n\n dev = state_vector_device(1)\n\n if dev.backend_name == \"unitary_simulator\":\n pytest.skip(\"Test only runs for backends that are not the unitary simulator.\")\n\n state = init_state(1)\n\n a = 0.542\n b = 1.3432\n c = -0.654\n\n I = np.eye(2)\n Y = np.array([[0, -1j], [1j, 0]]) #: Pauli-Y matrix\n Z = np.array([[1, 0], [0, -1]]) #: Pauli-Z matrix\n\n def ry(theta):\n return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Y\n\n def rz(theta):\n return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Z\n\n @qml.qnode(dev)\n def qubitstatevector_and_rot():\n qml.QubitStateVector(state, wires=[0])\n qml.Rot(a, b, c, wires=[0])\n return qml.expval(qml.Identity(0))\n\n qubitstatevector_and_rot()\n\n assert np.allclose(np.abs(dev.state) ** 2, np.abs(rz(c) @ ry(b) @ rz(a) @ state) ** 2, **tol)\n\n @pytest.mark.parametrize(\"shots\", [1000])\n @pytest.mark.parametrize(\"analytic\", [True, False])\n def test_basisstate(self, init_state, state_vector_device, shots, analytic, tol):\n \"\"\"Test that the Basisstate is decomposed using a Qiskit device with\n statevector backend\"\"\"\n\n dev = state_vector_device(2)\n state = np.array([1, 0])\n\n @qml.qnode(dev)\n def basisstate():\n qml.BasisState(state, wires=[0, 1])\n return qml.expval(qml.Identity(0))\n\n basisstate()\n\n expected_state = np.zeros(2**dev.num_wires)\n expected_state[2] = 1\n\n assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)\n\n @pytest.mark.parametrize(\"shots\", [1000])\n @pytest.mark.parametrize(\"analytic\", [True, False])\n def test_basisstate_init_all_zero_states(self, init_state, state_vector_device, shots, analytic, tol):\n \"\"\"Test that the Basisstate that receives the all zero state is decomposed using\n a Qiskit device with statevector backend\"\"\"\n\n dev = state_vector_device(4)\n state = np.array([0, 0, 0, 0])\n\n @qml.qnode(dev)\n def basisstate():\n qml.BasisState(state, wires=[0, 1, 2, 3])\n return qml.expval(qml.Identity(0))\n\n basisstate()\n\n expected_state = np.zeros(2**dev.num_wires)\n expected_state[0] = 1\n\n assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)\n\n\nclass TestInverses:\n \"\"\"Integration tests checking that the inverse of the operations are applied.\"\"\"\n\n def test_inverse_of_operation(self):\n \"\"\"Test that the inverse of operations works as expected\n by comparing a simple circuit with default.qubit.\"\"\"\n dev = qml.device('default.qubit', wires=2)\n\n dev2 = qml.device('qiskit.aer', backend='statevector_simulator', shots=5, wires=2, analytic=True)\n\n angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])\n\n @qml.qnode(dev)\n def circuit_with_inverses(angle):\n qml.Hadamard(0).inv()\n qml.RX(angle, wires=0).inv()\n return qml.expval(qml.PauliZ(0))\n\n @qml.qnode(dev2)\n def circuit_with_inverses_default_qubit(angle):\n qml.Hadamard(0).inv()\n qml.RX(angle, 
wires=0).inv()\n return qml.expval(qml.PauliZ(0))\n\n for x in angles:\n assert np.allclose(circuit_with_inverses(x), circuit_with_inverses_default_qubit(x))\n" ]
[ [ "numpy.eye", "numpy.zeros", "numpy.abs", "numpy.cos", "numpy.random.random", "numpy.array", "numpy.sin" ] ]
darkxaze/PINNs
[ "f344a907cf8b585e5f667465178c4442b907024d" ]
[ "mycode/src/def_Net_u_B.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 20 11:36:58 2020\r\n\r\n@author: nastavirs\r\n\"\"\"\r\nimport numpy as np\r\nimport tensorflow as tf\r\ndef net_u(self, x, t): \r\n u = self.neural_net(tf.concat([x,t],1), self.weights, self.biases)\r\n return u" ]
[ [ "tensorflow.concat" ] ]
jgasthaus/gluon-ts
[ "e14ad69058e58e1ce51c40551674318341781331" ]
[ "test/distribution/test_distribution_inference.py" ]
[ "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\"\"\"\nTest that maximizing likelihood allows to correctly recover distribution parameters for all\ndistributions exposed to the user.\n\"\"\"\n# Standard library imports\nfrom typing import Iterable, List, Tuple\n\n# Third-party imports\nimport mxnet as mx\nimport numpy as np\nimport pytest\nfrom pydantic import PositiveFloat, PositiveInt\n\n# First-party imports\nfrom gluonts.model.common import NPArrayLike\nfrom gluonts.distribution.box_cox_tranform import (\n InverseBoxCoxTransform,\n InverseBoxCoxTransformOutput,\n)\nfrom gluonts.distribution import (\n DistributionOutput,\n StudentT,\n StudentTOutput,\n MultivariateGaussian,\n MultivariateGaussianOutput,\n LowrankMultivariateGaussian,\n LowrankMultivariateGaussianOutput,\n NegativeBinomial,\n NegativeBinomialOutput,\n Laplace,\n LaplaceOutput,\n Gaussian,\n GaussianOutput,\n PiecewiseLinear,\n PiecewiseLinearOutput,\n Binned,\n BinnedOutput,\n)\nfrom gluonts.distribution.transformed_distribution_output import (\n TransformedDistributionOutput,\n)\nfrom gluonts.distribution.transformed_distribution import (\n TransformedDistribution,\n)\n\n\nNUM_SAMPLES = 2000\nBATCH_SIZE = 32\nTOL = 0.3\nSTART_TOL_MULTIPLE = 1\n\nnp.random.seed(1)\nmx.random.seed(1)\n\n\ndef inv_softplus(y: NPArrayLike) -> np.ndarray:\n # y = log(1 + exp(x)) ==> x = log(exp(y) - 1)\n return np.log(np.exp(y) - 1)\n\n\ndef maximum_likelihood_estimate_sgd(\n distr_output: DistributionOutput,\n samples: mx.ndarray,\n init_biases: List[mx.ndarray.NDArray] = None,\n num_epochs: PositiveInt = PositiveInt(5),\n learning_rate: PositiveFloat = PositiveFloat(1e-2),\n hybridize: bool = True,\n) -> Iterable[float]:\n model_ctx = mx.cpu()\n\n arg_proj = distr_output.get_args_proj()\n arg_proj.initialize()\n\n if hybridize:\n arg_proj.hybridize()\n\n if init_biases is not None:\n for param, bias in zip(arg_proj.proj, init_biases):\n param.params[param.prefix + \"bias\"].initialize(\n mx.initializer.Constant(bias), force_reinit=True\n )\n\n trainer = mx.gluon.Trainer(\n arg_proj.collect_params(),\n \"sgd\",\n {\"learning_rate\": learning_rate, \"clip_gradient\": 10.0},\n )\n\n # The input data to our model is one-dimensional\n dummy_data = mx.nd.array(np.ones((len(samples), 1)))\n\n train_data = mx.gluon.data.DataLoader(\n mx.gluon.data.ArrayDataset(dummy_data, samples),\n batch_size=BATCH_SIZE,\n shuffle=True,\n )\n\n for e in range(num_epochs):\n cumulative_loss = 0\n num_batches = 0\n # inner loop\n for i, (data, sample_label) in enumerate(train_data):\n data = data.as_in_context(model_ctx)\n sample_label = sample_label.as_in_context(model_ctx)\n with mx.autograd.record():\n distr_args = arg_proj(data)\n distr = distr_output.distribution(distr_args)\n loss = distr.loss(sample_label)\n if not hybridize:\n assert loss.shape == distr.batch_shape\n loss.backward()\n trainer.step(BATCH_SIZE)\n num_batches += 1\n\n cumulative_loss += mx.nd.mean(loss).asscalar()\n print(\"Epoch %s, loss: %s\" % (e, 
cumulative_loss / num_batches))\n\n return [\n param[0].asnumpy() for param in arg_proj(mx.nd.array(np.ones((1, 1))))\n ]\n\n\n@pytest.mark.parametrize(\"mu, sigma, nu\", [(2.3, 0.7, 6.0)])\n@pytest.mark.parametrize(\"hybridize\", [True, False])\ndef test_studentT_likelihood(\n mu: float, sigma: float, nu: float, hybridize: bool\n) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n\n # generate samples\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma\n nus = mx.nd.zeros((NUM_SAMPLES,)) + nu\n\n distr = StudentT(mus, sigmas, nus)\n samples = distr.sample()\n\n # nu takes very long to learn, so we initialize it at the true value.\n # transform used is softplus(x) + 2\n init_bias = [\n mu - START_TOL_MULTIPLE * TOL * mu,\n inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),\n inv_softplus(nu - 2),\n ]\n\n mu_hat, sigma_hat, nu_hat = maximum_likelihood_estimate_sgd(\n StudentTOutput(),\n samples,\n init_biases=init_bias,\n hybridize=hybridize,\n num_epochs=PositiveInt(10),\n learning_rate=PositiveFloat(1e-2),\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(sigma_hat - sigma) < TOL * sigma\n ), f\"sigma did not match: sigma = {sigma}, sigma_hat = {sigma_hat}\"\n assert (\n np.abs(nu_hat - nu) < TOL * nu\n ), \"nu0 did not match: nu0 = %s, nu_hat = %s\" % (nu, nu_hat)\n\n\n@pytest.mark.parametrize(\"mu, sigma\", [(1.0, 0.1)])\n@pytest.mark.parametrize(\"hybridize\", [True, False])\ndef test_gaussian_likelihood(mu: float, sigma: float, hybridize: bool):\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n\n # generate samples\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma\n\n distr = Gaussian(mus, sigmas)\n samples = distr.sample()\n\n init_biases = [\n mu - START_TOL_MULTIPLE * TOL * mu,\n inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),\n ]\n\n mu_hat, sigma_hat = maximum_likelihood_estimate_sgd(\n GaussianOutput(),\n samples,\n init_biases=init_biases,\n hybridize=hybridize,\n learning_rate=PositiveFloat(0.001),\n num_epochs=PositiveInt(5),\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(sigma_hat - sigma) < TOL * sigma\n ), f\"alpha did not match: sigma = {sigma}, sigma_hat = {sigma_hat}\"\n\n\n@pytest.mark.timeout(10)\ndef test_multivariate_gaussian() -> None:\n num_samples = 2000\n dim = 2\n\n mu = np.arange(0, dim) / float(dim)\n\n L_diag = np.ones((dim,))\n L_low = 0.1 * np.ones((dim, dim)) * np.tri(dim, k=-1)\n L = np.diag(L_diag) + L_low\n Sigma = L.dot(L.transpose())\n\n distr = MultivariateGaussian(mu=mx.nd.array(mu), L=mx.nd.array(L))\n\n samples = distr.sample(num_samples)\n\n mu_hat, L_hat = maximum_likelihood_estimate_sgd(\n MultivariateGaussianOutput(dim=dim),\n samples,\n init_biases=None, # todo we would need to rework biases a bit to use it in the multivariate case\n hybridize=False,\n learning_rate=PositiveFloat(0.01),\n num_epochs=PositiveInt(10),\n )\n\n distr = MultivariateGaussian(\n mu=mx.nd.array([mu_hat]), L=mx.nd.array([L_hat])\n )\n\n Sigma_hat = distr.variance[0].asnumpy()\n\n assert np.allclose(\n mu_hat, mu, atol=0.1, rtol=0.1\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert np.allclose(\n Sigma_hat, Sigma, atol=0.1, rtol=0.1\n ), f\"Sigma did not match: sigma = {Sigma}, sigma_hat = 
{Sigma_hat}\"\n\n\n@pytest.mark.timeout(10)\ndef test_lowrank_multivariate_gaussian() -> None:\n num_samples = 2000\n dim = 2\n rank = 1\n\n mu = np.arange(0, dim) / float(dim)\n D = np.eye(dim) * (np.arange(dim) / dim + 0.5)\n W = np.sqrt(np.ones((dim, rank)) * 0.2)\n Sigma = D + W.dot(W.transpose())\n\n distr = LowrankMultivariateGaussian(\n mu=mx.nd.array([mu]),\n D=mx.nd.array([np.diag(D)]),\n W=mx.nd.array([W]),\n dim=dim,\n rank=rank,\n )\n\n assert np.allclose(\n distr.variance[0].asnumpy(), Sigma, atol=0.1, rtol=0.1\n ), f\"did not match: sigma = {Sigma}, sigma_hat = {distr.variance[0]}\"\n\n samples = distr.sample(num_samples).squeeze().asnumpy()\n\n mu_hat, D_hat, W_hat = maximum_likelihood_estimate_sgd(\n LowrankMultivariateGaussianOutput(dim=dim, rank=rank),\n samples,\n learning_rate=PositiveFloat(0.01),\n num_epochs=PositiveInt(10),\n init_biases=None, # todo we would need to rework biases a bit to use it in the multivariate case\n hybridize=False,\n )\n\n distr = LowrankMultivariateGaussian(\n dim=dim,\n rank=rank,\n mu=mx.nd.array([mu_hat]),\n D=mx.nd.array([D_hat]),\n W=mx.nd.array([W_hat]),\n )\n\n Sigma_hat = distr.variance.asnumpy()\n\n assert np.allclose(\n mu_hat, mu, atol=0.2, rtol=0.1\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n\n assert np.allclose(\n Sigma_hat, Sigma, atol=0.1, rtol=0.1\n ), f\"alpha did not match: sigma = {Sigma}, sigma_hat = {Sigma_hat}\"\n\n\n@pytest.mark.parametrize(\"mu\", [6.0])\n@pytest.mark.parametrize(\"hybridize\", [True, False])\ndef test_deterministic_l2(mu: float, hybridize: bool) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters.\n This tests uses the Gaussian distribution with fixed variance and sample mean.\n This essentially reduces to determistic L2.\n \"\"\"\n # generate samples\n mu = mu\n mus = mx.nd.zeros(NUM_SAMPLES) + mu\n\n deterministic_distr = Gaussian(mu=mus, sigma=0.1 * mx.nd.ones_like(mus))\n samples = deterministic_distr.sample()\n\n class GaussianFixedVarianceOutput(GaussianOutput):\n @classmethod\n def domain_map(cls, F, mu, sigma):\n sigma = 0.1 * F.ones_like(sigma)\n return mu.squeeze(axis=-1), sigma.squeeze(axis=-1)\n\n mu_hat, _ = maximum_likelihood_estimate_sgd(\n GaussianFixedVarianceOutput(),\n samples,\n init_biases=[3 * mu, 0.1],\n hybridize=hybridize,\n num_epochs=PositiveInt(1),\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n\n\n@pytest.mark.parametrize(\"mu\", [1.0])\n@pytest.mark.parametrize(\"hybridize\", [True, False])\ndef test_deterministic_l1(mu: float, hybridize: bool) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters.\n This tests uses the Laplace distribution with fixed variance and sample mean.\n This essentially reduces to determistic L1.\n \"\"\"\n # generate samples\n mu = mu\n mus = mx.nd.zeros(NUM_SAMPLES) + mu\n\n class LaplaceFixedVarianceOutput(LaplaceOutput):\n @classmethod\n def domain_map(cls, F, mu, b):\n b = 0.1 * F.ones_like(b)\n return mu.squeeze(axis=-1), b.squeeze(axis=-1)\n\n deterministic_distr = Laplace(mu=mus, b=0.1 * mx.nd.ones_like(mus))\n samples = deterministic_distr.sample()\n\n mu_hat, _ = maximum_likelihood_estimate_sgd(\n LaplaceFixedVarianceOutput(),\n samples,\n init_biases=[3 * mu, 0.1],\n learning_rate=PositiveFloat(1e-3),\n hybridize=hybridize,\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n\n\n@pytest.mark.parametrize(\"mu_alpha\", [(2.5, 
0.7)])\n@pytest.mark.parametrize(\"hybridize\", [True, False])\ndef test_neg_binomial(mu_alpha: Tuple[float, float], hybridize: bool) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n # test instance\n mu, alpha = mu_alpha\n\n # generate samples\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n alphas = mx.nd.zeros((NUM_SAMPLES,)) + alpha\n\n neg_bin_distr = NegativeBinomial(mu=mus, alpha=alphas)\n samples = neg_bin_distr.sample()\n\n init_biases = [\n inv_softplus(mu - START_TOL_MULTIPLE * TOL * mu),\n inv_softplus(alpha + START_TOL_MULTIPLE * TOL * alpha),\n ]\n\n mu_hat, alpha_hat = maximum_likelihood_estimate_sgd(\n NegativeBinomialOutput(),\n samples,\n hybridize=hybridize,\n init_biases=init_biases,\n num_epochs=PositiveInt(15),\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(alpha_hat - alpha) < TOL * alpha\n ), f\"alpha did not match: alpha = {alpha}, alpha_hat = {alpha_hat}\"\n\n\n@pytest.mark.timeout(10)\n@pytest.mark.parametrize(\"mu_b\", [(3.3, 0.7)])\n@pytest.mark.parametrize(\"hybridize\", [True, False])\ndef test_laplace(mu_b: Tuple[float, float], hybridize: bool) -> None:\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n # test instance\n mu, b = mu_b\n\n # generate samples\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n bs = mx.nd.zeros((NUM_SAMPLES,)) + b\n\n laplace_distr = Laplace(mu=mus, b=bs)\n samples = laplace_distr.sample()\n\n init_biases = [\n mu - START_TOL_MULTIPLE * TOL * mu,\n inv_softplus(b + START_TOL_MULTIPLE * TOL * b),\n ]\n\n mu_hat, b_hat = maximum_likelihood_estimate_sgd(\n LaplaceOutput(), samples, hybridize=hybridize, init_biases=init_biases\n )\n\n assert (\n np.abs(mu_hat - mu) < TOL * mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(b_hat - b) < TOL * b\n ), f\"b did not match: b = {b}, b_hat = {b_hat}\"\n\n\n@pytest.mark.parametrize(\n \"gamma, slopes, knot_spacings\",\n [(2.0, np.array([3, 1, 3, 4]), np.array([0.3, 0.2, 0.35, 0.15]))],\n)\n@pytest.mark.parametrize(\"hybridize\", [True, False])\ndef test_piecewise_linear(\n gamma: float,\n slopes: np.ndarray,\n knot_spacings: np.ndarray,\n hybridize: bool,\n) -> None:\n \"\"\"\n Test to check that minimizing the CRPS recovers the quantile function\n \"\"\"\n num_samples = 500 # use a few samples for timeout failure\n\n gammas = mx.nd.zeros((num_samples,)) + gamma\n slopess = mx.nd.zeros((num_samples, len(slopes))) + mx.nd.array(slopes)\n knot_spacingss = mx.nd.zeros(\n (num_samples, len(knot_spacings))\n ) + mx.nd.array(knot_spacings)\n\n pwl_sqf = PiecewiseLinear(gammas, slopess, knot_spacingss)\n\n samples = pwl_sqf.sample()\n\n # Parameter initialization\n gamma_init = gamma - START_TOL_MULTIPLE * TOL * gamma\n slopes_init = slopes - START_TOL_MULTIPLE * TOL * slopes\n knot_spacings_init = knot_spacings\n # We perturb knot spacings such that even after the perturbation they sum to 1.\n mid = len(slopes) // 2\n knot_spacings_init[:mid] = (\n knot_spacings[:mid] - START_TOL_MULTIPLE * TOL * knot_spacings[:mid]\n )\n knot_spacings_init[mid:] = (\n knot_spacings[mid:] + START_TOL_MULTIPLE * TOL * knot_spacings[mid:]\n )\n\n init_biases = [gamma_init, slopes_init, knot_spacings_init]\n\n # check if it returns original parameters of mapped\n gamma_hat, slopes_hat, knot_spacings_hat = maximum_likelihood_estimate_sgd(\n PiecewiseLinearOutput(len(slopes)),\n samples,\n init_biases=init_biases,\n 
hybridize=hybridize,\n learning_rate=PositiveFloat(0.01),\n num_epochs=PositiveInt(20),\n )\n\n # Since the problem is highly non-convex we may not be able to recover the exact parameters\n # Here we check if the estimated parameters yield similar function evaluations at different quantile levels.\n quantile_levels = np.arange(0.1, 1.0, 0.1)\n\n # create a LinearSplines instance with the estimated parameters to have access to .quantile\n pwl_sqf_hat = PiecewiseLinear(\n mx.nd.array(gamma_hat),\n mx.nd.array(slopes_hat).expand_dims(axis=0),\n mx.nd.array(knot_spacings_hat).expand_dims(axis=0),\n )\n\n # Compute quantiles with the estimated parameters\n quantiles_hat = np.squeeze(\n pwl_sqf_hat.quantile(\n mx.nd.array(quantile_levels).expand_dims(axis=0), axis=1\n ).asnumpy()\n )\n\n # Compute quantiles with the original parameters\n # Since params is replicated across samples we take only the first entry\n quantiles = np.squeeze(\n pwl_sqf.quantile(\n mx.nd.array(quantile_levels)\n .expand_dims(axis=0)\n .repeat(axis=0, repeats=num_samples),\n axis=1,\n ).asnumpy()[0, :]\n )\n\n for ix, (quantile, quantile_hat) in enumerate(\n zip(quantiles, quantiles_hat)\n ):\n assert np.abs(quantile_hat - quantile) < TOL * quantile, (\n f\"quantile level {quantile_levels[ix]} didn't match:\"\n f\" \"\n f\"q = {quantile}, q_hat = {quantile_hat}\"\n )\n\n\n@pytest.mark.skip(\"this test fails when run locally\")\n@pytest.mark.parametrize(\"lam_1, lam_2\", [(0.1, 0.01)])\n@pytest.mark.parametrize(\"mu, sigma\", [(-1.5, 0.5)])\n@pytest.mark.parametrize(\"hybridize\", [True])\ndef test_box_cox_tranform(\n lam_1: float, lam_2: float, mu: float, sigma: float, hybridize: bool\n):\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n\n # generate samples\n lamdas_1 = mx.nd.zeros((NUM_SAMPLES,)) + lam_1\n lamdas_2 = mx.nd.zeros((NUM_SAMPLES,)) + lam_2\n transform = InverseBoxCoxTransform(lamdas_1, lamdas_2)\n\n mus = mx.nd.zeros((NUM_SAMPLES,)) + mu\n sigmas = mx.nd.zeros((NUM_SAMPLES,)) + sigma\n gausian_distr = Gaussian(mus, sigmas)\n\n # Here the base distribution is Guassian which is transformed to\n # non-Gaussian via the inverse Box-Cox transform.\n # Sampling from `trans_distr` gives non-Gaussian samples\n trans_distr = TransformedDistribution(gausian_distr, transform)\n\n # Given the non-Gaussian samples find the true parameters\n # of the Box-Cox transformation as well as the underlying Gaussian distribution.\n samples = trans_distr.sample()\n\n init_biases = [\n mu - START_TOL_MULTIPLE * TOL * mu,\n inv_softplus(sigma - START_TOL_MULTIPLE * TOL * sigma),\n lam_1 - START_TOL_MULTIPLE * TOL * lam_1,\n inv_softplus(lam_2 - START_TOL_MULTIPLE * TOL * lam_2),\n ]\n\n mu_hat, sigma_hat, lam_1_hat, lam_2_hat = maximum_likelihood_estimate_sgd(\n TransformedDistributionOutput(\n GaussianOutput(),\n InverseBoxCoxTransformOutput(lb_obs=lam_2, fix_lambda_2=True),\n ),\n samples,\n init_biases=init_biases,\n hybridize=hybridize,\n learning_rate=PositiveFloat(0.01),\n num_epochs=PositiveInt(18),\n )\n\n assert (\n np.abs(lam_1_hat - lam_1) < TOL * lam_1\n ), f\"lam_1 did not match: lam_1 = {lam_1}, lam_1_hat = {lam_1_hat}\"\n # assert (\n # np.abs(lam_2_hat - lam_2) < TOL * lam_2\n # ), f\"lam_2 did not match: lam_2 = {lam_2}, lam_2_hat = {lam_2_hat}\"\n\n assert np.abs(mu_hat - mu) < TOL * np.abs(\n mu\n ), f\"mu did not match: mu = {mu}, mu_hat = {mu_hat}\"\n assert (\n np.abs(sigma_hat - sigma) < TOL * sigma\n ), f\"sigma did not match: sigma = {sigma}, sigma_hat = 
{sigma_hat}\"\n\n\n@pytest.mark.parametrize(\"num_bins\", [6])\n@pytest.mark.parametrize(\n \"bin_probabilites\", [np.array([0.3, 0.1, 0.05, 0.2, 0.1, 0.25])]\n)\n@pytest.mark.parametrize(\"hybridize\", [True, False])\ndef test_binned_likelihood(\n num_bins: float, bin_probabilites: np.ndarray, hybridize: bool\n):\n \"\"\"\n Test to check that maximizing the likelihood recovers the parameters\n \"\"\"\n\n bin_prob = mx.nd.array(bin_probabilites)\n bin_center = mx.nd.array(np.logspace(-1, 1, num_bins))\n\n # generate samples\n bin_probs = mx.nd.zeros((NUM_SAMPLES, num_bins)) + bin_prob\n bin_centers = mx.nd.zeros((NUM_SAMPLES, num_bins)) + bin_center\n\n distr = Binned(bin_probs, bin_centers)\n samples = distr.sample()\n\n # add some jitter to the uniform initialization and normalize\n bin_prob_init = mx.nd.random_uniform(1 - TOL, 1 + TOL, num_bins) * bin_prob\n bin_prob_init = bin_prob_init / bin_prob_init.sum()\n\n init_biases = [bin_prob_init]\n\n bin_prob_hat, = maximum_likelihood_estimate_sgd(\n BinnedOutput(list(bin_center.asnumpy())),\n samples,\n init_biases=init_biases,\n hybridize=hybridize,\n learning_rate=PositiveFloat(0.05),\n num_epochs=PositiveInt(25),\n )\n\n assert all(\n mx.nd.abs(mx.nd.array(bin_prob_hat) - bin_prob) < TOL * bin_prob\n ), f\"bin_prob did not match: bin_prob = {bin_prob}, bin_prob_hat = {bin_prob_hat}\"\n" ]
[ [ "numpy.ones", "numpy.allclose", "numpy.tri", "numpy.eye", "numpy.diag", "numpy.random.seed", "numpy.abs", "numpy.exp", "numpy.arange", "numpy.logspace", "numpy.array" ] ]
nicoperetti/metadata-sadosky-santander
[ "a0d686ba8dfa6c3929727248fc52d802d74f4c45" ]
[ "pre-process.py" ]
[ "import click\nimport pandas as pd\n# Due textacy problems\ntry:\n from textacy.preprocess import preprocess_text\nexcept Exception:\n from textacy.preprocess import preprocess_text\n\n\ndef preprocess_f(text, fix_unicode=True, lowercase=True,\n no_urls=True, no_emails=True,\n no_phone_numbers=True,\n no_numbers=True, no_currency_symbols=True,\n no_punct=True, no_accents=True):\n \"\"\"Preprocess text.\"\"\"\n clean_text = preprocess_text(text, fix_unicode=fix_unicode,\n lowercase=lowercase,\n no_urls=no_urls, no_emails=no_emails,\n no_phone_numbers=no_phone_numbers,\n no_numbers=no_numbers,\n no_currency_symbols=no_currency_symbols,\n no_punct=no_punct,\n no_accents=no_accents)\n return clean_text\n\n\n@click.command()\n@click.option('--input_path', type=click.STRING, help='Path to input file')\n@click.option('--output_path', type=click.STRING, help='Path to input file')\n@click.option('--set_', type=click.Choice(['train', 'test']), help=\"set\")\ndef preprocess(input_path, output_path, set_):\n \"\"\"pre-process script\n\n :param input_path: path to input file\n :type input_path: str\n :param output_path: path to output file\n :type output_path: str\n :param set_: kind of data\n :type set_: str\n \"\"\"\n if set_ == \"train\":\n df = pd.read_csv(input_path, sep='|')\n else:\n df = pd.read_csv(input_path)\n\n df[\"clean_txt\"] = df[\"Pregunta\"].apply(lambda x: preprocess_f(x))\n\n df.to_csv(output_path, index=False)\n\n\nif __name__ == \"__main__\":\n preprocess()\n" ]
[ [ "pandas.read_csv" ] ]
GRAVITYLab/edda
[ "2acd00373db1003922db9f5959644e7506de5726" ]
[ "pyEdda/test_uni_gaussian.py" ]
[ "#make print in python 2, 3 compatible\nfrom __future__ import print_function \nimport numpy as np\nimport pyedda as edda\n\n\n#Univariate Gaussian\nprint(\"//////////Univariate Gaussian///////\")\ndummy_data = np.random.rand(100)\ngaussian = edda.Gaussian(100, 20)\nprint(\"gaussian.getMean():\", gaussian.getMean())\nprint(\"gaussian.getVar():\", gaussian.getVar())\nprint(\"gaussian.getPdf(105):\", gaussian.getPdf(105))\nprint(\"gaussian.getSample():\", gaussian.getSample())\nprint(\"gaussian.getCdf(105):\", gaussian.getCdf(105))\nprint(\"gaussian.getCdfPrecise():\", gaussian.getCdfPrecise(105))\nprint(\"Output gaussian:\")\ngaussian.output()\nprint()\n" ]
[ [ "numpy.random.rand" ] ]
1130310223/Static-Dynamic-Attention
[ "1da223b06ae41f14575960e247fb13506ed8a124" ]
[ "hybrid/opensubtitle/hyb/bid/type1/len2/hybrid_len2_t1_predict.py" ]
[ " #-*- coding: utf-8 -*-\r\nimport sys\r\nimport os\r\nimport random\r\nimport re\r\nimport time\r\nimport torch \r\nfrom torch.autograd import Variable\r\nfrom torch import optim\r\nimport torch.nn as nn\r\n#sys.path.append('../')\r\nfrom hybrid_bid_t1_model import Seq2Seq\r\nfrom hybrid_data_utils import *\r\n\r\nsub = '-'*20\r\ndef init_command_line(argv):\r\n\tfrom argparse import ArgumentParser\r\n\tusage = \"seq2seq\"\r\n\tdescription = ArgumentParser(usage)\r\n\tdescription.add_argument(\"--w2v_path\", type=str, default=\"/users3/yfwang/data/w2v/opensubtitle/\")\r\n\tdescription.add_argument(\"--corpus_path\", type=str, default=\"/users3/yfwang/data/corpus/opensubtitle/\")\r\n\tdescription.add_argument(\"--w2v\", type=str, default=\"train_all_200e.w2v\")\r\n\tdescription.add_argument(\"--test_file\", type=str, default=\"test_sessions.txt\")\r\n\t\r\n\tdescription.add_argument(\"--max_context_size\", type=int, default=2)\r\n\tdescription.add_argument(\"--batch_size\", type=int, default=64)\r\n\tdescription.add_argument(\"--enc_hidden_size\", type=int, default=512)\r\n\tdescription.add_argument(\"--max_senten_len\", type=int, default=15)\r\n\r\n\tdescription.add_argument(\"--dropout\", type=float, default=0.5)\r\n\r\n\tdescription.add_argument(\"--teach_forcing\", type=int, default=1)\r\n\tdescription.add_argument(\"--print_every\", type=int, default=100, help=\"print every batches when training\")\r\n\tdescription.add_argument(\"--weights\", type=str, default=None)\r\n\treturn description.parse_args(argv)\r\n\r\nopts = init_command_line(sys.argv[1:])\r\nprint (\"Configure:\")\r\nprint (\" w2v:\",os.path.join(opts.w2v_path,opts.w2v))\r\nprint (\" test_file:\",os.path.join(opts.corpus_path,opts.test_file))\r\n\r\nprint (\" max_context_size:\",opts.max_context_size)\r\nprint (\" batch_size:\",opts.batch_size)\r\nprint (\" enc_hidden_size:\",opts.enc_hidden_size)\r\nprint (\" max_senten_len:\",opts.max_senten_len)\r\n\r\nprint (\" dropout:\",opts.dropout)\r\n\r\nprint (\" teach_forcing:\",opts.teach_forcing)\r\nprint (\" print_every:\",opts.print_every)\r\nprint (\" weights:\",opts.weights)\r\nprint (\"\")\r\n\r\ndef readingTestCorpus(test_file_path):\r\n\tprint (\"reading...\")\r\n\ttest_file = open(test_file_path,'r')\r\n\tlist_pairs = []\r\n\ttmp_pair = []\r\n\tfor line in test_file:\r\n\t\tline = line.strip('\\n')\r\n\t\tif line == sub:\r\n\t\t\tlist_pairs.append(tmp_pair)\r\n\t\t\ttmp_pair = []\r\n\t\telse:\r\n\t\t\ttmp_pair.append(line)\r\n\ttest_file.close()\r\n\r\n\ttest_contexts = []\r\n\ttest_replys = []\r\n\tmax_con_size = 0\r\n\tmin_con_size = 10000\r\n\tfor pair in list_pairs:\r\n\t\tif len(pair) >= 3:\r\n\t\t\ttest_contexts.append(pair[0:-1])\r\n\t\t\ttest_replys.append(pair[-1])\r\n\t\t\tmax_con_size = max(len(pair[0:-1]),max_con_size)\r\n\t\t\tmin_con_size = min(len(pair[0:-1]),min_con_size)\r\n\t\telse:\r\n\t\t\tpass\r\n\tprint (max_con_size)\r\n\tprint (min_con_size)\r\n\treturn test_contexts,test_replys\r\n\r\ndef preProcess(word2index,test_contexts,unk_char,ini_char,max_senten_len,max_context_size):\r\n\tprint (\"preprocessing...\")\r\n\tfilter_test_contexts = []\r\n\tfor context in test_contexts:\r\n\t\tfilter_context = [filteringSenten(word2index,senten,unk_char,ini_char) for senten in context]\r\n\t\tfilter_test_contexts.append(filter_context)\r\n\r\n\tpadded_test_pairs = []\r\n\tfor context in filter_test_contexts:\r\n\t\tpad_list = [0]*len(context)\r\n\t\tif len(context) <= max_context_size:\r\n\t\t\tpad_list = [1]*(max_context_size-len(context)) + 
pad_list\r\n\t\t\tcontext = ['<unk>']*(max_context_size-len(context)) + context\r\n\t\telse:\r\n\t\t\tpad_list = pad_list[-max_context_size:]\r\n\t\t\tcontext = context[-max_context_size:]\r\n\t\tpadded_context = [paddingSenten(senten,max_senten_len) for senten in context]\r\n\t\tpadded_test_pairs.append([padded_context,pad_list])\r\n\r\n\treturn padded_test_pairs\r\n\r\n\r\n# 读入一个句子的list,构建batch后进行预测\r\ndef predictSentences(index2word,unk_char,ini_char,ini_idx,model,test_pairs,\r\n\t\t\t\t\tprint_every,batch_size,max_senten_len,max_context_size):\r\n\tmodel.eval()\r\n\t#构造batch的list\r\n\tpairs_batches,num_batches = buildingPairsBatch(test_pairs,batch_size,shuffle=False)\r\n\tprint (\"\")\r\n\tprint (\"num of batch:\",num_batches)\r\n\t\r\n\tpredict_sentences = []\r\n\tidx_batch = 0\r\n\tfor contexts_tensor_batch, pad_matrix_batch in getTensorsContextPairsBatch(word2index,pairs_batches,max_context_size):\r\n\t\tpredict_batch = model.predict(contexts_tensor_batch,index2word,pad_matrix_batch,ini_idx,sep_char='\\t')\r\n\t\tpredict_sentences.extend(predict_batch)\r\n\t\tif (idx_batch+1)%print_every == 0:\r\n\t\t\tprint (\"{} batches finished\".format(idx_batch+1))\r\n\t\tidx_batch += 1\r\n\r\n\tpredict_sentences = predict_sentences[0:len(test_pairs)]\r\n\treturn predict_sentences\r\n\r\nif __name__ == '__main__':\r\n\tini_char = '</i>'\r\n\tunk_char = '<unk>'\r\n\tt0 = time.time()\r\n\tprint (\"loading word2vec...\")\r\n\tctable = W2vCharacterTable(os.path.join(opts.w2v_path,opts.w2v),ini_char,unk_char)\r\n\tprint(\" dict size:\",ctable.getDictSize())\r\n\tprint (\" emb size:\",ctable.getEmbSize())\r\n\tprint (time.time()-t0)\r\n\tprint (\"\")\r\n\r\n\tseq2seq = Seq2Seq(ctable.getDictSize(),ctable.getEmbSize(),opts.enc_hidden_size,opts.batch_size,opts.dropout,\r\n\t\t\t\t\topts.max_senten_len,opts.teach_forcing).cuda()\r\n\r\n\tif opts.weights != None:\r\n\t\tprint (\"load model parameters...\")\r\n\t\tseq2seq.load_state_dict(torch.load(opts.weights))\r\n\telse:\r\n\t\tprint (\"No model parameters!\")\r\n\t\texit()\r\n\r\n\ttest_contexts,test_replys = readingTestCorpus(os.path.join(opts.corpus_path,opts.test_file))\r\n\tprint (\"len(test_contexts):\",len(test_contexts))\r\n\tprint (\"len(test_replys):\",len(test_replys))\r\n\r\n\tword2index = ctable.getWord2Index()\r\n\ttest_pairs = preProcess(word2index,test_contexts,unk_char,ini_char,opts.max_senten_len,opts.max_context_size)\r\n\tprint (\"len(test_pairs):\",len(test_pairs))\r\n\t'''test_pair = test_pairs[100]\r\n\ttest_context = test_pair[0]\r\n\tpad_list = test_pair[1]\r\n\r\n\tfor senten in test_context:\r\n\t\tprint senten\r\n\tprint pad_list'''\r\n\t\r\n\tprint (\"start predicting...\")\r\n\tini_idx = word2index[ini_char]\r\n\tpredict_sentences = predictSentences(ctable.getIndex2Word(),unk_char,ini_char,ini_idx,seq2seq,test_pairs,\r\n\t\t\t\t\t\t\t\t\topts.print_every,opts.batch_size,opts.max_senten_len,opts.max_context_size)\r\n\r\n\tprint (\"writing...\")\r\n\tif not os.path.exists('./result/'):\r\n\t\tos.mkdir('./result/')\r\n\tpred_res_file = open(\"./result/open_pred_res_hyb_t1_len2\",'w')\r\n\tpred_ans_file = open(\"./result/open_pred_ans_hyb_t1_len2\",'w')\r\n\tfor idx,senten in enumerate(predict_sentences):\r\n\t\ttest_context = test_contexts[idx]\r\n\t\tfor test_post in test_context:\r\n\t\t\tpred_res_file.write(test_post+'\\n')\r\n\t\tpred_res_file.write(senten+'\\n')\r\n\t\tpred_res_file.write(sub+'\\n')\r\n\t\tsenten_l = [c for c in senten.split('\\t') if c != '</s>']\r\n\t\tpred_ans_file.write(' '.join(senten_l)+' 
__eou__'+'\\n')\r\n\r\n\tpred_res_file.close()\r\n\tpred_ans_file.close()\r\n\tprint (\"end\")\r\n\t\r\n" ]
[ [ "torch.load" ] ]
aligapaul/automlbenchmark
[ "59e796fe6632637233a7104dfffe65f210f9eef5" ]
[ "frameworks/shared/callee.py" ]
[ "import json\nimport logging\nimport os\nimport re\nimport sys\nimport time\n\n\ndef setup_logger():\n console = logging.StreamHandler(sys.stdout)\n handlers = [console]\n logging.basicConfig(handlers=handlers)\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n\nsetup_logger()\n\nlog = logging.getLogger(__name__)\n\n\nclass NS:\n\n @staticmethod\n def dict(ns, deep=True):\n dic = ns.__dict__\n if not deep:\n return dic\n for k, v in dic.items():\n if isinstance(v, NS):\n dic[k] = NS.dict(v)\n return dic\n\n @staticmethod\n def from_dict(dic, deep=True):\n ns = NS(dic)\n if not deep:\n return ns\n for k, v in ns.__dict__.items():\n if isinstance(v, dict):\n ns.__dict__[k] = NS.from_dict(v)\n return ns\n\n @staticmethod\n def walk(ns, fn, inplace=False):\n nns = ns if inplace else NS()\n for k, v in ns.__dict__.items():\n nk, nv = fn(k, v)\n if nk is not None:\n if v is nv and isinstance(v, NS):\n nv = NS.walk(nv, fn, inplace)\n nns.__dict__[nk] = nv\n return nns\n\n def __init__(self, *args, **kwargs):\n self.__dict__.update(dict(*args, **kwargs))\n\n def __str__(self):\n return str(self.__dict__)\n\n def __repr__(self):\n return repr(self.__dict__)\n\n\nclass Timer:\n\n @staticmethod\n def _zero():\n return 0\n\n def __init__(self, clock=time.time, enabled=True):\n self.start = 0\n self.stop = 0\n self._time = clock if enabled else Timer._zero\n\n def __enter__(self):\n self.start = self._time()\n return self\n\n def __exit__(self, *args):\n self.stop = self._time()\n\n @property\n def duration(self):\n if self.stop > 0:\n return self.stop - self.start\n return self._time() - self.start\n\n\ndef result(output_file=None,\n predictions=None, truth=None,\n probabilities=None, probabilities_labels=None,\n target_is_encoded=False,\n error_message=None,\n models_count=None,\n training_duration=None):\n return locals()\n\n\ndata_keys = re.compile(\"^(X|y|data)(_.+)?$\")\n\n\ndef call_run(run_fn):\n import numpy as np\n\n params = NS.from_dict(json.loads(sys.stdin.read()))\n\n def load_data(name, path):\n if isinstance(path, str) and data_keys.match(name):\n return name, np.load(path, allow_pickle=True)\n return name, path\n\n print(params.dataset)\n ds = NS.walk(params.dataset, load_data)\n\n config = params.config\n config.framework_params = NS.dict(config.framework_params)\n\n try:\n result = run_fn(ds, config)\n res = dict(result)\n for name in ['predictions', 'truth', 'probabilities']:\n arr = result[name]\n if arr is not None:\n res[name] = os.path.join(config.result_dir, '.'.join([name, 'npy']))\n np.save(res[name], arr, allow_pickle=True)\n except Exception as e:\n log.exception(e)\n res = dict(\n error_message=str(e),\n models_count=0\n )\n\n print(config.result_token)\n print(json.dumps(res, separators=(',', ':')))\n" ]
[ [ "numpy.load", "numpy.save" ] ]
rokdd/yahoo_fin
[ "da55c89582bc8e858131581da1bd380d19d68bf2" ]
[ "yahoo_fin/stock_info.py" ]
[ "import requests\r\nimport pandas as pd\r\nimport ftplib\r\nimport io\r\nimport re\r\nimport json\r\nimport datetime\r\n\r\ntry:\r\n from requests_html import HTMLSession\r\nexcept Exception:\r\n print(\"\"\"Warning - Certain functionality \r\n requires requests_html, which is not installed.\r\n \r\n Install using: \r\n pip install requests_html\r\n \r\n After installation, you may have to restart your Python session.\"\"\")\r\n\r\n \r\nbase_url = \"https://query1.finance.yahoo.com/v8/finance/chart/\"\r\n\r\ndef build_url(ticker, start_date = None, end_date = None, interval = \"1d\"):\r\n \r\n if end_date is None: \r\n end_seconds = int(pd.Timestamp(\"now\").timestamp())\r\n \r\n else:\r\n end_seconds = int(pd.Timestamp(end_date).timestamp())\r\n \r\n if start_date is None:\r\n start_seconds = 7223400 \r\n \r\n else:\r\n start_seconds = int(pd.Timestamp(start_date).timestamp())\r\n \r\n site = base_url + ticker\r\n \r\n params = {\"period1\": start_seconds, \"period2\": end_seconds,\r\n \"interval\": interval.lower(), \"events\": \"div,splits\"}\r\n \r\n \r\n return site, params\r\n\r\n\r\ndef force_float(elt):\r\n \r\n try:\r\n return float(elt)\r\n except:\r\n return elt\r\n \r\ndef _convert_to_numeric(s):\r\n\r\n if \"M\" in s:\r\n s = s.strip(\"M\")\r\n return force_float(s) * 1_000_000\r\n \r\n if \"B\" in s:\r\n s = s.strip(\"B\")\r\n return force_float(s) * 1_000_000_000\r\n \r\n return force_float(s)\r\n\r\n\r\ndef get_data(ticker, start_date = None, end_date = None, index_as_date = True,\r\n interval = \"1d\", headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n '''Downloads historical stock price data into a pandas data frame. Interval\r\n must be \"1d\", \"1wk\", \"1mo\", or \"1m\" for daily, weekly, monthly, or minute data.\r\n Intraday minute data is limited to 7 days.\r\n \r\n @param: ticker\r\n @param: start_date = None\r\n @param: end_date = None\r\n @param: index_as_date = True\r\n @param: interval = \"1d\"\r\n '''\r\n \r\n if interval not in (\"1d\", \"1wk\", \"1mo\", \"1m\"):\r\n raise AssertionError(\"interval must be of of '1d', '1wk', '1mo', or '1m'\")\r\n \r\n \r\n # build and connect to URL\r\n site, params = build_url(ticker, start_date, end_date, interval)\r\n resp = requests.get(site, params = params, headers = headers)\r\n \r\n \r\n if not resp.ok:\r\n raise AssertionError(resp.json())\r\n \r\n \r\n # get JSON response\r\n data = resp.json()\r\n \r\n # get open / high / low / close data\r\n frame = pd.DataFrame(data[\"chart\"][\"result\"][0][\"indicators\"][\"quote\"][0])\r\n\r\n # get the date info\r\n temp_time = data[\"chart\"][\"result\"][0][\"timestamp\"]\r\n\r\n if interval != \"1m\":\r\n \r\n # add in adjclose\r\n frame[\"adjclose\"] = data[\"chart\"][\"result\"][0][\"indicators\"][\"adjclose\"][0][\"adjclose\"] \r\n frame.index = pd.to_datetime(temp_time, unit = \"s\")\r\n frame.index = frame.index.map(lambda dt: dt.floor(\"d\"))\r\n frame = frame[[\"open\", \"high\", \"low\", \"close\", \"adjclose\", \"volume\"]]\r\n \r\n else:\r\n\r\n frame.index = pd.to_datetime(temp_time, unit = \"s\")\r\n frame = frame[[\"open\", \"high\", \"low\", \"close\", \"volume\"]]\r\n \r\n \r\n frame['ticker'] = ticker.upper()\r\n \r\n if not index_as_date: \r\n frame = frame.reset_index()\r\n frame.rename(columns = {\"index\": \"date\"}, inplace = True)\r\n \r\n return frame\r\n\r\n\r\n\r\ndef tickers_sp500(include_company_data = False):\r\n '''Downloads list of 
tickers currently listed in the S&P 500 '''\r\n # get list of all S&P 500 stocks\r\n sp500 = pd.read_html(\"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies\")[0]\r\n sp500[\"Symbol\"] = sp500[\"Symbol\"].str.replace(\".\", \"-\", regex=True)\r\n\r\n if include_company_data:\r\n return sp500\r\n\r\n sp_tickers = sp500.Symbol.tolist()\r\n sp_tickers = sorted(sp_tickers)\r\n \r\n return sp_tickers\r\n\r\n\r\ndef tickers_nasdaq(include_company_data = False):\r\n \r\n '''Downloads list of tickers currently listed in the NASDAQ'''\r\n \r\n ftp = ftplib.FTP(\"ftp.nasdaqtrader.com\")\r\n ftp.login()\r\n ftp.cwd(\"SymbolDirectory\")\r\n \r\n r = io.BytesIO()\r\n ftp.retrbinary('RETR nasdaqlisted.txt', r.write)\r\n \r\n if include_company_data:\r\n r.seek(0)\r\n data = pd.read_csv(r, sep = \"|\")\r\n return data\r\n \r\n info = r.getvalue().decode()\r\n splits = info.split(\"|\")\r\n \r\n \r\n tickers = [x for x in splits if \"\\r\\n\" in x]\r\n tickers = [x.split(\"\\r\\n\")[1] for x in tickers if \"NASDAQ\" not in x != \"\\r\\n\"]\r\n tickers = [ticker for ticker in tickers if \"File\" not in ticker] \r\n \r\n ftp.close() \r\n\r\n return tickers\r\n \r\n \r\n\r\ndef tickers_other(include_company_data = False):\r\n '''Downloads list of tickers currently listed in the \"otherlisted.txt\"\r\n file on \"ftp.nasdaqtrader.com\" '''\r\n ftp = ftplib.FTP(\"ftp.nasdaqtrader.com\")\r\n ftp.login()\r\n ftp.cwd(\"SymbolDirectory\")\r\n \r\n r = io.BytesIO()\r\n ftp.retrbinary('RETR otherlisted.txt', r.write)\r\n \r\n if include_company_data:\r\n r.seek(0)\r\n data = pd.read_csv(r, sep = \"|\")\r\n return data\r\n \r\n info = r.getvalue().decode()\r\n splits = info.split(\"|\") \r\n \r\n tickers = [x for x in splits if \"\\r\\n\" in x]\r\n tickers = [x.split(\"\\r\\n\")[1] for x in tickers]\r\n tickers = [ticker for ticker in tickers if \"File\" not in ticker] \r\n \r\n ftp.close() \r\n\r\n return tickers\r\n \r\n \r\ndef tickers_dow(include_company_data = False):\r\n \r\n '''Downloads list of currently traded tickers on the Dow'''\r\n\r\n site = \"https://en.wikipedia.org/wiki/Dow_Jones_Industrial_Average\"\r\n \r\n table = pd.read_html(site, attrs = {\"id\":\"constituents\"})[0]\r\n \r\n if include_company_data:\r\n return table\r\n\r\n dow_tickers = sorted(table['Symbol'].tolist())\r\n \r\n return dow_tickers \r\n \r\n\r\ndef tickers_ibovespa(include_company_data = False):\r\n \r\n '''Downloads list of currently traded tickers on the Ibovespa, Brazil'''\r\n\r\n table = pd.read_html(\"https://pt.wikipedia.org/wiki/Lista_de_companhias_citadas_no_Ibovespa\")[0]\r\n table.columns = [\"Symbol\", \"Share\", \"Sector\", \"Type\", \"Site\"]\r\n \r\n if include_company_data:\r\n return table\r\n \r\n ibovespa_tickers = sorted(table.Symbol.tolist())\r\n \r\n return ibovespa_tickers \r\n\r\n\r\n\r\ndef tickers_nifty50(include_company_data = False, headers = {'User-agent': 'Mozilla/5.0'}):\r\n\r\n '''Downloads list of currently traded tickers on the NIFTY 50, India'''\r\n\r\n site = \"https://finance.yahoo.com/quote/%5ENSEI/components?p=%5ENSEI\"\r\n table = pd.read_html(requests.get(site, headers=headers).text)[0]\r\n \r\n if include_company_data:\r\n return table\r\n \r\n nifty50 = sorted(table['Symbol'].tolist())\r\n\r\n return nifty50\r\n\r\ndef tickers_niftybank():\r\n ''' Currently traded tickers on the NIFTY BANK, India '''\r\n \r\n niftybank = ['AXISBANK', 'KOTAKBANK', 'HDFCBANK', 'SBIN', 'BANKBARODA', 'INDUSINDBK', 'PNB', 'IDFCFIRSTB', 'ICICIBANK', 'RBLBANK', 'FEDERALBNK', 'BANDHANBNK']\r\n \r\n 
return niftybank\r\n\r\n\r\n\r\ndef tickers_ftse100(include_company_data = False):\r\n \r\n '''Downloads a list of the tickers traded on the FTSE 100 index'''\r\n \r\n table = pd.read_html(\"https://en.wikipedia.org/wiki/FTSE_100_Index\", attrs = {\"id\": \"constituents\"})[0]\r\n \r\n if include_company_data:\r\n return table\r\n \r\n return sorted(table.EPIC.tolist())\r\n \r\n\r\ndef tickers_ftse250(include_company_data = False):\r\n \r\n \r\n '''Downloads a list of the tickers traded on the FTSE 250 index'''\r\n \r\n table = pd.read_html(\"https://en.wikipedia.org/wiki/FTSE_250_Index\", attrs = {\"id\": \"constituents\"})[0]\r\n \r\n table.columns = [\"Company\", \"Ticker\"]\r\n \r\n if include_company_data:\r\n return table\r\n \r\n return sorted(table.Ticker.tolist())\r\n \r\n\r\n\r\n\r\ndef get_quote_table(ticker , dict_result = True, headers = {'User-agent': 'Mozilla/5.0'}): \r\n \r\n '''Scrapes data elements found on Yahoo Finance's quote page \r\n of input ticker\r\n \r\n @param: ticker\r\n @param: dict_result = True\r\n '''\r\n\r\n site = \"https://finance.yahoo.com/quote/\" + ticker + \"?p=\" + ticker\r\n \r\n tables = pd.read_html(requests.get(site, headers=headers).text)\r\n \r\n data = tables[0].append(tables[1])\r\n\r\n data.columns = [\"attribute\" , \"value\"]\r\n \r\n quote_price = pd.DataFrame([\"Quote Price\", get_live_price(ticker)]).transpose()\r\n quote_price.columns = data.columns.copy()\r\n \r\n data = data.append(quote_price)\r\n \r\n data = data.sort_values(\"attribute\")\r\n \r\n data = data.drop_duplicates().reset_index(drop = True)\r\n \r\n data[\"value\"] = data.value.map(force_float)\r\n\r\n if dict_result:\r\n \r\n result = {key : val for key,val in zip(data.attribute , data.value)}\r\n return result\r\n \r\n return data \r\n \r\n \r\ndef get_stats(ticker, headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Scrapes information from the statistics tab on Yahoo Finance \r\n for an input ticker \r\n \r\n @param: ticker\r\n '''\r\n\r\n stats_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/key-statistics?p=\" + ticker\r\n \r\n\r\n tables = pd.read_html(requests.get(stats_site, headers=headers).text)\r\n \r\n tables = [table for table in tables[1:] if table.shape[1] == 2]\r\n \r\n table = tables[0]\r\n for elt in tables[1:]:\r\n table = table.append(elt)\r\n\r\n table.columns = [\"Attribute\" , \"Value\"]\r\n \r\n table = table.reset_index(drop = True)\r\n \r\n return table\r\n\r\n\r\ndef get_stats_valuation(ticker, headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Scrapes Valuation Measures table from the statistics tab on Yahoo Finance \r\n for an input ticker \r\n \r\n @param: ticker\r\n '''\r\n\r\n stats_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/key-statistics?p=\" + ticker\r\n \r\n \r\n tables = pd.read_html(requests.get(stats_site, headers=headers).text)\r\n \r\n tables = [table for table in tables if \"Trailing P/E\" in table.iloc[:,0].tolist()]\r\n \r\n \r\n table = tables[0].reset_index(drop = True)\r\n \r\n return table\r\n\r\n\r\n\r\n\r\n\r\ndef _parse_json(url, headers = {'User-agent': 'Mozilla/5.0'}):\r\n html = requests.get(url=url, headers = headers).text\r\n\r\n json_str = html.split('root.App.main =')[1].split(\r\n '(this)')[0].split(';\\n}')[0].strip()\r\n \r\n try:\r\n data = json.loads(json_str)[\r\n 'context']['dispatcher']['stores']['QuoteSummaryStore']\r\n except:\r\n return '{}'\r\n else:\r\n # return data\r\n new_data = json.dumps(data).replace('{}', 'null')\r\n new_data = 
re.sub(r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\r\n\r\n json_info = json.loads(new_data)\r\n\r\n return json_info\r\n\r\n\r\ndef _parse_table(json_info):\r\n\r\n df = pd.DataFrame(json_info)\r\n \r\n if df.empty:\r\n return df\r\n \r\n del df[\"maxAge\"]\r\n\r\n df.set_index(\"endDate\", inplace=True)\r\n df.index = pd.to_datetime(df.index, unit=\"s\")\r\n \r\n df = df.transpose()\r\n df.index.name = \"Breakdown\"\r\n\r\n return df\r\n\r\n\r\ndef get_income_statement(ticker, yearly = True):\r\n \r\n '''Scrape income statement from Yahoo Finance for a given ticker\r\n \r\n @param: ticker\r\n '''\r\n \r\n income_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/financials?p=\" + ticker\r\n\r\n json_info = _parse_json(income_site)\r\n \r\n if yearly:\r\n temp = json_info[\"incomeStatementHistory\"][\"incomeStatementHistory\"]\r\n else:\r\n temp = json_info[\"incomeStatementHistoryQuarterly\"][\"incomeStatementHistory\"]\r\n \r\n return _parse_table(temp) \r\n \r\n\r\ndef get_balance_sheet(ticker, yearly = True):\r\n \r\n '''Scrapes balance sheet from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n ''' \r\n \r\n balance_sheet_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/balance-sheet?p=\" + ticker\r\n \r\n\r\n json_info = _parse_json(balance_sheet_site)\r\n \r\n try:\r\n if yearly:\r\n temp = json_info[\"balanceSheetHistory\"][\"balanceSheetStatements\"]\r\n else:\r\n temp = json_info[\"balanceSheetHistoryQuarterly\"][\"balanceSheetStatements\"]\r\n except:\r\n temp = []\r\n \r\n return _parse_table(temp) \r\n\r\n\r\ndef get_cash_flow(ticker, yearly = True):\r\n \r\n '''Scrapes the cash flow statement from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n '''\r\n \r\n cash_flow_site = \"https://finance.yahoo.com/quote/\" + \\\r\n ticker + \"/cash-flow?p=\" + ticker\r\n \r\n \r\n json_info = _parse_json(cash_flow_site)\r\n \r\n if yearly:\r\n temp = json_info[\"cashflowStatementHistory\"][\"cashflowStatements\"]\r\n else:\r\n temp = json_info[\"cashflowStatementHistoryQuarterly\"][\"cashflowStatements\"]\r\n \r\n return _parse_table(temp) \r\n\r\n\r\ndef get_financials(ticker, yearly = True, quarterly = True):\r\n\r\n '''Scrapes financials data from Yahoo Finance for an input ticker, including\r\n balance sheet, cash flow statement, and income statement. 
Returns dictionary\r\n of results.\r\n \r\n @param: ticker\r\n @param: yearly = True\r\n @param: quarterly = True\r\n '''\r\n\r\n if not yearly and not quarterly:\r\n raise AssertionError(\"yearly or quarterly must be True\")\r\n \r\n financials_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/financials?p=\" + ticker\r\n \r\n json_info = _parse_json(financials_site)\r\n \r\n result = {}\r\n \r\n if yearly:\r\n\r\n temp = json_info[\"incomeStatementHistory\"][\"incomeStatementHistory\"]\r\n table = _parse_table(temp)\r\n result[\"yearly_income_statement\"] = table\r\n \r\n temp = json_info[\"balanceSheetHistory\"][\"balanceSheetStatements\"]\r\n table = _parse_table(temp)\r\n result[\"yearly_balance_sheet\"] = table\r\n \r\n temp = json_info[\"cashflowStatementHistory\"][\"cashflowStatements\"]\r\n table = _parse_table(temp)\r\n result[\"yearly_cash_flow\"] = table\r\n\r\n if quarterly:\r\n temp = json_info[\"incomeStatementHistoryQuarterly\"][\"incomeStatementHistory\"]\r\n table = _parse_table(temp)\r\n result[\"quarterly_income_statement\"] = table\r\n \r\n temp = json_info[\"balanceSheetHistoryQuarterly\"][\"balanceSheetStatements\"]\r\n table = _parse_table(temp)\r\n result[\"quarterly_balance_sheet\"] = table\r\n \r\n temp = json_info[\"cashflowStatementHistoryQuarterly\"][\"cashflowStatements\"]\r\n table = _parse_table(temp)\r\n result[\"quarterly_cash_flow\"] = table\r\n\r\n \r\n return result\r\n\r\n\r\ndef get_holders(ticker, headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Scrapes the Holders page from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n ''' \r\n \r\n holders_site = \"https://finance.yahoo.com/quote/\" + \\\r\n ticker + \"/holders?p=\" + ticker\r\n \r\n \r\n tables = pd.read_html(requests.get(holders_site, headers=headers).text)\r\n \r\n \r\n table_names = [\"Major Holders\" , \"Direct Holders (Forms 3 and 4)\" ,\r\n \"Top Institutional Holders\" , \"Top Mutual Fund Holders\"]\r\n \r\n \r\n table_mapper = {key : val for key,val in zip(table_names , tables)}\r\n \r\n \r\n return table_mapper \r\n\r\ndef get_analysts_info(ticker, headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Scrapes the Analysts page from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n ''' \r\n \r\n \r\n analysts_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/analysts?p=\" + ticker\r\n \r\n tables = pd.read_html(requests.get(analysts_site, headers=headers).text)\r\n \r\n table_names = [table.columns[0] for table in tables]\r\n\r\n table_mapper = {key : val for key , val in zip(table_names , tables)}\r\n \r\n\r\n return table_mapper\r\n \r\n\r\ndef get_live_price(ticker):\r\n \r\n '''Gets the live price of input ticker\r\n \r\n @param: ticker\r\n ''' \r\n \r\n df = get_data(ticker, end_date = pd.Timestamp.today() + pd.DateOffset(10))\r\n \r\n \r\n return df.close[-1]\r\n \r\n \r\ndef _raw_get_daily_info(site):\r\n \r\n session = HTMLSession()\r\n \r\n resp = session.get(site)\r\n \r\n tables = pd.read_html(resp.html.raw_html) \r\n \r\n df = tables[0].copy()\r\n \r\n df.columns = tables[0].columns\r\n \r\n del df[\"52 Week Range\"]\r\n \r\n df[\"% Change\"] = df[\"% Change\"].map(lambda x: float(x.strip(\"%+\").replace(\",\", \"\")))\r\n \r\n\r\n fields_to_change = [x for x in df.columns.tolist() if \"Vol\" in x \\\r\n or x == \"Market Cap\"]\r\n \r\n for field in fields_to_change:\r\n \r\n if type(df[field][0]) == str:\r\n df[field] = df[field].map(_convert_to_numeric)\r\n \r\n session.close()\r\n \r\n return df\r\n 
\r\n\r\ndef get_day_most_active(count: int = 100):\r\n\r\n return _raw_get_daily_info(f\"https://finance.yahoo.com/most-active?offset=0&count={count}\")\r\n\r\n\r\ndef get_day_gainers(count: int = 100):\r\n\r\n return _raw_get_daily_info(f\"https://finance.yahoo.com/gainers?offset=0&count={count}\")\r\n\r\n\r\ndef get_day_losers(count: int = 100):\r\n\r\n return _raw_get_daily_info(f\"https://finance.yahoo.com/losers?offset=0&count={count}\")\r\n\r\n\r\ndef get_top_crypto():\r\n \r\n '''Gets the top 100 Cryptocurrencies by Market Cap''' \r\n\r\n session = HTMLSession()\r\n \r\n resp = session.get(\"https://finance.yahoo.com/cryptocurrencies?offset=0&count=100\")\r\n \r\n tables = pd.read_html(resp.html.raw_html) \r\n \r\n df = tables[0].copy()\r\n\r\n \r\n df[\"% Change\"] = df[\"% Change\"].map(lambda x: float(str(x).strip(\"%\").\\\r\n strip(\"+\").\\\r\n replace(\",\", \"\")))\r\n del df[\"52 Week Range\"]\r\n del df[\"1 Day Chart\"]\r\n \r\n fields_to_change = [x for x in df.columns.tolist() if \"Volume\" in x \\\r\n or x == \"Market Cap\" or x == \"Circulating Supply\"]\r\n \r\n for field in fields_to_change:\r\n \r\n if type(df[field][0]) == str:\r\n df[field] = df[field].map(lambda x: _convert_to_numeric(str(x)))\r\n \r\n \r\n session.close() \r\n \r\n return df\r\n \r\n \r\ndef get_dividends(ticker, start_date = None, end_date = None, index_as_date = True, \r\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n '''Downloads historical dividend data into a pandas data frame.\r\n \r\n @param: ticker\r\n @param: start_date = None\r\n @param: end_date = None\r\n @param: index_as_date = True\r\n '''\r\n \r\n # build and connect to URL\r\n site, params = build_url(ticker, start_date, end_date, \"1d\")\r\n resp = requests.get(site, params = params, headers = headers)\r\n \r\n \r\n if not resp.ok:\r\n return pd.DataFrame()\r\n \r\n \r\n # get JSON response\r\n data = resp.json()\r\n \r\n # check if there is data available for dividends\r\n if \"events\" not in data[\"chart\"][\"result\"][0] or \"dividends\" not in data[\"chart\"][\"result\"][0]['events']:\r\n return pd.DataFrame()\r\n \r\n # get the dividend data\r\n frame = pd.DataFrame(data[\"chart\"][\"result\"][0]['events']['dividends'])\r\n \r\n frame = frame.transpose()\r\n \r\n frame.index = pd.to_datetime(frame.index, unit = \"s\")\r\n frame.index = frame.index.map(lambda dt: dt.floor(\"d\"))\r\n \r\n # sort in chronological order\r\n frame = frame.sort_index()\r\n \r\n frame['ticker'] = ticker.upper()\r\n \r\n # remove old date column\r\n frame = frame.drop(columns='date')\r\n \r\n frame = frame.rename({'amount': 'dividend'}, axis = 'columns')\r\n \r\n if not index_as_date: \r\n frame = frame.reset_index()\r\n frame.rename(columns = {\"index\": \"date\"}, inplace = True)\r\n \r\n return frame\r\n\r\n\r\n\r\ndef get_splits(ticker, start_date = None, end_date = None, index_as_date = True,\r\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n '''Downloads historical stock split data into a pandas data frame.\r\n \r\n @param: ticker\r\n @param: start_date = None\r\n @param: end_date = None\r\n @param: index_as_date = True\r\n '''\r\n \r\n # build and connect to URL\r\n site, params = build_url(ticker, start_date, end_date, \"1d\")\r\n resp = requests.get(site, params = params, headers = headers)\r\n \r\n \r\n if not 
resp.ok:\r\n raise AssertionError(resp.json())\r\n \r\n \r\n # get JSON response\r\n data = resp.json()\r\n \r\n # check if there is data available for events\r\n if \"events\" not in data[\"chart\"][\"result\"][0]:\r\n raise AssertionError(\"There is no data available on stock events, or none have occured\") \r\n\r\n # check if there is data available for splits\r\n if \"splits\" not in data[\"chart\"][\"result\"][0]['events']:\r\n raise AssertionError(\"There is no data available on stock splits, or none have occured\")\r\n \r\n # get the split data\r\n frame = pd.DataFrame(data[\"chart\"][\"result\"][0]['events']['splits'])\r\n \r\n frame = frame.transpose()\r\n \r\n frame.index = pd.to_datetime(frame.index, unit = \"s\")\r\n frame.index = frame.index.map(lambda dt: dt.floor(\"d\"))\r\n \r\n # sort in to chronological order\r\n frame = frame.sort_index()\r\n \r\n frame['ticker'] = ticker.upper()\r\n \r\n # remove unnecessary columns\r\n frame = frame.drop(columns=['date', 'denominator', 'numerator'])\r\n \r\n if not index_as_date: \r\n frame = frame.reset_index()\r\n frame.rename(columns = {\"index\": \"date\"}, inplace = True)\r\n \r\n return frame\r\n \r\n \r\n\r\n\r\ndef get_earnings(ticker):\r\n \r\n '''Scrapes earnings data from Yahoo Finance for an input ticker \r\n \r\n @param: ticker\r\n '''\r\n\r\n result = {\r\n \"quarterly_results\": pd.DataFrame(),\r\n \"yearly_revenue_earnings\": pd.DataFrame(),\r\n \"quarterly_revenue_earnings\": pd.DataFrame()\r\n }\r\n\r\n financials_site = \"https://finance.yahoo.com/quote/\" + ticker + \\\r\n \"/financials?p=\" + ticker\r\n\r\n json_info = _parse_json(financials_site)\r\n\r\n if \"earnings\" not in json_info:\r\n return result\r\n\r\n temp = json_info[\"earnings\"]\r\n\r\n if temp == None:\r\n return result\r\n \r\n result[\"quarterly_results\"] = pd.DataFrame.from_dict(temp[\"earningsChart\"][\"quarterly\"])\r\n \r\n result[\"yearly_revenue_earnings\"] = pd.DataFrame.from_dict(temp[\"financialsChart\"][\"yearly\"])\r\n \r\n result[\"quarterly_revenue_earnings\"] = pd.DataFrame.from_dict(temp[\"financialsChart\"][\"quarterly\"])\r\n \r\n return result\r\n\r\n\r\n\r\n### Earnings functions\r\ndef _parse_earnings_json(url, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n resp = requests.get(url, headers = headers)\r\n \r\n content = resp.content.decode(encoding='utf-8', errors='strict')\r\n \r\n page_data = [row for row in content.split(\r\n '\\n') if row.startswith('root.App.main = ')][0][:-1]\r\n \r\n page_data = page_data.split('root.App.main = ', 1)[1]\r\n \r\n return json.loads(page_data)\r\n\r\ndef get_next_earnings_date(ticker):\r\n \r\n base_earnings_url = 'https://finance.yahoo.com/quote'\r\n new_url = base_earnings_url + \"/\" + ticker\r\n\r\n parsed_result = _parse_earnings_json(new_url)\r\n \r\n temp = parsed_result['context']['dispatcher']['stores']['QuoteSummaryStore']['calendarEvents']['earnings']['earningsDate'][0]['raw']\r\n\r\n return datetime.datetime.fromtimestamp(temp)\r\n\r\n\r\ndef get_earnings_history(ticker):\r\n \r\n '''Inputs: @ticker\r\n Returns the earnings calendar history of the input ticker with \r\n EPS actual vs. 
expected data.'''\r\n\r\n url = 'https://finance.yahoo.com/calendar/earnings?symbol=' + ticker\r\n \r\n result = _parse_earnings_json(url)\r\n \r\n return result[\"context\"][\"dispatcher\"][\"stores\"][\"ScreenerResultsStore\"][\"results\"][\"rows\"]\r\n\r\n\r\n\r\ndef get_earnings_for_date(date, offset = 0, count = 1):\r\n\r\n '''Inputs: @date\r\n Returns a dictionary of stock tickers with earnings expected on the\r\n input date. The dictionary contains the expected EPS values for each\r\n stock if available.'''\r\n \r\n base_earnings_url = 'https://finance.yahoo.com/calendar/earnings'\r\n \r\n if offset >= count:\r\n return []\r\n \r\n temp = pd.Timestamp(date)\r\n date = temp.strftime(\"%Y-%m-%d\")\r\n\r\n dated_url = '{0}?day={1}&offset={2}&size={3}'.format(\r\n base_earnings_url, date, offset, 100)\r\n \r\n result = _parse_earnings_json(dated_url)\r\n \r\n stores = result['context']['dispatcher']['stores']\r\n \r\n earnings_count = stores['ScreenerCriteriaStore']['meta']['total']\r\n\r\n new_offset = offset + 100\r\n \r\n more_earnings = get_earnings_for_date(date, new_offset, earnings_count)\r\n \r\n current_earnings = stores['ScreenerResultsStore']['results']['rows']\r\n\r\n total_earnings = current_earnings + more_earnings\r\n\r\n return total_earnings\r\n\r\n\r\ndef get_earnings_in_date_range(start_date, end_date):\r\n\r\n '''Inputs: @start_date\r\n @end_date\r\n \r\n Returns the stock tickers with expected EPS data for all dates in the\r\n input range (inclusive of the start_date and end_date.'''\r\n \r\n earnings_data = []\r\n\r\n days_diff = pd.Timestamp(end_date) - pd.Timestamp(start_date)\r\n days_diff = days_diff.days\r\n\r\n \r\n current_date = pd.Timestamp(start_date)\r\n \r\n dates = [current_date + datetime.timedelta(diff) for diff in range(days_diff + 1)]\r\n dates = [d.strftime(\"%Y-%m-%d\") for d in dates]\r\n \r\n i = 0\r\n while i < len(dates):\r\n try:\r\n earnings_data += get_earnings_for_date(dates[i])\r\n except Exception:\r\n pass\r\n \r\n i += 1\r\n \r\n return earnings_data\r\n\r\n\r\ndef get_currencies(headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Returns the currencies table from Yahoo Finance'''\r\n \r\n site = \"https://finance.yahoo.com/currencies\"\r\n tables = pd.read_html(requests.get(site, headers=headers).text)\r\n \r\n result = tables[0]\r\n \r\n return result\r\n\r\n\r\ndef get_futures(headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Returns the futures table from Yahoo Finance'''\r\n \r\n site = \"https://finance.yahoo.com/commodities\"\r\n tables = pd.read_html(requests.get(site, headers=headers).text)\r\n \r\n result = tables[0]\r\n \r\n return result\r\n\r\n\r\ndef get_undervalued_large_caps(headers = {'User-agent': 'Mozilla/5.0'}):\r\n \r\n '''Returns the undervalued large caps table from Yahoo Finance'''\r\n \r\n site = \"https://finance.yahoo.com/screener/predefined/undervalued_large_caps?offset=0&count=100\"\r\n \r\n tables = pd.read_html(requests.get(site, headers=headers).text)\r\n \r\n result = tables[0]\r\n \r\n return result\r\n\r\n\r\ndef get_quote_data(ticker, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n):\r\n \r\n '''Inputs: @ticker\r\n \r\n Returns a dictionary containing over 70 elements corresponding to the \r\n input ticker, including company name, book value, moving average data,\r\n pre-market / post-market price (when applicable), and more.'''\r\n \r\n site = 
\"https://query1.finance.yahoo.com/v7/finance/quote?symbols=\" + ticker\r\n \r\n resp = requests.get(site, headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\r\n)\r\n \r\n if not resp.ok:\r\n raise AssertionError(\"\"\"Invalid response from server. Check if ticker is\r\n valid.\"\"\")\r\n \r\n \r\n json_result = resp.json()\r\n info = json_result[\"quoteResponse\"][\"result\"]\r\n \r\n return info[0]\r\n \r\n\r\ndef get_market_status():\r\n \r\n '''Returns the current state of the market - PRE, POST, OPEN, or CLOSED'''\r\n \r\n quote_data = get_quote_data(\"^dji\")\r\n\r\n return quote_data[\"marketState\"]\r\n\r\ndef get_premarket_price(ticker):\r\n\r\n '''Inputs: @ticker\r\n \r\n Returns the current pre-market price of the input ticker\r\n (returns value if pre-market price is available.'''\r\n \r\n quote_data = get_quote_data(ticker)\r\n \r\n if \"preMarketPrice\" in quote_data:\r\n return quote_data[\"preMarketPrice\"]\r\n \r\n raise AssertionError(\"Premarket price not currently available.\")\r\n\r\ndef get_postmarket_price(ticker):\r\n\r\n '''Inputs: @ticker\r\n \r\n Returns the current post-market price of the input ticker\r\n (returns value if pre-market price is available.'''\r\n \r\n quote_data = get_quote_data(ticker)\r\n \r\n if \"postMarketPrice\" in quote_data:\r\n return quote_data[\"postMarketPrice\"]\r\n \r\n raise AssertionError(\"Postmarket price not currently available.\")\r\n \r\n\r\n# Company Information Functions\r\ndef get_company_info(ticker):\r\n '''Scrape the company information for a ticker\r\n\r\n @param: ticker\r\n '''\r\n site = f\"https://finance.yahoo.com/quote/{ticker}/profile?p={ticker}\"\r\n json_info = _parse_json(site)\r\n json_info = json_info[\"assetProfile\"]\r\n info_frame = pd.DataFrame.from_dict(json_info,\r\n orient=\"index\",\r\n columns=[\"Value\"])\r\n info_frame = info_frame.drop(\"companyOfficers\", axis=\"index\")\r\n info_frame.index.name = \"Breakdown\"\r\n return info_frame\r\n\r\n\r\ndef get_company_officers(ticker):\r\n '''Scrape the company information and return a table of the officers\r\n\r\n @param: ticker\r\n '''\r\n site = f\"https://finance.yahoo.com/quote/{ticker}/profile?p={ticker}\"\r\n json_info = _parse_json(site)\r\n json_info = json_info[\"assetProfile\"][\"companyOfficers\"]\r\n info_frame = pd.DataFrame.from_dict(json_info)\r\n info_frame = info_frame.set_index(\"name\")\r\n return info_frame\r\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.read_html", "pandas.Timestamp.today", "pandas.to_datetime", "pandas.DateOffset", "pandas.Timestamp", "pandas.DataFrame.from_dict" ] ]
michaelosthege/aesara
[ "55c88832ba71f87c9612d573ede74a4c042ef570", "55c88832ba71f87c9612d573ede74a4c042ef570" ]
[ "theano/sparse/sandbox/sp2.py", "theano/sparse/opt.py" ]
[ "import numpy as np\nimport scipy.sparse\n\nimport theano\nfrom theano import gof, tensor\nfrom theano.gof.op import Op\nfrom theano.sparse.basic import (\n Remove0,\n SparseType,\n _is_sparse,\n as_sparse_variable,\n remove0,\n)\n\n# Also for compatibility\nfrom theano.tensor import discrete_dtypes, float_dtypes\n\n\n# Probability Ops are currently back in sandbox, because they do not respect\n# Theano's Op contract, as their behaviour is not reproducible: calling\n# the perform() method twice with the same argument will yield different\n# results.\n# from theano.sparse.basic import (\n# Multinomial, multinomial, Poisson, poisson,\n# Binomial, csr_fbinomial, csc_fbinomial, csr_dbinomial, csc_dbinomial)\n\n\n# Alias to maintain compatibility\nEliminateZeros = Remove0\neliminate_zeros = remove0\n\n\n# Probability\nclass Poisson(Op):\n \"\"\"Return a sparse having random values from a Poisson density\n with mean from the input.\n\n WARNING: This Op is NOT deterministic, as calling it twice with the\n same inputs will NOT give the same result. This is a violation of\n Theano's contract for Ops\n\n :param x: Sparse matrix.\n\n :return: A sparse matrix of random integers of a Poisson density\n with mean of `x` element wise.\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, x):\n x = as_sparse_variable(x)\n return gof.Apply(self, [x], [x.type()])\n\n def perform(self, node, inputs, outputs):\n (x,) = inputs\n (out,) = outputs\n assert _is_sparse(x)\n assert x.format in [\"csr\", \"csc\"]\n out[0] = x.copy()\n out[0].data = np.asarray(np.random.poisson(out[0].data), dtype=x.dtype)\n out[0].eliminate_zeros()\n\n def grad(self, inputs, outputs_gradients):\n comment = \"No gradient exists for class Poisson in\\\n theano/sparse/sandbox/sp2.py\"\n return [\n theano.gradient.grad_undefined(\n op=self, x_pos=0, x=inputs[0], comment=comment\n )\n ]\n\n def infer_shape(self, fgraph, node, ins_shapes):\n return ins_shapes\n\n\npoisson = Poisson()\n\n\nclass Binomial(Op):\n \"\"\"Return a sparse matrix having random values from a binomial\n density having number of experiment `n` and probability of succes\n `p`.\n\n WARNING: This Op is NOT deterministic, as calling it twice with the\n same inputs will NOT give the same result. 
This is a violation of\n Theano's contract for Ops\n\n :param n: Tensor scalar representing the number of experiment.\n :param p: Tensor scalar representing the probability of success.\n :param shape: Tensor vector for the output shape.\n\n :return: A sparse matrix of integers representing the number\n of success.\n \"\"\"\n\n __props__ = (\"format\", \"dtype\")\n\n def __init__(self, format, dtype):\n self.format = format\n self.dtype = dtype\n\n def make_node(self, n, p, shape):\n n = tensor.as_tensor_variable(n)\n p = tensor.as_tensor_variable(p)\n shape = tensor.as_tensor_variable(shape)\n\n assert n.dtype in discrete_dtypes\n assert p.dtype in float_dtypes\n assert shape.dtype in discrete_dtypes\n\n return gof.Apply(\n self, [n, p, shape], [SparseType(dtype=self.dtype, format=self.format)()]\n )\n\n def perform(self, node, inputs, outputs):\n (n, p, shape) = inputs\n (out,) = outputs\n binomial = np.random.binomial(n, p, size=shape)\n csx_matrix = getattr(scipy.sparse, self.format + \"_matrix\")\n out[0] = csx_matrix(binomial, dtype=self.dtype)\n\n def connection_pattern(self, node):\n return [[True], [True], [False]]\n\n def grad(self, inputs, gout):\n (n, p, shape) = inputs\n (gz,) = gout\n comment_n = \"No gradient exists for the number of samples in class\\\n Binomial of theano/sparse/sandbox/sp2.py\"\n comment_p = \"No gradient exists for the prob of success in class\\\n Binomial of theano/sparse/sandbox/sp2.py\"\n return [\n theano.gradient.grad_undefined(op=self, x_pos=0, x=n, comment=comment_n),\n theano.gradient.grad_undefined(op=self, x_pos=1, x=p, comment=comment_p),\n theano.gradient.disconnected_type(),\n ]\n\n def infer_shape(self, fgraph, node, ins_shapes):\n return [(node.inputs[2][0], node.inputs[2][1])]\n\n\ncsr_fbinomial = Binomial(\"csr\", \"float32\")\ncsc_fbinomial = Binomial(\"csc\", \"float32\")\ncsr_dbinomial = Binomial(\"csr\", \"float64\")\ncsc_dbinomial = Binomial(\"csc\", \"float64\")\n\n\nclass Multinomial(Op):\n \"\"\"Return a sparse matrix having random values from a multinomial\n density having number of experiment `n` and probability of succes\n `p`.\n\n WARNING: This Op is NOT deterministic, as calling it twice with the\n same inputs will NOT give the same result. This is a violation of\n Theano's contract for Ops\n\n :param n: Tensor type vector or scalar representing the number of\n experiment for each row. If `n` is a scalar, it will be\n used for each row.\n :param p: Sparse matrix of probability where each row is a probability\n vector representing the probability of succes. N.B. 
Each row\n must sum to one.\n\n :return: A sparse matrix of random integers from a multinomial density\n for each row.\n\n :note: It will works only if `p` have csr format.\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, n, p):\n n = tensor.as_tensor_variable(n)\n p = as_sparse_variable(p)\n assert p.format in [\"csr\", \"csc\"]\n\n return gof.Apply(self, [n, p], [p.type()])\n\n def perform(self, node, inputs, outputs):\n (n, p) = inputs\n (out,) = outputs\n assert _is_sparse(p)\n\n if p.format != \"csr\":\n raise NotImplementedError\n\n out[0] = p.copy()\n\n if n.ndim == 0:\n for i in range(p.shape[0]):\n k, l = p.indptr[i], p.indptr[i + 1]\n out[0].data[k:l] = np.random.multinomial(n, p.data[k:l])\n elif n.ndim == 1:\n if n.shape[0] != p.shape[0]:\n raise ValueError(\n \"The number of element of n must be \"\n \"the same as the number of row of p.\"\n )\n for i in range(p.shape[0]):\n k, l = p.indptr[i], p.indptr[i + 1]\n out[0].data[k:l] = np.random.multinomial(n[i], p.data[k:l])\n\n def grad(self, inputs, outputs_gradients):\n comment_n = \"No gradient exists for the number of samples in class\\\n Multinomial of theano/sparse/sandbox/sp2.py\"\n comment_p = \"No gradient exists for the prob of success in class\\\n Multinomial of theano/sparse/sandbox/sp2.py\"\n return [\n theano.gradient.grad_undefined(\n op=self, x_pos=0, x=inputs[0], comment=comment_n\n ),\n theano.gradient.grad_undefined(\n op=self, x_pos=1, x=inputs[1], comment=comment_p\n ),\n ]\n\n def infer_shape(self, fgraph, node, ins_shapes):\n return [ins_shapes[1]]\n\n\nmultinomial = Multinomial()\n", "import numpy as np\nimport scipy\n\nimport theano\nfrom theano import gof, scalar, tensor\nfrom theano.configdefaults import config\nfrom theano.gof.op import COp\nfrom theano.misc.safe_asarray import _asarray\nfrom theano.sparse import basic as sparse\nfrom theano.sparse.basic import (\n CSC,\n CSR,\n csm_data,\n csm_grad,\n csm_indices,\n csm_indptr,\n csm_properties,\n usmm,\n)\nfrom theano.tensor import blas\nfrom theano.tensor.opt import register_canonicalize, register_specialize\n\n\n_is_sparse_variable = sparse._is_sparse_variable\n_is_dense = sparse._is_dense\n\n# This is tested in tests/test_opt.py:test_local_csm_properties_csm\n\n\n@gof.local_optimizer([csm_properties])\ndef local_csm_properties_csm(fgraph, node):\n \"\"\"\n If we find csm_properties(CSM(*args)), then we can replace that with the\n *args directly.\n\n \"\"\"\n if node.op == csm_properties:\n (csm,) = node.inputs\n if csm.owner and (csm.owner.op == CSC or csm.owner.op == CSR):\n # csm.owner.inputs could be broadcastable. 
In that case, we have\n # to adjust the broadcasting flag here.\n ret_var = [\n theano.tensor.patternbroadcast(i, o.broadcastable)\n for i, o in zip(csm.owner.inputs, node.outputs)\n ]\n return ret_var\n\n return False\n\n\nregister_specialize(local_csm_properties_csm)\n\n\n# This is tested in tests/test_basic.py:test_remove0\n@gof.local_optimizer([sparse.Remove0])\ndef local_inplace_remove0(fgraph, node):\n \"\"\"\n Optimization to insert inplace versions of Remove0.\n\n \"\"\"\n # If inplace is not enabled, enable it and replace that op with a\n # new op which has inplace enabled\n if isinstance(node.op, sparse.Remove0) and not node.op.inplace:\n new_op = node.op.__class__(inplace=True)\n new_node = new_op(*node.inputs)\n return [new_node]\n return False\n\n\ntheano.compile.optdb.register(\n \"local_inplace_remove0\",\n gof.TopoOptimizer(\n local_inplace_remove0, failure_callback=gof.TopoOptimizer.warn_inplace\n ),\n 60,\n \"fast_run\",\n \"inplace\",\n)\n\n\nclass AddSD_ccode(COp):\n \"\"\"\n Add a sparse and a dense matrix.\n\n Parameters\n ----------\n x\n A sparse matrix.\n y\n A dense matrix\n\n Returns\n -------\n matrix\n `x`+`y`\n\n Notes\n -----\n The grad implemented is structured on `x`.\n\n \"\"\"\n\n __props__ = (\"format\", \"inplace\")\n\n def __init__(self, format, inplace=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Should we do inplace addition or not ?\n self.inplace = inplace\n self.format = format\n if self.inplace:\n self.destroy_map = {0: [3]}\n\n def __str__(self):\n inp = \"\"\n if self.inplace:\n inp = \",inplace\"\n return f\"{self.__class__.__name__}{{{self.format}{inp}}}\"\n\n def make_node(self, x, y):\n x, y = sparse.as_sparse_variable(x), tensor.as_tensor_variable(y)\n out_dtype = scalar.upcast(x.type.dtype, y.type.dtype)\n if self.inplace:\n assert out_dtype == y.dtype\n\n indices, indptr, data = csm_indices(x), csm_indptr(x), csm_data(x)\n # We either use CSC or CSR depending on the format of input\n assert self.format == x.type.format\n # The magic number two here arises because L{scipy.sparse}\n # objects must be matrices (have dimension 2)\n assert y.type.ndim == 2\n out = tensor.TensorType(dtype=out_dtype, broadcastable=y.type.broadcastable)()\n return gof.Apply(self, [data, indices, indptr, y], [out])\n\n def c_code(self, node, name, inputs, outputs, sub):\n (_data, _indices, _indptr, y) = inputs\n (z,) = outputs\n inplace = int(self.inplace)\n format = {\"csc\": 0, \"csr\": 1}[self.format]\n out_typenum = node.outputs[0].type.dtype_specs()[2]\n code = \"\"\"\n Py_XDECREF(%(z)s);\n if (!%(inplace)s){\n if(PyArray_TYPE(%(y)s) != %(out_typenum)s){\n %(z)s = (PyArrayObject *) PyArray_FromArray(%(y)s, PyArray_DescrFromType(%(out_typenum)s), 0);\n }else{\n %(z)s = (PyArrayObject *) PyArray_NewCopy(%(y)s, NPY_CORDER);\n }\n }else{\n %(z)s = %(y)s;\n Py_XINCREF(%(z)s);\n }\n\n npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_indptr)s* __restrict__ indptr = (dtype_%(_indptr)s*)PyArray_DATA(%(_indptr)s);\n const dtype_%(_indices)s* __restrict__ indices = (dtype_%(_indices)s*)PyArray_DATA(%(_indices)s);\n const dtype_%(_data)s* __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n\n dtype_%(y)s* ydata = (dtype_%(y)s*)PyArray_DATA(%(y)s);\n dtype_%(z)s* zdata = (dtype_%(z)s*)PyArray_DATA(%(z)s);\n npy_intp Yi = PyArray_STRIDES(%(y)s)[0]/PyArray_DESCR(%(y)s)->elsize;\n npy_intp Yj = PyArray_STRIDES(%(y)s)[1]/PyArray_DESCR(%(y)s)->elsize;\n\n npy_intp pos;\n if (%(format)s == 0){\n for (npy_intp col = 0; col < N; 
++col){\n for (dtype_%(_indptr)s ind = indptr[col]; ind < indptr[col+1]; ++ind){\n npy_intp row = indices[ind];\n pos = row * Yi + col * Yj;\n zdata[pos] = ydata[pos] + data[ind];\n }\n }\n }else{\n for (npy_intp row = 0; row < N; ++row){\n for (dtype_%(_indptr)s ind = indptr[row]; ind < indptr[row+1]; ++ind){\n npy_intp col = indices[ind];\n pos = row * Yi + col * Yj;\n zdata[pos] = ydata[pos] + data[ind];\n }\n }\n }\n \"\"\" % dict(\n locals(), **sub\n )\n return code\n\n def infer_shape(self, fgraph, node, shapes):\n return [shapes[3]]\n\n def c_code_cache_version(self):\n return (2,)\n\n\n@gof.local_optimizer([sparse.AddSD])\ndef local_inplace_addsd_ccode(fgraph, node):\n \"\"\"\n Optimization to insert inplace versions of AddSD.\n\n \"\"\"\n if isinstance(node.op, sparse.AddSD) and config.cxx:\n out_dtype = scalar.upcast(*node.inputs)\n if out_dtype != node.inputs[1].dtype:\n return\n new_node = AddSD_ccode(format=node.inputs[0].type.format, inplace=True)(\n *node.inputs\n )\n return [new_node]\n return False\n\n\ntheano.compile.optdb.register(\n \"local_inplace_addsd_ccode\",\n gof.TopoOptimizer(\n local_inplace_addsd_ccode, failure_callback=gof.TopoOptimizer.warn_inplace\n ),\n 60,\n \"fast_run\",\n \"inplace\",\n)\n\n\n@register_canonicalize(\"fast_compile\")\n@register_specialize\n@gof.local_optimizer([sparse.DenseFromSparse])\ndef local_dense_from_sparse_sparse_from_dense(fgraph, node):\n if isinstance(node.op, sparse.DenseFromSparse):\n inp = node.inputs[0]\n if inp.owner and isinstance(inp.owner.op, sparse.SparseFromDense):\n return inp.owner.inputs\n\n\n@gof.local_optimizer([sparse.AddSD])\ndef local_addsd_ccode(fgraph, node):\n \"\"\"\n Convert AddSD to faster AddSD_ccode.\n\n \"\"\"\n if isinstance(node.op, sparse.AddSD) and config.cxx:\n new_node = AddSD_ccode(format=node.inputs[0].type.format)(*node.inputs)\n return [new_node]\n return False\n\n\ntheano.compile.optdb.register(\n \"local_addsd_ccode\",\n gof.TopoOptimizer(local_addsd_ccode),\n # Must be after local_inplace_addsd_ccode at 60\n 61,\n \"fast_run\",\n)\n\n\nclass StructuredDotCSC(COp):\n \"\"\"\n Structured Dot CSC is like dot, except that only the gradient wrt non-zero\n elements of the sparse matrix `a` are calculated and propagated.\n\n The output is presumed to be a dense matrix, and is represented by a\n TensorType instance.\n\n Parameters\n ----------\n a\n A sparse matrix in csc format.\n b\n A sparse or dense matrix.\n\n Returns\n -------\n The dot product of `a` and `b`.\n\n Notes\n -----\n The grad implemented is structured.\n This op is used as an optimization for StructuredDot.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_val, a_ind, a_ptr, a_nrows, b):\n dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)\n r = gof.Apply(\n self,\n [a_val, a_ind, a_ptr, a_nrows, b],\n [tensor.tensor(dtype_out, (False, b.type.broadcastable[1]))],\n )\n return r\n\n def perform(self, node, inputs, outputs):\n (a_val, a_ind, a_ptr, a_nrows, b) = inputs\n (out,) = outputs\n a = scipy.sparse.csc_matrix(\n (a_val, a_ind, a_ptr), (a_nrows, b.shape[0]), copy=False\n )\n # out[0] = a.dot(b)\n out[0] = _asarray(a * b, dtype=node.outputs[0].type.dtype)\n assert _is_dense(out[0]) # scipy 0.7 automatically converts to dense\n\n def c_code(self, node, name, inputs, outputs, sub):\n # C-implementation of the dot product of the sparse matrix A and matrix\n # B.\n # @param a_val: non-zero values of the sparse matrix\n # @param a_ind: column indices of the non-null values (.indices of a\n # scipy.csc_matrix)\n # 
@param a_ptr: a_ptr indicates col indices for col. i are in the range\n # a_ptr[i]:a_ptr[i+1]\n # @param n_rows: number of rows of sparse matrix\n # @param b: dense matrix to perform dot product with, as in dot(a, b)\n # @param z: return value\n # @param sub: TODO, not too sure, something to do with weave probably\n\n (a_val, a_ind, a_ptr, a_nrows, b) = inputs\n (z,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a_val\")\n if node.inputs[4].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n typenum_z = node.outputs[0].type.dtype_specs()[2] # retrieve dtype number\n typenum_a_val = node.inputs[0].type.dtype_specs()[2] # retrieve dtype number\n typenum_b = node.inputs[4].type.dtype_specs()[2] # retrieve dtype number\n\n rval = \"\"\"\n\n if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ptr) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_nrows)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, \"rank(nrows) != 0\"); %(fail)s;}\n if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_val)s) != %(typenum_a_val)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for a_val\"); %(fail)s;}\n\n if (PyArray_TYPE(%(b)s) != %(typenum_b)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for b\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"a_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_nrows)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"a_nrows dtype not INT32\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"a_val and a_ind have different lengths\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_ptr)s)[0] != PyArray_DIMS(%(b)s)[0]+1)\n {PyErr_SetString(PyExc_NotImplementedError, \"a's number of columns doesn't match b's rows\"); %(fail)s;}\n\n if ((!%(z)s)\n || (PyArray_DIMS(%(z)s)[0] != ((npy_int32 *)PyArray_DATA(%(a_nrows)s))[0])\n || (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])\n )\n {\n {Py_XDECREF(%(z)s);}\n npy_intp dims[] = {0, 0};\n dims[0] = ((npy_int32 *)PyArray_DATA(%(a_nrows)s))[0];\n dims[1] = PyArray_DIMS(%(b)s)[1];\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_z)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(z)s)[0];\n npy_intp N = PyArray_DIMS(%(z)s)[1];\n npy_intp K = PyArray_DIMS(%(b)s)[0];\n if (N > 0x7fffffffL)\n {PyErr_SetString(PyExc_NotImplementedError, \"array too big (overflows int32 index)\"); %(fail)s;}\n\n // strides tell you how many bytes to skip to go to next column/row entry\n npy_intp Szm = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Szn = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\n //npy_intp Sbm = PyArray_STRIDES(%(b)s)[0] / PyArray_DESCR(%(b)s)->elsize;\n npy_intp Sbn = PyArray_STRIDES(%(b)s)[1] / PyArray_DESCR(%(b)s)->elsize;\n npy_intp 
Sval = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;\n npy_intp Sind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;\n npy_intp Sptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;\n\n // pointers to access actual data in the arrays passed as params.\n dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);\n const dtype_%(a_val)s* __restrict__ Dval = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);\n const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(a_ind)s);\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);\n\n //npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];\n\n //clear the output array\n memset(Dz, 0, M*N*sizeof(dtype_%(z)s));\n\n //iterate over the sparse array, making the most of an entry wherever we find it.\n //\n // Normal matrix matrix multiply: A MxK, B KxN => Z = AB\n // for m\n // for n\n // for k\n // z[m, n] += a[m, k] * b[k, n]\n // Here instead: Z =\n // for k\n // for m (sparse)\n // for n\n // z[m, n] += a[m, k] * b[k, n]\n\n // loop over inner dimension\n for (npy_int32 k = 0; k < K; ++k)\n {\n // get pointer to k-th row of dense matrix\n const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(PyArray_BYTES(%(b)s) + PyArray_STRIDES(%(b)s)[0] * k);\n\n // loop over sparse column indices through index pointer array\n // (amounts to looping over rows M of sparse matrix)\n\n for (npy_int32 m_idx = Dptr[k * Sptr]; m_idx < Dptr[(k+1) * Sptr]; ++m_idx)\n {\n npy_int32 m = Dind[m_idx * Sind]; // row index of non-null value for column K\n const dtype_%(a_val)s Amk = Dval[m_idx * Sval]; // actual value at that location\n\n // pointer to m-th row of the output matrix Z\n dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(PyArray_BYTES(%(z)s) + PyArray_STRIDES(%(z)s)[0] * m);\n\n //RESOLVE: a.shape[0] equals z.shape[0], why is this not an equality constraint?\n if (m >= PyArray_DIMS(%(z)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"illegal row index in a\"); %(fail)s;}\n\n // loop over final dimension (cols of dense matrix) and perform dot product\n if ((Szn == 1) && (Sbn == 1)) {\n for(npy_int32 n = 0; n < N; ++n)\n {\n zm[n] += Amk * bk[n];\n }\n }\n else\n {\n for(npy_int32 n = 0; n < N; ++n)\n {\n zm[n*Szn] += Amk * bk[n*Sbn];\n }\n }\n }\n }\n }\n \"\"\" % dict(\n locals(), **sub\n )\n\n return rval\n\n def c_code_cache_version(self):\n return (3,)\n\n\nsd_csc = StructuredDotCSC()\n\n\nclass StructuredDotCSR(COp):\n \"\"\"\n Structured Dot CSR is like dot, except that only the\n gradient wrt non-zero elements of the sparse matrix\n `a` are calculated and propagated.\n\n The output is presumed to be a dense matrix, and is represented by a\n TensorType instance.\n\n Parameters\n ----------\n a\n A sparse matrix in csr format.\n b\n A sparse or dense matrix.\n\n Returns\n -------\n matrix\n The dot product of `a` and `b`.\n\n Notes\n -----\n The grad implemented is structured.\n This op is used as an optimization for StructuredDot.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_val, a_ind, a_ptr, b):\n self.dtype_out = scalar.upcast(a_val.type.dtype, b.type.dtype)\n r = gof.Apply(\n self,\n [a_val, a_ind, a_ptr, b],\n [tensor.tensor(self.dtype_out, (False, b.type.broadcastable[1]))],\n )\n return r\n\n def perform(self, node, inputs, outputs):\n (a_val, a_ind, a_ptr, b) = inputs\n (out,) = outputs\n a = scipy.sparse.csr_matrix(\n (a_val, a_ind, a_ptr), (len(a_ptr) - 1, b.shape[0]), copy=True\n ) # use view_map before setting this to False\n # out[0] = a.dot(b)\n out[0] = 
a * b\n # scipy 0.7 automatically converts to dense, but not .6 sometimes\n assert _is_dense(out[0])\n\n def c_code(self, node, name, inputs, outputs, sub):\n \"\"\"\n C-implementation of the dot product of the sparse matrix A and matrix B.\n\n Parameters\n ----------\n a_val\n Non-zero values of the sparse matrix.\n a_ind\n Column indices of the non-null values (.indices of a\n scipy.csc_matrix).\n a_ptr\n Indicates col indices for col. i are in the range\n a_ptr[i]:a_ptr[i+1].\n n_cols\n Number of columns of sparse matrix.\n b\n Dense matrix to perform dot product with, as in dot(a, b).\n z\n Return value.\n sub\n TODO, not too sure, something to do with weave probably.\n\n \"\"\"\n (a_val, a_ind, a_ptr, b) = inputs\n (z,) = outputs\n typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2]\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a_val\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ptr) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"a_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"a_val and a_ind have different lengths\"); %(fail)s;}\n\n if ((!%(z)s)\n || (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(a_ptr)s)[0]-1) //a's rows\n || (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1]) //b's columns\n )\n {\n {Py_XDECREF(%(z)s);}\n npy_intp dims[] = {0, 0};\n dims[0] = PyArray_DIMS(%(a_ptr)s)[0]-1;\n dims[1] = PyArray_DIMS(%(b)s)[1];\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_z)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(z)s)[0];\n npy_intp N = PyArray_DIMS(%(z)s)[1];\n npy_intp K = PyArray_DIMS(%(b)s)[0];\n if (N > 0x7fffffffL)\n {PyErr_SetString(PyExc_NotImplementedError, \"array too big (overflows int32 index)\"); %(fail)s;}\n\n // strides tell you how many bytes to skip to go to next column/row entry\n npy_intp Szm = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Szn = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Sbm = PyArray_STRIDES(%(b)s)[0] / PyArray_DESCR(%(b)s)->elsize;\n npy_intp Sbn = PyArray_STRIDES(%(b)s)[1] / PyArray_DESCR(%(b)s)->elsize;\n npy_intp Sval = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;\n npy_intp Sind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;\n npy_intp Sptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;\n\n // pointers to access actual data in the arrays passed as params.\n dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);\n const dtype_%(a_val)s* __restrict__ Dval = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);\n const npy_int32 * __restrict__ 
Dind = (npy_int32*)PyArray_DATA(%(a_ind)s);\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);\n\n //npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];\n\n //clear the output array\n memset(Dz, 0, M*N*sizeof(dtype_%(z)s));\n\n //iterate over the sparse array, making the most of an entry wherever we find it.\n // Normal matrix matrix multiply:\n // for m\n // for n\n // for k\n // z[m, n] += a[m, k] * b[k, n]\n // Here instead:\n // for m\n // for k (sparse)\n // for n\n // z[m, n] += a[m, k] * b[k, n]\n\n // loop over inner dimension\n for (npy_int64 m = 0; m < M; ++m)\n {\n // pointer to m-th row of the output matrix Z\n dtype_%(z)s* __restrict__ zm = (dtype_%(z)s*)(PyArray_BYTES(%(z)s) + PyArray_STRIDES(%(z)s)[0] * m);\n\n // loop over sparse rows indices through index pointer array\n // (amounts to looping over cols k of sparse matrix)\n for (npy_int32 k_idx = Dptr[m * Sptr]; k_idx < Dptr[(m+1) * Sptr]; ++k_idx)\n {\n npy_int32 k = Dind[k_idx * Sind]; // col index of non-null value for row m\n const dtype_%(a_val)s Amk = Dval[k_idx * Sval]; // actual value at that location\n\n // get pointer to k-th row of dense matrix\n const dtype_%(b)s* __restrict__ bk = (dtype_%(b)s*)(PyArray_BYTES(%(b)s) + PyArray_STRIDES(%(b)s)[0] * k);\n\n // loop over final dimension (cols of dense matrix) and perform dot product\n for(npy_int32 n = 0; n < N; ++n)\n {\n zm[n*Szn] += Amk * bk[n*Sbn];\n }\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def c_code_cache_version(self):\n return (2,)\n\n\nsd_csr = StructuredDotCSR()\n\n\n# register a specialization to replace StructuredDot -> StructuredDotCSx\n# This is tested in tests/test_basic.py:792\n@gof.local_optimizer([sparse._structured_dot])\ndef local_structured_dot(fgraph, node):\n if node.op == sparse._structured_dot:\n a, b = node.inputs\n if a.type.format == \"csc\":\n a_val, a_ind, a_ptr, a_shape = csm_properties(a)\n a_nsparse = a_shape[0]\n return [sd_csc(a_val, a_ind, a_ptr, a_nsparse, b)]\n if a.type.format == \"csr\":\n a_val, a_ind, a_ptr, a_shape = csm_properties(a)\n return [sd_csr(a_val, a_ind, a_ptr, b)]\n return False\n\n\n# Commented out because\n# a) it is only slightly faster than scipy these days, and sometimes a little\n# slower, and\n# b) the resulting graphs make it very difficult for an op to do size checking\n# on the matrices involved. 
dimension mismatches are hard to detect sensibly.\n# register_specialize(local_structured_dot)\n\n\nclass UsmmCscDense(COp):\n \"\"\"\n Performs the expression is `alpha` * `x` `y` + `z`.\n\n Parameters\n ----------\n x\n Matrix variable.\n y\n Matrix variable.\n z\n Dense matrix.\n alpha\n A tensor scalar.\n\n Returns\n -------\n The dense matrix resulting from `alpha` * `x` `y` + `z`.\n\n Notes\n -----\n The grad is not implemented for this op.\n Optimized version os Usmm when `x` is in csc format and `y` is dense.\n \"\"\"\n\n __props__ = (\"inplace\",)\n\n def __init__(self, inplace):\n self.inplace = inplace\n if inplace:\n self.destroy_map = {0: [6]}\n\n def __str__(self):\n if self.inplace:\n return \"UsmmCscDense{inplace}\"\n else:\n return \"UsmmCscDense{no_inplace}\"\n\n def make_node(self, alpha, x_val, x_ind, x_ptr, x_nrows, y, z):\n alpha = tensor.as_tensor_variable(alpha)\n x_val = tensor.as_tensor_variable(x_val)\n x_ind = tensor.as_tensor_variable(x_ind)\n x_ptr = tensor.as_tensor_variable(x_ptr)\n x_nrows = tensor.as_tensor_variable(x_nrows)\n y = tensor.as_tensor_variable(y)\n z = tensor.as_tensor_variable(z)\n assert x_ind.dtype == \"int32\"\n assert x_ptr.dtype == \"int32\"\n assert x_nrows.dtype == \"int32\"\n assert alpha.ndim == 2 and alpha.type.broadcastable == (True, True)\n assert x_val.ndim == 1\n assert y.ndim == 2\n assert z.ndim == 2\n\n dtype_out = scalar.upcast(\n alpha.type.dtype, x_val.type.dtype, y.type.dtype, z.type.dtype\n )\n\n if dtype_out not in (\"float32\", \"float64\"):\n raise NotImplementedError(\"only float types are supported in \" \"operands\")\n\n if self.inplace:\n assert z.type.dtype == dtype_out\n\n # axpy work only with the same dtype, so we should upcast the input\n if dtype_out != alpha.type.dtype:\n alpha = tensor.cast(alpha, dtype_out)\n if dtype_out != x_val.type.dtype:\n x_val = tensor.cast(x_val, dtype_out)\n if dtype_out != y.type.dtype:\n y = tensor.cast(y, dtype_out)\n if dtype_out != z.type.dtype:\n z = tensor.cast(z, dtype_out)\n\n r = gof.Apply(\n self,\n [alpha, x_val, x_ind, x_ptr, x_nrows, y, z],\n [tensor.tensor(dtype_out, (False, y.type.broadcastable[1]))],\n )\n return r\n\n def c_support_code(self):\n return blas.blas_header_text()\n\n def c_libraries(self):\n return blas.ldflags()\n\n def c_compile_args(self):\n return blas.ldflags(libs=False, flags=True)\n\n def c_lib_dirs(self):\n return blas.ldflags(libs=False, libs_dir=True)\n\n def c_header_dirs(self):\n return blas.ldflags(libs=False, include_dir=True)\n\n def c_code(self, node, name, inputs, outputs, sub):\n alpha, x_val, x_ind, x_ptr, x_nrows, y, z = inputs\n zn = outputs[0]\n if node.inputs[1].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for \" \"x_val\")\n if node.inputs[5].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for y\")\n if node.inputs[6].type.dtype != node.outputs[0].type.dtype:\n raise NotImplementedError(\"z and output must have same type\")\n\n if node.inputs[1].type.dtype == \"float32\":\n conv_type = \"float\"\n axpy = \"saxpy_\"\n else:\n conv_type = \"double\"\n axpy = \"daxpy_\"\n # retrieve dtype numbers\n typenum_alpha = node.inputs[0].type.dtype_specs()[2]\n typenum_x_val = node.inputs[1].type.dtype_specs()[2]\n typenum_y = node.inputs[5].type.dtype_specs()[2]\n typenum_z = node.inputs[6].type.dtype_specs()[2]\n typenum_zn = node.outputs[0].type.dtype_specs()[2]\n\n inplace = int(self.inplace)\n\n rval = 
\"\"\"\n\n if (PyArray_NDIM(%(x_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(x_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(x_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(x_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(x_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(x_ptr) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(x_nrows)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, \"rank(nrows) != 0\"); %(fail)s;}\n if (PyArray_NDIM(%(y)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, \"rank(y) != 2\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_val)s) != %(typenum_x_val)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for x_val\"); %(fail)s;}\n\n if (PyArray_TYPE(%(y)s) != %(typenum_y)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for y\"); %(fail)s;}\n\n if (PyArray_TYPE(%(z)s) != %(typenum_z)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for z\"); %(fail)s;}\n\n if (PyArray_TYPE(%(alpha)s) != %(typenum_alpha)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"Invalid type for alpha\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"x_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"x_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x_nrows)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"x_nrows dtype not INT32\"); %(fail)s;}\n\n if (PyArray_DIMS(%(x_val)s)[0] != PyArray_DIMS(%(x_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"x_val and x_ind have different lengths\"); %(fail)s;}\n\n if (PyArray_DIMS(%(x_ptr)s)[0] != PyArray_DIMS(%(y)s)[0]+1)\n {PyErr_SetString(PyExc_NotImplementedError, \"x's number of columns doesn't match y's rows\"); %(fail)s;}\n\n if (PyArray_DIMS(%(z)s)[0] != ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0] || PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(y)s)[1])\n {PyErr_SetString(PyExc_NotImplementedError, \"The dimension of the allocated output doesn't match the correct output size.\"); %(fail)s;}\n\n if (PyArray_SIZE(%(alpha)s) != 1)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number of element in alpha must be 1\"); %(fail)s;}\n\n if (PyArray_NDIM(%(alpha)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number dimension of alpha must be 2\"); %(fail)s;}\n\n if (PyArray_NDIM(%(x_val)s) != 1)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number dimension of x_val must be 1\"); %(fail)s;}\n\n if (PyArray_NDIM(%(y)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number dimension of y must be 2\"); %(fail)s;}\n\n if (PyArray_NDIM(%(z)s) != 2)\n {PyErr_SetString(PyExc_NotImplementedError, \"The number dimension of z must be 2\"); %(fail)s;}\n\n if (%(inplace)s)\n {\n if (%(typenum_zn)s != %(typenum_z)s) {\n PyErr_SetString(PyExc_NotImplementedError, \"When inplace the output dtype must be the same as the input\"); %(fail)s;}\n\n Py_XDECREF(%(zn)s);\n %(zn)s = %(z)s;\n Py_INCREF(%(zn)s);\n }\n else if (!%(zn)s\n || (PyArray_DIMS(%(zn)s)[0] != ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0])\n || (PyArray_DIMS(%(zn)s)[1] != PyArray_DIMS(%(y)s)[1])\n )\n {\n {Py_XDECREF(%(zn)s);}\n npy_intp dims[] = {0, 0};\n dims[0] = ((npy_int32 *)PyArray_DATA(%(x_nrows)s))[0];\n dims[1] = PyArray_DIMS(%(y)s)[1];\n %(zn)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, %(typenum_zn)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(zn)s)[0];\n 
npy_intp N = PyArray_DIMS(%(zn)s)[1];\n npy_intp K = PyArray_DIMS(%(y)s)[0];\n\n // pointers to access actual data in the arrays passed as params.\n const dtype_%(x_val)s* __restrict__ Dval = (dtype_%(x_val)s*)PyArray_DATA(%(x_val)s);\n const npy_int32 * __restrict__ Dind = (npy_int32*)PyArray_DATA(%(x_ind)s);\n const npy_int32 * __restrict__ Dptr = (npy_int32*)PyArray_DATA(%(x_ptr)s);\n const dtype_%(alpha)s alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];\n\n npy_intp Sz = PyArray_STRIDES(%(z)s)[1] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Szn = PyArray_STRIDES(%(zn)s)[1] / PyArray_DESCR(%(zn)s)->elsize;\n npy_intp Sval = PyArray_STRIDES(%(x_val)s)[0] / PyArray_DESCR(%(x_val)s)->elsize;\n npy_intp Sind = PyArray_STRIDES(%(x_ind)s)[0] / PyArray_DESCR(%(x_ind)s)->elsize;\n npy_intp Sptr = PyArray_STRIDES(%(x_ptr)s)[0] / PyArray_DESCR(%(x_ptr)s)->elsize;\n npy_intp Sy = PyArray_STRIDES(%(y)s)[1] / PyArray_DESCR(%(y)s)->elsize;\n\n // blas expects ints; convert here (rather than just making N etc ints) to avoid potential overflow in the negative-stride correction\n if ((N > 0x7fffffffL)||(Sy > 0x7fffffffL)||(Szn > 0x7fffffffL)||(Sy < -0x7fffffffL)||(Szn < -0x7fffffffL))\n {PyErr_SetString(PyExc_NotImplementedError, \"array too big for BLAS (overflows int32 index)\"); %(fail)s;}\n int N32 = N;\n int Sy32 = Sy;\n int Szn32 = Szn;\n\n if (!(%(inplace)s))\n {\n if (PyArray_CopyInto(%(zn)s, %(z)s))\n {\n Py_XDECREF(%(zn)s);\n %(fail)s;\n }\n }\n\n for (npy_intp k = 0; k < K; ++k)\n {\n for (npy_int32 m_idx = Dptr[k * Sptr]; m_idx < Dptr[(k+1)*Sptr]; ++m_idx)\n {\n const npy_int32 m = Dind[m_idx * Sind]; // row index of non-null value for column K\n\n const dtype_%(x_val)s Amk = alpha * Dval[m_idx * Sval]; // actual value at that location\n\n dtype_%(y)s* y_row = (dtype_%(y)s*)(PyArray_BYTES(%(y)s) + PyArray_STRIDES(%(y)s)[0] * k);\n // axpy expects pointer to the beginning of memory arrays,\n // so when the stride is negative, we need to get the\n // last element\n if (Sy < 0)\n y_row += (K - 1) * Sy;\n\n dtype_%(zn)s* z_row = (dtype_%(zn)s*)(PyArray_BYTES(%(zn)s) + PyArray_STRIDES(%(zn)s)[0] * m);\n if (Szn < 0)\n z_row += (N - 1) * Szn;\n\n %(axpy)s(&N32, (%(conv_type)s*)&Amk, (%(conv_type)s*)y_row, &Sy32, (%(conv_type)s*)z_row, &Szn32);\n }\n }\n }\n \"\"\" % dict(\n locals(), **sub\n )\n\n return rval\n\n def c_code_cache_version(self):\n return (3, blas.blas_header_version())\n\n\nusmm_csc_dense = UsmmCscDense(inplace=False)\nusmm_csc_dense_inplace = UsmmCscDense(inplace=True)\n\n\n# This is tested in tests/test_basic.py:UsmmTests\nlocal_usmm = gof.opt.PatternSub(\n (\n theano.tensor.sub,\n \"z\",\n (\n theano.tensor.mul,\n {\n \"pattern\": \"alpha\",\n \"constraint\": lambda expr: (\n np.all(expr.type.broadcastable) and config.blas__ldflags\n ),\n },\n (sparse._dot, \"x\", \"y\"),\n ),\n ),\n (usmm, (theano.tensor.neg, \"alpha\"), \"x\", \"y\", \"z\"),\n)\nregister_specialize(local_usmm, name=\"local_usmm\")\n\n\n# register a specialization to replace usmm_csc_dense -> usmm_csc_dense_inplace\n# This is tested in tests/test_basic.py:UsmmTests\n@gof.local_optimizer([usmm_csc_dense])\ndef local_usmm_csc_dense_inplace(fgraph, node):\n if node.op == usmm_csc_dense:\n return [usmm_csc_dense_inplace(*node.inputs)]\n\n\nregister_specialize(local_usmm_csc_dense_inplace, \"cxx_only\", \"inplace\")\n\n\n# This is tested in tests/test_basic.py:UsmmTests\n@gof.local_optimizer([usmm])\ndef local_usmm_csx(fgraph, node):\n \"\"\"\n usmm -> usmm_csc_dense\n\n \"\"\"\n if node.op == usmm:\n alpha, x, 
y, z = node.inputs\n\n x_is_sparse_variable = _is_sparse_variable(x)\n y_is_sparse_variable = _is_sparse_variable(y)\n\n if x_is_sparse_variable and not y_is_sparse_variable:\n if x.type.format == \"csc\":\n x_val, x_ind, x_ptr, x_shape = csm_properties(x)\n x_nsparse = x_shape[0]\n dtype_out = scalar.upcast(\n alpha.type.dtype, x.type.dtype, y.type.dtype, z.type.dtype\n )\n if dtype_out not in (\"float32\", \"float64\"):\n return False\n # Sparse cast is not implemented.\n if y.type.dtype != dtype_out:\n return False\n\n return [usmm_csc_dense(alpha, x_val, x_ind, x_ptr, x_nsparse, y, z)]\n return False\n\n\nregister_specialize(local_usmm_csx, \"cxx_only\")\n\n\nclass CSMGradC(COp):\n\n __props__ = ()\n\n def make_node(self, a_val, a_ind, a_ptr, a_dim, b_val, b_ind, b_ptr, b_dim):\n return gof.Apply(\n self,\n [a_val, a_ind, a_ptr, a_dim, b_val, b_ind, b_ptr, b_dim],\n [b_val.type()],\n )\n\n def c_code(self, node, name, inputs, outputs, sub):\n # retrieve dtype number\n (a_val, a_ind, a_ptr, a_dim, b_val, b_ind, b_ptr, b_dim) = inputs\n (z,) = outputs\n typenum_z = node.outputs[0].type.dtype_specs()[2]\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a_val\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b_val\")\n\n return \"\"\"\n if (PyArray_NDIM(%(a_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(a_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(a_ptr) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(b_val)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b_val) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(b_ind)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b_ind) != 1\"); %(fail)s;}\n if (PyArray_NDIM(%(b_ptr)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, \"rank(b_ptr) != 1\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"a_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(a_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(b_ind)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"b_ind dtype not INT32\"); %(fail)s;}\n\n if (PyArray_TYPE(%(b_ptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"b_ptr dtype not INT32\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_val)s)[0] != PyArray_DIMS(%(a_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"a_val and a_ind have different lengths\"); %(fail)s;}\n\n if (PyArray_DIMS(%(b_val)s)[0] != PyArray_DIMS(%(b_ind)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"b_val and b_ind have different lengths\"); %(fail)s;}\n\n if (PyArray_DIMS(%(a_ptr)s)[0] != PyArray_DIMS(%(b_ptr)s)[0])\n {PyErr_SetString(PyExc_NotImplementedError, \"a_ptr and b_ptr have different lengths\"); %(fail)s;}\n\n if ((!%(z)s) || (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(a_val)s)[0]))\n {\n {Py_XDECREF(%(z)s);}\n npy_intp dims[] = {0};\n dims[0] = PyArray_DIMS(%(a_val)s)[0];\n %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, dims, %(typenum_z)s);\n }\n\n {\n // sparse array has size MxK, dense KxN, output MxN\n npy_intp M = PyArray_DIMS(%(a_ptr)s)[0] - 1;\n npy_intp a_dim_0 = ((npy_int32 *)PyArray_DATA(%(a_dim)s))[0];\n 
npy_intp a_dim_1 = ((npy_int32 *)PyArray_DATA(%(a_dim)s))[1];\n\n npy_intp sp_dim = (M == a_dim_0)?a_dim_1:a_dim_0;\n\n // strides tell you how many bytes to skip to go to next column/row entry\n npy_intp Sz = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;\n npy_intp Sa_val = PyArray_STRIDES(%(a_val)s)[0] / PyArray_DESCR(%(a_val)s)->elsize;\n npy_intp Sa_ind = PyArray_STRIDES(%(a_ind)s)[0] / PyArray_DESCR(%(a_ind)s)->elsize;\n npy_intp Sa_ptr = PyArray_STRIDES(%(a_ptr)s)[0] / PyArray_DESCR(%(a_ptr)s)->elsize;\n npy_intp Sb_val = PyArray_STRIDES(%(b_val)s)[0] / PyArray_DESCR(%(b_val)s)->elsize;\n npy_intp Sb_ind = PyArray_STRIDES(%(b_ind)s)[0] / PyArray_DESCR(%(b_ind)s)->elsize;\n npy_intp Sb_ptr = PyArray_STRIDES(%(b_ptr)s)[0] / PyArray_DESCR(%(b_ptr)s)->elsize;\n\n // pointers to access actual data in the arrays passed as params.\n dtype_%(z)s* __restrict__ Dz = (dtype_%(z)s*)PyArray_DATA(%(z)s);\n const dtype_%(a_val)s* __restrict__ Da_val = (dtype_%(a_val)s*)PyArray_DATA(%(a_val)s);\n const npy_int32 * __restrict__ Da_ind = (npy_int32*)PyArray_DATA(%(a_ind)s);\n const npy_int32 * __restrict__ Da_ptr = (npy_int32*)PyArray_DATA(%(a_ptr)s);\n const dtype_%(b_val)s* __restrict__ Db_val = (dtype_%(b_val)s*)PyArray_DATA(%(b_val)s);\n const npy_int32 * __restrict__ Db_ind = (npy_int32*)PyArray_DATA(%(b_ind)s);\n const npy_int32 * __restrict__ Db_ptr = (npy_int32*)PyArray_DATA(%(b_ptr)s);\n\n npy_intp nnz = PyArray_DIMS(%(a_ind)s)[0];\n\n dtype_%(b_val)s b_row[sp_dim];\n\n //clear the output array\n for (npy_int64 i = 0; i < nnz; ++i)\n {\n Dz[i*Sz] = 0;\n }\n memset(b_row, 0, sp_dim*sizeof(dtype_%(b_val)s));\n\n // loop over inner dimension\n for (npy_int64 m = 0; m < M; ++m)\n {\n for (npy_int32 j_ptr = Db_ptr[m * Sb_ptr];\n j_ptr < Db_ptr[(m + 1) * Sb_ptr]; j_ptr++) {\n b_row[Db_ind[j_ptr * Sb_ind]] += Db_val[j_ptr*Sb_val];\n }\n\n for (npy_int32 j_ptr = Da_ptr[m * Sa_ptr];\n j_ptr < Da_ptr[(m + 1) * Sa_ptr]; j_ptr++) {\n Dz[j_ptr*Sz] = b_row[Da_ind[j_ptr * Sa_ind]];\n }\n\n for (npy_int32 j_ptr = Db_ptr[m * Sb_ptr];\n j_ptr < Db_ptr[(m + 1) * Sb_ptr]; j_ptr++) {\n b_row[Db_ind[j_ptr * Sb_ind]] = 0;\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def c_code_cache_version(self):\n return (3,)\n\n\ncsm_grad_c = CSMGradC()\n\n\n# register a specialization to replace csm_grad -> csm_grad_c\n# This is tested in tests/test_opt.py:test_local_csm_grad_c\n@gof.local_optimizer([csm_grad(None)])\ndef local_csm_grad_c(fgraph, node):\n \"\"\"\n csm_grad(None) -> csm_grad_c\n\n \"\"\"\n if node.op == csm_grad(None):\n return [csm_grad_c(*node.inputs)]\n return False\n\n\n# DISABLED AS IT IS BROKEN FOR UNSORTED INDICES!\n# register_specialize(local_csm_grad_c, 'cxx_only')\n\n\nclass MulSDCSC(COp):\n \"\"\"\n Multiplication of sparse matrix by a broadcasted dense vector\n element wise.\n\n Parameters\n ----------\n a_data\n Sparse matrix data.\n a_indices\n Sparse matrix indices.\n a_indptr\n Sparse matrix indptr.\n b\n Tensor type matrix.\n\n Returns\n -------\n The multiplication of the two matrices element-wise.\n\n Notes\n -----\n `a_data`, `a_indices` and `a_indptr` must be the properties of a sparse\n matrix in csc format.\n\n The dtype of `a_data`, i.e. 
the dtype of the sparse matrix, cannot be a\n complex type.\n\n This op is used as an optimization of mul_s_d.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_data, a_indices, a_indptr, b):\n assert b.type.ndim == 2\n return gof.Apply(\n self, [a_data, a_indices, a_indptr, b], [tensor.tensor(b.dtype, (False,))]\n )\n\n def c_code_cache_version(self):\n return (3,)\n\n # def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):\n # return NotImplementedError()\n\n def c_code(self, node, name, inputs, outputs, sub):\n\n (\n _data,\n _indices,\n _indptr,\n _b,\n ) = inputs\n (_zout,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(_b)s) != 2) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\");\n %(fail)s;}\n if (PyArray_NDIM(%(_data)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(data) != 1\");\n %(fail)s;}\n if (PyArray_NDIM(%(_indices)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indices) != 1\");\n %(fail)s;}\n if (PyArray_NDIM(%(_indptr)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indptr) != 1\");\n %(fail)s;}\n\n if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"C\"); %(fail)s;}\n\n if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"D\"); %(fail)s;}\n\n if (!%(_zout)s ||\n (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]) ||\n !(PyArray_ISCONTIGUOUS(%(_zout)s)))\n {\n Py_XDECREF(%(_zout)s);\n %(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,\n PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));\n if (!%(_zout)s)\n {\n PyErr_SetString(PyExc_MemoryError,\n \"Could not allocate output memory.\");\n %(fail)s;\n }\n }\n\n { //makes it compile even though labels jump over variable definitions.\n const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];\n //TODO: error checking with this\n const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);\n const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);\n\n dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);\n\n const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0];\n\n // loop over columns\n for (npy_intp j = 0; j < N; ++j)\n {\n // for each non-null value in the sparse column\n for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)\n {\n // extract row index of non-null value\n npy_int32 i = indices[i_idx];\n\n // extract i-th row of dense matrix\n const dtype_%(_b)s* __restrict__ b_row = (dtype_%(_b)s*)(PyArray_BYTES(%(_b)s) + Sb * i);\n\n // write resulting gradient to sparse output\n zout[i_idx] = data[i_idx] * b_row[j];\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def __str__(self):\n return self.__class__.__name__\n\n\nmul_s_d_csc = MulSDCSC()\n\n\nclass MulSDCSR(COp):\n \"\"\"\n Multiplication of sparse matrix by a broadcasted dense vector\n element wise.\n\n Parameters\n ----------\n a_data\n Sparse matrix data.\n a_indices\n Sparse matrix indices.\n a_indptr\n Sparse matrix indptr.\n b\n Tensor type matrix.\n\n Returns\n -------\n The 
multiplication of the two matrix element wise.\n\n Notes\n -----\n `a_data`, `a_indices` and `a_indptr` must be the properties\n of a sparse matrix in csr format.\n\n The dtype of `a_data`, i.e. the dtype of the sparse matrix,\n cannot be a complex type.\n\n This op is used as an optimization of mul_s_d.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_data, a_indices, a_indptr, b):\n assert b.type.ndim == 2\n return gof.Apply(\n self, [a_data, a_indices, a_indptr, b], [tensor.tensor(b.dtype, (False,))]\n )\n\n def c_code_cache_version(self):\n return (3,)\n\n # def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)):\n # return NotImplemented()\n\n def c_code(self, node, name, inputs, outputs, sub):\n\n (\n _data,\n _indices,\n _indptr,\n _b,\n ) = inputs\n (_zout,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(_b)s) != 2) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 2\");\n %(fail)s;}\n if (PyArray_NDIM(%(_data)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(data) != 1\");\n %(fail)s;}\n if (PyArray_NDIM(%(_indices)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indices) != 1\");\n %(fail)s;}\n if (PyArray_NDIM(%(_indptr)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indptr) != 1\");\n %(fail)s;}\n\n if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"C\"); %(fail)s;}\n\n if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"D\"); %(fail)s;}\n\n if (!%(_zout)s ||\n (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]) ||\n !(PyArray_ISCONTIGUOUS(%(_zout)s)))\n {\n Py_XDECREF(%(_zout)s);\n %(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,\n PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));\n if (!%(_zout)s)\n {\n PyErr_SetString(PyExc_MemoryError,\n \"Could not allocate output memory.\");\n %(fail)s;\n }\n }\n\n { //makes it compile even though labels jump over variable definitions.\n const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];\n //TODO: error checking with this\n const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);\n const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);\n\n dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);\n\n const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0];\n\n // loop over columns\n for (npy_intp j = 0; j < N; ++j)\n {\n // extract i-th row of dense matrix\n const dtype_%(_b)s* __restrict__ b_row = (dtype_%(_b)s*)(PyArray_BYTES(%(_b)s) + Sb * j);\n\n // for each non-null value in the sparse column\n for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)\n {\n // extract row index of non-null value\n npy_int32 i = indices[i_idx];\n\n // write resulting gradient to sparse output\n zout[i_idx] = data[i_idx] * b_row[i];\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def __str__(self):\n return self.__class__.__name__\n\n\nmul_s_d_csr = MulSDCSR()\n\n\n# register a specialization to replace MulSD -> MulSDCSX\n@gof.local_optimizer([sparse.mul_s_d])\ndef 
local_mul_s_d(fgraph, node):\n if node.op == sparse.mul_s_d:\n x, y = node.inputs\n\n x_is_sparse_variable = _is_sparse_variable(x)\n\n if x_is_sparse_variable:\n svar = x\n dvar = y\n else:\n svar = y\n dvar = x\n\n if dvar.type.ndim != 2:\n return False\n if svar.type.format == \"csc\":\n CSx = sparse.CSC\n mul_s_d_csx = mul_s_d_csc\n elif svar.type.format == \"csr\":\n CSx = sparse.CSR\n mul_s_d_csx = mul_s_d_csr\n else:\n raise NotImplementedError\n if x.dtype != y.dtype:\n # mul_s_d_csx don't support that case\n return\n\n c_data = mul_s_d_csx(\n sparse.csm_data(svar),\n sparse.csm_indices(svar),\n sparse.csm_indptr(svar),\n dvar,\n )\n\n return [\n CSx(\n c_data,\n sparse.csm_indices(svar),\n sparse.csm_indptr(svar),\n sparse.csm_shape(svar),\n )\n ]\n\n return False\n\n\nregister_specialize(local_mul_s_d, \"cxx_only\")\n\n\nclass MulSVCSR(COp):\n \"\"\"\n Multiplication of sparse matrix by a broadcasted dense vector\n element wise.\n\n Parameters\n ----------\n a_data\n Sparse matrix data.\n a_indices\n Sparse matrix indices.\n a_indptr\n Sparse matrix indptr.\n b\n Tensor type matrix.\n\n Returns\n -------\n The multiplication of the two matrix element wise.\n\n Notes\n -----\n `a_data`, `a_indices` and `a_indptr` must be the properties\n of a sparse matrix in csr format.\n\n The dtype of `a_data`, i.e. the dtype of the sparse matrix,\n cannot be a complex type.\n\n This op is used as an optimization of MulSV.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_data, a_indices, a_indptr, b):\n assert b.type.ndim == 1\n return gof.Apply(\n self, [a_data, a_indices, a_indptr, b], [tensor.tensor(b.dtype, (False,))]\n )\n\n def c_code_cache_version(self):\n return (2,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n (\n _data,\n _indices,\n _indptr,\n _b,\n ) = inputs\n (_zout,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(_b)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_data)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(data) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_indices)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indices) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_indptr)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indptr) != 1\");\n %(fail)s;\n }\n\n if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"C\"); %(fail)s;}\n\n if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"D\"); %(fail)s;}\n\n if (!%(_zout)s\n || PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0]\n || !PyArray_ISCONTIGUOUS(%(_zout)s))\n {\n Py_XDECREF(%(_zout)s);\n %(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,\n PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));\n }\n\n { //makes it compile even though labels jump over variable definitions.\n const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];\n //TODO: error checking with this\n const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);\n const npy_int32 * const __restrict__ 
indices = (npy_int32 *)PyArray_DATA(%(_indices)s);\n\n const dtype_%(_b)s* __restrict__ Db = (dtype_%(_b)s*)PyArray_DATA(%(_b)s);\n\n dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);\n\n const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0] / PyArray_DESCR(%(_b)s)->elsize;\n\n // loop over rows\n for (npy_intp j = 0; j < N; ++j)\n {\n // for each non-null value in the sparse column\n for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)\n {\n // extract row index of non-null value\n npy_int32 i = indices[i_idx];\n\n zout[i_idx] = data[i_idx] * Db[i * Sb];\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def __str__(self):\n return self.__class__.__name__\n\n\nmul_s_v_csr = MulSVCSR()\n\n\n# register a specialization to replace MulSV -> MulSVCSR\n@gof.local_optimizer([sparse.mul_s_v])\ndef local_mul_s_v(fgraph, node):\n if node.op == sparse.mul_s_v:\n x, y = node.inputs\n\n x_is_sparse_variable = _is_sparse_variable(x)\n\n if x_is_sparse_variable:\n svar = x\n dvar = y\n else:\n svar = y\n dvar = x\n\n if dvar.type.ndim != 1:\n return False\n elif svar.type.format == \"csr\":\n CSx = sparse.CSR\n mul_s_v_csx = mul_s_v_csr\n else:\n return False\n\n s_val, s_ind, s_ptr, s_shape = sparse.csm_properties(svar)\n\n c_data = mul_s_v_csx(s_val, s_ind, s_ptr, dvar)\n\n return [CSx(c_data, s_ind, s_ptr, s_shape)]\n\n return False\n\n\nregister_specialize(local_mul_s_v, \"cxx_only\")\n\n\nclass StructuredAddSVCSR(COp):\n \"\"\"\n Structured addition of a sparse matrix and a dense vector.\n The elements of the vector are are only added to the corresponding\n non-zero elements. Therefore, this operation outputs another sparse\n matrix.\n\n Parameters\n ----------\n a_data\n Sparse matrix data.\n a_indices\n Sparse matrix indices.\n a_indptr\n Sparse matrix indptr.\n b\n Tensor type vector.\n\n Returns\n -------\n A sparse matrix containing the addition of the vector to the data of the\n sparse matrix.\n\n Notes\n -----\n The a_* are the properties of a sparse matrix in csr format.\n\n This op is used as an optimization for StructuredAddSV.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, a_data, a_indices, a_indptr, b):\n b = tensor.as_tensor_variable(b)\n a_data = tensor.as_tensor_variable(a_data)\n a_indices = tensor.as_tensor_variable(a_indices)\n a_indptr = tensor.as_tensor_variable(a_indptr)\n assert a_data.type.ndim == 1\n assert a_indices.type.ndim == 1\n assert a_indptr.type.ndim == 1\n assert b.type.ndim == 1\n return gof.Apply(\n self, [a_data, a_indices, a_indptr, b], [tensor.tensor(b.dtype, (False,))]\n )\n\n def c_code_cache_version(self):\n return (3,)\n\n def c_code(self, node, name, inputs, outputs, sub):\n (\n _data,\n _indices,\n _indptr,\n _b,\n ) = inputs\n (_zout,) = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for a\")\n if node.inputs[3].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for b\")\n\n return \"\"\"\n if (PyArray_NDIM(%(_b)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(b) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_data)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(data) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_indices)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indices) != 1\");\n %(fail)s;\n }\n if (PyArray_NDIM(%(_indptr)s) != 1) {\n PyErr_SetString(PyExc_NotImplementedError, \"rank(indptr) != 1\");\n 
%(fail)s;\n }\n\n if( PyArray_TYPE(%(_indices)s) != NPY_INT32) {\n PyErr_SetString(PyExc_NotImplementedError, \"C\"); %(fail)s;}\n\n if( PyArray_TYPE(%(_indptr)s) != NPY_INT32)\n {PyErr_SetString(PyExc_NotImplementedError, \"D\"); %(fail)s;}\n\n if (!%(_zout)s\n || (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_indices)s)[0])\n || !(PyArray_ISCONTIGUOUS(%(_zout)s)))\n {\n Py_XDECREF(%(_zout)s);\n %(_zout)s = (PyArrayObject*) PyArray_SimpleNew(1,\n PyArray_DIMS(%(_indices)s), PyArray_TYPE(%(_b)s));\n if (!%(_zout)s)\n {\n PyErr_SetString(PyExc_MemoryError,\n \"Could not allocate output memory.\");\n %(fail)s;\n }\n }\n\n { //makes it compile even though labels jump over variable definitions.\n const npy_intp nnz = PyArray_DIMS(%(_indices)s)[0];\n //TODO: error checking with this\n const npy_intp N = PyArray_DIMS(%(_indptr)s)[0]-1;\n\n const dtype_%(_data)s * const __restrict__ data = (dtype_%(_data)s*)PyArray_DATA(%(_data)s);\n const npy_int32 * const __restrict__ indptr = (npy_int32 *)PyArray_DATA(%(_indptr)s);\n const npy_int32 * const __restrict__ indices = (npy_int32 *)PyArray_DATA(%(_indices)s);\n\n const dtype_%(_b)s* __restrict__ Db = (dtype_%(_b)s*)PyArray_DATA(%(_b)s);\n\n dtype_%(_zout)s * const __restrict__ zout = (dtype_%(_zout)s*)PyArray_DATA(%(_zout)s);\n\n const npy_intp Sb = PyArray_STRIDES(%(_b)s)[0] / PyArray_DESCR(%(_b)s)->elsize;\n\n // loop over columns\n for (npy_intp j = 0; j < N; ++j)\n {\n // for each non-null value in the sparse column\n for (npy_int32 i_idx = indptr[j]; i_idx < indptr[j+1]; ++i_idx)\n {\n // extract row index of non-null value\n npy_int32 i = indices[i_idx];\n\n // write resulting gradient to sparse output\n zout[i_idx] = data[i_idx] + Db[i * Sb];\n }\n }\n }\n\n \"\"\" % dict(\n locals(), **sub\n )\n\n def __str__(self):\n return self.__class__.__name__\n\n\nstructured_add_s_v_csr = StructuredAddSVCSR()\n\n\n# register a specialization to replace\n# structured_add_s_v -> structured_add_s_v_csr\n@gof.local_optimizer([sparse.structured_add_s_v])\ndef local_structured_add_s_v(fgraph, node):\n if node.op == sparse.structured_add_s_v:\n x, y = node.inputs\n\n x_is_sparse_variable = _is_sparse_variable(x)\n # y_is_sparse_variable = _is_sparse_variable(y)\n\n if x_is_sparse_variable:\n svar = x\n dvar = y\n else:\n svar = y\n dvar = x\n\n if dvar.type.ndim != 1:\n return False\n elif svar.type.format == \"csr\":\n CSx = sparse.CSR\n structured_add_s_v_csx = structured_add_s_v_csr\n else:\n return False\n\n s_val, s_ind, s_ptr, s_shape = sparse.csm_properties(svar)\n\n c_data = structured_add_s_v_csx(s_val, s_ind, s_ptr, dvar)\n\n return [CSx(c_data, s_ind, s_ptr, s_shape)]\n\n return False\n\n\nregister_specialize(local_structured_add_s_v, \"cxx_only\")\n\n\nclass SamplingDotCSR(COp):\n \"\"\"\n Operand optimized for calculating the dot product dot(`x`, `y`.T) = `z`\n when you only want to calculate a subset of `z`.\n\n It is equivalent to `p` o (`x` . `y`.T) where o is the element-wise\n product, `x` and `y` operands of the dot product and `p` is a matrix\n that contains 1 when the corresponding element of `z` should be\n calculated and 0 when it shouldn't. 
Note that SamplingDot has a different\n interface than `dot` because SamplingDot requires `x` to be a `m`x`k`\n matrix while `y` is a `n`x`k` matrix instead of the usual `k`x`n` matrix.\n\n Parameters\n ----------\n x\n Tensor matrix.\n y\n Tensor matrix.\n p_data\n Sparse matrix data.\n p_ind\n Sparse matrix indices.\n p_ptr\n Sparse matric indptr.\n p_ncols\n Sparse matrix number of columns.\n\n Returns\n -------\n A dense matrix containing the dot product of `x` by `y`.T only\n where `p` is 1.\n\n Notes\n -----\n It will work if the pattern is not binary value, but if the\n pattern doesn't have a high sparsity proportion it will be slower\n then a more optimized dot followed by a normal elemwise\n multiplication.\n\n If we have the input of mixed dtype, we insert cast elemwise\n in the graph to be able to call blas function as they don't\n allow mixed dtype.\n\n This op is used as an optimization for SamplingDot.\n\n \"\"\"\n\n __props__ = ()\n\n def make_node(self, x, y, p_data, p_ind, p_ptr, p_ncols):\n x = tensor.as_tensor_variable(x)\n y = tensor.as_tensor_variable(y)\n p_data = tensor.as_tensor_variable(p_data)\n p_ind = tensor.as_tensor_variable(p_ind)\n p_ptr = tensor.as_tensor_variable(p_ptr)\n p_ncols = tensor.as_tensor_variable(p_ncols)\n\n assert p_ncols.dtype == \"int32\"\n\n dtype_out = scalar.upcast(x.type.dtype, y.type.dtype, p_data.type.dtype)\n dot_out = scalar.upcast(x.type.dtype, y.type.dtype)\n\n # We call blas ?dot function that take only param of the same type\n x = tensor.cast(x, dot_out)\n y = tensor.cast(y, dot_out)\n\n return gof.Apply(\n self,\n [x, y, p_data, p_ind, p_ptr, p_ncols],\n [\n tensor.tensor(dtype=dtype_out, broadcastable=(False,)),\n tensor.tensor(dtype=p_ind.type.dtype, broadcastable=(False,)),\n tensor.tensor(dtype=p_ptr.type.dtype, broadcastable=(False,)),\n ],\n )\n\n def c_code_cache_version(self):\n return (4, blas.blas_header_version())\n\n def c_support_code(self):\n return blas.blas_header_text()\n\n def c_libraries(self):\n return blas.ldflags()\n\n def c_compile_args(self):\n return blas.ldflags(libs=False, flags=True)\n\n def c_lib_dirs(self):\n return blas.ldflags(libs=False, libs_dir=True)\n\n def c_header_dirs(self):\n return blas.ldflags(libs=False, include_dir=True)\n\n def c_code(self, node, name, inputs, outputs, sub):\n x, y, p_data, p_ind, p_ptr, p_ncols = inputs\n z_data, z_ind, z_ptr = outputs\n if node.inputs[0].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for x\")\n if node.inputs[1].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for y\")\n if node.inputs[2].type.dtype in (\"complex64\", \"complex128\"):\n raise NotImplementedError(\"Complex types are not supported for pattern\")\n\n dot_out = scalar.upcast(node.inputs[0].type.dtype, node.inputs[1].type.dtype)\n\n if dot_out == \"float32\":\n conv_type = \"float\"\n cdot = \"sdot_\"\n else:\n conv_type = \"double\"\n cdot = \"ddot_\"\n\n # retrieve dtype number\n typenum_x = node.inputs[0].type.dtype_specs()[2]\n typenum_y = node.inputs[1].type.dtype_specs()[2]\n typenum_p = node.inputs[2].type.dtype_specs()[2]\n typenum_zd = tensor.TensorType(node.outputs[0].dtype, []).dtype_specs()[2]\n typenum_zi = tensor.TensorType(node.outputs[1].dtype, []).dtype_specs()[2]\n typenum_zp = tensor.TensorType(node.outputs[2].dtype, []).dtype_specs()[2]\n\n rval = \"\"\"\n if (PyArray_NDIM(%(x)s) != 2) {\nPyErr_SetString(PyExc_NotImplementedError, \"rank(x) != 
2\"); %(fail)s;}\n if (PyArray_NDIM(%(y)s) != 2) {\nPyErr_SetString(PyExc_NotImplementedError, \"rank(y) != 2\"); %(fail)s;}\n\n if (PyArray_TYPE(%(x)s) != %(typenum_x)s) {\n PyErr_SetString(PyExc_NotImplementedError,\n \"Invalid type for x\");\n %(fail)s;}\n\n if (PyArray_TYPE(%(y)s) != %(typenum_y)s) {\n PyErr_SetString(PyExc_NotImplementedError,\n \"Invalid type for y\");\n %(fail)s;}\n\n if (PyArray_TYPE(%(p_data)s) != %(typenum_p)s) {\n PyErr_SetString(PyExc_NotImplementedError,\n \"Invalid type for pattern\");\n %(fail)s;}\n\n if (PyArray_DIMS(%(x)s)[1] != PyArray_DIMS(%(y)s)[1]) {\n PyErr_SetString(PyExc_NotImplementedError,\n \"x's number of columns doesn't match y's rows! Note: sampling_dot is different from dot because y is assumed to be transposed.\");\n %(fail)s;}\n\n if (PyArray_DIMS(%(y)s)[0] != ((npy_int32 *)PyArray_DATA(%(p_ncols)s))[0] ||\n PyArray_DIMS(%(x)s)[0] != (PyArray_DIMS(%(p_ptr)s)[0] - 1))\n {PyErr_SetString(PyExc_NotImplementedError,\n \"The dimension of the pattern and the output must match\"); %(fail)s;}\n\n // Allocate output\n if (!%(z_data)s\n || (PyArray_DIMS(%(z_data)s)[0] != PyArray_DIMS(%(p_data)s)[0])\n || (PyArray_TYPE(%(z_data)s) != %(typenum_zd)s)\n || !(PyArray_ISCONTIGUOUS(%(z_data)s)))\n {\n {Py_XDECREF(%(z_data)s);}\n npy_intp dims[] = {0};\n dims[0] = PyArray_DIMS(%(p_data)s)[0];\n %(z_data)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,\n %(typenum_zd)s);\n }\n if (!%(z_ind)s\n || (PyArray_DIMS(%(z_ind)s)[0] != PyArray_DIMS(%(p_ind)s)[0])\n || (PyArray_TYPE(%(z_ind)s) != %(typenum_zi)s)\n || !(PyArray_ISCONTIGUOUS(%(z_ind)s)))\n {\n {Py_XDECREF(%(z_ind)s);}\n npy_intp dims[] = {0};\n dims[0] = PyArray_DIMS(%(p_ind)s)[0];\n %(z_ind)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,\n %(typenum_zi)s);\n }\n if (!%(z_ptr)s\n || (PyArray_DIMS(%(z_ptr)s)[0] != PyArray_DIMS(%(p_ptr)s)[0])\n || (PyArray_TYPE(%(z_ptr)s) != %(typenum_zp)s)\n || !(PyArray_ISCONTIGUOUS(%(z_ptr)s)))\n {\n {Py_XDECREF(%(z_ptr)s);}\n npy_intp dims[] = {0};\n dims[0] = PyArray_DIMS(%(p_ptr)s)[0];\n %(z_ptr)s = (PyArrayObject*) PyArray_SimpleNew(1, dims,\n %(typenum_zp)s);\n }\n\n {\n // Product of MxK and NxK, output MxN\n npy_intp M = PyArray_DIMS(%(x)s)[0];\n npy_intp N = PyArray_DIMS(%(y)s)[0];\n npy_intp K = PyArray_DIMS(%(y)s)[1];\n\n // pointers to access actual data in the arrays passed as params.\n const dtype_%(x)s* __restrict__ Dx = (dtype_%(x)s*)PyArray_DATA(%(x)s);\n const dtype_%(y)s* __restrict__ Dy = (dtype_%(y)s*)PyArray_DATA(%(y)s);\n const dtype_%(p_data)s* __restrict__ Dpd = (dtype_%(p_data)s*)PyArray_DATA(%(p_data)s);\n const dtype_%(p_ind)s* __restrict__ Dpi = (dtype_%(p_ind)s*)PyArray_DATA(%(p_ind)s);\n const dtype_%(p_ptr)s* __restrict__ Dpp = (dtype_%(p_ptr)s*)PyArray_DATA(%(p_ptr)s);\n dtype_%(z_data)s* __restrict__ Dzd = (dtype_%(z_data)s*)PyArray_DATA(%(z_data)s);\n dtype_%(z_ind)s* __restrict__ Dzi = (dtype_%(z_ind)s*)PyArray_DATA(%(z_ind)s);\n dtype_%(z_ptr)s* __restrict__ Dzp = (dtype_%(z_ptr)s*)PyArray_DATA(%(z_ptr)s);\n\n const npy_intp Sdx = PyArray_STRIDES(%(x)s)[1]/PyArray_DESCR(%(x)s)->elsize;\n const npy_intp Sdy = PyArray_STRIDES(%(y)s)[1]/PyArray_DESCR(%(y)s)->elsize;\n const npy_intp Sdpd = PyArray_STRIDES(%(p_data)s)[0] / PyArray_DESCR(%(p_data)s)->elsize;\n const npy_intp Sdpi = PyArray_STRIDES(%(p_ind)s)[0] / PyArray_DESCR(%(p_ind)s)->elsize;\n const npy_intp Sdpp = PyArray_STRIDES(%(p_ptr)s)[0] / PyArray_DESCR(%(p_ptr)s)->elsize;\n const npy_intp Sdzd = PyArray_STRIDES(%(z_data)s)[0] / PyArray_DESCR(%(z_data)s)->elsize;\n const 
npy_intp Sdzi = PyArray_STRIDES(%(z_ind)s)[0] / PyArray_DESCR(%(z_ind)s)->elsize;\n const npy_intp Sdzp = PyArray_STRIDES(%(z_ptr)s)[0] / PyArray_DESCR(%(z_ptr)s)->elsize;\n\n memcpy(Dzi, Dpi, PyArray_DIMS(%(p_ind)s)[0]*sizeof(dtype_%(p_ind)s));\n memcpy(Dzp, Dpp, PyArray_DIMS(%(p_ptr)s)[0]*sizeof(dtype_%(p_ptr)s));\n\n // blas expects ints; convert here (rather than just making K etc ints) to avoid potential overflow in the negative-stride correction\n if ((K > 0x7fffffffL)||(Sdx > 0x7fffffffL)||(Sdy > 0x7fffffffL)||(Sdx < -0x7fffffffL)||(Sdy < -0x7fffffffL))\n {PyErr_SetString(PyExc_NotImplementedError, \"array too big for BLAS (overflows int32 index)\"); %(fail)s;}\n int K32 = K;\n int Sdx32 = Sdx;\n int Sdy32 = Sdy;\n\n for (npy_intp m = 0; m < M; ++m) {\n for (npy_int32 n_idx = Dpp[m * Sdpp]; n_idx < Dpp[(m+1)*Sdpp]; ++n_idx) {\n const npy_int32 n = Dpi[n_idx * Sdpi]; // row index of non-null value for column K\n\n const dtype_%(x)s* x_row = (dtype_%(x)s*)(PyArray_BYTES(%(x)s) + PyArray_STRIDES(%(x)s)[0] * m);\n\n const dtype_%(y)s* y_col = (dtype_%(y)s*)(PyArray_BYTES(%(y)s) + PyArray_STRIDES(%(y)s)[0] * n);\n // dot expects pointer to the beginning of memory arrays,\n // so when the stride is negative, we need to get the\n // last element\n if (Sdx < 0)\n x_row += (K - 1) * Sdx;\n if (Sdy < 0)\n y_col += (K - 1) * Sdy;\n\n Dzd[n_idx * Sdzd] = Dpd[n_idx * Sdpd] * %(cdot)s(&K32, (const %(conv_type)s*)x_row, &Sdx32, (const %(conv_type)s*)y_col, &Sdy32);\n }\n }\n }\n \"\"\" % dict(\n locals(), **sub\n )\n\n return rval\n\n\nsampling_dot_csr = SamplingDotCSR()\n\n\n# register a specialization to replace SamplingDot -> SamplingDotCsr\n@gof.local_optimizer([sparse.sampling_dot])\ndef local_sampling_dot_csr(fgraph, node):\n if not config.blas__ldflags:\n # The C implementation of SamplingDotCsr relies on BLAS routines\n return\n if node.op == sparse.sampling_dot:\n x, y, p = node.inputs\n if p.type.format == \"csr\":\n p_data, p_ind, p_ptr, p_shape = sparse.csm_properties(p)\n\n z_data, z_ind, z_ptr = sampling_dot_csr(\n x, y, p_data, p_ind, p_ptr, p_shape[1]\n )\n\n return [sparse.CSR(z_data, z_ind, z_ptr, p_shape)]\n return False\n\n\nregister_specialize(local_sampling_dot_csr, \"cxx_only\", name=\"local_sampling_dot_csr\")\n" ]
[ [ "numpy.random.binomial", "numpy.random.multinomial", "numpy.random.poisson" ], [ "scipy.sparse.csc_matrix", "numpy.all" ] ]
ZurMaD/DeepGrabCut-PyTorch
[ "13d9e81e6e438ad3394fb3a78aca26c2cc63c825" ]
[ "dataloaders/combine_dbs.py" ]
[ "import torch.utils.data as data\n\n\nclass CombineDBs(data.Dataset):\n def __init__(self, dataloaders, excluded=None):\n self.dataloaders = dataloaders\n self.excluded = excluded\n self.im_ids = []\n\n # Combine object lists\n for dl in dataloaders:\n for elem in dl.im_ids:\n if elem not in self.im_ids:\n self.im_ids.append(elem)\n\n # Exclude\n if excluded:\n for dl in excluded:\n for elem in dl.im_ids:\n if elem in self.im_ids:\n self.im_ids.remove(elem)\n\n # Get object pointers\n self.obj_list = []\n self.im_list = []\n new_im_ids = []\n obj_counter = 0\n num_images = 0\n for ii, dl in enumerate(dataloaders):\n for jj, curr_im_id in enumerate(dl.im_ids):\n if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids):\n flag = False\n new_im_ids.append(curr_im_id)\n for kk in range(len(dl.obj_dict[curr_im_id])):\n if dl.obj_dict[curr_im_id][kk] != -1:\n self.obj_list.append({'db_ii': ii, 'obj_ii': dl.obj_list.index([jj, kk])})\n flag = True\n obj_counter += 1\n self.im_list.append({'db_ii': ii, 'im_ii': jj})\n if flag:\n num_images += 1\n\n self.im_ids = new_im_ids\n print('Combined number of images: {:d}\\nCombined number of objects: {:d}'.format(num_images, len(self.obj_list)))\n\n def __getitem__(self, index):\n\n _db_ii = self.obj_list[index][\"db_ii\"]\n _obj_ii = self.obj_list[index]['obj_ii']\n sample = self.dataloaders[_db_ii].__getitem__(_obj_ii)\n\n if 'meta' in sample.keys():\n sample['meta']['db'] = str(self.dataloaders[_db_ii])\n\n return sample\n\n def __len__(self):\n return len(self.obj_list)\n\n def __str__(self):\n include_db = [str(db) for db in self.dataloaders]\n exclude_db = [str(db) for db in self.excluded]\n return 'Included datasets:'+str(include_db)+'\\n'+'Excluded datasets:'+str(exclude_db)\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n from dataloaders import pascal\n from dataloaders import sbd\n import torch\n import numpy as np\n import dataset.custom_transforms as tr\n from torchvision import transforms\n\n composed_transforms_tr = transforms.Compose([\n tr.RandomHorizontalFlip(),\n tr.ScaleNRotate(rots=(-15, 15), scales=(.75, 1.25)),\n tr.FixedResize(resolutions={'image': (450, 450), 'gt': (450, 450)}),\n tr.DistanceMap(v=0.15, elem='gt'),\n tr.ConcatInputs(elems=('image', 'distance_map')),\n tr.ToTensor()])\n\n composed_transforms_ts = transforms.Compose([\n tr.FixedResize(resolutions={'image': (450, 450), 'gt': (450, 450)}),\n tr.DistanceMap(v=0.15, elem='gt'),\n tr.ConcatInputs(elems=('image', 'distance_map')),\n tr.ToTensor()])\n\n pascal_voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts, retname=True)\n sbd = sbd.SBDSegmentation(split=['train', 'val'], transform=composed_transforms_tr, retname=True)\n pascal_voc_train = pascal.VOCSegmentation(split='train', transform=composed_transforms_tr, retname=True)\n\n dataset = CombineDBs([pascal_voc_train, sbd], excluded=[pascal_voc_val])\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0)\n\n for ii, sample in enumerate(dataloader):\n for jj in range(sample[\"image\"].size()[0]):\n dismap = sample['distance_map'][jj].numpy()\n gt = sample['gt'][jj].numpy()\n gt[gt > 0] = 255\n gt = np.array(gt[0]).astype(np.uint8)\n dismap = np.array(dismap[0]).astype(np.uint8)\n display = 0.9 * gt + 0.4 * dismap\n display = display.astype(np.uint8)\n plt.figure()\n plt.title('display')\n plt.imshow(display, cmap='gray')\n\n if ii == 1:\n break\n plt.show(block=True)" ]
[ [ "torch.utils.data.DataLoader", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.array" ] ]
scrambler-crypto/pyecsca
[ "491abfb548455669abd470382a48dcd07b2eda87" ]
[ "test/sca/test_edit.py" ]
[ "from unittest import TestCase\n\nimport numpy as np\n\nfrom pyecsca.sca import Trace, trim, reverse, pad\n\n\nclass EditTests(TestCase):\n\n def setUp(self):\n self._trace = Trace(np.array([10, 20, 30, 40, 50], dtype=np.dtype(\"i1\")))\n\n def test_trim(self):\n result = trim(self._trace, 2)\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples, np.array([30, 40, 50], dtype=np.dtype(\"i1\")))\n\n result = trim(self._trace, end=3)\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples, np.array([10, 20, 30], dtype=np.dtype(\"i1\")))\n\n with self.assertRaises(ValueError):\n trim(self._trace, 5, 1)\n\n def test_reverse(self):\n result = reverse(self._trace)\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples,\n np.array([50, 40, 30, 20, 10], dtype=np.dtype(\"i1\")))\n\n def test_pad(self):\n result = pad(self._trace, 2)\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples,\n np.array([0, 0, 10, 20, 30, 40, 50, 0, 0], dtype=np.dtype(\"i1\")))\n\n result = pad(self._trace, (1, 3))\n self.assertIsNotNone(result)\n np.testing.assert_equal(result.samples,\n np.array([0, 10, 20, 30, 40, 50, 0, 0, 0], dtype=np.dtype(\"i1\")))\n" ]
[ [ "numpy.dtype" ] ]
kappaIO-Dev/kappaIO-sdk-armhf-crosscompile
[ "66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2", "66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2" ]
[ "rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_legendre.py", "rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_chebyshev.py" ]
[ "\"\"\"Tests for legendre module.\n\n\"\"\"\n\n\nimport numpy as np\nimport numpy.polynomial.legendre as leg\nimport numpy.polynomial.polynomial as poly\nfrom numpy.testing import *\n\nP0 = np.array([ 1])\nP1 = np.array([ 0, 1])\nP2 = np.array([-1, 0, 3])/2\nP3 = np.array([ 0, -3, 0, 5])/2\nP4 = np.array([ 3, 0, -30, 0, 35])/8\nP5 = np.array([ 0, 15, 0, -70, 0, 63])/8\nP6 = np.array([-5, 0, 105, 0,-315, 0, 231])/16\nP7 = np.array([ 0,-35, 0, 315, 0, -693, 0, 429])/16\nP8 = np.array([35, 0,-1260, 0,6930, 0,-12012, 0,6435])/128\nP9 = np.array([ 0,315, 0,-4620, 0,18018, 0,-25740, 0,12155])/128\n\nPlist = [P0, P1, P2, P3, P4, P5, P6, P7, P8, P9]\n\ndef trim(x) :\n return leg.legtrim(x, tol=1e-6)\n\n\nclass TestConstants(TestCase) :\n\n def test_legdomain(self) :\n assert_equal(leg.legdomain, [-1, 1])\n\n def test_legzero(self) :\n assert_equal(leg.legzero, [0])\n\n def test_legone(self) :\n assert_equal(leg.legone, [1])\n\n def test_legx(self) :\n assert_equal(leg.legx, [0, 1])\n\n\nclass TestArithmetic(TestCase) :\n x = np.linspace(-1, 1, 100)\n y0 = poly.polyval(x, P0)\n y1 = poly.polyval(x, P1)\n y2 = poly.polyval(x, P2)\n y3 = poly.polyval(x, P3)\n y4 = poly.polyval(x, P4)\n y5 = poly.polyval(x, P5)\n y6 = poly.polyval(x, P6)\n y7 = poly.polyval(x, P7)\n y8 = poly.polyval(x, P8)\n y9 = poly.polyval(x, P9)\n y = [y0, y1, y2, y3, y4, y5, y6, y7, y8, y9]\n\n def test_legval(self) :\n def f(x) :\n return x*(x**2 - 1)\n\n #check empty input\n assert_equal(leg.legval([], [1]).size, 0)\n\n #check normal input)\n for i in range(10) :\n msg = \"At i=%d\" % i\n ser = np.zeros\n tgt = self.y[i]\n res = leg.legval(self.x, [0]*i + [1])\n assert_almost_equal(res, tgt, err_msg=msg)\n\n #check that shape is preserved\n for i in range(3) :\n dims = [2]*i\n x = np.zeros(dims)\n assert_equal(leg.legval(x, [1]).shape, dims)\n assert_equal(leg.legval(x, [1,0]).shape, dims)\n assert_equal(leg.legval(x, [1,0,0]).shape, dims)\n\n def test_legadd(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(max(i,j) + 1)\n tgt[i] += 1\n tgt[j] += 1\n res = leg.legadd([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_legsub(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(max(i,j) + 1)\n tgt[i] += 1\n tgt[j] -= 1\n res = leg.legsub([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_legmulx(self):\n assert_equal(leg.legmulx([0]), [0])\n assert_equal(leg.legmulx([1]), [0,1])\n for i in range(1, 5):\n tmp = 2*i + 1\n ser = [0]*i + [1]\n tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]\n assert_equal(leg.legmulx(ser), tgt)\n\n def test_legmul(self) :\n # check values of result\n for i in range(5) :\n pol1 = [0]*i + [1]\n val1 = leg.legval(self.x, pol1)\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n pol2 = [0]*j + [1]\n val2 = leg.legval(self.x, pol2)\n pol3 = leg.legmul(pol1, pol2)\n val3 = leg.legval(self.x, pol3)\n assert_(len(pol3) == i + j + 1, msg)\n assert_almost_equal(val3, val1*val2, err_msg=msg)\n\n def test_legdiv(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n ci = [0]*i + [1]\n cj = [0]*j + [1]\n tgt = leg.legadd(ci, cj)\n quo, rem = leg.legdiv(tgt, ci)\n res = leg.legadd(leg.legmul(quo, ci), rem)\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n\nclass TestCalculus(TestCase) :\n\n def test_legint(self) :\n # check exceptions\n assert_raises(ValueError, leg.legint, [0], .5)\n 
assert_raises(ValueError, leg.legint, [0], -1)\n assert_raises(ValueError, leg.legint, [0], 1, [0,0])\n\n # test integration of zero polynomial\n for i in range(2, 5):\n k = [0]*(i - 2) + [1]\n res = leg.legint([0], m=i, k=k)\n assert_almost_equal(res, [0, 1])\n\n # check single integration with integration constant\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [1/scl]\n legpol = leg.poly2leg(pol)\n legint = leg.legint(legpol, m=1, k=[i])\n res = leg.leg2poly(legint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check single integration with integration constant and lbnd\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n legpol = leg.poly2leg(pol)\n legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)\n assert_almost_equal(leg.legval(-1, legint), i)\n\n # check single integration with integration constant and scaling\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [2/scl]\n legpol = leg.poly2leg(pol)\n legint = leg.legint(legpol, m=1, k=[i], scl=2)\n res = leg.leg2poly(legint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with default k\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = leg.legint(tgt, m=1)\n res = leg.legint(pol, m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with defined k\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = leg.legint(tgt, m=1, k=[k])\n res = leg.legint(pol, m=j, k=list(range(j)))\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with lbnd\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)\n res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with scaling\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = leg.legint(tgt, m=1, k=[k], scl=2)\n res = leg.legint(pol, m=j, k=list(range(j)), scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_legder(self) :\n # check exceptions\n assert_raises(ValueError, leg.legder, [0], .5)\n assert_raises(ValueError, leg.legder, [0], -1)\n\n # check that zeroth deriviative does nothing\n for i in range(5) :\n tgt = [1] + [0]*i\n res = leg.legder(tgt, m=0)\n assert_equal(trim(res), trim(tgt))\n\n # check that derivation is the inverse of integration\n for i in range(5) :\n for j in range(2,5) :\n tgt = [1] + [0]*i\n res = leg.legder(leg.legint(tgt, m=j), m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check derivation with scaling\n for i in range(5) :\n for j in range(2,5) :\n tgt = [1] + [0]*i\n res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)\n assert_almost_equal(trim(res), trim(tgt))\n\n\nclass TestMisc(TestCase) :\n\n def test_legfromroots(self) :\n res = leg.legfromroots([])\n assert_almost_equal(trim(res), [1])\n for i in range(1,5) :\n roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n pol = leg.legfromroots(roots)\n res = leg.legval(roots, pol)\n tgt = 0\n assert_(len(pol) == i + 1)\n assert_almost_equal(leg.leg2poly(pol)[-1], 1)\n assert_almost_equal(res, tgt)\n\n def test_legroots(self) :\n assert_almost_equal(leg.legroots([1]), [])\n assert_almost_equal(leg.legroots([1, 2]), [-.5])\n for i in range(2,5) :\n tgt = np.linspace(-1, 1, i)\n res = 
leg.legroots(leg.legfromroots(tgt))\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_legvander(self) :\n # check for 1d x\n x = np.arange(3)\n v = leg.legvander(x, 3)\n assert_(v.shape == (3,4))\n for i in range(4) :\n coef = [0]*i + [1]\n assert_almost_equal(v[...,i], leg.legval(x, coef))\n\n # check for 2d x\n x = np.array([[1,2],[3,4],[5,6]])\n v = leg.legvander(x, 3)\n assert_(v.shape == (3,2,4))\n for i in range(4) :\n coef = [0]*i + [1]\n assert_almost_equal(v[...,i], leg.legval(x, coef))\n\n def test_legfit(self) :\n def f(x) :\n return x*(x - 1)*(x - 2)\n\n # Test exceptions\n assert_raises(ValueError, leg.legfit, [1], [1], -1)\n assert_raises(TypeError, leg.legfit, [[1]], [1], 0)\n assert_raises(TypeError, leg.legfit, [], [1], 0)\n assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)\n assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)\n assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)\n assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])\n assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1,1])\n\n # Test fit\n x = np.linspace(0,2)\n y = f(x)\n #\n coef3 = leg.legfit(x, y, 3)\n assert_equal(len(coef3), 4)\n assert_almost_equal(leg.legval(x, coef3), y)\n #\n coef4 = leg.legfit(x, y, 4)\n assert_equal(len(coef4), 5)\n assert_almost_equal(leg.legval(x, coef4), y)\n #\n coef2d = leg.legfit(x, np.array([y,y]).T, 3)\n assert_almost_equal(coef2d, np.array([coef3,coef3]).T)\n # test weighting\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n y[0::2] = 0\n wcoef3 = leg.legfit(x, yw, 3, w=w)\n assert_almost_equal(wcoef3, coef3)\n #\n wcoef2d = leg.legfit(x, np.array([yw,yw]).T, 3, w=w)\n assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)\n\n def test_legtrim(self) :\n coef = [2, -1, 1, 0]\n\n # Test exceptions\n assert_raises(ValueError, leg.legtrim, coef, -1)\n\n # Test results\n assert_equal(leg.legtrim(coef), coef[:-1])\n assert_equal(leg.legtrim(coef, 1), coef[:-3])\n assert_equal(leg.legtrim(coef, 2), [0])\n\n def test_legline(self) :\n assert_equal(leg.legline(3,4), [3, 4])\n\n def test_leg2poly(self) :\n for i in range(10) :\n assert_almost_equal(leg.leg2poly([0]*i + [1]), Plist[i])\n\n def test_poly2leg(self) :\n for i in range(10) :\n assert_almost_equal(leg.poly2leg(Plist[i]), [0]*i + [1])\n\n\ndef assert_poly_almost_equal(p1, p2):\n assert_almost_equal(p1.coef, p2.coef)\n assert_equal(p1.domain, p2.domain)\n\n\nclass TestLegendreClass(TestCase) :\n\n p1 = leg.Legendre([1,2,3])\n p2 = leg.Legendre([1,2,3], [0,1])\n p3 = leg.Legendre([1,2])\n p4 = leg.Legendre([2,2,3])\n p5 = leg.Legendre([3,2,3])\n\n def test_equal(self) :\n assert_(self.p1 == self.p1)\n assert_(self.p2 == self.p2)\n assert_(not self.p1 == self.p2)\n assert_(not self.p1 == self.p3)\n assert_(not self.p1 == [1,2,3])\n\n def test_not_equal(self) :\n assert_(not self.p1 != self.p1)\n assert_(not self.p2 != self.p2)\n assert_(self.p1 != self.p2)\n assert_(self.p1 != self.p3)\n assert_(self.p1 != [1,2,3])\n\n def test_add(self) :\n tgt = leg.Legendre([2,4,6])\n assert_(self.p1 + self.p1 == tgt)\n assert_(self.p1 + [1,2,3] == tgt)\n assert_([1,2,3] + self.p1 == tgt)\n\n def test_sub(self) :\n tgt = leg.Legendre([1])\n assert_(self.p4 - self.p1 == tgt)\n assert_(self.p4 - [1,2,3] == tgt)\n assert_([2,2,3] - self.p1 == tgt)\n\n def test_mul(self) :\n tgt = leg.Legendre([4.13333333, 8.8, 11.23809524, 7.2, 4.62857143])\n assert_poly_almost_equal(self.p1 * self.p1, tgt)\n assert_poly_almost_equal(self.p1 * [1,2,3], tgt)\n assert_poly_almost_equal([1,2,3] * self.p1, tgt)\n\n def 
test_floordiv(self) :\n tgt = leg.Legendre([1])\n assert_(self.p4 // self.p1 == tgt)\n assert_(self.p4 // [1,2,3] == tgt)\n assert_([2,2,3] // self.p1 == tgt)\n\n def test_mod(self) :\n tgt = leg.Legendre([1])\n assert_((self.p4 % self.p1) == tgt)\n assert_((self.p4 % [1,2,3]) == tgt)\n assert_(([2,2,3] % self.p1) == tgt)\n\n def test_divmod(self) :\n tquo = leg.Legendre([1])\n trem = leg.Legendre([2])\n quo, rem = divmod(self.p5, self.p1)\n assert_(quo == tquo and rem == trem)\n quo, rem = divmod(self.p5, [1,2,3])\n assert_(quo == tquo and rem == trem)\n quo, rem = divmod([3,2,3], self.p1)\n assert_(quo == tquo and rem == trem)\n\n def test_pow(self) :\n tgt = leg.Legendre([1])\n for i in range(5) :\n res = self.p1**i\n assert_(res == tgt)\n tgt = tgt*self.p1\n\n def test_call(self) :\n # domain = [-1, 1]\n x = np.linspace(-1, 1)\n tgt = 3*(1.5*x**2 - .5) + 2*x + 1\n assert_almost_equal(self.p1(x), tgt)\n\n # domain = [0, 1]\n x = np.linspace(0, 1)\n xx = 2*x - 1\n assert_almost_equal(self.p2(x), self.p1(xx))\n\n def test_degree(self) :\n assert_equal(self.p1.degree(), 2)\n\n def test_cutdeg(self) :\n assert_raises(ValueError, self.p1.cutdeg, .5)\n assert_raises(ValueError, self.p1.cutdeg, -1)\n assert_equal(len(self.p1.cutdeg(3)), 3)\n assert_equal(len(self.p1.cutdeg(2)), 3)\n assert_equal(len(self.p1.cutdeg(1)), 2)\n assert_equal(len(self.p1.cutdeg(0)), 1)\n\n def test_convert(self) :\n x = np.linspace(-1,1)\n p = self.p1.convert(domain=[0,1])\n assert_almost_equal(p(x), self.p1(x))\n\n def test_mapparms(self) :\n parms = self.p2.mapparms()\n assert_almost_equal(parms, [-1, 2])\n\n def test_trim(self) :\n coef = [1, 1e-6, 1e-12, 0]\n p = leg.Legendre(coef)\n assert_equal(p.trim().coef, coef[:3])\n assert_equal(p.trim(1e-10).coef, coef[:2])\n assert_equal(p.trim(1e-5).coef, coef[:1])\n\n def test_truncate(self) :\n assert_raises(ValueError, self.p1.truncate, .5)\n assert_raises(ValueError, self.p1.truncate, 0)\n assert_equal(len(self.p1.truncate(4)), 3)\n assert_equal(len(self.p1.truncate(3)), 3)\n assert_equal(len(self.p1.truncate(2)), 2)\n assert_equal(len(self.p1.truncate(1)), 1)\n\n def test_copy(self) :\n p = self.p1.copy()\n assert_(self.p1 == p)\n\n def test_integ(self) :\n p = self.p2.integ()\n assert_almost_equal(p.coef, leg.legint([1,2,3], 1, 0, scl=.5))\n p = self.p2.integ(lbnd=0)\n assert_almost_equal(p(0), 0)\n p = self.p2.integ(1, 1)\n assert_almost_equal(p.coef, leg.legint([1,2,3], 1, 1, scl=.5))\n p = self.p2.integ(2, [1, 2])\n assert_almost_equal(p.coef, leg.legint([1,2,3], 2, [1,2], scl=.5))\n\n def test_deriv(self) :\n p = self.p2.integ(2, [1, 2])\n assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)\n assert_almost_equal(p.deriv(2).coef, self.p2.coef)\n\n def test_roots(self) :\n p = leg.Legendre(leg.poly2leg([0, -1, 0, 1]), [0, 1])\n res = p.roots()\n tgt = [0, .5, 1]\n assert_almost_equal(res, tgt)\n\n def test_linspace(self):\n xdes = np.linspace(0, 1, 20)\n ydes = self.p2(xdes)\n xres, yres = self.p2.linspace(20)\n assert_almost_equal(xres, xdes)\n assert_almost_equal(yres, ydes)\n\n def test_fromroots(self) :\n roots = [0, .5, 1]\n p = leg.Legendre.fromroots(roots, domain=[0, 1])\n res = p.coef\n tgt = leg.poly2leg([0, -1, 0, 1])\n assert_almost_equal(res, tgt)\n\n def test_fit(self) :\n def f(x) :\n return x*(x - 1)*(x - 2)\n x = np.linspace(0,3)\n y = f(x)\n\n # test default value of domain\n p = leg.Legendre.fit(x, y, 3)\n assert_almost_equal(p.domain, [0,3])\n\n # test that fit works in given domains\n p = leg.Legendre.fit(x, y, 3, None)\n 
assert_almost_equal(p(x), y)\n assert_almost_equal(p.domain, [0,3])\n p = leg.Legendre.fit(x, y, 3, [])\n assert_almost_equal(p(x), y)\n assert_almost_equal(p.domain, [-1, 1])\n # test that fit accepts weights.\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n yw[0::2] = 0\n p = leg.Legendre.fit(x, yw, 3, w=w)\n assert_almost_equal(p(x), y)\n\n def test_identity(self) :\n x = np.linspace(0,3)\n p = leg.Legendre.identity()\n assert_almost_equal(p(x), x)\n p = leg.Legendre.identity([1,3])\n assert_almost_equal(p(x), x)\n#\n\nif __name__ == \"__main__\":\n run_module_suite()\n", "\"\"\"Tests for chebyshev module.\n\n\"\"\"\n\n\nimport numpy as np\nimport numpy.polynomial.chebyshev as ch\nfrom numpy.testing import *\n\ndef trim(x) :\n return ch.chebtrim(x, tol=1e-6)\n\nT0 = [ 1]\nT1 = [ 0, 1]\nT2 = [-1, 0, 2]\nT3 = [ 0, -3, 0, 4]\nT4 = [ 1, 0, -8, 0, 8]\nT5 = [ 0, 5, 0, -20, 0, 16]\nT6 = [-1, 0, 18, 0, -48, 0, 32]\nT7 = [ 0, -7, 0, 56, 0, -112, 0, 64]\nT8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128]\nT9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256]\n\nTlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]\n\n\nclass TestPrivate(TestCase) :\n\n def test__cseries_to_zseries(self) :\n for i in range(5) :\n inp = np.array([2] + [1]*i, np.double)\n tgt = np.array([.5]*i + [2] + [.5]*i, np.double)\n res = ch._cseries_to_zseries(inp)\n assert_equal(res, tgt)\n\n def test__zseries_to_cseries(self) :\n for i in range(5) :\n inp = np.array([.5]*i + [2] + [.5]*i, np.double)\n tgt = np.array([2] + [1]*i, np.double)\n res = ch._zseries_to_cseries(inp)\n assert_equal(res, tgt)\n\n\nclass TestConstants(TestCase) :\n\n def test_chebdomain(self) :\n assert_equal(ch.chebdomain, [-1, 1])\n\n def test_chebzero(self) :\n assert_equal(ch.chebzero, [0])\n\n def test_chebone(self) :\n assert_equal(ch.chebone, [1])\n\n def test_chebx(self) :\n assert_equal(ch.chebx, [0, 1])\n\n\nclass TestArithmetic(TestCase) :\n\n def test_chebadd(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(max(i,j) + 1)\n tgt[i] += 1\n tgt[j] += 1\n res = ch.chebadd([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_chebsub(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(max(i,j) + 1)\n tgt[i] += 1\n tgt[j] -= 1\n res = ch.chebsub([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_chebmulx(self):\n assert_equal(ch.chebmulx([0]), [0])\n assert_equal(ch.chebmulx([1]), [0,1])\n for i in range(1, 5):\n ser = [0]*i + [1]\n tgt = [0]*(i - 1) + [.5, 0, .5]\n assert_equal(ch.chebmulx(ser), tgt)\n\n def test_chebmul(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n tgt = np.zeros(i + j + 1)\n tgt[i + j] += .5\n tgt[abs(i - j)] += .5\n res = ch.chebmul([0]*i + [1], [0]*j + [1])\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_chebdiv(self) :\n for i in range(5) :\n for j in range(5) :\n msg = \"At i=%d, j=%d\" % (i,j)\n ci = [0]*i + [1]\n cj = [0]*j + [1]\n tgt = ch.chebadd(ci, cj)\n quo, rem = ch.chebdiv(tgt, ci)\n res = ch.chebadd(ch.chebmul(quo, ci), rem)\n assert_equal(trim(res), trim(tgt), err_msg=msg)\n\n def test_chebval(self) :\n def f(x) :\n return x*(x**2 - 1)\n\n #check empty input\n assert_equal(ch.chebval([], [1]).size, 0)\n\n #check normal input)\n for i in range(5) :\n tgt = 1\n res = ch.chebval(1, [0]*i + [1])\n assert_almost_equal(res, tgt)\n tgt = (-1)**i\n res = ch.chebval(-1, [0]*i + [1])\n 
assert_almost_equal(res, tgt)\n zeros = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n tgt = 0\n res = ch.chebval(zeros, [0]*i + [1])\n assert_almost_equal(res, tgt)\n x = np.linspace(-1,1)\n tgt = f(x)\n res = ch.chebval(x, [0, -.25, 0, .25])\n assert_almost_equal(res, tgt)\n\n #check that shape is preserved\n for i in range(3) :\n dims = [2]*i\n x = np.zeros(dims)\n assert_equal(ch.chebval(x, [1]).shape, dims)\n assert_equal(ch.chebval(x, [1,0]).shape, dims)\n assert_equal(ch.chebval(x, [1,0,0]).shape, dims)\n\n\nclass TestCalculus(TestCase) :\n\n def test_chebint(self) :\n # check exceptions\n assert_raises(ValueError, ch.chebint, [0], .5)\n assert_raises(ValueError, ch.chebint, [0], -1)\n assert_raises(ValueError, ch.chebint, [0], 1, [0,0])\n\n # test integration of zero polynomial\n for i in range(2, 5):\n k = [0]*(i - 2) + [1]\n res = ch.chebint([0], m=i, k=k)\n assert_almost_equal(res, [0, 1])\n\n # check single integration with integration constant\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [1/scl]\n chebpol = ch.poly2cheb(pol)\n chebint = ch.chebint(chebpol, m=1, k=[i])\n res = ch.cheb2poly(chebint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check single integration with integration constant and lbnd\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n chebpol = ch.poly2cheb(pol)\n chebint = ch.chebint(chebpol, m=1, k=[i], lbnd=-1)\n assert_almost_equal(ch.chebval(-1, chebint), i)\n\n # check single integration with integration constant and scaling\n for i in range(5) :\n scl = i + 1\n pol = [0]*i + [1]\n tgt = [i] + [0]*i + [2/scl]\n chebpol = ch.poly2cheb(pol)\n chebint = ch.chebint(chebpol, m=1, k=[i], scl=2)\n res = ch.cheb2poly(chebint)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with default k\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = ch.chebint(tgt, m=1)\n res = ch.chebint(pol, m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with defined k\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = ch.chebint(tgt, m=1, k=[k])\n res = ch.chebint(pol, m=j, k=list(range(j)))\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with lbnd\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = ch.chebint(tgt, m=1, k=[k], lbnd=-1)\n res = ch.chebint(pol, m=j, k=list(range(j)), lbnd=-1)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check multiple integrations with scaling\n for i in range(5) :\n for j in range(2,5) :\n pol = [0]*i + [1]\n tgt = pol[:]\n for k in range(j) :\n tgt = ch.chebint(tgt, m=1, k=[k], scl=2)\n res = ch.chebint(pol, m=j, k=list(range(j)), scl=2)\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_chebder(self) :\n # check exceptions\n assert_raises(ValueError, ch.chebder, [0], .5)\n assert_raises(ValueError, ch.chebder, [0], -1)\n\n # check that zeroth deriviative does nothing\n for i in range(5) :\n tgt = [1] + [0]*i\n res = ch.chebder(tgt, m=0)\n assert_equal(trim(res), trim(tgt))\n\n # check that derivation is the inverse of integration\n for i in range(5) :\n for j in range(2,5) :\n tgt = [1] + [0]*i\n res = ch.chebder(ch.chebint(tgt, m=j), m=j)\n assert_almost_equal(trim(res), trim(tgt))\n\n # check derivation with scaling\n for i in range(5) :\n for j in range(2,5) :\n tgt = [1] + [0]*i\n res = ch.chebder(ch.chebint(tgt, m=j, 
scl=2), m=j, scl=.5)\n assert_almost_equal(trim(res), trim(tgt))\n\n\nclass TestMisc(TestCase) :\n\n def test_chebfromroots(self) :\n res = ch.chebfromroots([])\n assert_almost_equal(trim(res), [1])\n for i in range(1,5) :\n roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])\n tgt = [0]*i + [1]\n res = ch.chebfromroots(roots)*2**(i-1)\n assert_almost_equal(trim(res),trim(tgt))\n\n def test_chebroots(self) :\n assert_almost_equal(ch.chebroots([1]), [])\n assert_almost_equal(ch.chebroots([1, 2]), [-.5])\n for i in range(2,5) :\n tgt = np.linspace(-1, 1, i)\n res = ch.chebroots(ch.chebfromroots(tgt))\n assert_almost_equal(trim(res), trim(tgt))\n\n def test_chebvander(self) :\n # check for 1d x\n x = np.arange(3)\n v = ch.chebvander(x, 3)\n assert_(v.shape == (3,4))\n for i in range(4) :\n coef = [0]*i + [1]\n assert_almost_equal(v[...,i], ch.chebval(x, coef))\n\n # check for 2d x\n x = np.array([[1,2],[3,4],[5,6]])\n v = ch.chebvander(x, 3)\n assert_(v.shape == (3,2,4))\n for i in range(4) :\n coef = [0]*i + [1]\n assert_almost_equal(v[...,i], ch.chebval(x, coef))\n\n def test_chebfit(self) :\n def f(x) :\n return x*(x - 1)*(x - 2)\n\n # Test exceptions\n assert_raises(ValueError, ch.chebfit, [1], [1], -1)\n assert_raises(TypeError, ch.chebfit, [[1]], [1], 0)\n assert_raises(TypeError, ch.chebfit, [], [1], 0)\n assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0)\n assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0)\n assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0)\n assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[[1]])\n assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[1,1])\n\n # Test fit\n x = np.linspace(0,2)\n y = f(x)\n #\n coef3 = ch.chebfit(x, y, 3)\n assert_equal(len(coef3), 4)\n assert_almost_equal(ch.chebval(x, coef3), y)\n #\n coef4 = ch.chebfit(x, y, 4)\n assert_equal(len(coef4), 5)\n assert_almost_equal(ch.chebval(x, coef4), y)\n #\n coef2d = ch.chebfit(x, np.array([y,y]).T, 3)\n assert_almost_equal(coef2d, np.array([coef3,coef3]).T)\n # test weighting\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n y[0::2] = 0\n wcoef3 = ch.chebfit(x, yw, 3, w=w)\n assert_almost_equal(wcoef3, coef3)\n #\n wcoef2d = ch.chebfit(x, np.array([yw,yw]).T, 3, w=w)\n assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)\n\n def test_chebtrim(self) :\n coef = [2, -1, 1, 0]\n\n # Test exceptions\n assert_raises(ValueError, ch.chebtrim, coef, -1)\n\n # Test results\n assert_equal(ch.chebtrim(coef), coef[:-1])\n assert_equal(ch.chebtrim(coef, 1), coef[:-3])\n assert_equal(ch.chebtrim(coef, 2), [0])\n\n def test_chebline(self) :\n assert_equal(ch.chebline(3,4), [3, 4])\n\n def test_cheb2poly(self) :\n for i in range(10) :\n assert_almost_equal(ch.cheb2poly([0]*i + [1]), Tlist[i])\n\n def test_poly2cheb(self) :\n for i in range(10) :\n assert_almost_equal(ch.poly2cheb(Tlist[i]), [0]*i + [1])\n\n def test_chebpts1(self):\n #test exceptions\n assert_raises(ValueError, ch.chebpts1, 1.5)\n assert_raises(ValueError, ch.chebpts1, 0)\n\n #test points\n tgt = [0]\n assert_almost_equal(ch.chebpts1(1), tgt)\n tgt = [-0.70710678118654746, 0.70710678118654746]\n assert_almost_equal(ch.chebpts1(2), tgt)\n tgt = [-0.86602540378443871, 0, 0.86602540378443871]\n assert_almost_equal(ch.chebpts1(3), tgt)\n tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]\n assert_almost_equal(ch.chebpts1(4), tgt)\n\n\n def test_chebpts2(self):\n #test exceptions\n assert_raises(ValueError, ch.chebpts2, 1.5)\n assert_raises(ValueError, ch.chebpts2, 1)\n\n #test points\n tgt = [-1, 1]\n 
assert_almost_equal(ch.chebpts2(2), tgt)\n tgt = [-1, 0, 1]\n assert_almost_equal(ch.chebpts2(3), tgt)\n tgt = [-1, -0.5, .5, 1]\n assert_almost_equal(ch.chebpts2(4), tgt)\n tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]\n assert_almost_equal(ch.chebpts2(5), tgt)\n\n\n\n\nclass TestChebyshevClass(TestCase) :\n\n p1 = ch.Chebyshev([1,2,3])\n p2 = ch.Chebyshev([1,2,3], [0,1])\n p3 = ch.Chebyshev([1,2])\n p4 = ch.Chebyshev([2,2,3])\n p5 = ch.Chebyshev([3,2,3])\n\n def test_equal(self) :\n assert_(self.p1 == self.p1)\n assert_(self.p2 == self.p2)\n assert_(not self.p1 == self.p2)\n assert_(not self.p1 == self.p3)\n assert_(not self.p1 == [1,2,3])\n\n def test_not_equal(self) :\n assert_(not self.p1 != self.p1)\n assert_(not self.p2 != self.p2)\n assert_(self.p1 != self.p2)\n assert_(self.p1 != self.p3)\n assert_(self.p1 != [1,2,3])\n\n def test_add(self) :\n tgt = ch.Chebyshev([2,4,6])\n assert_(self.p1 + self.p1 == tgt)\n assert_(self.p1 + [1,2,3] == tgt)\n assert_([1,2,3] + self.p1 == tgt)\n\n def test_sub(self) :\n tgt = ch.Chebyshev([1])\n assert_(self.p4 - self.p1 == tgt)\n assert_(self.p4 - [1,2,3] == tgt)\n assert_([2,2,3] - self.p1 == tgt)\n\n def test_mul(self) :\n tgt = ch.Chebyshev([7.5, 10., 8., 6., 4.5])\n assert_(self.p1 * self.p1 == tgt)\n assert_(self.p1 * [1,2,3] == tgt)\n assert_([1,2,3] * self.p1 == tgt)\n\n def test_floordiv(self) :\n tgt = ch.Chebyshev([1])\n assert_(self.p4 // self.p1 == tgt)\n assert_(self.p4 // [1,2,3] == tgt)\n assert_([2,2,3] // self.p1 == tgt)\n\n def test_mod(self) :\n tgt = ch.Chebyshev([1])\n assert_((self.p4 % self.p1) == tgt)\n assert_((self.p4 % [1,2,3]) == tgt)\n assert_(([2,2,3] % self.p1) == tgt)\n\n def test_divmod(self) :\n tquo = ch.Chebyshev([1])\n trem = ch.Chebyshev([2])\n quo, rem = divmod(self.p5, self.p1)\n assert_(quo == tquo and rem == trem)\n quo, rem = divmod(self.p5, [1,2,3])\n assert_(quo == tquo and rem == trem)\n quo, rem = divmod([3,2,3], self.p1)\n assert_(quo == tquo and rem == trem)\n\n def test_pow(self) :\n tgt = ch.Chebyshev([1])\n for i in range(5) :\n res = self.p1**i\n assert_(res == tgt)\n tgt *= self.p1\n\n def test_call(self) :\n # domain = [-1, 1]\n x = np.linspace(-1, 1)\n tgt = 3*(2*x**2 - 1) + 2*x + 1\n assert_almost_equal(self.p1(x), tgt)\n\n # domain = [0, 1]\n x = np.linspace(0, 1)\n xx = 2*x - 1\n assert_almost_equal(self.p2(x), self.p1(xx))\n\n def test_degree(self) :\n assert_equal(self.p1.degree(), 2)\n\n def test_cutdeg(self) :\n assert_raises(ValueError, self.p1.cutdeg, .5)\n assert_raises(ValueError, self.p1.cutdeg, -1)\n assert_equal(len(self.p1.cutdeg(3)), 3)\n assert_equal(len(self.p1.cutdeg(2)), 3)\n assert_equal(len(self.p1.cutdeg(1)), 2)\n assert_equal(len(self.p1.cutdeg(0)), 1)\n\n def test_convert(self) :\n x = np.linspace(-1,1)\n p = self.p1.convert(domain=[0,1])\n assert_almost_equal(p(x), self.p1(x))\n\n def test_mapparms(self) :\n parms = self.p2.mapparms()\n assert_almost_equal(parms, [-1, 2])\n\n def test_trim(self) :\n coef = [1, 1e-6, 1e-12, 0]\n p = ch.Chebyshev(coef)\n assert_equal(p.trim().coef, coef[:3])\n assert_equal(p.trim(1e-10).coef, coef[:2])\n assert_equal(p.trim(1e-5).coef, coef[:1])\n\n def test_truncate(self) :\n assert_raises(ValueError, self.p1.truncate, .5)\n assert_raises(ValueError, self.p1.truncate, 0)\n assert_equal(len(self.p1.truncate(4)), 3)\n assert_equal(len(self.p1.truncate(3)), 3)\n assert_equal(len(self.p1.truncate(2)), 2)\n assert_equal(len(self.p1.truncate(1)), 1)\n\n def test_copy(self) :\n p = self.p1.copy()\n assert_(self.p1 == p)\n\n def 
test_integ(self) :\n p = self.p2.integ()\n assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 0, scl=.5))\n p = self.p2.integ(lbnd=0)\n assert_almost_equal(p(0), 0)\n p = self.p2.integ(1, 1)\n assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 1, scl=.5))\n p = self.p2.integ(2, [1, 2])\n assert_almost_equal(p.coef, ch.chebint([1,2,3], 2, [1,2], scl=.5))\n\n def test_deriv(self) :\n p = self.p2.integ(2, [1, 2])\n assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)\n assert_almost_equal(p.deriv(2).coef, self.p2.coef)\n\n def test_roots(self) :\n p = ch.Chebyshev(ch.poly2cheb([0, -1, 0, 1]), [0, 1])\n res = p.roots()\n tgt = [0, .5, 1]\n assert_almost_equal(res, tgt)\n\n def test_linspace(self):\n xdes = np.linspace(0, 1, 20)\n ydes = self.p2(xdes)\n xres, yres = self.p2.linspace(20)\n assert_almost_equal(xres, xdes)\n assert_almost_equal(yres, ydes)\n\n def test_fromroots(self) :\n roots = [0, .5, 1]\n p = ch.Chebyshev.fromroots(roots, domain=[0, 1])\n res = p.coef\n tgt = ch.poly2cheb([0, -1, 0, 1])\n assert_almost_equal(res, tgt)\n\n def test_fit(self) :\n def f(x) :\n return x*(x - 1)*(x - 2)\n x = np.linspace(0,3)\n y = f(x)\n\n # test default value of domain\n p = ch.Chebyshev.fit(x, y, 3)\n assert_almost_equal(p.domain, [0,3])\n\n # test that fit works in given domains\n p = ch.Chebyshev.fit(x, y, 3, None)\n assert_almost_equal(p(x), y)\n assert_almost_equal(p.domain, [0,3])\n p = ch.Chebyshev.fit(x, y, 3, [])\n assert_almost_equal(p(x), y)\n assert_almost_equal(p.domain, [-1, 1])\n # test that fit accepts weights.\n w = np.zeros_like(x)\n yw = y.copy()\n w[1::2] = 1\n yw[0::2] = 0\n p = ch.Chebyshev.fit(x, yw, 3, w=w)\n assert_almost_equal(p(x), y)\n\n def test_identity(self) :\n x = np.linspace(0,3)\n p = ch.Chebyshev.identity()\n assert_almost_equal(p(x), x)\n p = ch.Chebyshev.identity([1,3])\n assert_almost_equal(p(x), x)\n#\n\nif __name__ == \"__main__\":\n run_module_suite()\n" ]
[ [ "numpy.polynomial.legendre.legline", "numpy.polynomial.legendre.legval", "numpy.polynomial.legendre.legder", "numpy.polynomial.legendre.poly2leg", "numpy.polynomial.legendre.legfromroots", "numpy.polynomial.legendre.legmul", "numpy.polynomial.legendre.legroots", "numpy.polynomial.legendre.Legendre.fromroots", "numpy.polynomial.legendre.legmulx", "numpy.linspace", "numpy.polynomial.legendre.legdiv", "numpy.zeros", "numpy.polynomial.legendre.legint", "numpy.polynomial.legendre.leg2poly", "numpy.arange", "numpy.polynomial.legendre.legvander", "numpy.polynomial.legendre.Legendre", "numpy.polynomial.legendre.Legendre.fit", "numpy.zeros_like", "numpy.polynomial.legendre.legtrim", "numpy.polynomial.legendre.legfit", "numpy.polynomial.legendre.Legendre.identity", "numpy.polynomial.polynomial.polyval", "numpy.polynomial.legendre.legsub", "numpy.array", "numpy.polynomial.legendre.legadd" ], [ "numpy.polynomial.chebyshev.cheb2poly", "numpy.polynomial.chebyshev.chebmul", "numpy.polynomial.chebyshev.chebval", "numpy.polynomial.chebyshev.chebline", "numpy.polynomial.chebyshev._cseries_to_zseries", "numpy.polynomial.chebyshev.chebfit", "numpy.polynomial.chebyshev._zseries_to_cseries", "numpy.polynomial.chebyshev.chebsub", "numpy.linspace", "numpy.polynomial.chebyshev.chebpts1", "numpy.polynomial.chebyshev.chebpts2", "numpy.polynomial.chebyshev.chebder", "numpy.zeros", "numpy.polynomial.chebyshev.chebroots", "numpy.polynomial.chebyshev.poly2cheb", "numpy.polynomial.chebyshev.Chebyshev.fit", "numpy.arange", "numpy.polynomial.chebyshev.chebmulx", "numpy.polynomial.chebyshev.Chebyshev.fromroots", "numpy.polynomial.chebyshev.chebint", "numpy.zeros_like", "numpy.polynomial.chebyshev.chebadd", "numpy.polynomial.chebyshev.chebvander", "numpy.polynomial.chebyshev.Chebyshev.identity", "numpy.polynomial.chebyshev.Chebyshev", "numpy.array", "numpy.polynomial.chebyshev.chebtrim", "numpy.polynomial.chebyshev.chebdiv", "numpy.polynomial.chebyshev.chebfromroots" ] ]
AceCoooool/segmentation
[ "2f4d5ac193cab580eb8ba789e79db6dadcfecfd0" ]
[ "model/seg_models/pspnet.py" ]
[ "\"\"\"Pyramid Scene Parsing Network\"\"\"\nimport os\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom model.seg_models.segbase import SegBaseModel\nfrom model.module.basic import _FCNHead\n\n__all__ = ['PSPNet', 'get_psp',\n 'get_psp_resnet101_voc',\n 'get_psp_resnet101_citys']\n\n\n# head\ndef _PSP1x1Conv(in_channels, out_channels):\n return nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))\n\n\nclass _PyramidPooling(nn.Module):\n def __init__(self, in_channels):\n super(_PyramidPooling, self).__init__()\n out_channels = in_channels // 4\n self.conv1 = _PSP1x1Conv(in_channels, out_channels)\n self.conv2 = _PSP1x1Conv(in_channels, out_channels)\n self.conv3 = _PSP1x1Conv(in_channels, out_channels)\n self.conv4 = _PSP1x1Conv(in_channels, out_channels)\n\n @staticmethod\n def pool(x, size):\n return F.adaptive_avg_pool2d(x, output_size=size)\n\n @staticmethod\n def upsample(x, h, w):\n return F.interpolate(x, (h, w), mode='bilinear', align_corners=True)\n\n def forward(self, x):\n _, _, h, w = x.shape\n feat1 = self.upsample(self.conv1(self.pool(x, 1)), h, w)\n feat2 = self.upsample(self.conv2(self.pool(x, 2)), h, w)\n feat3 = self.upsample(self.conv3(self.pool(x, 3)), h, w)\n feat4 = self.upsample(self.conv4(self.pool(x, 4)), h, w)\n return torch.cat([x, feat1, feat2, feat3, feat4], dim=1)\n\n\nclass _PSPHead(nn.Module):\n def __init__(self, nclass, **kwargs):\n super(_PSPHead, self).__init__(**kwargs)\n self.psp = _PyramidPooling(2048)\n self.block = list()\n self.block.append(nn.Conv2d(4096, 512, kernel_size=3, padding=1, bias=False))\n self.block.append(nn.BatchNorm2d(512))\n self.block.append(nn.ReLU(inplace=True))\n self.block.append(nn.Dropout(0.1))\n self.block.append(nn.Conv2d(512, nclass, kernel_size=1))\n self.block = nn.Sequential(*self.block)\n\n def forward(self, x):\n x = self.psp(x)\n return self.block(x)\n\n\nclass PSPNet(SegBaseModel):\n def __init__(self, nclass, backbone='resnet50', aux=True, dilated=True, jpu=False,\n pretrained_base=True, base_size=520, crop_size=480, **kwargs):\n super(PSPNet, self).__init__(nclass, aux, backbone, base_size=base_size, dilated=dilated, jpu=jpu,\n crop_size=crop_size, pretrained_base=pretrained_base, **kwargs)\n self.head = _PSPHead(nclass, **kwargs)\n if self.aux:\n self.auxlayer = _FCNHead(1024, nclass, **kwargs)\n\n self.__setattr__('others', ['head', 'auxlayer'] if self.aux else ['head'])\n\n def forward(self, x):\n c3, c4 = self.base_forward(x)\n outputs = []\n x = self.head(c4)\n x = F.interpolate(x, self._up_kwargs, mode='bilinear', align_corners=True)\n outputs.append(x)\n\n if self.aux:\n auxout = self.auxlayer(c3)\n auxout = F.interpolate(auxout, self._up_kwargs, mode='bilinear', align_corners=True)\n outputs.append(auxout)\n return tuple(outputs)\n\n\ndef get_psp(dataset='pascal_voc', backbone='resnet101', pretrained=False, pretrained_base=True,\n jpu=False, root=os.path.expanduser('~/.torch/models'), **kwargs):\n acronyms = {\n 'pascal_voc': 'voc',\n 'citys': 'citys',\n }\n from data import datasets\n # infer number of classes\n model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone,\n pretrained_base=pretrained_base, jpu=jpu, **kwargs)\n if pretrained:\n from model.model_store import get_model_file\n name = 'psp_%s_%s' % (backbone, acronyms[dataset])\n name = name + '_jpu' if jpu else name\n model.load_state_dict(torch.load(get_model_file(name, root=root)))\n return model\n\n\ndef 
get_psp_resnet101_voc(**kwargs):\n return get_psp('pascal_voc', 'resnet101', **kwargs)\n\n\ndef get_psp_resnet101_citys(**kwargs):\n return get_psp('citys', 'resnet101', **kwargs)\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Dropout", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU", "torch.cat", "torch.nn.functional.interpolate" ] ]
joanvaquer/SDV
[ "83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28" ]
[ "tests/test_modeler.py" ]
[ "from unittest import TestCase\nfrom unittest.mock import Mock, call\n\nimport pandas as pd\n\nfrom sdv.metadata import Metadata\nfrom sdv.modeler import Modeler\nfrom sdv.models.base import SDVModel\nfrom sdv.models.copulas import GaussianCopula\n\n\nclass TestModeler(TestCase):\n\n def test___init__default(self):\n \"\"\"Test create new Modeler instance with default values\"\"\"\n # Run\n modeler = Modeler('test')\n\n # Asserts\n assert modeler.models == dict()\n assert modeler.metadata == 'test'\n assert modeler.model == GaussianCopula\n assert modeler.model_kwargs == dict()\n\n def test___init__with_arguments(self):\n # Run\n model = Mock()\n modeler = Modeler({'some': 'metadata'}, model=model, model_kwargs={'some': 'kwargs'})\n\n # Asserts\n assert modeler.models == dict()\n assert modeler.metadata == {'some': 'metadata'}\n assert modeler.model == model\n assert modeler.model_kwargs == {'some': 'kwargs'}\n\n def test__get_extensions(self):\n \"\"\"Test get list of extensions from childs\"\"\"\n # Setup\n model = Mock(spec=SDVModel)\n model.return_value = model\n model.get_parameters.side_effect = [\n {'model': 'data 1'},\n {'model': 'data 2'},\n {'model': 'data 3'}\n ]\n\n modeler = Mock(spec=Modeler)\n modeler.model = model\n modeler.model_kwargs = dict()\n modeler.metadata = Mock(spec=Metadata)\n\n # Run\n child_table = pd.DataFrame({'foo': ['aaa', 'bbb', 'ccc']})\n result = Modeler._get_extension(modeler, 'some_name', child_table, 'foo')\n\n # Asserts\n expected = pd.DataFrame({\n '__some_name__model': ['data 1', 'data 2', 'data 3'],\n '__some_name__child_rows': [1, 1, 1]\n }, index=['aaa', 'bbb', 'ccc'])\n pd.testing.assert_frame_equal(result, expected)\n assert model.get_parameters.call_count == 3\n\n def test_cpa_with_tables_no_primary_key(self):\n \"\"\"Test CPA with tables and no primary key.\"\"\"\n # Setup\n modeler = Mock(spec=Modeler)\n modeler.metadata = Mock(spec=Metadata)\n modeler.model = Mock(spec=SDVModel)\n modeler.model_kwargs = dict()\n modeler.models = dict()\n modeler.table_sizes = {'data': 5}\n modeler.metadata.transform.return_value = pd.DataFrame({'data': [1, 2, 3]})\n modeler.metadata.get_primary_key.return_value = None\n\n # Run\n tables = {'test': pd.DataFrame({'data': ['a', 'b', 'c']})}\n result = Modeler.cpa(modeler, 'test', tables)\n\n # Asserts\n expected = pd.DataFrame({'data': [1, 2, 3]})\n expected_transform_call = pd.DataFrame({'data': ['a', 'b', 'c']})\n\n assert modeler.metadata.load_table.call_count == 0\n assert modeler.metadata.transform.call_args[0][0] == 'test'\n pd.testing.assert_frame_equal(\n modeler.metadata.transform.call_args[0][1],\n expected_transform_call\n )\n pd.testing.assert_frame_equal(result, expected)\n\n def test_model_database(self):\n \"\"\"Test model using RCPA\"\"\"\n # Setup\n def rcpa_side_effect(table_name, tables):\n tables[table_name] = table_name\n\n metadata_table_names = ['foo', 'bar', 'tar']\n metadata_parents = [None, 'bar_parent', None]\n\n modeler = Mock()\n modeler.metadata.get_tables.return_value = metadata_table_names\n modeler.metadata.get_parents.side_effect = metadata_parents\n modeler.rcpa.side_effect = rcpa_side_effect\n modeler.models = dict()\n\n # Run\n Modeler.model_database(modeler)\n\n # Asserts\n expected_metadata_parents_call_count = 3\n expected_metadata_parents_call = [call('foo'), call('bar'), call('tar')]\n assert modeler.metadata.get_parents.call_count == expected_metadata_parents_call_count\n assert modeler.metadata.get_parents.call_args_list == expected_metadata_parents_call\n" ]
[ [ "pandas.DataFrame", "pandas.testing.assert_frame_equal" ] ]
greerviau/HackUMass
[ "25ef2ea9fecbe4bbfa91f0a9f32bd9f2703a176a" ]
[ "cache.py" ]
[ "import numpy as np\nimport math\n\nclass Cache():\n def __init__(self, max_size=10):\n self.cache = []\n self.size = 0\n self.max_size=max_size\n \n def add(self, element):\n self.cache.append(element)\n self.size+=1\n if self.size > self.max_size:\n del self.cache[0]\n self.size = self.max_size\n \n def mean(self):\n return np.mean(np.array(self.cache), axis=0)\n\n def empty(self):\n return self.size == 0\n\n def get_size(self):\n return self.size\n\n def get_last(self):\n return self.cache[self.size-1]\n\n def print_cache(self):\n for e in self.cache:\n print(e)\n\n \nif __name__ == '__main__':\n print('===Test Cache===')\n cache = Cache(max_size=5)\n cache.add([5,4])\n print(cache.get_size())\n print(cache.print_cache())\n\n cache.add([8,1])\n cache.add([3,2])\n cache.add([4,5])\n cache.add([6,2])\n print(cache.get_size())\n print(cache.print_cache())\n\n cache.add([1,4])\n print(cache.get_size())\n print(cache.print_cache())\n print(cache.mean())\n" ]
[ [ "numpy.array" ] ]
rohinkumar/CorrelCalc
[ "d7887448af8d3dc3170c00c0aae6ee2561b8a3d5" ]
[ "correlcalc/antpcf.py" ]
[ "__author__ = 'Rohin Kumar Y'\n\n\n# Calculate anisotropic 2pCF\nfrom tpcf import *\nimport scipy as sp\n# antpcf(dat,datR,bins,parmetric,permetric) returns numpy 2d array DD, RR, DR correl\n# poserr(xi,DD) returns (1.0+xi)/np.sqrt(DD)\n\n\ndef atpcf(datfile, binspar, binsper, **kwargs):\n \"\"\"Main function to calculate anisotropic 2pCF. Takes multiple arguments such as randfile, maskfile, calculation estimator etc. for different geometry, cosmology models\n Usage of the package is given in jupyter notebook \"Using correlcalc example-anisotropic.nb\" and in `main.py`\n\n All the methods in correlcalc can be imported using the following command\n\n `from correlcalc import *`\n\n We first need to define bins (in $c/H_0$ units) to calculate 2pCF. For e.g. to calculate correlation between 0-180Mpc in steps of 6Mpc, we say\n\n `bins=np.arange(0.002,0.06,0.002)`\n\n To calculate anisotropic 2pCF using input data file (both ascii and fits files are supported), use `atpcf` method as follows\n\n `correl3d, poserr=atpcf('/path/to/datfile.dat',binspar, binsper, randfile='/path/to/randomfile.dat', vtype='sigpi', weights=True)`\n\n\n If random file is not available or not provided, we can generate random catalog by providing the mangle mask file in `.ply` format along with specifying the size of the catalog in multiples of size of data catalog (default 2x size). To do this\n\n `correl3d, poserr=atpcf('/path/to/datfile.dat', binspar, binsper, maskfile='/path/to/maskfile.ply', vtype='smu', weights='eq', randfact=3)`\n\n This returns `correl3d` and `poserr` `numpy` arrays corresponding to anisotropic Two-point correlation and Poisson error\n\n ### Keyword Arguments\n The following keyword arguments can be included as needed\n\n #### Data file (Mandatory)\n\n Data file of galaxy/quasar redshift survey must be passed as the first argument to both `tpcf` and `atpcf` methods.\n\n **Supported filetypes**: ascii text files with columns, csv files or fits files are all supported. Most files provided by SDSS Value added catalogs should be directly usable.\n\n **To contain**: Any type of file provided must at least have columns named **Z** (redshift), **RA** (Right Ascension), **DEC** (Declination). These column names can be in any case.\n\n If one intends to use `weights=True` option (must to obtain accurate results) the data file must also contain radial weights with column title **radial_weight** or **WEIGHT_SYSTOT**\n\n #### binspar (Mandatory)\n\n A numpy array with ascending values in $c/H_0$ units (for distances) or $\\delta z$ as per choice of `'vtype'` must be provided as the second argument to `atpcf` method.\n\n #### binsper (Mandatory)\n\n A numpy array with ascending values in $c/H_0$ units (for distances), $z\\delta \\theta$ or $\\mu = \\cos \\alpha$ must be provided as the third argument to `atpcf` method.\n\n\n #### `randfile=` Path to random file (semi-Optional)\n\n If not provided, `maskfile=` argument must be given `.ply` file.\n\n **Supported filetypes**: ascii text files with columns, csv files or fits files are all supported. Most files provided by SDSS Value added catalogs should be directly usable.\n\n **To contain**: Any type of file provided must at least have columns named **Z** (redshift), **RA** (Right Ascension), **DEC** (Declination). 
These column names can be in any case.\n\n If one intends to use `weights=True` option the data file must also contain radial weights with column title **radial_weight** or **WEIGHT_SYSTOT**\n\n **Beta Testing:** Beta support for other column titles for weights is added.\n\n Also added is calculation of weights from n(z) during random catalog generation.\n\n #### `mask=` Path to mangle polygon file (semi-Optional)\n\n If not provided, `randfile=` argument must be provided.\n\n **Supported filetypes**: `.ply` file containing Mangle polygons describing survey geometry in the standard format. Most files provided by SDSS Value added catalogs should be directly usable.\n\n #### `randfact=` (Optional)\n\n Size of the random catalog in integer multiples of size of data catalog if random catalog file is not provided. Default value is `2`\n\n #### `weights=` (Optional)\n\n It is highly recommended to use weights argument by providing `weights=True` or `weights='eq'` to obtain accurate two-point correlation calculations. This picks up radial weights in the prescribed format (with column title **radial_weight** or **WEIGHT_SYSTOT** ) from the data and random files provided.\n\n `weights=`eq'` sets equal weights and hence adds *+1* - This implementation is parallelized and is faster than `weights=False` implementation on most machines\n\n If `weights=False`, by default *+1* will be added for each galaxy/random pair found within the bin instead of adding total weight. For more details on weights and references, see http://www.sdss3.org/dr9/tutorials/lss_galaxy.php\n\n #### Metrics in parallel and perpendicular directions\n\n Calculates anisotropic 2pCF for the following cases.\n\n #### `vtype=`\n\n Valuation method\n\n **Available options**:\n\n `'smu'` (default)- Calculates 2pCF in s - mu\n\n `'sigpi'` - Calculates 2pCF using parallel and perpendicular distances\n\n `'ap'` calculates 2pCF for small $\\Delta \\theta$ and $z \\Delta\\theta$ . But results can be converted to any cosmology model of choice (ref: https://arxiv.org/pdf/1312.0003.pdf)\n\n **Customization**\n\n Formulae for calculation of distances in parallel and perpendicular directions is taken from https://arxiv.org/pdf/1312.0003.pdf. Using the formulae in this paper, $\\Delta z$ and $z \\Delta \\theta$ are computed in the `metrics.pyx` file for the above mentioned. `Cython` is chosen for implementation to obtain faster results in building `BallTree`s calculating `cdist` and to reduce `query` time.\n\n One can customize metric definitions as per one's need by editing the `metrics.pyx` file. After changing this compile it using `python metricsetup.py build_ext --inplace`\n\n **To add:**\n\n Direct calculation of distances in LOS and perpendicular to the LOS to be added to support standard model Cosmology and other popular models. 
For now, one needs to manually convert the angular bins to physical distances to get the approximate results\n\n\n #### `cosmology='lcdm'` (Optional)\n\n Used to calculate co-moving distances from redshifts.\n\n **Available options**:\n\n `'lcdm'` (default)- for Lambda CDM model\n\n `'lc'` - for $R_h=ct$ and linear coasting models\n\n **To add**: `wcdm` and other popular cosmology models soon\n\n #### `geometry='flat'` (Optional)\n\n Used to calculate co-moving distances between a pair of objects\n\n **Available options**:\n\n `'flat'` (default)- for Lambda CDM model\n\n `'open'`\n\n `'close'`\n\n\n #### `estimator=` (Optional)\n\n **Available options**:\n\n `'dp'` - Davis - Peebles estimator (default - fastest)\n\n `'ls'`- Landy - Szalay estimator\n\n `'ph'` - Peebles- Hauser estimator\n\n `'hew'` - Hewitt estimator\n\n `'h'` - Hamilton estimator\n\n For more details on estimator formulae see https://arxiv.org/pdf/1211.6211.pdf\n\n \"\"\"\n # Default function arguments\n global binsparv\n global binsperv\n global maxrad\n global dat\n global datR\n global Nd\n global Nr\n DD = DR = RD = RR = np.zeros((len(binspar)-1, len(binsper)-1))\n weightsflag = True\n useones = True\n cosmology = 'lcdm'\n sflag = True\n geometry='flat'\n filtermetric = flatdistsq\n permetric = musqlcdmf\n parmetric = flatdistsq\n vtype = 'smu'\n randcatfact = 2\n estimator = 'dp'\n binsparv = binspar**2\n binsperv = binsper**2\n randfile = None\n maskfile = None\n\n # Options for correl calculation estimators and cosmology models\n mlist = ['dp', 'ls', 'ph', 'hew', 'h']\n clist = ['lcdm', 'lc'] # to add wcdm\n glist = ['flat', 'open', 'close']\n parper = ['ap', 'sigpi', 'smu']\n\n if kwargs is not None:\n for key, value in kwargs.items():\n # print (key, value)\n if key.lower() == 'randfile':\n randfile = value\n\n elif key.lower() == 'randfact':\n randcatfact = value\n\n elif key.lower() == 'geometry':\n if value.lower() in glist:\n geometry = value.lower()\n # geometry = 'flat'\n # filtermetric = flatdistsq\n # elif value.lower() == 'open':\n # geometry = 'open'\n # filtermetric = opendistsq\n # elif value.lower() == 'close':\n # geometry = 'close'\n # filtermetric = closedistsq\n\n elif key.lower() == 'cosmology':\n if value.lower() in clist:\n cosmology = value.lower()\n else:\n print(\"Incorrect Cosmology provided! Using 'lcdm' as default\")\n\n elif key.lower() == 'vtype':\n if value.lower() in parper:\n vtype = value.lower()\n\n elif key.lower() == 'estimator':\n if value.lower() in mlist:\n estimator = value.lower()\n else:\n print(\"Incorrect estimator provided! 
Using 'dp' as default\")\n\n elif key.lower() == 'mask':\n maskfile = value\n elif key.lower() == 'weights':\n if value is True:\n weightsflag = True\n useones = False\n elif isinstance(value, str):\n if value.lower() == 'eq':\n weightsflag = True\n useones = True\n else:\n weightsflag = False\n else:\n print (\"key argument `%s` not valid\" % key)\n else:\n print (\"Refer documentation to enter valid keyword arguments\")\n\n if vtype == 'ap':\n parmetric = APdz\n binsparv = binspar\n binsperv = binsper\n sflag = False\n filtermetric = APzdth\n permetric = APzdth\n maxrad = max(np.sqrt(binsparv**2 + binsperv**2))\n\n elif vtype == 'smu':\n # binsparv = binspar**2\n # binsperv = binsper**2\n maxrad = max(binsparv)\n if geometry == 'open':\n parmetric = opendistsq\n filtermetric = opendistsq\n if cosmology == 'lc':\n permetric = musqlco\n else:\n permetric = musqlcdmo\n\n elif geometry == 'close':\n parmetric = closedistsq\n filtermetric = closedistsq\n if cosmology == 'lc':\n permetric = musqlcc\n else:\n permetric = musqlcdmc\n else:\n parmetric = flatdistsq\n filtermetric = flatdistsq\n if cosmology == 'lc':\n permetric = musqlcf\n else:\n permetric = musqlcdmf\n\n elif vtype == 'sigpi':\n # binsparv = binspar**2\n # binsperv = binsper**2\n maxrad = max(binsparv+binsperv)\n if geometry == 'open':\n filtermetric = opendistsq\n if cosmology == 'lc':\n parmetric = sparsqlc\n permetric = spersqlco\n else:\n parmetric = sparsqlcdm\n permetric = spersqlcdmo\n\n elif geometry == 'close':\n filtermetric = closedistsq\n if cosmology == 'lc':\n parmetric = sparsqlc\n permetric = spersqlcc\n else:\n parmetric = sparsqlcdm\n permetric = spersqlcdmc\n else:\n filtermetric = flatdistsq\n if cosmology == 'lc':\n parmetric = sparsqlc\n permetric = spersqlcf\n else:\n parmetric = sparsqlcdm\n permetric = spersqlcdmf\n\n else:\n print (\"No valid valuation method provided. Using 'smu' as default\")\n\n\n print(\"Calculating Anisotropic Correlation function with the following parameters\")\n print(\"data file=\")\n print(datfile)\n print(\"random file=\")\n print(randfile)\n print(\"Random catalog size factor(if random file is None)=\")\n print(randcatfact)\n print(\"mask/window file=\")\n print(maskfile)\n print (\"Cosmology=\")\n print(cosmology)\n print (\"Geometry=\")\n print (geometry)\n print(\"Weights=\")\n print(weightsflag)\n print (\"Using ones as weights?=\")\n print (useones)\n print(\"perpendicular metric=\")\n print(permetric)\n print(\"parallel metric=\")\n print(parmetric)\n print(\"Correl estimator=\")\n print(estimator)\n print(\"Valuation type=\")\n print(vtype)\n print (\"binsparv=\")\n print (binsparv)\n print (\"binsperv=\")\n print (binsperv)\n print(\"---------------------------------------------------------------------------\")\n\n if sflag is False:\n # Prepare dat from data file\n dat, weights = datprepz(datfile, 'data', cosmology)\n Nd = len(dat)\n # Prepare datR from random file or generate a random catalog\n if randfile is None:\n randcatsize = randcatfact*Nd\n if maskfile is None:\n print (\"Mask file compulsory. 
Please provide mask='maskfilepath.ply'\")\n else:\n datR, rweights = randcatprepz(datfile, randcatsize, maskfile, cosmology)\n else:\n datR, rweights = datprepz(randfile, 'random', cosmology)\n\n else:\n # Prepare dat from data file\n dat, weights = datprep(datfile, 'data', cosmology)\n\n Nd = len(dat)\n # Prepare datR from random file or generate a random catalog\n if randfile is None:\n randcatsize = randcatfact*Nd\n if maskfile is None:\n print (\"Mask file compulsory. Please provide mask='maskfilepath.ply'\")\n else:\n datR, rweights = randcatprep(datfile, randcatsize, maskfile, cosmology)\n else:\n datR, rweights = datprep(randfile, 'random', cosmology)\n\n Nr = len(datR)\n fact = (1.0*Nr)/Nd\n global adbt\n global arbt\n\n print (\"Creating BallTree for data points using ...\")\n print (filtermetric)\n adbt = BallTree(dat, metric='pyfunc', func=filtermetric)\n\n print (\"Creating BallTree for random points using ...\")\n print (filtermetric)\n arbt = BallTree(datR, metric='pyfunc', func=filtermetric)\n\n rng = np.array([[min(binsparv), max(binsparv)], [min(binsperv), max(binsperv)]])\n print (\"Calculating anisotropic 2pCF...\")\n\n # Reference: arXiv: 1211.6211\n if estimator == 'dp':\n if weightsflag is False: # or len(weights) != Nd\n # print (weightsflag)\n # print(len(weights))\n # print(len(datR))\n DD = aDDcalc(dat, binsparv, binsperv, parmetric, permetric, rng)\n DR = aDRcalc(dat, datR, binsparv, binsperv, parmetric, permetric, rng)\n RD = aRDcalc(dat, datR, binsparv, binsperv, parmetric, permetric, rng)\n else:\n # if len(rweights)!=len(datR):\n # DD = aDDwcalc(dat, binsq, parmetric, permetric, rng, weights)\n if useones is True or len(weights) != Nd:\n weights = np.ones(Nd)\n rweights = np.ones(Nr)\n print (\"Calculating anisotropic DD with weights (parallelized)...\\n DD=\")\n DD = amulti_autocp(dat, binsparv, binsperv, parmetric, permetric, rng, weights, Nd, pcpus)\n # DR = aRDwcalc(dat, datR, binsq, parmetric, permetric, rng, weights)\n print (\"Calculating anisotropic DR with weights (parallelized)...\\n DR=\")\n DR = amulti_crosscp(dat, datR, binsparv, binsperv, parmetric, permetric, rng, weights, Nr, pcpus)\n print (\"Calculating anisotropic RD with weights (parallelized)...\\n RD=\")\n RD = amulti_crosscpr(dat, datR, binsparv, binsperv, parmetric, permetric, rng, rweights, Nd, pcpus)\n # else:\n # DD=aDDwcalc(dat,binsq,parmetric,permetric,rng,weights)\n # DR=aDRwcalc(dat,datR,binsq,parmetric,permetric,rng,weights,rweights)\n\n print (\"Using Davis-Peebles estimator\")\n correl = fact*(DD*2.0/(DR+RD))-1.0\n\n elif estimator == 'ph':\n if weightsflag is False: # or len(weights) != Nd or len(rweights) != len(datR):\n DD = aDDcalc(dat, binsparv, binsperv, parmetric, permetric, rng)\n RR = aRRcalc(datR, binsparv, binsperv, parmetric, permetric, rng)\n else:\n if useones is True or len(weights) != Nd:\n weights = np.ones(Nd)\n rweights = np.ones(Nr)\n print (\"Calculating anisotropic DD with weights (parallelized)...\\n DD=\")\n # DD = aDDwcalc(dat, binsq, parmetric, permetric, rng, weights)\n DD = amulti_autocp(dat, binsparv, binsperv, parmetric, permetric, rng, weights, Nd, pcpus)\n # if len(rweights) != Nr:\n # RR = aRRcalc(datR, binsparv, binsperv, parmetric, permetric, rng)\n # else:\n print (\"Calculating anisotropic RR with weights (parallelized)...\\n RR=\")\n RR = amulti_autocpr(datR, binsparv, binsperv, parmetric, permetric, rng, rweights, Nr, pcpus)\n print (\"Using Peebles-Hauser estimator\")\n correl = fact**2*(DD/RR)-1.0\n else:\n if weightsflag is False: # 
or len(weights) != Nd or len(rweights) != len(datR):\n DD = aDDcalc(dat, binsparv, binsperv, parmetric, permetric, rng)\n RR = aRRcalc(datR, binsparv, binsperv, parmetric, permetric, rng)\n DR = aDRcalc(dat, datR, binsparv, binsperv, parmetric, permetric, rng)\n RD = aRDcalc(dat, datR, binsparv, binsperv, parmetric, permetric, rng)\n else:\n if useones is True or len(weights) != Nd:\n weights = np.ones(Nd)\n rweights = np.ones(Nr)\n print (\"Calculating anisotropic DD with weights (parallelized)...\\n DD=\")\n # DD = aDDwcalc(dat, binsq, parmetric, permetric, rng, weights)\n DD = amulti_autocp(dat, binsparv, binsperv, parmetric, permetric, rng, weights, Nd, pcpus)\n # print (\"Calculating anisotropic RR with weights (parallelized)...\\n RR=\")\n # RR = aRRwcalc(datR, binsq, parmetric, permetric, rng, rweights)\n # RR = amulti_autocpr(datR, binsq, parmetric, permetric, rng, rweights, Nr, pcpus)\n # DR = aRDwcalc(dat, datR, binsq, parmetric, permetric, rng, weights)\n print (\"Calculating anisotropic DR with weights (parallelized)...\\n DR=\")\n DR = amulti_crosscp(dat, datR, binsparv, binsperv, parmetric, permetric, rng, weights, Nr, pcpus)\n print (\"Calculating anisotropic RD with weights (parallelized)...\\n RD=\")\n RD = amulti_crosscpr(dat, datR, binsparv, binsperv, parmetric, permetric, rng, rweights, Nd, pcpus)\n # if len(rweights) != Nr:\n # RR = aRRcalc(datR, binsparv, binsperv, parmetric, permetric, rng)\n # else:\n print (\"Calculating anisotropic RR with weights (parallelized)...\\n RR=\")\n RR = amulti_autocpr(datR, binsparv, binsperv, parmetric, permetric, rng, rweights, Nr, pcpus)\n if estimator == 'ls':\n print (\"Using Landy-Szalay estimator\")\n correl = fact**2*(DD/RR)-fact*(DR+RD)/RR+1.0\n # correl = fact**2*(DD/RR)-2.0*fact*(DR/RR)+1.0\n elif estimator == 'hew':\n print (\"Using Hewett estimator\")\n correl = fact**2*(DD/RR)-fact*0.5*(DR+RD)/RR\n # correl = fact**2*(DD/RR)-fact*(DR/RR)\n elif estimator == 'h':\n print (\"Using Hamilton estimator\")\n correl = (4.0*DD*RR)/(DR+RD)**2 - 1.0\n # correl = (DD*RR)/DR**2 - 1.0\n correlerr = poserr(correl, DD)\n print(\"Anisotropic Two-point correlation=\")\n np.savetxt(\"aDD_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", DD)\n np.savetxt(\"aDR_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", DR)\n np.savetxt(\"aRD_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", RD)\n np.savetxt(\"aRR_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", RR)\n np.savetxt(\"abinspar_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", binspar)\n np.savetxt(\"abinsper_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", binsper)\n np.savetxt(\"atpcf_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", correl)\n np.savetxt(\"atpcferr_\"+str(cosmology)+\"_\"+str(geometry)+\"_\"+str(vtype)+\"_\"+str(estimator)+\".txt\", correlerr)\n print (correl, correlerr)\n return correl, correlerr\n\n\ndef aDDcalc(dat, binspar, binsper, parmetric, permetric, rng):\n print (\"Calculating anisotropic DD...\\n DD=\")\n dd = np.zeros((len(binspar)-1, len(binsper)-1))\n for i in tqdm(range(len(dat))):\n ind = adbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], dat[j[j>i]], parmetric)[0]\n # print(\"dist0\")\n # print dist0\n dist1 = dist.cdist([dat[i], ], dat[j[j>i]], 
permetric)[0]\n # print(\"dist1\")\n # print dist1\n # print np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n dd += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n # print (\"rng\")\n # print rng\n # print(\"binspar\")\n # print binspar\n # print(\"binsper\")\n # print binsper\n # print dd\n dd[dd == 0] = 1.0\n # Nd = len(dat)\n # DD = dd/(Nd*(Nd-1.0))\n print (dd)\n return dd\n\n\ndef aRRcalc(datR, binspar, binsper, parmetric, permetric, rng):\n print (\"Calculating anisotropic RR...\\n RR=\")\n rr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(datR))):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], datR[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], datR[j[j>i]], permetric)[0]\n rr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n rr[rr == 0] = 1.0\n # Nr = len(datR)\n # RR = rr/(Nr*(Nr-1.0))\n print (rr)\n return rr\n\n\ndef aDRcalc(dat, datR, binspar, binsper, parmetric, permetric, rng):\n print (\"Calculating anisotropic DR...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(dat))):\n ind = arbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], datR[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], datR[j[j>i]], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n dr[dr == 0] = 1.0\n # Nd = len(dat)\n # Nr = len(datR)\n # DR = dr/(Nd*Nr)\n print (dr)\n return dr\n\n\ndef aRDcalc(dat, datR, binspar, binsper, parmetric, permetric, rng):\n print (\"Calculating anisotropic RD...\\n RD=\")\n rd = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(datR))):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], dat[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], dat[j[j>i]], permetric)[0]\n rd += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper))[0]\n rd[rd == 0] = 1.0\n # Nd = len(dat)\n # Nr = len(datR)\n # DR = dr/(Nd*Nr)\n print (rd)\n return rd\n\n\ndef aDDwcalc(dat, binspar, binsper, parmetric, permetric, rng, weights):\n print (\"Calculating anisotropic DD with weights...\\n DD=\")\n dd = np.zeros((len(binspar)-1, len(binsper)-1))\n # ddbt = BallTree(dat, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(dat))):\n ind = adbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], dat[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], dat[j[j>i]], permetric)[0]\n dd += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=weights[j[j>i]])[0]\n dd[dd == 0] = 1.0\n # Nd = len(dat)\n # DD = dd/(Nd*(Nd-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n # print (dd)\n return dd\n\n\ndef aRRwcalc(datR, binspar, binsper, parmetric, permetric, rng, rweights):\n print (\"Calculating anisotropic RR with weights...\\n RR=\")\n rr = np.zeros((len(binspar)-1, len(binsper)-1))\n for i in tqdm(range(len(datR))):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], datR[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], datR[j[j>i]], permetric)[0]\n rr += np.histogram2d(dist0, 
dist1, range=rng, bins=(binspar, binsper), weights=rweights[j[j>i]])[0]\n rr[rr == 0] = 1.0\n # Nr = len(datR)\n # RR = rr/(Nr*(Nr-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n # print (rr)\n return rr\n\n\ndef aDRwcalc(dat, datR, binspar, binsper, parmetric, permetric, rng, rweights):\n print (\"Calculating anisotropic DR with weights...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(dat))):\n ind = arbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], datR[j], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], datR[j], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=rweights[j])[0]\n dr[dr == 0] = 1.0\n # Nd = len(dat)\n # Nr = len(datR)\n # DR = dr/(Nd*Nr)\n # print (dr/2.0)\n return dr/2.0\n\n\ndef aRDwcalc(dat, datR, binspar, binsper, parmetric, permetric, rng, weights):\n print (\"Calculating anisotropic RD with weights...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # bt = BallTree(dat, metric='pyfunc', func=permetric)\n for i in tqdm(range(len(datR))):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], dat[j], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], dat[j], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=weights[j])[0]\n dr[dr == 0] = 1.0\n # DR = dr/(Nd*Nr)\n # print (dr/2.0)\n return dr/2.0\n\n\ndef aDDwcalcp(dat, binspar, binsper, parmetric, permetric, rng, weights, rNd, multi=False, queue=0):\n dd = np.zeros((len(binspar)-1, len(binsper)-1))\n # ddbt = BallTree(dat, metric='pyfunc', func=permetric)\n for i in tqdm(rNd):\n ind = adbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], dat[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], dat[j[j>i]], permetric)[0]\n dd += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=weights[j[j>i]])[0]\n if multi:\n queue.put(dd)\n else:\n return dd\n # print (DD)\n return dd\n\n\ndef aRRwcalcp(datR, binspar, binsper, parmetric, permetric, rng, rweights, rNr, multi=False, queue=0):\n rr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(rNr):\n ind = arbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], datR[j[j>i]], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], datR[j[j>i]], permetric)[0]\n rr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=rweights[j[j>i]])[0]\n if multi:\n queue.put(rr)\n else:\n return rr\n # rr[rr == 0] = 1.0\n # Nr = len(datR)\n # RR = rr/(Nr*(Nr-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n # print (RR)\n return rr\n\n\ndef aDRwcalcp(dat, datR, binspar, binsper, parmetric, permetric, rng, rweights, rNd, multi=False, queue=0):\n # print (\"Calculating anisotropic DR with weights (parallelized)...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # rrbt = BallTree(datR, metric='pyfunc', func=permetric)\n for i in tqdm(rNd):\n ind = arbt.query_radius(dat[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([dat[i], ], datR[j], parmetric)[0]\n dist1 = dist.cdist([dat[i], ], datR[j], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), 
weights=rweights[j])[0]\n if multi:\n queue.put(dr)\n else:\n return dr\n # print (DR)\n return dr\n\n\ndef aRDwcalcp(dat, datR, binspar, binsper, parmetric, permetric, rng, weights, rNr, multi=False, queue=0):\n # print (\"Calculating anisotropic RD with weights (parallelized)...\\n DR=\")\n dr = np.zeros((len(binspar)-1, len(binsper)-1))\n # bt = BallTree(dat, metric='pyfunc', func=permetric)\n for i in tqdm(rNr):\n ind = adbt.query_radius(datR[i].reshape(1, -1), maxrad)\n for j in ind:\n dist0 = dist.cdist([datR[i], ], dat[j], parmetric)[0]\n dist1 = dist.cdist([datR[i], ], dat[j], permetric)[0]\n dr += np.histogram2d(dist0, dist1, range=rng, bins=(binspar, binsper), weights=weights[j])[0]\n if multi:\n queue.put(dr)\n else:\n return dr\n return dr\n\n\ndef amulti_autocp(dat, binspar, binsper, parmetric, permetric, rng, weights, Nd, CORES=pcpus):\n\n DD = np.zeros((len(binspar)-1, len(binsper)-1))\n queues = [RetryQueue() for i in range(CORES)]\n args = [(dat, binspar, binsper, parmetric, permetric, rng, weights, range(int(Nd*i/CORES),int(Nd*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]\n jobs = [Process(target=aDDwcalcp, args=(a)) for a in args]\n for j in jobs: j.start()\n for q in queues: DD += q.get()\n for j in jobs: j.join()\n DD[DD == 0] = 1.0\n # DD = DD/(Nd*(Nd-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n print (DD)\n return DD\n\n\ndef amulti_autocpr(datR, binspar, binsper, parmetric, permetric, rng, rweights, Nr, CORES=pcpus):\n\n RR = np.zeros((len(binspar)-1, len(binsper)-1))\n queues = [RetryQueue() for i in range(CORES)]\n args = [(datR, binspar, binsper, parmetric, permetric, rng, rweights, range(int(Nr*i/CORES),int(Nr*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]\n jobs = [Process(target=aRRwcalcp, args=(a)) for a in args]\n for j in jobs: j.start()\n for q in queues: RR += q.get()\n for j in jobs: j.join()\n RR[RR == 0] = 1.0\n # RR = RR/(Nr*(Nr-1.0)) # factor of 2 cancels with 1/2 that needs to be done to remove double counting of pairs\n print (RR)\n return RR\n\n\ndef amulti_crosscp(dat, datR, binspar, binsper, parmetric, permetric, rng, weights, Nr, CORES=pcpus):\n\n RD = np.zeros((len(binspar)-1, len(binsper)-1))\n queues = [RetryQueue() for i in range(CORES)]\n args = [(dat, datR, binspar, binsper, parmetric, permetric, rng, weights, range(int(Nr*i/CORES), int(Nr*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]\n jobs = [Process(target=aRDwcalcp, args=(a)) for a in args]\n for j in jobs: j.start()\n for q in queues: RD += q.get()\n for j in jobs: j.join()\n RD[RD == 0] = 1.0\n # Nd=len(dat)\n # DR = DR/(Nd*Nr)\n print (RD/2.0)\n return RD/2.0\n\n\ndef amulti_crosscpr(dat, datR, binspar, binsper, parmetric, permetric, rng, rweights, Nd, CORES=pcpus):\n\n DR = np.zeros((len(binspar)-1, len(binsper)-1))\n queues = [RetryQueue() for i in range(CORES)]\n args = [(dat, datR, binspar, binsper, parmetric, permetric, rng, rweights, range(int(Nd*i/CORES), int(Nd*(i+1)/CORES)), True, queues[i]) for i in range(CORES)]\n jobs = [Process(target=aDRwcalcp, args=(a)) for a in args]\n for j in jobs: j.start()\n for q in queues: DR += q.get()\n for j in jobs: j.join()\n DR[DR == 0] = 1.0\n # Nd=len(dat)\n # DR = DR/(Nd*Nr)\n print (DR/2.0)\n return DR/2.0\n\n\ndef ximonopole(correlsmu, mu):\n xi0 = np.sum(correlsmu*sp.special.legendre(0)(mu),axis=1)/len(mu)\n np.savetxt(\"xi0.txt\",xi0)\n return xi0\n\n\ndef xidipole(correlsmu, mu):\n xi2 = np.sum(5.0*correlsmu*sp.special.legendre(2)(mu),axis=1)/len(mu)\n 
np.savetxt(\"xi2.txt\",xi2)\n return xi2\n\n\ndef xiquadpole(correlsmu, mu):\n xi4 = np.sum(9.0*correlsmu*sp.special.legendre(4)(mu),axis=1)/len(mu)\n np.savetxt(\"xi4.txt\",xi4)\n return xi4\n\ndef beta(correlsmu, mu):\n xis0 = ximonopole(correlsmu,mu)\n xis2 = xidipole(correlsmu,mu)\n xis4 = xiquadpole(correlsmu,mu)\n xir = xis0*sp.special.legendre(0)(mu) + xis2*sp.special.legendre(2)(mu) + xis4*sp.special.legendre(4)(mu)\n r = xir/xis0\n return 5.0/3.0*(np.sqrt(1.8*r-0.8)-1.0)\n\n# def beta(correlsmu, mu):\n# betav =\n" ]
[ [ "scipy.special.legendre" ] ]
davidcrowland/layer_vb_tagging
[ "83865d67b7a931a9eff4ba6fd4d033b2219225f1" ]
[ "extra/tsfresh_examples/tsfresh/feature_selection/feature_selector.py" ]
[ "# -*- coding: utf-8 -*-\n# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\"\"\"\nContains a feature selection method that evaluates the importance of the different extracted features. To do so,\nfor every feature the influence on the target is evaluated by an univariate tests and the p-Value is calculated.\nThe methods that calculate the p-values are called feature selectors.\n\nAfterwards the Benjamini Hochberg procedure which is a multiple testing procedure decides which features to keep and\nwhich to cut off (solely based on the p-values).\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom functools import partial\n\nfrom builtins import zip\nfrom builtins import range\nimport os\nimport numpy as np\nimport pandas as pd\nimport logging\nfrom multiprocessing import Pool\nfrom tsfresh.feature_selection.significance_tests import target_binary_feature_real_test, \\\n target_real_feature_binary_test, target_real_feature_real_test, target_binary_feature_binary_test\nfrom tsfresh import defaults\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef check_fs_sig_bh(X, y,\n n_processes=defaults.N_PROCESSES,\n chunksize=defaults.CHUNKSIZE,\n fdr_level=defaults.FDR_LEVEL,\n hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,\n test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE):\n \"\"\"\n The wrapper function that calls the significance test functions in this package.\n In total, for each feature from the input pandas.DataFrame an univariate feature significance test is conducted.\n Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to decide which features\n to keep and which to delete.\n\n We are testing\n \n :math:`H_0` = the Feature is not relevant and can not be added\n\n against\n\n :math:`H_1` = the Feature is relevant and should be kept\n \n or in other words\n \n :math:`H_0` = Target and Feature are independent / the Feature has no influence on the target\n\n :math:`H_1` = Target and Feature are associated / dependent\n\n When the target is binary this becomes\n \n :math:`H_0 = \\\\left( F_{\\\\text{target}=1} = F_{\\\\text{target}=0} \\\\right)`\n\n :math:`H_1 = \\\\left( F_{\\\\text{target}=1} \\\\neq F_{\\\\text{target}=0} \\\\right)`\n \n Where :math:`F` is the distribution of the target.\n\n In the same way we can state the hypothesis when the feature is binary\n \n :math:`H_0 = \\\\left( T_{\\\\text{feature}=1} = T_{\\\\text{feature}=0} \\\\right)`\n\n :math:`H_1 = \\\\left( T_{\\\\text{feature}=1} \\\\neq T_{\\\\text{feature}=0} \\\\right)`\n\n Here :math:`T` is the distribution of the target.\n\n TODO: And for real valued?\n\n :param X: The DataFrame containing all the features and the target\n :type X: pandas.DataFrame\n\n :param y: The target vector\n :type y: pandas.Series\n\n :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature\n :type test_for_binary_target_real_feature: str\n\n :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant\n features among all created features.\n :type fdr_level: float\n\n :param hypotheses_independent: Can the significance of the features be assumed to be independent?\n Normally, this should be set to False as the features are never\n independent (e.g. 
mean and median)\n :type hypotheses_independent: bool\n\n :param n_processes: Number of processes to use during the p-value calculation\n :type n_processes: int\n\n :param chunksize: Size of the chunks submitted to the worker processes\n :type chunksize: int\n\n :return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance\n of this particular feature. The DataFrame has the columns\n \"Feature\",\n \"type\" (binary, real or const),\n \"p_value\" (the significance of this feature as a p-value, lower means more significant)\n \"rejected\" (if the Benjamini Hochberg procedure rejected this feature)\n :rtype: pandas.DataFrame\n\n \"\"\"\n target_is_binary = len(set(y)) == 2\n\n # todo: solve the multiclassification case. for a multi classification the algorithm considers the target to be\n # regression. Instead one could perform a binary one versus all classification.\n\n # Only allow entries for which the target is known!\n y = y.astype(np.float)\n X = X.copy().loc[~(y == np.NaN), :]\n\n # Create the DataFrame df_features containing the information about the different hypotheses\n # Every row contains information over one feature column from X\n df_features = pd.DataFrame()\n\n df_features['Feature'] = list(set(X.columns))\n df_features = df_features.set_index('Feature', drop=False)\n\n # Add relevant columns to df_features\n df_features[\"rejected\"] = np.nan\n df_features[\"type\"] = np.nan\n df_features[\"p_value\"] = np.nan\n\n # Calculate the feature significance in parallel\n pool = Pool(n_processes)\n\n # Helper function which wrapps the _calculate_p_value with many arguments already set\n f = partial(_calculate_p_value, y=y,\n target_is_binary=target_is_binary,\n test_for_binary_target_real_feature=test_for_binary_target_real_feature)\n results = pool.map(f, [X[feature] for feature in df_features['Feature']], chunksize=chunksize)\n p_values_of_features = pd.DataFrame(results)\n df_features.update(p_values_of_features)\n\n pool.close()\n pool.join()\n\n # Perform the real feature rejection\n if \"const\" in set(df_features.type):\n df_features_bh = benjamini_hochberg_test(df_features.loc[~(df_features.type == \"const\")],\n hypotheses_independent, fdr_level)\n df_features = pd.concat([df_features_bh, df_features.loc[df_features.type == \"const\"]])\n else:\n df_features = benjamini_hochberg_test(df_features, hypotheses_independent, fdr_level)\n \n # It is very important that we have a boolean \"rejected\" column, so we do a cast here to be sure\n df_features[\"rejected\"] = df_features[\"rejected\"].astype(\"bool\")\n\n if defaults.WRITE_SELECTION_REPORT:\n # Write results of BH - Test to file\n if not os.path.exists(defaults.RESULT_DIR):\n os.mkdir(defaults.RESULT_DIR)\n\n with open(os.path.join(defaults.RESULT_DIR, \"fs_bh_results.txt\"), 'w') as file_out:\n file_out.write((\"Performed BH Test to control the false discovery rate(FDR); \\n\"\n \"FDR-Level={0};Hypothesis independent={1}\\n\"\n ).format(fdr_level, hypotheses_independent))\n df_features.to_csv(index=False, path_or_buf=file_out, sep=';', float_format='%.4f')\n return df_features\n\n\ndef _calculate_p_value(feature_column, y, target_is_binary, test_for_binary_target_real_feature):\n \"\"\"\n Internal helper function to calculate the p-value of a given feature using one of the dedicated\n functions target_*_feature_*_test.\n\n :param feature_column: the feature column.\n :type feature_column: pandas.Series\n\n :param y: the binary target vector\n :type y: 
pandas.Series\n\n :param target_is_binary: Whether the target is binary or not\n :type target_is_binary: bool\n\n :param test_for_binary_target_real_feature: The significance test to be used for binary target and real valued\n features. Either ``'mann'`` for the Mann-Whitney-U test or ``'smir'``\n for the Kolmogorov-Smirnov test.\n :type test_for_binary_target_real_feature: str\n\n :return: the p-value of the feature significance test and the type of the tested feature as a Series.\n Lower p-values indicate a higher feature significance.\n :rtype: pd.Series\n \"\"\"\n # Do not process constant features\n if len(pd.unique(feature_column.values)) == 1:\n _logger.warning(\"[test_feature_significance] Feature {} is constant\".format(feature_column.name))\n return pd.Series({\"type\": \"const\", \"rejected\": False}, name=feature_column.name)\n\n else:\n if target_is_binary:\n # Decide if the current feature is binary or not\n if len(set(feature_column.values)) == 2:\n type = \"binary\"\n p_value = target_binary_feature_binary_test(feature_column, y)\n else:\n type = \"real\"\n p_value = target_binary_feature_real_test(feature_column, y, test_for_binary_target_real_feature)\n else:\n # Decide if the current feature is binary or not\n if len(set(feature_column.values)) == 2:\n type = \"binary\"\n p_value = target_real_feature_binary_test(feature_column, y)\n else:\n type = \"real\"\n p_value = target_real_feature_real_test(feature_column, y)\n\n return pd.Series({\"p_value\": p_value, \"type\": type}, name=feature_column.name)\n\n\ndef benjamini_hochberg_test(df_pvalues, hypotheses_independent, fdr_level):\n \"\"\"\n This is an implementation of the benjamini hochberg procedure that calculates which of the hypotheses belonging\n to the different p-Values from df_p to reject. While doing so, this test controls the false discovery rate,\n which is the ratio of false rejections by all rejections:\n\n .. math::\n\n FDR = \\\\mathbb{E} \\\\left [ \\\\frac{ |\\\\text{false rejections}| }{ |\\\\text{all rejections}|} \\\\right]\n\n\n References\n ----------\n\n .. [1] Benjamini, Yoav and Yekutieli, Daniel (2001).\n The control of the false discovery rate in multiple testing under dependency.\n Annals of statistics, 1165--1188\n\n\n :param df_pvalues: This DataFrame should contain the p_values of the different hypotheses in a column named\n \"p_values\".\n :type df_pvalues: pandas.DataFrame\n\n :param hypotheses_independent: Can the significance of the features be assumed to be independent?\n Normally, this should be set to False as the features are never\n independent (e.g. 
mean and median)\n :type hypotheses_independent: bool\n\n :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant\n features among all created features.\n :type fdr_level: float\n\n :return: The same DataFrame as the input, but with an added boolean column \"rejected\".\n :rtype: pandas.DataFrame\n \"\"\"\n\n # Get auxiliary variables and vectors\n df_pvalues = df_pvalues.sort_values(by=\"p_value\")\n m = len(df_pvalues)\n K = list(range(1, m + 1))\n\n # Calculate the weight vector C\n if hypotheses_independent:\n # c(k) = 1\n C = [1] * m\n else:\n # c(k) = \\sum_{i=1}^m 1/i\n C = [sum([1.0 / i for i in range(1, k + 1)]) for k in K]\n\n # Calculate the vector T to compare to the p_value\n T = [fdr_level * k / m * 1.0 / c for k, c in zip(K, C)]\n\n # Get the last rejected p_value\n try:\n k_max = list(df_pvalues.p_value <= T).index(False)\n except ValueError:\n k_max = m\n\n # Add the column denoting if hypothesis was rejected\n df_pvalues[\"rejected\"] = [True] * k_max + [False] * (m - k_max)\n\n return df_pvalues\n" ]
[ [ "pandas.Series", "pandas.unique", "pandas.DataFrame", "pandas.concat" ] ]
prakass1/InteractiveSimilarityExplorer
[ "2fa5fb91c7df6424b9ed777ef4373ed7094c2348" ]
[ "machine_learning_model.py" ]
[ "import utility\nimport static_sim_functions as smf\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import *\nfrom time_series_grp import TimeSeriesGroupProcessing\nfrom RandomNeighbors import RandomNeighbors\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.model_selection import KFold\n\nimport ml_modelling_ts as ml_ts\n\n'''\nThis is just a run of the approaches using the methodologies, save the neighborhood for UI.\n'''\n\n\ndef common_processing(df):\n # Getting percentage between 0 to 1 rather than score values\n df[\"tschq12\"] = df[\"tschq12\"].apply(lambda x: x / 100)\n df[\"tschq16\"] = df[\"tschq16\"].apply(lambda x: x / 100)\n df[\"tschq17\"] = df[\"tschq17\"].apply(lambda x: x / 100)\n\n # Feature engineering family history\n df[\"tschq04\"] = df.apply(smf.create_cols_family_hist, axis=1)\n\n return df\n\n\ndef get_common_cols(col1, col2):\n common_elements = set(col1).intersection(col2)\n return common_elements\n\nimport properties\nimport pandas as pd\ndef initial_processing():\n # Read the csv of the tschq data and make the necessary things\n tschq = pd.read_pickle(properties.data_location + \"/input_pckl/\" + \"3_q.pckl\")\n\n # Cleaning tschq05 question. There is an abstraction for a row we add common value\n\n def filter_age(x):\n if isinstance(x, int):\n # Append the most common value obtained\n return tschq[\"tschq05\"].value_counts().head(1).index[0]\n else:\n return x\n\n tschq[\"tschq05\"] = tschq[\"tschq05\"].apply(filter_age)\n\n # Drop the questionnaire_id and created_at\n tschq.drop([\"questionnaire_id\", \"created_at\"], axis=1, inplace=True)\n\n # Lets read and join two questionnaires tschq and hq\n hq = pd.read_pickle(\"data/input_pckl/4_q.pckl\")\n hq.isna().sum(axis=0)\n # By looking at the output we are sure that h5 and h6 do not contribute much and can be dropped\n hq.drop([\"hq05\", \"hq06\"], axis=1, inplace=True)\n hq_df = hq.set_index(\"user_id\")\n df = tschq.join(hq_df.iloc[:, 2:], on=\"user_id\")\n\n drop_cols = [\"tschq01\", \"tschq25\", \"tschq07-2\",\n \"tschq13\", \"tschq04-1\", \"tschq04-2\"]\n\n # Getting percentage between 0 to 1 rather than score values\n df[\"tschq12\"] = df[\"tschq12\"].apply(lambda x: x / 100)\n df[\"tschq16\"] = df[\"tschq16\"].apply(lambda x: x / 100)\n df[\"tschq17\"] = df[\"tschq17\"].apply(lambda x: x / 100)\n\n df[\"tschq04\"] = df.apply(smf.create_cols_family_hist, axis=1)\n\n df.drop(drop_cols, axis=1, inplace=True)\n\n # Set the heom object, while using the required similarity\n # Alternative\n # Categorical boolean mask\n categorical_feature_mask = df.iloc[:, 1:].infer_objects().dtypes == object\n other_feature_mask = df.iloc[:, 1:].infer_objects().dtypes != object\n # filter categorical columns using mask and turn it into a list\n categorical_cols = df.iloc[:, 1:].columns[categorical_feature_mask].tolist()\n num_cols = df.iloc[:, 1:].columns[other_feature_mask].tolist()\n cat_idx = [df.iloc[:, 1:].columns.get_loc(val) for val in categorical_cols]\n num_idx = [df.iloc[:, 1:].columns.get_loc(val) for val in num_cols]\n\n return cat_idx, num_idx, df\n\nimport os\nimport traceback\ndef save_data_objs(df, quest_cmbs=\"all\"):\n try:\n if not os.path.isdir(properties.model_location + quest_cmbs):\n os.makedirs(properties.model_location + quest_cmbs)\n utility.save_model(\"\".join(quest_cmbs + \"/\" + quest_cmbs + \"_stat_q_data\"), df)\n\n encoded_combined_df = smf.preprocess(df, quest_cmbs, age_bin=False,\n process_model_name=\"\".join(quest_cmbs + \"/\" 
+\n quest_cmbs + \"_stat_q_data_oe_model\"),\n prediction=False)\n\n # Save this encoded_data\n utility.save_model(\"\".join(quest_cmbs + \"/\" +\n quest_cmbs + \"_stat_q_data_encoded\"), encoded_combined_df)\n\n return encoded_combined_df\n\n # Use this data to build the data over static data.\n except Exception:\n print(traceback.print_exc())\n\n\ndef weighted_average(distress_list):\n average = np.asarray(distress_list, dtype=float).mean()\n return average\n\n\n\n# Function computes the weighted average as predictions for given prediction time point\ndef compute_weighted_avg(n_idx, encoded_data, pred_at_list, method=\"mean\", dist_nn=None, wt_flag=False):\n\n preds = list()\n # Prediction for four time points\n for pval in pred_at_list:\n distress_list = list()\n for vals in n_idx:\n u_id = encoded_data[\"user_id\"].iloc[vals]\n user_ts = tsg_data.get_usr_mday_ts_predict(int(u_id))\n # 3rd val of the series is s03 of the neighbor\n print(\"{}, {} Values \".format(int(pval), int(u_id)))\n if len(user_ts) > int(pval):\n value = user_ts[int(pval), :][3]\n elif len(user_ts) <= int(pval):\n value = user_ts[len(user_ts)-1, :][3]\n\n distress_list.append(value)\n\n\n if wt_flag:\n print(\"Calling by weighted distance prediction for distress\")\n preds.append(weighted_distance_prediction(distress_list, dist_nn))\n else:\n print(\"Calling weighted average to predict distress\")\n preds.append(weighted_average(distress_list))\n return preds\n\n\ndef weighted_distance_prediction(p_preds, distance):\n # Inverse distance so that highest weight is given to the nearest one and least to the farther\n inv_dist = np.divide(1, distance)\n\n #s03 - tinnitus distress weighted by distance is given as\n s03_pred = (np.sum(np.multiply(p_preds, inv_dist)) / (np.sum(inv_dist)))\n\n return s03_pred\n\n\ndef compute(test_nn, encoded_data,\n pred_list, method=\"mean\", dist_nn=None, wt_dist=False):\n from sklearn.linear_model import LinearRegression\n\n preds = list()\n for point in pred_list:\n nn_preds = list()\n intercepts_list = list()\n coeff_list = list()\n for nn in test_nn:\n u_id = encoded_data[\"user_id\"].iloc[nn]\n user_ts = tsg_data.get_usr_mday_ts_predict(int(u_id))\n # Obtain the time series until time point and fit the data for linear regression\n diff_arr = np.abs(np.subtract(point, user_ts[:, 1]))\n diff_near_idx = np.where(diff_arr == diff_arr.min())\n print(\"minimum to the time point is at -- \", diff_near_idx)\n # difference near index. Handling for the length of users\n usr_idx = diff_near_idx[0][0]\n\n user_ts_p = user_ts[:usr_idx]\n user_ts_df = pd.DataFrame(user_ts_p, columns=[\"day\", \"day_sess_index\",\n \"s02\", \"s03\", \"s04\",\n \"s05\", \"s06\", \"s07\"])\n X = user_ts_df[[\"day_sess_index\"]]\n # We show for tinnitus distress. This can be extended to other physiological variables as well.\n y = user_ts_df[[\"s03\"]]\n\n # Fit on X axis as time and Y as the s03 predictive value.\n reg_fit = LinearRegression(normalize=True)\n reg_fit.fit(X, y)\n\n # If weighted_distance is true, then predict by each of the nn_user and add to list. 
This will be used for\n # calculating weighted_distance_predictions.\n if wt_dist:\n nn_pred = reg_fit.predict(np.asarray(point).reshape(1, -1))\n nn_preds.append(nn_pred[0][0])\n else:\n intercepts_list.append(reg_fit.intercept_)\n coeff_list.append(reg_fit.coef_)\n\n if wt_dist:\n print(\"Predicting the value of s03 for the user by a weighted average weighted by distance\")\n preds.append(weighted_distance_prediction(nn_preds, dist_nn))\n else:\n print(\"Predicting the value of s3 over the averaged slope and intercepts of \"\n \"observations of the neighbors\")\n\n # y = mx + c, where m is the average slope of the neighbors and c is the average intercept obtained.\n print(\"The equation to estimate s03 for the user is {}\".format(\"\".join(str(np.asarray(coeff_list).mean())) +\n \"* time_index + \" +\n str(np.asarray(intercepts_list).mean())))\n y = np.multiply(np.asarray(coeff_list).mean(), point) + np.asarray(intercepts_list).mean()\n preds.append(y)\n\n return preds\n\n\ndef compute_linear_regression(test_nn, encoded_data, pred_list, method=\"mean\"):\n #test_nn = test_user_nn\n #pred_list = prediction_at_list\n from sklearn.linear_model import LinearRegression\n preds = list()\n for point in pred_list:\n attr_list = list()\n intercepts_list = list()\n coeff_list = list()\n for nn in test_nn:\n u_id = encoded_data[\"user_id\"].iloc[nn]\n user_ts = tsg_data.get_m_day_ts_enumerate(int(11))\n diff_arr = np.abs(np.subtract(point, user_ts[:, 1]))\n diff_near_idx = np.where(diff_arr == diff_arr.min())\n print(diff_near_idx)\n # difference near index\n usr_vals = np.array([user_ts[n_id] for n_id in diff_near_idx[0]])\n if len(usr_vals) > 1:\n value = usr_vals.mean(axis=0)\n print(\"vavg\" + str(value))\n else:\n value = usr_vals[0]\n print(\"v\" + str(value))\n\n attr_list.append(value)\n\n\n df = pd.DataFrame(user_ts)\n df.columns = [\"day\", \"day_session_id\",\n \"s02\", \"s03\",\n \"s04\", \"s05\",\n \"s06\", \"s07\"]\n reg_model = LinearRegression(normalize=True)\n user_x = df[[\"day_session_id\", \"s04\", \"s05\", \"s06\"]].to_numpy()\n user_s03 = df[[\"s03\"]].to_numpy().ravel()\n reg_model.fit(user_x, user_s03)\n intercepts_list.append(reg_model.intercept_)\n coeff_list.append(reg_model.coef_)\n # y = mx + c, where m is the average slope of the neighbors and c is the average intercept obtained.\n\n # convert coeff's to numpy for manipulations\n numpy_attr_list = np.array(attr_list)\n print(numpy_attr_list)\n avg_np_attr_list = numpy_attr_list[:, 4:].mean(axis=0)\n\n print(avg_np_attr_list)\n\n numpy_coeff_list = np.array(coeff_list)\n\n print(numpy_coeff_list)\n print(numpy_coeff_list.mean(axis=0))\n\n # Day_index, s02, s04, s05, s06 ,s07 - Use only the fit independent features to estimate the dependent\n y = np.multiply(numpy_coeff_list[:, 0].mean(), point) + \\\n np.multiply(numpy_coeff_list[:, 1].mean(), avg_np_attr_list[0]) + \\\n np.multiply(numpy_coeff_list[:, 2].mean(), avg_np_attr_list[1]) + \\\n np.multiply(numpy_coeff_list[:, 3].mean(), avg_np_attr_list[2]) + \\\n np.asarray(intercepts_list).mean()\n preds.append(y)\n print(preds)\n return preds\n\n\n# Create test label as ground truth at prediction point.\ndef create_y_labels(test_data, prediction_at, method=\"mean\"):\n y_test = list()\n for i in range(0, len(test_data)):\n test_ts_test1 = tsg_data.get_usr_mday_ts_predict(int(test_data.iloc[i][\"user_id\"]))\n # print(len(test_ts_test1))\n if len(test_ts_test1) >= prediction_at:\n y_test.append(test_ts_test1[prediction_at - 1][2])\n elif len(test_ts_test1) < 
prediction_at:\n y_test.append(test_ts_test1[len(test_ts_test1) - 1][2])\n return y_test\n\n\n# Create reference points for multiple reference predictions\ndef get_pred_ref_points(user_id, ndays, method=\"mean\"):\n # Using the default tsg which is mean observations of the user\n test_user_ts = tsg_data.get_usr_mday_ts_predict(user_id)\n\n user_ts_idx = test_user_ts[:, 1]\n # [\"date\", \"time_idx\", \"s02\", \"s03\", \"s04\", \"s05\", \"s06\", \"s07]\n user_distress = test_user_ts[:, 3]\n\n # Near evaluation. Change this for farther evaluations\n # Near -> 0.20, 0.10\n # Far -> 1 - (Near)\n\n # Near points are of the sequence of observation because we are sure all stay until here.\n #prediction_at = 10\n\n # Far prediction point is the last N% of the test user time series\n # It is tested for 0.75, 0.8, 0.9\n prediction_at = round(len(user_ts_idx) * 0.80)\n y_labels = user_distress[prediction_at:prediction_at + ndays].tolist()\n prediction_at_list = user_ts_idx[prediction_at:prediction_at + ndays].tolist()\n\n return y_labels, prediction_at_list\n\n\ndef do_test(test_data, out_writer, csv_out_writer,\n ndays, near_idxs, encoded_data, fold_count=\"final\",\n method=\"mean\", dist_nn=None, wt_dist_flag=False):\n for i in range(0, len(test_data)):\n user_id = int(test_data.iloc[i][\"user_id\"])\n print(\"User- Id \", user_id)\n y_labels, prediction_at_list = get_pred_ref_points(user_id, ndays, method=method)\n\n # y_labels = create_y_labels(X_test, preds, method=\"mean\")\n # Weighting by inverse of neighbor\n if wt_dist_flag:\n test_user_nn = near_idxs[i]\n test_user_dist = dist_nn[i]\n pred_weighted_average = compute_weighted_avg(test_user_nn, encoded_data, prediction_at_list,\n method=method, dist_nn=test_user_dist, wt_flag=wt_dist_flag)\n\n pred_lr = compute(test_user_nn, encoded_data, prediction_at_list,\n method=method, dist_nn=test_user_dist, wt_dist=wt_dist_flag)\n else:\n test_user_nn = near_idxs[i]\n pred_weighted_average = compute_weighted_avg(test_user_nn, encoded_data, prediction_at_list,\n method=method, dist_nn=None, wt_flag=False)\n pred_lr = compute(test_user_nn, encoded_data, prediction_at_list,\n method=method, dist_nn=None, wt_dist=False)\n\n\n # calculate\n if not fold_count == \"final\":\n print(\"Evaluating for the fold-\" + str(fold_count) + \" for the forecast reference points - \" +\n str(prediction_at_list))\n out_writer.write(\"Evaluating for the forecast reference points -- \" +\n str(prediction_at_list) + \"for the method evaluation -- \" + str(method) + \"\\n\")\n else:\n print(\"Evaluating for forecast reference points - \" +\n str(prediction_at_list))\n out_writer.write(\"Evaluating over the forecast reference points -- \" +\n str(prediction_at_list) + \"for the method evaluation -- \" + str(method) + \"\\n\")\n\n print(\"Computing RMSE for weighted average based predictions on the User -- \" + str(user_id))\n print(\"---------------------------------------------------------------\")\n out_writer.write(\"---------------------------------------------------------------\\n\")\n\n print(\"RMSE -- \", np.sqrt(mean_squared_error(y_labels, pred_weighted_average)))\n out_writer.write(\"RMSE -- \" + str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + \"\\n\")\n\n\n # Writing to csv file\n if not fold_count == \"final\":\n csv_out_writer.write(\"\".join(str(user_id) + \",\" +\n str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + \",\" +\n \"weighted_average\" + \",\"\n + str(y_labels[0]) + \",\" + str(y_labels[1]) + \",\" + 
str(y_labels[2])\n + \",\" + str(pred_weighted_average[0]) + \",\" + str(pred_weighted_average[1])\n + \",\" + str(pred_weighted_average[2]) + \"\\n\"))\n else:\n csv_out_writer.write(\"\".join(str(user_id) + \",\" +\n str(np.sqrt(mean_squared_error(y_labels, pred_weighted_average))) + \",\" +\n \"weighted_average\" + \",\"\n + str(y_labels[0]) + \",\" + str(y_labels[1]) + \",\" + str(y_labels[2])\n + \",\" + str(pred_weighted_average[0]) + \",\" + str(pred_weighted_average[1])\n + \",\" + str(pred_weighted_average[2]) + \"\\n\"))\n\n print(\"-----------------------------------------------------------------------------\")\n out_writer.write(\"---------------------------------------------------------------\\n\")\n print(\"Computing RMSE for {} {} based predictions for the user -- {}\"\n .format(str(\"weighted_distance\" + str(wt_dist_flag)), str(\"linear_regression\"), str(user_id)))\n out_writer.write(\"Computing RMSE for {} {} based predictions for the user -- {} \\n\"\n .format(str(\"weighted_distance\" + str(wt_dist_flag)), str(\"linear_regression\"), str(user_id)))\n print(\"RMSE -- \", np.sqrt(mean_squared_error(y_labels, pred_lr)))\n out_writer.write(\"RMSE -- \" + str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + \"\\n\")\n print(\"---------------------------------------------------------------\")\n out_writer.write(\"---------------------------------------------------------------\\n\")\n\n # Write to csv file\n if not fold_count == \"final\":\n csv_out_writer.write(\"\".join(str(user_id) + \",\" +\n str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + \",\" +\n str(\"lr\") + \",\"\n + str(y_labels[0]) + \",\" + str(y_labels[1]) + \",\" + str(y_labels[2])\n + \",\" + str(pred_lr[0]) + \",\" + str(pred_lr[1]) + \",\" + str(\n pred_lr[2]) + \"\\n\"))\n else:\n csv_out_writer.write(\"\".join(str(user_id) + \",\" +\n str(np.sqrt(mean_squared_error(y_labels, pred_lr))) + \",\" +\n str(\"lr\") + \",\"\n + str(y_labels[0]) + \",\" + str(y_labels[1]) + \",\" + str(y_labels[2])\n + \",\" + str(pred_lr[0]) + \",\" + str(pred_lr[1]) + \",\" + str(\n pred_lr[2]) + \"\\n\"))\n\n\n# Change method and execute to get the predictions appropriately, these are configurations\neval_method = \"mean\"\n# Default day readings for all test users must be at mean and prediction are between min - mean - max\n\ntsg_data = TimeSeriesGroupProcessing(method=eval_method)\n# For all combinations evaluation it must be set to True\nquest_cmb_all = False\n# Same random state needs to be maintained to get consistent test data over all combinations and repeatable results\nrandom_state = 1220\n# It is the setting to get the ahead prediction for tinnitus distress, 3 here means for 3 days\n# min it is a day and max of about 60days between points which is not an usual scenario\nndays = 3\n\n# Build the default NN with all the combination.\nif not quest_cmb_all:\n for key, val in properties.quest_comb.items():\n # Build NN for each category\n print(\"Building NN for the question combination -- \" + str(key))\n\n cat_idx, num_idx, combined_df = smf.initial_processing(key, val, append_synthethic=False)\n # Build and get the knn NN for prediction over test instances.\n # Save the data objs\n\n encoded_data = save_data_objs(combined_df, key)\n\n out_writer = open(\"\".join(\"output/output_\" + str(key) + \"_\" + str(eval_method) + \"_heom_norm.txt\"), \"w+\")\n csv_out_writer = open(\"\".join(\"output/output_\" + str(key) + \"_\" + str(eval_method) + \"_heom_norm.csv\"), \"w+\")\n\n 
csv_out_writer.write(\"\".join(\"user_id,rmse,algorithm,\"\n \"ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\\n\"))\n\n #Create a test set\n X, test = train_test_split(encoded_data,\n test_size=0.20,\n random_state=random_state)\n\n def filter_train_ids(x):\n # print(x)\n if x[\"user_id\"] in train_user_ids:\n return x\n\n def filter_test_ids(x):\n # print(x)\n if x[\"user_id\"] in test_user_ids:\n return x\n\n train_user_ids = X[\"user_id\"].to_list()\n\n X_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type=\"broadcast\").dropna()\n X_train_data_ui[\"user_id\"] = X_train_data_ui[\"user_id\"].apply(int)\n # Save the non encoded train data for visualization purposes\n utility.save_model(\"\".join(key + \"/\" + key + \"_train_stat_q_data\"), X_train_data_ui)\n\n # filter and get the data to show to the UI for the test data.\n test_user_ids = test[\"user_id\"].to_list()\n\n X_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type=\"broadcast\").dropna()\n\n X_test_data_ui[\"user_id\"] = X_test_data_ui[\"user_id\"].apply(int)\n\n # Save the data_ui object as json\n #test_data = {}\n #test_data[\"users\"] = X_test_data_ui.to_dict(\"r\")\n #utility.save_data(\"\".join(\"test_data_ui_\" + key), test_data)\n\n from HEOM import HEOM\n # Can be done at prediction too.\n from sklearn.metrics.pairwise import cosine_distances\n from sklearn.linear_model import LinearRegression\n from scipy.spatial.distance import pdist, squareform\n from scipy.stats import zscore\n\n heom = HEOM(X.to_numpy(), cat_idx, num_idx)\n sim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance)\n mean_heom_distance = sim_matrix.mean()\n\n knn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance)\n knn.fit(X.iloc[:, 1:])\n dist, test_idx = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5)\n\n # Execute without any varying for saving the KNN as pickle to be used by UI\n do_test(test, out_writer, csv_out_writer, ndays, test_idx, X,\n fold_count=\"final\", method=eval_method, dist_nn=None, wt_dist_flag=False)\n\n utility.save_model(\"\".join(key + \"/\" + \"knn_static\"), knn)\n utility.save_model(\"\".join(key + \"/\" + \"train_sim_data.pckl\"), X)\n\n out_writer.close()\n csv_out_writer.close()\n\n\n# All feature combinations\n\ncat_idx, num_idx, combined_df = initial_processing()\n\n\n# Build KNN for each category\nprint(\"Building KNN for the question combination -- \" + str(\"overall\"))\n\n# Save the data objs\nencoded_data = save_data_objs(combined_df, \"overall\")\n\n\nX, test = train_test_split(encoded_data,\n test_size=0.20,\n random_state=random_state)\n\n\ndef filter_train_ids(x):\n # print(x)\n if x[\"user_id\"] in train_user_ids:\n return x\n\n\ndef filter_test_ids(x):\n # print(x)\n if x[\"user_id\"] in test_user_ids:\n return x\n\n\ntrain_user_ids = X[\"user_id\"].to_list()\n\nX_train_data_ui = combined_df.apply(filter_train_ids, axis=1, result_type=\"broadcast\").dropna()\nX_train_data_ui[\"user_id\"] = X_train_data_ui[\"user_id\"].apply(int)\n\n# Save in overall.\nutility.save_model(\"\".join(\"overall\" + \"/\" + \"overall\" + \"_train_stat_q_data\"), X_train_data_ui)\n\n# filter and get the data to show to the UI for the test data.\ntest_user_ids = test[\"user_id\"].to_list()\n\nX_test_data_ui = combined_df.apply(filter_test_ids, axis=1, result_type=\"broadcast\").dropna()\n\nX_test_data_ui[\"user_id\"] = X_test_data_ui[\"user_id\"].apply(int)\n\n# Save the data_ui object as json\ntest_data = {}\ntest_data[\"users\"] = 
X_test_data_ui.to_dict(\"r\")\nutility.save_data(\"test_data_ui_x_test\", test_data)\n\n# Save the results to out_writer\nout_writer = open(\"output/overall_output_folds_\" + str(eval_method) + \".txt\", \"w+\")\ncsv_out_writer = open(\"output/overall_output_folds_\" + str(eval_method) + \".csv\", \"w+\")\n\n# First get the time series for a given test patient and the reference point and iterate to evaluate\ncsv_out_writer.write(\"user_id,rmse,algorithm,\"\n \"ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\\n\")\n\n\n# Split the data into train and test\nfrom sklearn.model_selection import train_test_split\nimport utility\nfrom HEOM import HEOM\n#Can be done at prediction too.\nfrom sklearn.metrics.pairwise import cosine_distances\nfrom sklearn.linear_model import LinearRegression\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.stats import zscore\n\n\nheom = HEOM(X.to_numpy()[:, 1:], cat_idx, num_idx)\nsim_matrix = pdist(X.to_numpy()[:, 1:], heom.heom_distance)\nmean_heom_distance = sim_matrix.mean()\n\nknn = NearestNeighbors(n_neighbors=5, metric=heom.heom_distance, radius=mean_heom_distance)\nknn.fit(X.to_numpy()[:, 1:])\ndist, idx_test = knn.kneighbors(test.to_numpy()[:, 1:], n_neighbors=5)\n\n# First get the time series for a given test patient and the reference point and iterate to evaluate\n\ndo_test(test, out_writer, csv_out_writer, ndays, idx_test, X,\n fold_count=\"final\", method=eval_method, dist_nn=None, wt_dist_flag=False)\n\nout_writer.close()\ncsv_out_writer.close()\n\n# End save the nearest neighbor as data objects, so that can be used from the UI\nutility.save_model(\"\".join(\"overall/\" + \"knn_static\"), knn)\nutility.save_model(\"\".join(\"overall\" + \"/\" + \"train_sim_data.pckl\"), X)\n\n\n'''\n ML Modelling based on s02 - loudness.\n Note: This has to be run once the all feature execution is completed since we build upon a custom similarity matrix,\n it is essential that the same split of train test happen so that it can be verified from the application.\n'''\n\n# Create train and test containing same users in train and test as per static data. 
(Note: Run above code and then this\n# because same set of train test users are used)\n\ndef splitData(dataset, test_user_ids):\n train_data = dataset[~dataset[\"user_id\"].isin(test_user_ids)]\n test_data = dataset[dataset[\"user_id\"].isin(test_user_ids)]\n return train_data, test_data\n\n\n# Save both train and test matrix\ndef save_ts_objs(train, test, location_name):\n try:\n if not os.path.isdir(properties.model_location + location_name):\n os.makedirs(properties.model_location + location_name)\n utility.save_model(\"\".join(location_name + \"/\" + location_name + \"_train_data\"), train)\n utility.save_model(\"\".join(location_name + \"/\" + location_name + \"_test_data\"), test)\n\n except Exception:\n print(traceback.print_exc())\n\n\nX = ml_ts.process_data(grouping=\"day\")\n\n# Calculate pairwise distance and create a dataframe for the same\nfrom scipy.spatial.distance import pdist, squareform\n# Cross validate here based on the same split of static data here.\n# Note: Only one combination will be present\nC = np.zeros((X.shape[0], X.shape[0]))\nfor i in range(0, len(X)):\n for j in range(0, len(X)):\n dist = ml_ts.compute_dist(X[:, 1][i], X[:, 1][j])\n C[i][j] = dist\n\nC_df = pd.DataFrame(C)\n\n\n#C_df.to_csv(\"sim_ema.csv\")\n\n# Threshold overall distance for making within radius\nthreshold_distance = sum(C_df.mean())/len(C_df)\n\n\nuser_ids = []\nfor val in X:\n user_ids.append(val[0])\n\nC_df[\"user_id\"] = user_ids\n\n\ntrain_data, test_data = splitData(C_df, test_user_ids)\n# Save the time series data objects as dynamic_ts into model folder\nsave_ts_objs(train_data, test_data, \"dynamic_ts\")\n\nout_writer = open(\"\".join(\"output/output_ema_\" + str(eval_method) + \"_.txt\"), \"w+\")\ncsv_out_writer = open(\"\".join(\"output/output_ema_\" + str(eval_method) + \"_.csv\"), \"w+\")\n\ncsv_out_writer.write(\"user_id,rmse,algorithm,\"\n \"ref_p1,ref_p2,ref_p3,pred_p1,pred_p2,pred_p3\\n\")\n\n# Test on the final test set. Note there is no varying K just to save the NN here.\n# It should be noted we use NearesetNeighbors and not KNearestNeighbors classifier.\nknn_ema = NearestNeighbors(n_neighbors=5, metric=\"precomputed\", radius=threshold_distance)\nknn_ema.fit(train_data[train_data.index])\nema_dist, ema_idx = knn_ema.kneighbors(test_data[train_data.index], n_neighbors=5)\n# First get the time series for a given test patient and the reference point and iterate to evaluate\ndo_test(test_data, out_writer, csv_out_writer, ndays, ema_idx, encoded_data,\n fold_count=\"final\", method=eval_method, dist_nn=None, wt_dist_flag=False)\n\n# Close the writers\nout_writer.close()\ncsv_out_writer.close()\n\n# Save the similarity search index KNN\nutility.save_model(\"\".join(\"dynamic_ts\" + \"/\" + \"dynamic_ts\" + \"_knn\"), knn_ema)\n" ]
[ [ "numpy.sum", "pandas.read_pickle", "numpy.multiply", "sklearn.neighbors.NearestNeighbors", "numpy.divide", "numpy.zeros", "numpy.subtract", "pandas.DataFrame", "sklearn.linear_model.LinearRegression", "numpy.asarray", "numpy.array", "sklearn.model_selection.train_test_split" ] ]
felmoltor/kismet-heatmap
[ "d145a865f80db16ad8c6d0bb1dd35e0238706f3b" ]
[ "gpsxml2png.py" ]
[ "#!/usr/bin/env python2\n\n\"\"\"\nCopyright (c) 2016, Bliksem Labs B.V.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, \nare permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this \n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, \n this list of conditions and the following disclaimer in the documentation \n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND \nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED \nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; \nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON \nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS \nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport sys\nimport numpy\nfrom scipy.interpolate import griddata\nimport matplotlib.pyplot as plt\n\n\ntry:\n from lxml import etree\nexcept ImportError:\n try:\n # Python 2.5\n import xml.etree.cElementTree as etree\n except ImportError:\n try:\n # Python 2.5\n import xml.etree.ElementTree as etree\n except ImportError:\n try:\n # normal cElementTree install\n import cElementTree as etree\n except ImportError:\n try:\n # normal ElementTree install\n import elementtree.ElementTree as etree\n except ImportError:\n print(\"Failed to import ElementTree from any known place\")\n\n\n# Process the Kismet GPSXML into columns.\n\ndef parse_xml(filename):\n\ttree = etree.parse(open(filename, 'rb'))\n\n\tts = []\n\tbssid = []\n\tsignal = []\n\tlat = []\n\tlon = []\n\twalked_lon = []\n\twalked_lat = []\n\n\tfor z in tree.findall('.//gps-point'):\n\t\t# A lon/lat filter might be applied here\n\t\t# if float(z.get('lon')) < 3.942:\n\t\t#\tcontinue\n\n\t\tif z.get('bssid') == 'GP:SD:TR:AC:KL:OG':\n\t\t\twalked_lon.append(float(z.get('lon')))\n\t\t\twalked_lat.append(float(z.get('lat')))\n\n\t\telif z.get('signal_dbm') is not None:\n\t\t\tbssid.append(z.get('bssid'))\n\t\t\tts.append(int(z.get('time-sec')))\n\t\t\tlat.append(float(z.get('lat')))\n\t\t\tlon.append(float(z.get('lon')))\n\t\t\tsignal.append(int(z.get('signal_dbm')))\n\n\treturn (ts, bssid, signal, lat, lon, walked_lon, walked_lat,)\n\n\n# Draw parsed data on a surface\n\ndef draw_data(ts, bssid, signal, lat, lon, walked_lon, walked_lat):\n\n\t# We create a grid of 1000x1000\n\tgrid_x, grid_y = numpy.mgrid[min(walked_lon):max(walked_lon):1000j, min(walked_lat):max(walked_lat):1000j]\n\n\t# We want to draw all unique APs\n\tbssids = list(set(bssid))\n\n\t# For each BSSID...\n\tfor s in bssids:\n\t\tpoints_lon = []\n\t\tpoints_lat = []\n\t\tvalues = []\n\t\th = []\n\t\t\n\t\t# Apply all points on an intermediate surface\n\t\t# so we can distinct points where we were, without reception\n\t\tfor i in range(0, len(bssid)):\n\t\t\tif bssid[i] == s:\n\t\t\t\thc = hash((lon[i], lat[i]))\n\t\t\t\tif hc not in 
h:\n\t\t\t\t\tpoints_lon.append(lon[i])\n\t\t\t\t\tpoints_lat.append(lat[i])\n\t\t\t\t\tvalues.append(float(signal[i]))\n\t\t\t\t\th.append(hash((lon[i], lat[i])))\n\n\t\t# Optional: apply -100dBm where we don't have gathered data\n\t\tfor i in range(0, len(walked_lon)):\n\t\t\thc = hash((walked_lon[i], walked_lat[i]))\n\t\t\tif hc not in h:\n\t\t\t\tpoints_lon.append(lon[i])\n\t\t\t\tpoints_lat.append(lat[i])\n\t\t\t\tvalues.append(float(-100))\n\t\t\t\th.append(hash((walked_lon[i], walked_lat[i])))\n\n\t\t# Interpolate the data\n\t\tgrid = griddata((points_lon, points_lat), numpy.array(values), (grid_x, grid_y), method='cubic')\n\n\t\t# Store the bitmap in the current folder.\n\t\tplt.show()\n\t\tplt.imsave('%s.png' % (s), grid.T)\n\n\t\t# Calculate the World File for use in Qgis\n\t\ta = ((max(walked_lon)-min(walked_lon))/1000)\n\t\tb = 0\n\t\tc = 0\n\t\td = ((max(walked_lat)-min(walked_lat))/1000)\n\t\te = min(walked_lon)\n\t\tf = min(walked_lat)\n\n\t\t# Write the World File\n\t\topen('%s.pngw' % (s), 'w').write('%.16f\\n%d\\n%d\\n%.16f\\n%.16f\\n%.16f' % (a, b, c, d, e, f,))\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) != 2:\n\t\tprint(\"Usage %s << /path/to/Kismet.gpsxml >>\" % (sys.argv[0]))\n\t\tsys.exit(-1)\n\t\n\tdraw_data(*parse_xml(sys.argv[1]))\n\n\n\t\n" ]
[ [ "numpy.array", "matplotlib.pyplot.imsave", "matplotlib.pyplot.show" ] ]
keshabb/GamestonkTerminal
[ "419c3691db220c467d2979b19ca308b3b800c0bd" ]
[ "gamestonk_terminal/options/op_helpers.py" ]
[ "\"\"\"Option helper functions\"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nfrom typing import List\n\nimport pandas as pd\nimport numpy as np\n\nfrom gamestonk_terminal.helper_funcs import (\n parse_known_args_and_warn,\n check_non_negative,\n)\n\n# pylint: disable=R1710\n\n\ndef load(other_args: List[str]) -> str:\n \"\"\"Load ticker into object\n\n Parameters\n ----------\n other_args: List[str]\n Agrparse arguments\n\n Returns\n -------\n str:\n Ticker\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"opload\",\n description=\"Load a ticker into option menu\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--ticker\",\n action=\"store\",\n dest=\"ticker\",\n required=\"-h\" not in other_args,\n help=\"Stock ticker\",\n )\n\n try:\n if other_args:\n if \"-t\" not in other_args and \"-h\" not in other_args:\n other_args.insert(0, \"-t\")\n\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return \"\"\n print(\"\")\n return ns_parser.ticker\n except Exception as e:\n print(e, \"\\n\")\n return \"\"\n except SystemExit:\n print(\"\")\n return \"\"\n\n\n# pylint: disable=no-else-return\n\n\ndef select_option_date(avalaiable_dates: List[str], other_args: List[str]) -> str:\n \"\"\"Select an option date out of a supplied list\n\n Parameters\n ----------\n avalaiable_dates: List[str]\n Possible date options\n other_args: List[str]\n Arparse arguments\n Returns\n -------\n expiry_date: str\n Selected expiry date\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"exp\",\n description=\"See and set expiration date\",\n )\n parser.add_argument(\n \"-d\",\n \"--date\",\n dest=\"n_date\",\n action=\"store\",\n type=int,\n default=-1,\n choices=range(len(avalaiable_dates)),\n help=\"Select index for expiry date.\",\n )\n\n parser.add_argument(\n \"-D\",\n dest=\"date\",\n type=str,\n choices=avalaiable_dates + [\"\"],\n help=\"Select date (YYYY-MM-DD)\",\n default=\"\",\n )\n\n try:\n if other_args:\n if \"-\" not in other_args[0]:\n other_args.insert(0, \"-d\")\n\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return \"\"\n\n # Print possible expiry dates\n if ns_parser.n_date == -1 and not ns_parser.date:\n print(\"\\nAvailable expiry dates:\")\n for i, d in enumerate(avalaiable_dates):\n print(f\" {(2 - len(str(i))) * ' '}{i}. 
{d}\")\n print(\"\")\n return \"\"\n\n # It means an expiry date was correctly selected\n else:\n if ns_parser.date:\n if ns_parser.date in avalaiable_dates:\n print(f\"Expiraration set to {ns_parser.date} \\n\")\n return ns_parser.date\n else:\n print(\"Expiration not an option\")\n return \"\"\n else:\n expiry_date = avalaiable_dates[ns_parser.n_date]\n print(f\"Expiraration set to {expiry_date} \\n\")\n return expiry_date\n\n except Exception as e:\n print(e, \"\\n\")\n return \"\"\n\n\ndef get_loss_at_strike(strike: float, chain: pd.DataFrame) -> float:\n \"\"\"Function to get the loss at the given expiry\n\n Parameters\n ----------\n strike: Union[int,float]\n Value to calculate total loss at\n chain: Dataframe:\n Dataframe containing at least strike and openInterest\n\n Returns\n -------\n loss: Union[float,int]\n Total loss\n \"\"\"\n\n itm_calls = chain[chain.index < strike][[\"OI_call\"]]\n itm_calls[\"loss\"] = (strike - itm_calls.index) * itm_calls[\"OI_call\"]\n call_loss = itm_calls[\"loss\"].sum()\n\n itm_puts = chain[chain.index > strike][[\"OI_put\"]]\n itm_puts[\"loss\"] = (itm_puts.index - strike) * itm_puts[\"OI_put\"]\n put_loss = itm_puts.loss.sum()\n loss = call_loss + put_loss\n\n return loss\n\n\ndef calculate_max_pain(chain: pd.DataFrame) -> int:\n \"\"\"Returns the max pain for a given call/put dataframe\n\n Parameters\n ----------\n chain: DataFrame\n Dataframe to calculate value from\n\n Returns\n -------\n max_pain : int\n Max pain value\n \"\"\"\n\n strikes = np.array(chain.index)\n if (\"OI_call\" not in chain.columns) or (\"OI_put\" not in chain.columns):\n print(\"Incorrect columns. Unable to parse max pain\")\n return np.nan\n\n loss = []\n for price_at_exp in strikes:\n loss.append(get_loss_at_strike(price_at_exp, chain))\n\n chain[\"loss\"] = loss\n max_pain = chain[\"loss\"].idxmin()\n\n return max_pain\n\n\ndef vol(other_args: List[str]):\n \"\"\"Parse volume argparse\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n\n Returns\n -------\n ns_parser: argparse.Namespace\n Parsed namespace\n \"\"\"\n\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"vol\",\n description=\"Plot volume. 
Volume refers to the number of contracts traded today.\",\n )\n\n parser.add_argument(\n \"-m\",\n \"--min\",\n default=-1,\n type=check_non_negative,\n help=\"Min strike to plot\",\n dest=\"min\",\n )\n parser.add_argument(\n \"-M\",\n \"--max\",\n default=-1,\n type=check_non_negative,\n help=\"Max strike to plot\",\n dest=\"max\",\n )\n\n parser.add_argument(\n \"--calls\",\n action=\"store_true\",\n default=False,\n dest=\"calls\",\n help=\"Flag to plot call options only\",\n )\n\n parser.add_argument(\n \"--puts\",\n action=\"store_true\",\n default=False,\n dest=\"puts\",\n help=\"Flag to plot put options only\",\n )\n\n parser.add_argument(\n \"--source\",\n type=str,\n default=\"tr\",\n choices=[\"tr\", \"yf\"],\n dest=\"source\",\n help=\"Source to get data from\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n return ns_parser\n\n except Exception as e:\n print(e, \"\\n\")\n\n\ndef voi(other_args: List[str]):\n \"\"\"Parse Volume + open interest argparse\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n\n Returns\n -------\n ns_parser: argparse.Namespace\n Parsed namespace\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"voi\",\n description=\"\"\"\n Plots Volume + Open Interest of calls vs puts.\n \"\"\",\n )\n parser.add_argument(\n \"-v\",\n \"--minv\",\n dest=\"min_vol\",\n type=check_non_negative,\n default=-1,\n help=\"minimum volume (considering open interest) threshold of the plot.\",\n )\n parser.add_argument(\n \"-m\",\n \"--min\",\n dest=\"min_sp\",\n type=check_non_negative,\n default=-1,\n help=\"minimum strike price to consider in the plot.\",\n )\n parser.add_argument(\n \"-M\",\n \"--max\",\n dest=\"max_sp\",\n type=check_non_negative,\n default=-1,\n help=\"maximum strike price to consider in the plot.\",\n )\n parser.add_argument(\n \"--source\",\n type=str,\n default=\"tr\",\n choices=[\"tr\", \"yf\"],\n dest=\"source\",\n help=\"Source to get data from\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return None\n return ns_parser\n\n except Exception as e:\n print(e, \"\\n\")\n return None\n\n\ndef oi(other_args: List[str]):\n \"\"\"Parse Open Interest argparse\n\n Parameters\n ----------\n other_args: List[str]\n Argparse arguments\n\n Returns\n -------\n ns_parser: argparse.Namespace\n Parsed namespace\n \"\"\"\n\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"oi\",\n description=\"Plot open interest. 
Open interest represents the number of contracts that exist.\",\n )\n\n parser.add_argument(\n \"-m\",\n \"--min\",\n default=-1,\n type=check_non_negative,\n help=\"Min strike to plot\",\n dest=\"min\",\n )\n parser.add_argument(\n \"-M\",\n \"--max\",\n default=-1,\n type=check_non_negative,\n help=\"Max strike to plot\",\n dest=\"max\",\n )\n\n parser.add_argument(\n \"--calls\",\n action=\"store_true\",\n default=False,\n dest=\"calls\",\n help=\"Flag to plot call options only\",\n )\n\n parser.add_argument(\n \"--puts\",\n action=\"store_true\",\n default=False,\n dest=\"puts\",\n help=\"Flag to plot put options only\",\n )\n parser.add_argument(\n \"--source\",\n type=str,\n default=\"tr\",\n choices=[\"tr\", \"yf\"],\n dest=\"source\",\n help=\"Source to get data from\",\n )\n\n try:\n\n ns_parser = parse_known_args_and_warn(parser, other_args)\n\n if not ns_parser:\n return None\n\n return ns_parser\n\n except Exception as e:\n print(e, \"\\n\")\n return None\n" ]
[ [ "numpy.array" ] ]
skylian/flare
[ "c920abcae975cc49c052f5f2abb6bbee5c39a11e" ]
[ "flare/framework/agent.py" ]
[ "from abc import ABCMeta, abstractmethod\nfrom multiprocessing import Process, Value\nimport numpy as np\nfrom flare.common.log import GameLogEntry\nfrom flare.common.communicator import AgentCommunicator\nfrom flare.common.replay_buffer import NoReplacementQueue, ReplayBuffer, Experience\n\n\nclass AgentHelper(object):\n \"\"\"\n AgentHelper abstracts some part of Agent's data processing and the I/O\n communication between Agent and ComputationDataProcessor (CDP). It receives a\n Communicator from one CDP and uses it to send data to the CDP.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, name, communicator, sample_interval):\n assert isinstance(communicator, AgentCommunicator)\n self.name = name\n self.comm = communicator\n self.counter = 0\n assert sample_interval >= 2\n self.sample_interval = sample_interval\n\n def unpack_exps(self, exp_seqs):\n \"\"\"\n The input `exp_seqs` is always a list of sequences, each sequence\n containing multiple Experience instances.\n \"\"\"\n\n def concat_lists(lists):\n return [x for l in lists for x in l]\n\n def extract_key(seq, k):\n assert seq\n return [e.val(k) for e in seq]\n\n ret = dict(\n inputs={},\n next_inputs={},\n next_alive={},\n rewards={},\n actions={},\n next_actions={},\n states=None,\n next_states=None)\n\n for k in self.input_keys:\n ipt_seqs = [extract_key(exp_seq, k) for exp_seq in exp_seqs]\n ret[\"inputs\"][k] = [ipt_seq[:-1] for ipt_seq in ipt_seqs]\n ret[\"next_inputs\"][k] = [ipt_seq[1:] for ipt_seq in ipt_seqs]\n\n for k in self.action_keys:\n act_seqs = [extract_key(exp_seq, k) for exp_seq in exp_seqs]\n ret[\"actions\"][k] = [act_seq[:-1] for act_seq in act_seqs]\n ret[\"next_actions\"][k] = [act_seq[1:] for act_seq in act_seqs]\n\n for k in self.reward_keys:\n ret[\"rewards\"][\n k] = [extract_key(exp_seq[:-1], k) for exp_seq in exp_seqs]\n\n if self.state_keys:\n ret[\"states\"] = dict()\n ret[\"next_states\"] = dict()\n\n for k in self.state_keys:\n ## we only take the first(second) element of a seq for states(next_states)\n ret[\"states\"][\n k] = [extract_key(exp_seq[:1], k)[0] for exp_seq in exp_seqs]\n ret[\"next_states\"][k] = [\n extract_key(exp_seq[1:2], k)[0] for exp_seq in exp_seqs\n ]\n\n ret[\"next_alive\"][\"alive\"] \\\n = [extract_key(exp_seq[1:], \"alive\") for exp_seq in exp_seqs]\n\n ## HERE we decide whether the data are instances or seqs\n ## according to the existence of states\n if not self.state_keys:\n # sample instances\n for k in ret.keys():\n if ret[k] is not None:\n for kk in ret[k].keys():\n ret[k][kk] = concat_lists(ret[k][kk])\n\n return ret, len(exp_seqs)\n\n def predict(self, inputs, states=dict()):\n \"\"\"\n Process the input data (if necessary), send them to CDP for prediction,\n and receive the outcome.\n\n Args:\n inputs(dict): data used for prediction. 
It is caller's job\n to make sure inputs contains all data needed and they are in the\n right form.\n \"\"\"\n data = dict(inputs=inputs, states=states)\n self.comm.put_prediction_data(data, 1)\n ret = self.comm.get_prediction_return()\n return ret\n\n @abstractmethod\n def add_experience(self, e):\n \"\"\"\n Implements how to record an experience.\n Will be called by self.store_data()\n \"\"\"\n pass\n\n def _store_data(self, alive, data):\n \"\"\"\n Store the past experience for later use, e.g., experience replay.\n\n Args:\n data(dict): data to store.\n \"\"\"\n assert isinstance(data, dict)\n data[\"alive\"] = [alive]\n t = Experience(data)\n self.add_experience(t)\n self.counter += 1\n if self.counter % self.sample_interval == 0:\n return self.learn()\n\n @abstractmethod\n def sample_experiences(self):\n \"\"\"\n Implements how to retrieve experiences from past.\n Will be called by self.learn()\n \"\"\"\n pass\n\n def learn(self):\n \"\"\"\n Sample data from past experiences and send them to CDP for learning.\n Optionally, it receives learning outcomes sent back from CW and does\n some processing.\n\n Depends on users' need, this function can be called in three ways:\n 1. In Agent's run_one_episode\n 2. In store_data(), e.g., learning once every few steps\n 3. As a separate thread, e.g., using experience replay\n \"\"\"\n exp_seqs = self.sample_experiences()\n if not exp_seqs:\n return\n data, size = self.unpack_exps(exp_seqs)\n self.comm.put_training_data(data, size)\n ret = self.comm.get_training_return()\n return ret\n\n\nclass OnlineHelper(AgentHelper):\n \"\"\"\n Online helper. It calls `learn()` every `sample_interval`\n steps.\n\n While waiting for learning return, the calling `Agent` is blocked.\n \"\"\"\n\n def __init__(self, name, communicator, sample_interval=5):\n super(OnlineHelper, self).__init__(name, communicator, sample_interval)\n # NoReplacementQueue used to store past experience.\n self.exp_queue = NoReplacementQueue()\n\n @staticmethod\n def exp_replay():\n return False\n\n def add_experience(self, e):\n self.exp_queue.add(e)\n\n def sample_experiences(self):\n return self.exp_queue.sample()\n\n\nclass ExpReplayHelper(AgentHelper):\n \"\"\"\n Example of applying experience replay. It starts a separate threads to\n run learn().\n \"\"\"\n\n def __init__(self,\n name,\n communicator,\n buffer_capacity,\n num_experiences,\n sample_interval=5,\n num_seqs=1):\n super(ExpReplayHelper, self).__init__(name, communicator,\n sample_interval)\n # replay buffer for experience replay\n self.replay_buffer = ReplayBuffer(buffer_capacity)\n self.num_experiences = num_experiences\n self.num_seqs = num_seqs\n\n @staticmethod\n def exp_replay():\n return True\n\n def add_experience(self, e):\n self.replay_buffer.add(e)\n\n def sample_experiences(self):\n return self.replay_buffer.sample(self.num_experiences, self.num_seqs)\n\n\nclass Agent(Process):\n \"\"\"\n Agent implements the control flow and logics of how Robot interacts with\n the environment and does computation. It is a subclass of Process. 
The entry\n function of the Agent process is run().\n\n Some members:\n env: the environment\n num_games: number of games to run\n learning: Whether learn or not (only do testing)\n helpers: a dictionary of `AgentHelper`, each corresponds to one\n `ComputationTask`\n log_q: communication channel between `Agent` and the centralized logger\n running: the `Agent` will keep running as long as `running` is True.\n \"\"\"\n __metaclass__ = ABCMeta\n\n def __init__(self, num_games, actrep, learning):\n super(Agent, self).__init__()\n self.id = -1 # just created, not added to the Robot yet\n self.num_games = num_games\n self.learning = learning\n self.state_specs = None\n self.helpers = {}\n self.log_q = None\n self.running = Value('i', 0)\n self.daemon = True ## Process member\n self.alive = 1\n self.env_f = None\n self.actrep = actrep\n\n def set_env(self, env_class, *args, **kwargs):\n \"\"\"\n Set the environment for the agent. For now, only create a lambda\n function. Once the agent process starts running, we will call this\n function.\n\n env_class: The environment class to create\n args, kwargs: The arguments for creating the class\n \"\"\"\n self.env_f = lambda: env_class(*args, **kwargs)\n\n def add_agent_helper(self, helper, input_keys, action_keys, state_keys,\n reward_keys):\n \"\"\"\n Add an AgentHelper, with its name (also the name of its\n correspoding `ComputationTask`) as key.\n \"\"\"\n assert isinstance(helper, AgentHelper)\n helper.input_keys = input_keys\n helper.action_keys = action_keys\n helper.state_keys = state_keys\n helper.reward_keys = reward_keys\n self.helpers[helper.name] = helper\n\n def _make_zero_states(self, prop):\n dtype = prop[\"dtype\"] if \"dtype\" in prop else \"float32\"\n return np.zeros(prop[\"shape\"]).astype(dtype)\n\n ## The following three functions hide the `AgentHelper` from the users of\n ## `Agent`.\n def predict(self, alg_name, inputs, states=dict()):\n ## Convert single instances to batches of size 1\n ## The reason for this conversion is that we want to reuse the\n ## _pack_data() and _unpack_data() of the CDP for handling both training\n ## and prediction data. 
These two functions assume that data are stored\n ## as mini batches instead of single instances in the prediction and learning\n ## queues.\n inputs_ = {k: [v] for k, v in inputs.items()}\n states_ = {k: [v] for k, v in states.items()}\n prediction, next_states = self.helpers[alg_name].predict(inputs_,\n states_)\n ## convert back to single instances\n prediction = {k: v[0] for k, v in prediction.items()}\n next_states = {k: v[0] for k, v in next_states.items()}\n return prediction, next_states\n\n def run(self):\n \"\"\"\n Default entry function of Agent process.\n \"\"\"\n assert self.env_f is not None, \"You should first call self.set_env()!\"\n ## Only call the env function now to make sure there is only one\n ## environment (OpenGL context) in each process\n self.env = self.env_f()\n self.running.value = 1\n for i in range(self.num_games):\n self._run_one_episode()\n if not self.running.value:\n return\n self.running.value = 0\n\n def _store_data(self, alg_name, data):\n if self.learning: ## only store when the agent is learning\n return self.helpers[alg_name]._store_data(self.alive, data)\n\n def _run_one_episode(self):\n def __store_data(observations, actions, states, rewards):\n learning_ret = self._cts_store_data(observations, actions, states,\n rewards) ## written by user\n if learning_ret is not None:\n for k, v in learning_ret.items():\n self.log_entry.add_key(k, v)\n\n observations = self._reset_env()\n states = self._get_init_states() ## written by user\n\n while self.alive and (not self.env.time_out()):\n actions, next_states = self._cts_predict(\n observations, states) ## written by user\n assert isinstance(actions, dict)\n assert isinstance(next_states, dict)\n next_observations, rewards, next_game_over = self._step_env(\n actions)\n __store_data(observations, actions, states, rewards)\n\n observations = next_observations\n states = next_states\n ## next_game_over == 1: success\n ## next_game_over == -1: failure\n self.alive = 1 - abs(next_game_over)\n\n ## self.alive: 0 -- success/failure\n ## 1 -- normal\n ## -1 -- timeout\n if self.env.time_out():\n self.alive = -1\n actions, _ = self._cts_predict(observations, states)\n zero_rewards = {k: [0] * len(v) for k, v in rewards.items()}\n __store_data(observations, actions, states, zero_rewards)\n\n ## Record success. For games that do not have a defintion of\n ## 'success' (e.g., 'breakout' never ends), this quantity will\n ## always be zero\n self.log_entry.add_key(\"success\", next_game_over > 0)\n return self._total_reward()\n\n def _reset_env(self):\n self.alive = 1\n ## currently we only support a single logger for all CTs\n self.log_entry = GameLogEntry(self.id, 'All')\n obs = self.env.reset()\n assert isinstance(obs, dict)\n return obs\n\n def _step_env(self, actions):\n next_observations, rewards, next_game_over = self.env.step(actions,\n self.actrep)\n assert isinstance(next_observations, dict)\n assert isinstance(rewards, dict)\n self.log_entry.add_key(\"num_steps\", 1)\n self.log_entry.add_key(\"total_reward\", sum(map(sum, rewards.values())))\n return next_observations, rewards, next_game_over\n\n def _total_reward(self):\n self.log_q.put(self.log_entry)\n return self.log_entry.total_reward\n\n def _get_init_states(self):\n \"\"\"\n By default, there is no state. 
The user needs to override this function\n to return a dictionary of init states if necessary.\n \"\"\"\n return dict()\n\n @abstractmethod\n def _cts_predict(self, observations, states):\n \"\"\"\n The user needs to override this function to specify how different CTs\n make predictions given observations and states.\n\n Output: actions: a dictionary of actions, each action being a vector\n If the action is discrete, then it is a length-one\n list of an integer.\n states (optional): a dictionary of states, each state being a floating vector\n \"\"\"\n pass\n\n @abstractmethod\n def _cts_store_data(self, observations, actions, states, rewards):\n \"\"\"\n The user needs to override this function to specify how different CTs\n store their corresponding experiences, by calling self._store_data().\n Each input should be a dictionary.\n \"\"\"\n pass\n" ]
[ [ "numpy.zeros" ] ]
c-w-m/pyldpc
[ "c7eb471359086b7336d7b40f11cc912f0daf0476" ]
[ "pyldpc/decoder.py" ]
[ "\"\"\"Decoding module.\"\"\"\nimport numpy as np\nimport warnings\nfrom . import utils\n\nfrom numba import njit, int64, types, float64\n\n\ndef decode(H, y, snr, maxiter=1000):\n \"\"\"Decode a Gaussian noise corrupted n bits message using BP algorithm.\n\n Decoding is performed in parallel if multiple codewords are passed in y.\n\n Parameters\n ----------\n H: array (n_equations, n_code). Decoding matrix H.\n y: array (n_code, n_messages) or (n_code,). Received message(s) in the\n codeword space.\n maxiter: int. Maximum number of iterations of the BP algorithm.\n\n Returns\n -------\n x: array (n_code,) or (n_code, n_messages) the solutions in the\n codeword space.\n\n \"\"\"\n m, n = H.shape\n\n bits_hist, bits_values, nodes_hist, nodes_values = utils._bitsandnodes(H)\n\n _n_bits = np.unique(H.sum(0))\n _n_nodes = np.unique(H.sum(1))\n\n if _n_bits * _n_nodes == 1:\n solver = _logbp_numba_regular\n bits_values = bits_values.reshape(n, -1)\n nodes_values = nodes_values.reshape(m, -1)\n\n else:\n solver = _logbp_numba\n\n var = 10 ** (-snr / 10)\n\n if y.ndim == 1:\n y = y[:, None]\n # step 0: initialization\n\n Lc = 2 * y / var\n _, n_messages = y.shape\n\n Lq = np.zeros(shape=(m, n, n_messages))\n\n Lr = np.zeros(shape=(m, n, n_messages))\n for n_iter in range(maxiter):\n Lq, Lr, L_posteriori = solver(bits_hist, bits_values, nodes_hist,\n nodes_values, Lc, Lq, Lr, n_iter)\n x = np.array(L_posteriori <= 0).astype(int)\n product = utils.incode(H, x)\n if product:\n break\n if n_iter == maxiter - 1:\n warnings.warn(\"\"\"Decoding stopped before convergence. You may want\n to increase maxiter\"\"\")\n return x.squeeze()\n\n\noutput_type_log2 = types.Tuple((float64[:, :, :], float64[:, :, :],\n float64[:, :]))\n\n\n@njit(output_type_log2(int64[:], int64[:], int64[:], int64[:], float64[:, :],\n float64[:, :, :], float64[:, :, :], int64), cache=True)\ndef _logbp_numba(bits_hist, bits_values, nodes_hist, nodes_values, Lc, Lq, Lr,\n n_iter):\n \"\"\"Perform inner ext LogBP solver.\"\"\"\n m, n, n_messages = Lr.shape\n # step 1 : Horizontal\n\n bits_counter = 0\n nodes_counter = 0\n for i in range(m):\n # ni = bits[i]\n ff = bits_hist[i]\n ni = bits_values[bits_counter: bits_counter + ff]\n bits_counter += ff\n for j in ni:\n nij = ni[:]\n\n X = np.ones(n_messages)\n if n_iter == 0:\n for kk in range(len(nij)):\n if nij[kk] != j:\n X *= np.tanh(0.5 * Lc[nij[kk]])\n else:\n for kk in range(len(nij)):\n if nij[kk] != j:\n X *= np.tanh(0.5 * Lq[i, nij[kk]])\n num = 1 + X\n denom = 1 - X\n for ll in range(n_messages):\n if num[ll] == 0:\n Lr[i, j, ll] = -1\n elif denom[ll] == 0:\n Lr[i, j, ll] = 1\n else:\n Lr[i, j, ll] = np.log(num[ll] / denom[ll])\n\n # step 2 : Vertical\n for j in range(n):\n # mj = nodes[j]\n ff = nodes_hist[j]\n mj = nodes_values[nodes_counter: nodes_counter + ff]\n nodes_counter += ff\n for i in mj:\n mji = mj[:]\n Lq[i, j] = Lc[j]\n\n for kk in range(len(mji)):\n if mji[kk] != i:\n Lq[i, j] += Lr[mji[kk], j]\n\n # LLR a posteriori:\n L_posteriori = np.zeros((n, n_messages))\n nodes_counter = 0\n for j in range(n):\n ff = nodes_hist[j]\n mj = nodes_values[nodes_counter: nodes_counter + ff]\n nodes_counter += ff\n L_posteriori[j] = Lc[j] + Lr[mj, j].sum(axis=0)\n\n return Lq, Lr, L_posteriori\n\n\n@njit(output_type_log2(int64[:], int64[:, :], int64[:], int64[:, :],\n float64[:, :], float64[:, :, :], float64[:, :, :],\n int64), cache=True)\ndef _logbp_numba_regular(bits_hist, bits_values, nodes_hist, nodes_values, Lc,\n Lq, Lr, n_iter):\n \"\"\"Perform inner ext LogBP 
solver.\"\"\"\n m, n, n_messages = Lr.shape\n # step 1 : Horizontal\n for i in range(m):\n ni = bits_values[i]\n for j in ni:\n nij = ni[:]\n\n X = np.ones(n_messages)\n if n_iter == 0:\n for kk in range(len(nij)):\n if nij[kk] != j:\n X *= np.tanh(0.5 * Lc[nij[kk]])\n else:\n for kk in range(len(nij)):\n if nij[kk] != j:\n X *= np.tanh(0.5 * Lq[i, nij[kk]])\n num = 1 + X\n denom = 1 - X\n for ll in range(n_messages):\n if num[ll] == 0:\n Lr[i, j, ll] = -1\n elif denom[ll] == 0:\n Lr[i, j, ll] = 1\n else:\n Lr[i, j, ll] = np.log(num[ll] / denom[ll])\n\n # step 2 : Vertical\n for j in range(n):\n mj = nodes_values[j]\n for i in mj:\n mji = mj[:]\n Lq[i, j] = Lc[j]\n\n for kk in range(len(mji)):\n if mji[kk] != i:\n Lq[i, j] += Lr[mji[kk], j]\n\n # LLR a posteriori:\n L_posteriori = np.zeros((n, n_messages))\n for j in range(n):\n mj = nodes_values[j]\n L_posteriori[j] = Lc[j] + Lr[mj, j].sum(axis=0)\n\n return Lq, Lr, L_posteriori\n\n\ndef get_message(tG, x):\n \"\"\"Compute the original `n_bits` message from a `n_code` codeword `x`.\n\n Parameters\n ----------\n tG: array (n_code, n_bits) coding matrix tG.\n x: array (n_code,) decoded codeword of length `n_code`.\n\n Returns\n -------\n message: array (n_bits,). Original binary message.\n\n \"\"\"\n n, k = tG.shape\n\n rtG, rx = utils.gausselimination(tG, x)\n\n message = np.zeros(k).astype(int)\n\n message[k - 1] = rx[k - 1]\n for i in reversed(range(k - 1)):\n message[i] = rx[i]\n message[i] -= utils.binaryproduct(rtG[i, list(range(i+1, k))],\n message[list(range(i+1, k))])\n\n return abs(message)\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.log", "numpy.array", "numpy.tanh" ] ]
code-lab-org/sys611
[ "3b8c46788dee629a9f2d6b7f84373e041b918ff0" ]
[ "previous/week12/object-oriented/FactorySystemOO.py" ]
[ "\"\"\"\nSYS-611: Example factory model in SimPy (object-oriented).\n\n@author: Paul T. Grogan, pgrogan@stevens.edu\n\"\"\"\n\n# import the python3 behavior for importing, division, and printing in python2\nfrom __future__ import absolute_import, division, print_function\n\n# import the simpy package \n# see https://simpy.readthedocs.io/en/latest/api_reference for documentation\nimport simpy\n\n# import the numpy package and refer to it as `np`\n# see http://docs.scipy.org/doc/numpy/reference/ for documentation\nimport numpy as np\n\n# import the matplotlib pyplot package and refer to it as `plt`\n# see http://matplotlib.org/api/pyplot_api.html for documentation\nimport matplotlib.pyplot as plt\n\n#%% SECTION TO CONFIGURE SIMULATION\n\n# number of simulation runs to perform\nNUM_RUNS = 1\n# simulation duration (hours)\nSIM_DURATION = 5*8*52\n# number of spares to purchase (S)\nNUM_SPARES = 20\n# number of repairers to hire (R)\nNUM_REPAIRERS = 5\n\n#%% SECTION TO DEFINE SIMULATION\n\nclass Factory(object):\n \"\"\" Defines a factory simulation. \"\"\"\n def __init__(self, env, num_repairers, num_spares):\n \"\"\" Initializes this factory.\n \n Args:\n env (simpy.Environment): the simulation environment\n num_repairers (int): the number of repairers to hire\n num_spares (int): the number of spares to purchase\n \"\"\"\n self.repairers = simpy.Resource(env, capacity=num_repairers) \n self.spares = simpy.Container(env, init=num_spares, capacity=num_spares)\n self.env = env\n self.cost = 0\n self.daily_cost = 3.75*8*num_repairers + 30*num_spares\n \n def run(self):\n \"\"\" Process to run this simulation. \"\"\"\n # launch the 50 machine processes\n for i in range(50):\n self.env.process(factory.operate_machine(i+1))\n # update the daily costs each day\n while True:\n self.cost += self.daily_cost\n yield self.env.timeout(8.0)\n \n def operate_machine(self, machine):\n \"\"\" Process to operate a machine.\n \n Args:\n machine (int): the machine number\n \"\"\"\n while True:\n # wait until the machine breaks\n yield self.env.timeout(np.random.uniform(132,182))\n time_broken = self.env.now\n if NUM_RUNS <= 1:\n print('machine {} broke at {:.2f} ({} spares available)'.format(\n machine, time_broken, self.spares.level))\n # launch the repair process\n self.env.process(self.repair_machine())\n # wait for a spare to become available\n yield self.spares.get(1)\n time_replaced = self.env.now\n if NUM_RUNS <= 1:\n print('machine {} replaced at {:.2f}'.format(machine, time_replaced))\n # update the cost for being out of service\n self.cost += 20*(time_replaced-time_broken)\n \n def repair_machine(self):\n \"\"\" Process to repair a machine. 
\"\"\"\n with self.repairers.request() as request:\n # wait for a repairer to become available\n yield request\n # perform the repair\n yield self.env.timeout(np.random.uniform(4,10))\n # put the machine back in the spares pool\n yield self.spares.put(1)\n if NUM_RUNS <= 1:\n print('repair complete at {:.2f} ({} spares available)'.format(\n self.env.now, self.spares.level))\n\n# arrays to record data\nobs_time = []\nobs_cost = []\nobs_spares = []\n\ndef observe(env, factory):\n \"\"\" Process to observe the factory during a simulation.\n \n Args:\n env (simpy.Environment): the simulation environment\n factory (Factory): the factory\n \"\"\"\n while True:\n obs_time.append(env.now)\n obs_cost.append(factory.cost)\n obs_spares.append(factory.spares.level)\n yield env.timeout(1.0)\n\n#%% SECTION TO RUN ANALYSIS\n\n# array to store outputs\nCOST = []\n\nfor i in range(NUM_RUNS):\n # set the random number seed\n np.random.seed(i)\n \n # create the simpy environment\n env = simpy.Environment()\n # create the factory\n factory = Factory(env, NUM_REPAIRERS, NUM_SPARES)\n # add the factory run process\n env.process(factory.run())\n # add the observation process\n env.process(observe(env, factory))\n # run simulation\n env.run(until=SIM_DURATION)\n # record the final observed cost\n COST.append(obs_cost[-1])\n \n if NUM_RUNS <= 1:\n # output the total cost\n print('Total cost: {:.2f}'.format(factory.cost))\n \n # plot the number of spares available\n plt.figure()\n plt.step(obs_time, obs_spares, where='post')\n plt.xlabel('Time (hour)')\n plt.ylabel('Number Spares Available')\n \n # plot the total cost accumulation\n plt.figure()\n plt.step(obs_time, obs_cost, where='post')\n plt.xlabel('Time (hour)')\n plt.ylabel('Total Cost')\n\n # print final results to console\n print('Factory costs for N={:} runs with R={:} repairers and S={:} spares:'.format(\n NUM_RUNS, NUM_REPAIRERS, NUM_SPARES))\n print('\\n'.join('{:.2f}'.format(i) for i in COST))\n\n#%% SECTION TO WRITE RESULTS TO CSV FILE\n\nimport csv\n\nwith open('factory.csv', 'w') as output:\n writer = csv.writer(output)\n for sample in COST:\n writer.writerow([sample])" ]
[ [ "numpy.random.uniform", "matplotlib.pyplot.figure", "numpy.random.seed", "matplotlib.pyplot.step", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ] ]
ozgurozkan123/deepchem
[ "7b6248db5f7172ff2a833a1c7c99f48565befe67" ]
[ "deepchem/dock/binding_pocket.py" ]
[ "\"\"\"\nComputes putative binding pockets on protein.\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n__author__ = \"Bharath Ramsundar\"\n__copyright__ = \"Copyright 2017, Stanford University\"\n__license__ = \"MIT\"\n\nimport os\nimport tempfile\nimport numpy as np\nfrom subprocess import call\nfrom scipy.spatial import ConvexHull\nfrom deepchem.feat.binding_pocket_features import BindingPocketFeaturizer\nfrom deepchem.feat.fingerprints import CircularFingerprint\nfrom deepchem.models.sklearn_models import SklearnModel\nfrom deepchem.utils import rdkit_util\n\n\ndef extract_active_site(protein_file, ligand_file, cutoff=4):\n \"\"\"Extracts a box for the active site.\"\"\"\n protein_coords = rdkit_util.load_molecule(\n protein_file, add_hydrogens=False)[0]\n ligand_coords = rdkit_util.load_molecule(\n ligand_file, add_hydrogens=True, calc_charges=True)[0]\n num_ligand_atoms = len(ligand_coords)\n num_protein_atoms = len(protein_coords)\n pocket_inds = []\n pocket_atoms = set([])\n for lig_atom_ind in range(num_ligand_atoms):\n lig_atom = ligand_coords[lig_atom_ind]\n for protein_atom_ind in range(num_protein_atoms):\n protein_atom = protein_coords[protein_atom_ind]\n if np.linalg.norm(lig_atom - protein_atom) < cutoff:\n if protein_atom_ind not in pocket_atoms:\n pocket_atoms = pocket_atoms.union(set([protein_atom_ind]))\n # Should be an array of size (n_pocket_atoms, 3)\n pocket_atoms = list(pocket_atoms)\n n_pocket_atoms = len(pocket_atoms)\n pocket_coords = np.zeros((n_pocket_atoms, 3))\n for ind, pocket_ind in enumerate(pocket_atoms):\n pocket_coords[ind] = protein_coords[pocket_ind]\n\n x_min = int(np.floor(np.amin(pocket_coords[:, 0])))\n x_max = int(np.ceil(np.amax(pocket_coords[:, 0])))\n y_min = int(np.floor(np.amin(pocket_coords[:, 1])))\n y_max = int(np.ceil(np.amax(pocket_coords[:, 1])))\n z_min = int(np.floor(np.amin(pocket_coords[:, 2])))\n z_max = int(np.ceil(np.amax(pocket_coords[:, 2])))\n return (((x_min, x_max), (y_min, y_max), (z_min, z_max)), pocket_atoms,\n pocket_coords)\n\n\ndef compute_overlap(mapping, box1, box2):\n \"\"\"Computes overlap between the two boxes.\n\n Overlap is defined as % atoms of box1 in box2. 
Note that\n overlap is not a symmetric measurement.\n \"\"\"\n atom1 = set(mapping[box1])\n atom2 = set(mapping[box2])\n return len(atom1.intersection(atom2)) / float(len(atom1))\n\n\ndef get_all_boxes(coords, pad=5):\n \"\"\"Get all pocket boxes for protein coords.\n\n We pad all boxes the prescribed number of angstroms.\n\n TODO(rbharath): It looks like this may perhaps be non-deterministic?\n \"\"\"\n hull = ConvexHull(coords)\n boxes = []\n for triangle in hull.simplices:\n # coords[triangle, 0] gives the x-dimension of all triangle points\n # Take transpose to make sure rows correspond to atoms.\n points = np.array(\n [coords[triangle, 0], coords[triangle, 1], coords[triangle, 2]]).T\n # We voxelize so all grids have integral coordinates (convenience)\n x_min, x_max = np.amin(points[:, 0]), np.amax(points[:, 0])\n x_min, x_max = int(np.floor(x_min)) - pad, int(np.ceil(x_max)) + pad\n y_min, y_max = np.amin(points[:, 1]), np.amax(points[:, 1])\n y_min, y_max = int(np.floor(y_min)) - pad, int(np.ceil(y_max)) + pad\n z_min, z_max = np.amin(points[:, 2]), np.amax(points[:, 2])\n z_min, z_max = int(np.floor(z_min)) - pad, int(np.ceil(z_max)) + pad\n boxes.append(((x_min, x_max), (y_min, y_max), (z_min, z_max)))\n return boxes\n\n\ndef boxes_to_atoms(atom_coords, boxes):\n \"\"\"Maps each box to a list of atoms in that box.\n\n TODO(rbharath): This does a num_atoms x num_boxes computations. Is\n there a reasonable heuristic we can use to speed this up?\n \"\"\"\n mapping = {}\n for box_ind, box in enumerate(boxes):\n box_atoms = []\n (x_min, x_max), (y_min, y_max), (z_min, z_max) = box\n print(\"Handing box %d/%d\" % (box_ind, len(boxes)))\n for atom_ind in range(len(atom_coords)):\n atom = atom_coords[atom_ind]\n x_cont = x_min <= atom[0] and atom[0] <= x_max\n y_cont = y_min <= atom[1] and atom[1] <= y_max\n z_cont = z_min <= atom[2] and atom[2] <= z_max\n if x_cont and y_cont and z_cont:\n box_atoms.append(atom_ind)\n mapping[box] = box_atoms\n return mapping\n\n\ndef merge_boxes(box1, box2):\n \"\"\"Merges two boxes.\"\"\"\n (x_min1, x_max1), (y_min1, y_max1), (z_min1, z_max1) = box1\n (x_min2, x_max2), (y_min2, y_max2), (z_min2, z_max2) = box2\n x_min = min(x_min1, x_min2)\n y_min = min(y_min1, y_min2)\n z_min = min(z_min1, z_min2)\n x_max = max(x_max1, x_max2)\n y_max = max(y_max1, y_max2)\n z_max = max(z_max1, z_max2)\n return ((x_min, x_max), (y_min, y_max), (z_min, z_max))\n\n\ndef merge_overlapping_boxes(mapping, boxes, threshold=.8):\n \"\"\"Merge boxes which have an overlap greater than threshold.\n\n TODO(rbharath): This merge code is terribly inelegant. It's also quadratic\n in number of boxes. It feels like there ought to be an elegant divide and\n conquer approach here. 
Figure out later...\n \"\"\"\n num_boxes = len(boxes)\n outputs = []\n for i in range(num_boxes):\n box = boxes[0]\n new_boxes = []\n new_mapping = {}\n # If overlap of box with previously generated output boxes, return\n contained = False\n for output_box in outputs:\n # Carry forward mappings\n new_mapping[output_box] = mapping[output_box]\n if compute_overlap(mapping, box, output_box) == 1:\n contained = True\n if contained:\n continue\n # We know that box has at least one atom not in outputs\n unique_box = True\n for merge_box in boxes[1:]:\n overlap = compute_overlap(mapping, box, merge_box)\n if overlap < threshold:\n new_boxes.append(merge_box)\n new_mapping[merge_box] = mapping[merge_box]\n else:\n # Current box has been merged into box further down list.\n # No need to output current box\n unique_box = False\n merged = merge_boxes(box, merge_box)\n new_boxes.append(merged)\n new_mapping[merged] = list(\n set(mapping[box]).union(set(mapping[merge_box])))\n if unique_box:\n outputs.append(box)\n new_mapping[box] = mapping[box]\n boxes = new_boxes\n mapping = new_mapping\n return outputs, mapping\n\n\nclass BindingPocketFinder(object):\n \"\"\"Abstract superclass for binding pocket detectors\"\"\"\n\n def find_pockets(self, protein_file, ligand_file):\n \"\"\"Finds potential binding pockets in proteins.\"\"\"\n raise NotImplementedError\n\n\nclass ConvexHullPocketFinder(BindingPocketFinder):\n \"\"\"Implementation that uses convex hull of protein to find pockets.\n\n Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4112621/pdf/1472-6807-14-18.pdf\n \"\"\"\n\n def __init__(self, pad=5):\n self.pad = pad\n\n def find_all_pockets(self, protein_file):\n \"\"\"Find list of binding pockets on protein.\"\"\"\n # protein_coords is (N, 3) tensor\n coords = rdkit_util.load_molecule(protein_file)[0]\n return get_all_boxes(coords, self.pad)\n\n def find_pockets(self, protein_file, ligand_file):\n \"\"\"Find list of suitable binding pockets on protein.\"\"\"\n protein_coords = rdkit_util.load_molecule(\n protein_file, add_hydrogens=False, calc_charges=False)[0]\n ligand_coords = rdkit_util.load_molecule(\n ligand_file, add_hydrogens=False, calc_charges=False)[0]\n boxes = get_all_boxes(protein_coords, self.pad)\n mapping = boxes_to_atoms(protein_coords, boxes)\n pockets, pocket_atoms_map = merge_overlapping_boxes(mapping, boxes)\n pocket_coords = []\n for pocket in pockets:\n atoms = pocket_atoms_map[pocket]\n coords = np.zeros((len(atoms), 3))\n for ind, atom in enumerate(atoms):\n coords[ind] = protein_coords[atom]\n pocket_coords.append(coords)\n return pockets, pocket_atoms_map, pocket_coords\n\n\nclass RFConvexHullPocketFinder(BindingPocketFinder):\n \"\"\"Uses pre-trained RF model + ConvexHulPocketFinder to select pockets.\"\"\"\n\n def __init__(self, pad=5):\n self.pad = pad\n self.convex_finder = ConvexHullPocketFinder(pad)\n\n # Load binding pocket model\n self.base_dir = tempfile.mkdtemp()\n print(\"About to download trained model.\")\n # TODO(rbharath): Shift refined to full once trained.\n call((\n \"wget -nv -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/trained_models/pocket_random_refined_RF.tar.gz\"\n ).split())\n call((\"tar -zxvf pocket_random_refined_RF.tar.gz\").split())\n call((\"mv pocket_random_refined_RF %s\" % (self.base_dir)).split())\n self.model_dir = os.path.join(self.base_dir, \"pocket_random_refined_RF\")\n\n # Fit model on dataset\n self.model = SklearnModel(model_dir=self.model_dir)\n self.model.reload()\n\n # Create featurizers\n 
self.pocket_featurizer = BindingPocketFeaturizer()\n self.ligand_featurizer = CircularFingerprint(size=1024)\n\n def find_pockets(self, protein_file, ligand_file):\n \"\"\"Compute features for a given complex\n\n TODO(rbharath): This has a log of code overlap with\n compute_binding_pocket_features in\n examples/binding_pockets/binding_pocket_datasets.py. Find way to refactor\n to avoid code duplication.\n \"\"\"\n # if not ligand_file.endswith(\".sdf\"):\n # raise ValueError(\"Only .sdf ligand files can be featurized.\")\n # ligand_basename = os.path.basename(ligand_file).split(\".\")[0]\n # ligand_mol2 = os.path.join(\n # self.base_dir, ligand_basename + \".mol2\")\n #\n # # Write mol2 file for ligand\n # obConversion = ob.OBConversion()\n # conv_out = obConversion.SetInAndOutFormats(str(\"sdf\"), str(\"mol2\"))\n # ob_mol = ob.OBMol()\n # obConversion.ReadFile(ob_mol, str(ligand_file))\n # obConversion.WriteFile(ob_mol, str(ligand_mol2))\n #\n # # Featurize ligand\n # mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)\n # if mol is None:\n # return None, None\n # # Default for CircularFingerprint\n # n_ligand_features = 1024\n # ligand_features = self.ligand_featurizer.featurize([mol])\n #\n # # Featurize pocket\n # pockets, pocket_atoms_map, pocket_coords = self.convex_finder.find_pockets(\n # protein_file, ligand_file)\n # n_pockets = len(pockets)\n # n_pocket_features = BindingPocketFeaturizer.n_features\n #\n # features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))\n # pocket_features = self.pocket_featurizer.featurize(\n # protein_file, pockets, pocket_atoms_map, pocket_coords)\n # # Note broadcast operation\n # features[:, :n_pocket_features] = pocket_features\n # features[:, n_pocket_features:] = ligand_features\n # dataset = NumpyDataset(X=features)\n # pocket_preds = self.model.predict(dataset)\n # pocket_pred_proba = np.squeeze(self.model.predict_proba(dataset))\n #\n # # Find pockets which are active\n # active_pockets = []\n # active_pocket_atoms_map = {}\n # active_pocket_coords = []\n # for pocket_ind in range(len(pockets)):\n # #################################################### DEBUG\n # # TODO(rbharath): For now, using a weak cutoff. Fix later.\n # #if pocket_preds[pocket_ind] == 1:\n # if pocket_pred_proba[pocket_ind][1] > .15:\n # #################################################### DEBUG\n # pocket = pockets[pocket_ind]\n # active_pockets.append(pocket)\n # active_pocket_atoms_map[pocket] = pocket_atoms_map[pocket]\n # active_pocket_coords.append(pocket_coords[pocket_ind])\n # return active_pockets, active_pocket_atoms_map, active_pocket_coords\n # # TODO(LESWING)\n raise ValueError(\"Karl Implement\")\n" ]
[ [ "scipy.spatial.ConvexHull", "numpy.linalg.norm", "numpy.ceil", "numpy.zeros", "numpy.floor", "numpy.amin", "numpy.amax", "numpy.array" ] ]
shanyi15/tensorflow
[ "ebb3429856441149e41388dfbea59496f8dbf17b" ]
[ "tensorflow/python/saved_model/load_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for checkpointable object SavedModel loading.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import save\nfrom tensorflow.python.training.checkpointable import tracking\n\n\nclass LoadTest(test.TestCase):\n\n def cycle(self, obj):\n path = tempfile.mkdtemp(prefix=self.get_temp_dir())\n save.save(obj, path, signatures={})\n return load.load(path)\n\n def test_structure_import(self):\n root = tracking.Checkpointable()\n root.dep_one = tracking.Checkpointable()\n root.dep_two = tracking.Checkpointable()\n root.dep_two.dep = tracking.Checkpointable()\n root.dep_three = root.dep_two.dep\n imported = self.cycle(root)\n self.assertIs(imported.dep_three, imported.dep_two.dep)\n self.assertIsNot(imported.dep_one, imported.dep_two)\n\n def test_variables(self):\n root = tracking.Checkpointable()\n root.v1 = variables.Variable(1., trainable=True)\n root.v2 = variables.Variable(2., trainable=False)\n imported = self.cycle(root)\n self.assertEquals(imported.v1.numpy(), 1.0)\n self.assertTrue(imported.v1.trainable)\n self.assertEquals(imported.v2.numpy(), 2.0)\n self.assertFalse(imported.v2.trainable)\n\n def test_capture_variables(self):\n root = tracking.Checkpointable()\n root.weights = variables.Variable(2.)\n root.f = def_function.function(\n lambda x: root.weights * x,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n imported = self.cycle(root)\n self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())\n imported.weights.assign(4.0)\n self.assertEqual(8., imported.f(constant_op.constant(2.)).numpy())\n\n def _make_asset(self, contents):\n filename = tempfile.mktemp(prefix=self.get_temp_dir())\n with open(filename, \"w\") as f:\n f.write(contents)\n return filename\n\n def test_assets(self):\n file1 = self._make_asset(\"contents 1\")\n file2 = self._make_asset(\"contents 2\")\n\n root = tracking.Checkpointable()\n root.asset1 = tracking.TrackableAsset(file1)\n root.asset2 = tracking.TrackableAsset(file2)\n\n save_dir = os.path.join(self.get_temp_dir(), \"save_dir\")\n save.save(root, save_dir, signatures={})\n\n file_io.delete_file(file1)\n file_io.delete_file(file2)\n load_dir = os.path.join(self.get_temp_dir(), \"load_dir\")\n file_io.rename(save_dir, load_dir)\n\n imported = load.load(load_dir)\n with 
open(imported.asset1.asset_path.numpy(), \"r\") as f:\n self.assertEquals(\"contents 1\", f.read())\n with open(imported.asset2.asset_path.numpy(), \"r\") as f:\n self.assertEquals(\"contents 2\", f.read())\n\n def test_capture_assets(self):\n root = tracking.Checkpointable()\n root.vocab = tracking.TrackableAsset(self._make_asset(\"contents\"))\n root.f = def_function.function(\n lambda: root.vocab.asset_path,\n input_signature=[])\n imported = self.cycle(root)\n origin_output = root.f().numpy()\n imported_output = imported.f().numpy()\n self.assertNotEqual(origin_output, imported_output)\n with open(imported_output, \"r\") as f:\n self.assertEquals(\"contents\", f.read())\n\n def test_dedup_assets(self):\n vocab = self._make_asset(\"contents\")\n root = tracking.Checkpointable()\n root.asset1 = tracking.TrackableAsset(vocab)\n root.asset2 = tracking.TrackableAsset(vocab)\n imported = self.cycle(root)\n self.assertEqual(imported.asset1.asset_path.numpy(),\n imported.asset2.asset_path.numpy())\n\n def test_implicit_input_signature(self):\n @def_function.function\n def func(x):\n return 2 * x\n\n root = tracking.Checkpointable()\n root.f = func\n\n # Add two traces.\n root.f(constant_op.constant(1.))\n root.f(constant_op.constant(1))\n\n imported = self.cycle(root)\n\n self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())\n self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())\n\n def test_explicit_input_signature(self):\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n def func(x):\n return 2 * x\n\n root = tracking.Checkpointable()\n root.f = func\n\n imported = self.cycle(root)\n self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())\n\n def test_nested_functions(self):\n f = def_function.function(\n lambda x: x*2.0,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n g = def_function.function(\n lambda x: f(x) + 1.0,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n\n root = tracking.Checkpointable()\n root.g = g\n imported = self.cycle(root)\n imported.g(constant_op.constant([1.0]))\n\n def test_function_with_default_bool_input(self):\n\n def func(x, training=False):\n if training:\n return 2 * x\n else:\n return 7\n\n root = tracking.Checkpointable()\n root.f = def_function.function(func)\n\n self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())\n self.assertEqual(7, root.f(constant_op.constant(1)).numpy())\n self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())\n\n imported = self.cycle(root)\n\n self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())\n self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())\n\n def test_positional_arguments(self):\n def func(x, training=False, abc=7.1, defg=7.7):\n del abc\n if training:\n return 2 * x\n if defg == 7:\n return 6\n else:\n return 7\n\n root = tracking.Checkpointable()\n root.f = def_function.function(func)\n\n self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())\n self.assertEqual(7, root.f(constant_op.constant(1)).numpy())\n self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())\n self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())\n\n imported = self.cycle(root)\n\n self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())\n self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())\n self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())\n\n def test_member_function(self):\n class 
CheckpointableWithMember(tracking.Checkpointable):\n\n def __init__(self):\n super(CheckpointableWithMember, self).__init__()\n self._some_value = 20\n\n @def_function.function\n def f(self, x, training=False):\n if training:\n return 2 * x\n else:\n return 7 + self._some_value\n\n root = CheckpointableWithMember()\n\n self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())\n self.assertEqual(27, root.f(constant_op.constant(1)).numpy())\n self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())\n\n imported = self.cycle(root)\n\n self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())\n self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())\n\n def test_side_effect_listing(self):\n class M(tracking.Checkpointable):\n\n def __init__(self):\n super(M, self).__init__()\n self.var = None\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n def f(self, x):\n if self.var is None:\n self.var = variables.Variable(2.)\n return x * self.var\n\n m = M()\n self.cycle(m)\n self.assertEquals(4.0, m.f(constant_op.constant(2.0)).numpy())\n\n def test_basic_backprop(self):\n weight = variables.Variable(1., trainable=True)\n bias = variables.Variable(0., trainable=True)\n g = def_function.function(\n lambda x: x*weight + bias,\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n\n root = tracking.Checkpointable()\n root.weight = weight\n root.bias = bias\n root.g = g\n imported = self.cycle(root)\n with backprop.GradientTape(watch_accessed_variables=True) as t:\n x = constant_op.constant([3.5])\n loss = imported.g(x)\n grad = t.gradient(loss, [imported.weight, imported.bias])\n self.assertAllClose(grad, [3.5, 1.0])\n\n def test_callable(self):\n class M1(tracking.Checkpointable):\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])\n def __call__(self, x):\n return x\n\n root = tracking.Checkpointable()\n root.m1 = M1()\n root.m2 = tracking.Checkpointable()\n root.m2.__call__ = def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(\n lambda x: x*3.0)\n imported = self.cycle(root)\n x = constant_op.constant(1.0)\n\n self.assertTrue(callable(imported.m1))\n self.assertAllEqual(root.m1(x), imported.m1(x))\n\n # Note: `root.m2` was not callable since `__call__` attribute was set\n # into the instance and not on the class. But after a serialization cycle\n # that starts to work.\n self.assertTrue(callable(imported.m2))\n self.assertAllEqual(root.m2.__call__(x), imported.m2(x))\n\n # Verify that user objects without `__call__` attribute are not callable.\n self.assertFalse(callable(imported))\n\n def test_chain_callable(self):\n func = def_function.function(\n input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(\n lambda x: x*3.0)\n root = tracking.Checkpointable()\n root.__call__ = tracking.Checkpointable()\n root.__call__.__call__ = tracking.Checkpointable()\n root.__call__.__call__.__call__ = func\n\n imported = self.cycle(root)\n self.assertTrue(callable(imported))\n x = constant_op.constant(1.0)\n self.assertAllEqual(imported(x).numpy(), 3.0)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.saved_model.load.load", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.training.checkpointable.tracking.TrackableAsset", "tensorflow.python.lib.io.file_io.delete_file", "tensorflow.python.training.checkpointable.tracking.Checkpointable", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.saved_model.save.save", "tensorflow.python.eager.test.main", "tensorflow.python.lib.io.file_io.rename", "tensorflow.python.eager.def_function.function", "tensorflow.python.ops.variables.Variable", "tensorflow.python.framework.constant_op.constant" ] ]
joytianya/google_bert
[ "06f131241163a745747da33c5f563abe4413897b" ]
[ "zuo/bert/tokenization.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tokenization classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport re\nimport unicodedata\nimport six\nimport tensorflow as tf\n\n\ndef validate_case_matches_checkpoint(do_lower_case, init_checkpoint):\n \"\"\"Checks whether the casing config is consistent with the checkpoint name.\"\"\"\n\n # The casing has to be passed in by the user and there is no explicit check\n # as to whether it matches the checkpoint. The casing information probably\n # should have been stored in the bert_config.json file, but it's not, so\n # we have to heuristically detect it to validate.\n\n if not init_checkpoint:\n return\n\n m = re.match(\"^.*?([A-Za-z0-9_-]+)/bert_model.ckpt\", init_checkpoint)\n if m is None:\n return\n\n model_name = m.group(1)\n\n lower_models = [\n \"uncased_L-24_H-1024_A-16\", \"uncased_L-12_H-768_A-12\",\n \"multilingual_L-12_H-768_A-12\", \"chinese_L-12_H-768_A-12\"\n ]\n\n cased_models = [\n \"cased_L-12_H-768_A-12\", \"cased_L-24_H-1024_A-16\",\n \"multi_cased_L-12_H-768_A-12\"\n ]\n\n is_bad_config = False\n if model_name in lower_models and not do_lower_case:\n is_bad_config = True\n actual_flag = \"False\"\n case_name = \"lowercased\"\n opposite_flag = \"True\"\n\n if model_name in cased_models and do_lower_case:\n is_bad_config = True\n actual_flag = \"True\"\n case_name = \"cased\"\n opposite_flag = \"False\"\n\n if is_bad_config:\n raise ValueError(\n \"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. \"\n \"However, `%s` seems to be a %s model, so you \"\n \"should pass in `--do_lower_case=%s` so that the fine-tuning matches \"\n \"how the model was pre-training. 
If this error is wrong, please \"\n \"just comment out this check.\" % (actual_flag, init_checkpoint,\n model_name, case_name, opposite_flag))\n\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef printable_text(text):\n \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n # These functions want `str` for both Python2 and Python3, but in one case\n # it's a Unicode string and in the other it's a byte string.\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text\n elif isinstance(text, unicode):\n return text.encode(\"utf-8\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef load_vocab(vocab_file):\n \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n vocab = collections.OrderedDict()\n index = 0\n with tf.gfile.GFile(vocab_file, \"r\") as reader:\n while True:\n token = convert_to_unicode(reader.readline())\n if not token:\n break\n token = token.strip()\n vocab[token] = index\n index += 1\n return vocab\n\n\ndef convert_by_vocab(vocab, items):\n \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n output = []\n #print(\"items:\",items) #['[CLS]', '日', '##期', ',', '但', '被', '##告', '金', '##东', '##福', '载', '##明', '[MASK]', 'U', '##N', '##K', ']', '保', '##证', '本', '##月', '1', '##4', '[MASK]', '到', '##位', ',', '2', '##0', '##1', '##5', '年', '6', '[MASK]', '1', '##1', '日', '[', 'U', '##N', '##K', ']', ',', '原', '##告', '[MASK]', '认', '##可', '于', '2', '##0', '##1', '##5', '[MASK]', '6', '月', '[MASK]', '[MASK]', '日', '##向', '被', '##告', '主', '##张', '权', '##利', '。', '而', '[MASK]', '[MASK]', '自', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '年', '6', '月', '1', '##1', '日', '[SEP]', '原', '##告', '于', '2', '##0', '##1', '##6', '[MASK]', '6', '[MASK]', '2', '##4', '日', '起', '##诉', ',', '主', '##张', '保', '##证', '责', '##任', ',', '已', '超', '##过', '保', '##证', '期', '##限', '[MASK]', '保', '##证', '人', '依', '##法', '不', '##再', '承', '##担', '保', '##证', '[MASK]', '[MASK]', '[MASK]', '[SEP]']\n for i,item in enumerate(items):\n #print(i,\"item:\",item) # ##期\n output.append(vocab[item])\n return output\n\n\ndef convert_tokens_to_ids(vocab, tokens):\n return convert_by_vocab(vocab, tokens)\n\n\ndef convert_ids_to_tokens(inv_vocab, ids):\n return convert_by_vocab(inv_vocab, ids)\n\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens\n\n\nclass FullTokenizer(object):\n \"\"\"Runs end-to-end tokenziation.\"\"\"\n\n def __init__(self, vocab_file, do_lower_case=True):\n self.vocab = load_vocab(vocab_file)\n self.inv_vocab = {v: k for k, v in self.vocab.items()}\n 
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)\n\n def tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n\n return split_tokens\n\n def convert_tokens_to_ids(self, tokens):\n return convert_by_vocab(self.vocab, tokens)\n\n def convert_ids_to_tokens(self, ids):\n return convert_by_vocab(self.inv_vocab, ids)\n\n\nclass BasicTokenizer(object):\n \"\"\"Runs basic tokenization (punctuation splitting, lower casing, etc.).\"\"\"\n\n def __init__(self, do_lower_case=True):\n \"\"\"Constructs a BasicTokenizer.\n\n Args:\n do_lower_case: Whether to lower case the input.\n \"\"\"\n self.do_lower_case = do_lower_case\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text.\"\"\"\n text = convert_to_unicode(text)\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n text = self._tokenize_chinese_chars(text)\n\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if self.do_lower_case:\n token = token.lower()\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n\n def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n def _run_split_on_punc(self, text):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]\n\n def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if ((cp >= 0x4E00 and cp <= 0x9FFF) or #\n (cp >= 0x3400 and cp <= 0x4DBF) or #\n (cp >= 0x20000 and cp <= 0x2A6DF) or #\n (cp >= 0x2A700 and cp <= 0x2B73F) or #\n (cp >= 0x2B740 and cp <= 0x2B81F) or #\n (cp >= 0x2B820 and cp <= 0x2CEAF) or\n (cp >= 0xF900 and cp <= 0xFAFF) or #\n (cp >= 0x2F800 and cp <= 0x2FA1F)): #\n return True\n\n return False\n\n def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n\nclass WordpieceTokenizer(object):\n \"\"\"Runs WordPiece tokenziation.\"\"\"\n\n def __init__(self, vocab, unk_token=\"[UNK]\", max_input_chars_per_word=200):\n self.vocab = vocab\n self.unk_token = unk_token\n self.max_input_chars_per_word = max_input_chars_per_word\n\n def tokenize(self, text):\n \"\"\"Tokenizes a piece of text into its word pieces.\n\n This uses a greedy longest-match-first algorithm to perform tokenization\n using the given vocabulary.\n\n For example:\n input = \"unaffable\"\n output = [\"un\", \"##aff\", \"##able\"]\n\n Args:\n text: A single token or whitespace separated tokens. This should have\n already been passed through `BasicTokenizer.\n\n Returns:\n A list of wordpiece tokens.\n \"\"\"\n\n text = convert_to_unicode(text)\n\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = \"\".join(chars[start:end])\n if start > 0:\n substr = \"##\" + substr\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens\n\n\ndef _is_whitespace(char):\n \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically contorl characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False\n\n\ndef _is_control(char):\n \"\"\"Checks whether `chars` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True\n return False\n\n\ndef _is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if 
cat.startswith(\"P\"):\n return True\n return False\n" ]
[ [ "tensorflow.gfile.GFile" ] ]
imandr/RLpy
[ "f01cf7af47b6054e4e52d663ceafc463df6f6166", "f01cf7af47b6054e4e52d663ceafc463df6f6166" ]
[ "rlpy/gradnet/samples/single_agent/single_agent_ttt_env.py", "rlpy/gradnet/samples/single_agent/tank_target_env.py" ]
[ "#\n# Tic Tac Toe\n#\n\nimport numpy as np\nfrom gym import spaces\n\nWinMasks = [\n [\n [1,0,0],\n [1,0,0],\n [1,0,0],\n ],\n [\n [0,1,0],\n [0,1,0],\n [0,1,0],\n ],\n [\n [0,0,1],\n [0,0,1],\n [0,0,1],\n ],\n \n [\n [1,1,1],\n [0,0,0],\n [0,0,0],\n ],\n [\n [0,0,0],\n [1,1,1],\n [0,0,0],\n ],\n [\n [0,0,0],\n [0,0,0],\n [1,1,1],\n ],\n\n [\n [1,0,0],\n [0,1,0],\n [0,0,1],\n ],\n [\n [0,0,1],\n [0,1,0],\n [1,0,0],\n ]\n]\n\nWinMasks = np.array(WinMasks).reshape((-1,9))\n\nclass SingleAgentTicTacToeEnv(object):\n \n NActions = 9\n ObservationShape = (9,)\n NState = 9\n \n def __init__(self):\n self.Board = np.zeros((9,))\n self.action_space = spaces.Discrete(self.NActions)\n high = np.ones((self.NActions,))\n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\n \n def reset(self):\n self.Done = False\n self.Board[...] = 0.0\n self.BoardHistory = []\n self.Side = 1\n self.FirstMove = True\n return self.observation(self.Side), {\"valid_actions\":np.array([1,1,0,0,1,0,0,0,0], dtype=np.float32)}\n \n def observation(self, side):\n return self.Board * side\n \n def step(self, action):\n win = False\n draw = False\n side = self.Side\n other_side = -side\n color = side\n \n reward = 0.0\n done = False\n\n if self.Board[action] != 0:\n # invalid move\n reward = -1.0\n done = True\n else:\n self.Board[action] = side\n self.BoardHistory.append(self.Board.reshape((3,3)).copy())\n \n for win_mask in WinMasks:\n masked = self.Board*color*win_mask\n if np.sum(masked) == 3:\n reward = 1.0\n done = True\n break\n \n if np.all(self.Board != 0):\n done = True # draw\n self.Side = other_side\n self.Done = done\n self.Reward = reward\n return self.observation(self.Side), reward, done, {\"valid_actions\":np.asarray(self.Board==0, dtype=np.float32)}\n \n def render(self):\n if self.Done:\n last_move = -self.Side\n history = self.BoardHistory\n sep = \"+---\"*len(history) + \"+\"\n lines = [sep]\n for irow in (0,1,2):\n line = \"|\"\n for b in history:\n row = \"\".join(\".xo\"[int(c)] for c in b[irow])\n line += row + \"|\"\n lines.append(line)\n outcome = \"draw\"\n if self.Reward:\n outcome = \"%s won\" % (\".xo\"[int(last_move)])\n lines.append(sep + \" \" + outcome)\n print(\"\\n\".join(lines))\n \nif __name__ == \"__main__\":\n \n import random\n \n def show_board(board):\n sep = \"+---\"*3 + \"+\"\n out = [sep]\n for row in board.reshape((3,3)):\n line = \"| \"\n for x in row:\n line += \" OX\"[int(x)] + \" | \"\n out.append(line)\n out.append(sep)\n return \"\\n\".join(out)\n \n class Agent(object):\n \n def __init__(self, side):\n self.Side = side\n self.Sign = \"XO\"[side]\n self.Color = side*2-1\n \n def reset(self):\n pass\n \n def action(self, reward, observation, available_actions):\n print(f\"{self.Sign}: action:\", reward, observation, available_actions)\n choices = [i for i, x in enumerate(available_actions) if x]\n i = random.choice(choices)\n return i\n \n def reward(self, r):\n #print(f\"{self.Sign}: reward: {r}\")\n pass\n \n def done(self, r, last_observation):\n if r > 0:\n print(f\"===== {self.Sign} won\")\n elif r < 0:\n print(f\"===== {self.Sign} lost\")\n else:\n print(\"===== draw\")\n \n class Callback(object):\n \n def end_turn(self, agents, data):\n print(show_board(data[\"board\"]))\n \n def end_episode(self, agents, data):\n print(\"--- game over ---\")\n print(env.show_history(data[\"board_history\"]))\n \n x_agent = Agent(0)\n y_agent = Agent(1)\n \n env = TicTacToeEnv()\n env.run([x_agent, y_agent], [Callback])\n \n \n \n ", "import random\nimport numpy as 
np\nimport math, time\nfrom gym import spaces\nfrom draw2d import Viewer, Frame, Line, Polygon, Circle, Text\n\n\nclass TankTargetEnv(object):\n \n FireRange = 0.1\n Speed = 0.02\n RotSpeed = math.pi*2/50\n Width = 0.01\n TimeHorizon = 100\n GasReward = 0.0\n IdleReward = 0.0\n MissReward = -0.02\n HitReward = 10.0\n \n X0 = 0.0\n X1 = 1.0\n Y0 = 0.0\n Y1 = 1.0\n Margin = 0.1\n \n FIRE = 0\n FWD = 1\n FFWD = 2\n LEFT = 3\n RIGHT = 4\n NActions = 5\n NState = 6\n \n \n\n def __init__(self):\n self.Viewer=None\n self.Hit = False\n self.Fire = False\n self.EpisodeReward = 0.0\n self.T = self.TimeHorizon\n \n high = np.array([1.0]*self.NState, dtype=np.float32)\n self.action_space = spaces.Discrete(self.NActions)\n self.observation_space = spaces.Box(-high, high, dtype=np.float32)\n \n \n def bind_angle(self, a):\n while a < -math.pi:\n a += math.pi*2\n while a >= math.pi:\n a -= math.pi*2\n return a\n \n def observation(self):\n obs = np.empty((self.NState,))\n obs[0] = self.X\n obs[1] = self.Y\n obs[2] = self.Angle\n dx = self.TargetX - self.X\n dy = self.TargetY - self.Y\n obs[3] = math.sqrt(dx*dx + dy*dy)\n c = math.atan2(dy, dx)\n obs[4] = self.bind_angle(c-self.Angle)\n obs[5] = self.T/self.TimeHorizon\n return obs\n \n def seed(self, x):\n pass\n \n def reset(self):\n self.TargetX = self.Margin + random.random()*(self.X1-self.X0-self.Margin*2)\n self.TargetY = self.Margin + random.random()*(self.Y1-self.Y0-self.Margin*2)\n self.X = self.Margin + random.random()*(self.X1-self.X0-self.Margin*2)\n self.Y = self.Margin + random.random()*(self.X1-self.X0-self.Margin*2)\n self.Angle = self.bind_angle(random.random()*2*math.pi - math.pi)\n self.EpisodeReward = 0.0\n self.T = self.TimeHorizon\n \n return self.observation()\n \n def step(self, action):\n self.Hit = self.Fire = False\n self.Reward = 0.0\n done = False\n reward = self.IdleReward\n if action in (self.FWD, self.FFWD):\n d = self.Speed/2 if action == self.FWD else self.Speed*2\n reward = self.GasReward/4 if action == self.FWD else self.GasReward*2\n x = self.X + math.cos(self.Angle)*d\n y = self.Y + math.sin(self.Angle)*d\n x1 = max(self.X0, min(self.X1, x))\n y1 = max(self.Y0, min(self.Y1, y))\n if x1 != x or y1 != y: # bump ?\n reward = -1.0\n done = True\n self.X, self.Y = x1, y1\n #self.Reward += 0.001\n elif action == self.FIRE:\n self.Fire = True\n dx = self.TargetX - self.X\n dy = self.TargetY - self.Y\n a = math.atan2(dy, dx)\n distance = math.sqrt(dx*dx + dy*dy)\n delta = distance * math.sin(abs(a-self.Angle))\n self.Hit = abs(self.Angle - a) < math.pi/4 and delta < self.Width and distance < self.FireRange + self.Width\n if self.Hit:\n print(\"hit\")\n done = True\n reward = self.HitReward\n else:\n reward = self.MissReward\n elif action == self.LEFT:\n self.Angle += self.RotSpeed\n self.Angle = self.bind_angle(self.Angle)\n elif action == self.RIGHT:\n self.Angle -= self.RotSpeed\n self.Angle = self.bind_angle(self.Angle)\n \n self.T -= 1\n if self.T <= 0:\n done = True\n self.Reward = reward\n self.EpisodeReward += self.Reward\n \n return self.observation(), reward, done, {}\n \n def render(self):\n if self.Viewer is None:\n self.Viewer = Viewer(600, 600)\n self.Frame = self.Viewer.frame(0.0, 1.0, 0.0, 1.0)\n \n self.Tank = Frame()\n self.Tank.add(\n Polygon([(-0.02, -0.01), (0.02, 0.0), (-0.02, 0.01)]).color(0.0, 0.5, 0.1)\n )\n self.Beam = Line(end=(self.FireRange, 0)).color(1.0, 0.5, 0.0)\n self.Tank.add(self.Beam)\n self.Frame.add(self.Tank)\n\n self.Target = Circle(self.Width, filled=False)\n 
self.Frame.add(self.Target)\n \n self.ScoreText = Text(\"\", anchor_x=\"left\", size=8).color(0.5, 0.5, 0.5)\n self.Frame.add(self.ScoreText, at=(0.01, 0.01))\n \n self.Tank.move_to(self.X, self.Y)\n self.Tank.rotate_to(self.Angle)\n self.Beam.hidden = not self.Fire\n self.Target.move_to(self.TargetX, self.TargetY)\n if self.Hit:\n self.Target.color(1.0, 1.0, 0.5)\n else:\n self.Target.color(0.5, 0.5, 0.5)\n \n self.ScoreText.Text = \"r:%.3f R:%.3f %s\" % (self.Reward, self.EpisodeReward, self.observation())\n \n self.Viewer.render()\n \n if self.Hit:\n time.sleep(0.2)\n \n\n \n \n \n \n \n\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.zeros", "numpy.asarray", "numpy.all", "numpy.array" ], [ "numpy.array", "numpy.empty" ] ]
CentraleNantesRobotics/ping360_sonar_python
[ "f461594aa0345a417f5bb711b8f4500fb4b4727d" ]
[ "ping360_sonar/ping360_sonar/sonar_interface.py" ]
[ "#!/usr/bin/env python\n\nfrom ping360_sonar.sensor import Ping360\nfrom numpy import pi, sqrt, tan, cos, sign\nfrom brping import definitions\n\nclass SonarInterface:\n \n samplePeriodTickDuration = 25e-9\n firmwareMinTransmitDuration = 5\n firmwareMaxTransmitDuration = 500\n firmwareMaxSamples = 1200\n firmwareMinSamplePeriod = 80\n maxDurationRatio = 64e6\n \n def __init__(self, port, baudrate, fallback_emulated):\n \n self.angle = 0\n try:\n self.sonar = Ping360(port, baudrate)\n if self.sonar.initialize():\n return\n except:\n pass\n \n if not fallback_emulated:\n raise RuntimeError('Cannot initialize sonar')\n print('Using emulated sonar')\n self.sonar = None\n \n def configureAngles(self, aperture_deg, step_deg, ensure_divisor):\n # to gradians\n target_half_aperture = int(aperture_deg*200/360+0.5)\n best_half_aperture = target_half_aperture\n self.angle_step = int(round(step_deg*400/360))\n\n # ensure angle_step is a divisor of max-min in gradians, necessary for LaserScan messages\n if ensure_divisor: \n # look around step, allow increased aperture\n target_step = self.angle_step\n \n # not too far from requested aperture, as close as possible to requested step (impacts turn duration)\n computeCost = lambda step,half_aperture: 1000 if half_aperture%step != 0 else abs(step-target_step) + abs(half_aperture-target_half_aperture)\n \n best_cost = computeCost(self.angle_step, target_half_aperture)\n if best_cost != 0: \n for step in range(1, target_step*2):\n for half_aperture in range(target_half_aperture, min(target_half_aperture+10, 200)+1):\n cost = computeCost(step, half_aperture)\n if cost < best_cost:\n best_cost = cost\n self.angle_step = step\n best_half_aperture = half_aperture\n \n self.angle_min = -best_half_aperture\n self.angle_max = best_half_aperture\n if self.angle_max == 200: \n self.angle_max -= self.angle_step\n if self.angle < self.angle_min or self.angle > self.angle_max or (self.angle-self.angle_min) % self.angle_step != 0:\n self.angle = 0\n \n @staticmethod\n def grad2rad(grad):\n return grad*pi/200\n \n def angleMin(self):\n return self.grad2rad(self.angle_min)\n def angleMax(self):\n return self.grad2rad(self.angle_max)\n def angleStep(self):\n return self.grad2rad(self.angle_step)\n def currentAngle(self):\n return self.grad2rad(self.angle)\n def angleCount(self):\n return (self.angle_max-self.angle_min)//self.angle_step\n def angleIndex(self):\n if self.angle_step > 0:\n return (self.angle-self.angle_min)//self.angle_step\n return (self.angle-self.angle_max)//self.angle_step\n def rangeFrom(self, index):\n return (index+1)*self.max_range/self.samples\n \n def configureTransducer(self, gain, frequency, speed_of_sound, max_range):\n \n self.gain = gain\n self.frequency = frequency\n \n self.samples = int(min(self.firmwareMaxSamples,2*max_range/(self.firmwareMinSamplePeriod*speed_of_sound*self.samplePeriodTickDuration)))\n \n self.sample_period = int((2.*max_range)/\n (self.samples*speed_of_sound*self.samplePeriodTickDuration));\n \n\n #* Per firmware engineer:\n #* 1. Starting point is TxPulse in usec = ((one-way range in metres) * 8000) / (Velocity of sound in metres\n #* per second)\n #* 2. Then check that TxPulse is wide enough for currently selected sample interval in usec, i.e.,\n #* if TxPulse < (2.5 * sample interval) then TxPulse = (2.5 * sample interval)\n #* 3. 
Perform limit checking\n\n #1\n one_way_duration_us = (8000.*max_range)/speed_of_sound\n # 2 (transmit duration is microseconds, sample_period_ns is nanoseconds) \n sample_period_ns = self.sample_period * self.samplePeriodTickDuration\n self.transmit_duration = max(2.5*sample_period_ns/1000, one_way_duration_us)\n # 3 ensure bounds \n if self.transmit_duration < self.firmwareMinTransmitDuration:\n self.transmit_duration = self.firmwareMinTransmitDuration\n else:\n max_duration = min(self.firmwareMaxTransmitDuration, sample_period_ns*self.maxDurationRatio)\n if self.transmit_duration > max_duration:\n self.transmit_duration = max_duration\n self.transmit_duration = int(self.transmit_duration)\n \n def transmitDuration(self):\n # microseconds to seconds\n return self.transmit_duration/1e6\n \n def updateAngle(self):\n self.angle += self.angle_step\n \n if self.angle_min == -200:\n # full scan\n end_turn = self.angle + self.angle_step > self.angle_max\n if self.angle > self.angle_max:\n self.angle = self.angle_min\n return end_turn\n \n # sector scan, check near end of sector\n if self.angle + self.angle_step >= self.angle_max or self.angle + self.angle_step <= self.angle_min:\n self.angle_step *= -1\n return True\n return False\n \n def read(self):\n # update angle before transmit\n end_turn = self.updateAngle()\n \n if self.sonar is not None:\n print(f'transmit: {self.transmit_duration}')\n \n self.sonar.control_transducer(\n 0, # reserved\n self.gain,\n self.angle,\n self.transmit_duration,\n self.sample_period,\n self.frequency,\n self.samples,\n 1,\n 0)\n self.sonar.wait_message([definitions.PING360_DEVICE_DATA, definitions.COMMON_NACK], 4.0)\n self.data = bytearray(self.sonar._data)\n return (len(self.data) != 0, end_turn)\n \n # emulated sonar\n from random import randint\n from time import sleep \n self.data = [0 for _ in range(self.samples)]\n scale = 5*abs((self.angle+400) % 400 - 200)\n for i in range(self.samples):\n if randint(self.samples,2*self.samples) < 1.1*i + scale:\n self.data[i] = randint(220, 255)\n # emulate transmit duration in microseconds\n #sleep(self.transmit_duration/1000000)\n return (True, end_turn)\n\n\n\n# handles an angular sector of the image\nclass Bound:\n radius = 0\n def __init__(self, x, tm, tM):\n self.x = x\n if type(tM) == int:\n self.low = Bound.clamp(tm*x)\n self.up = int(tM*sqrt(Bound.radius**2-x**2-1))\n else:\n self.low = Bound.clamp(x*tm)\n self.up = Bound.clamp(x*tM)\n \n if self.up**2 + x**2 > Bound.radius**2:\n self.up = int(sign(self.up) * sqrt(Bound.radius**2-x**2-1))\n \n if self.up < self.low:\n self.low,self.up = self.up,self.low\n \n #staticmethod\n def clamp(coord):\n if coord < -Bound.radius+1:\n return -Bound.radius+1\n elif coord > Bound.radius-1:\n return Bound.radius-1\n return int(coord)\n \nclass Sector:\n def __init__(self):\n self.dr = None\n \n def configure(self, samples, radius):\n self.dr = radius/samples\n Bound.radius = radius\n \n def init(self, angle, step):\n angle_min = angle-step/2\n angle_max = angle+step/2\n xmin, xmax,same_side = self.xLimits(angle_min, angle_max)\n tm, tM = tan(angle_min), tan(angle_max) \n self.bounds = []\n\n if same_side:\n # same side\n if abs(tm) > abs(tM):\n tm,tM = tM,tm\n for x in range(xmin, xmax+1):\n self.bounds.append(Bound(x,tm,tM))\n else:\n f = 1 if abs(angle-pi/2) < abs(angle+pi/2) else -1\n \n if f == -1:\n tm,tM = tM,tm\n \n for x in range(xmin, 0):\n self.bounds.append(Bound(x, tM,f))\n for x in range(0, xmax+1):\n self.bounds.append(Bound(x, tm,f))\n \n self.cur = -1\n \n 
def xLimits(self, angle_min, angle_max):\n cm = cos(angle_min)\n cM = cos(angle_max)\n if cM < cm:\n cm,cM = cM,cm\n if cm*cM > 0:\n if cM < 0:\n cM = 0\n else:\n cm = 0\n return Bound.clamp(round(Bound.radius*cm)), Bound.clamp(round(Bound.radius*cM)), cm*cM >= 0\n \n def nextPoint(self, x, y):\n if self.cur == -1:\n self.cur = 0\n x = self.bounds[0].x\n y = self.bounds[0].low\n elif y < self.bounds[self.cur].up:\n y += 1\n else:\n self.cur += 1\n if self.cur == len(self.bounds):\n return False, 0, 0, 0\n x = self.bounds[self.cur].x\n y = self.bounds[self.cur].low\n return True, x, y, int(round(sqrt(x*x+y*y)/self.dr))\n" ]
[ [ "numpy.sqrt", "numpy.tan", "numpy.sign", "numpy.cos" ] ]
aknckaan/scrl
[ "bff485e27d8785628e35d2cb73dce06f10065b1f" ]
[ "torchlars/wrapper.py" ]
[ "import torch\r\nfrom torch.optim import Optimizer\r\n\r\nclass OptimWrapper(Optimizer):\r\n\r\n # Mixin class that defines convenient functions for writing Optimizer Wrappers\r\n\r\n def __init__(self, optim):\r\n self.optim = optim\r\n\r\n def __getstate__(self):\r\n return self.optim.__getstate__()\r\n\r\n def __setstate__(self, state):\r\n self.optim.__setstate__(state)\r\n\r\n @property\r\n def state(self):\r\n return self.optim.state\r\n\r\n @property\r\n def param_groups(self):\r\n return self.optim.param_groups\r\n\r\n @param_groups.setter\r\n def param_groups(self, value):\r\n self.optim.param_groups = value\r\n\r\n def state_dict(self):\r\n return self.optim.state_dict()\r\n\r\n def load_state_dict(self, state_dict):\r\n self.optim.load_state_dict(state_dict)\r\n\r\n def zero_grad(self):\r\n self.optim.zero_grad()\r\n\r\n def add_param_group(self, param_group):\r\n self.optim.add_param_group(param_group)\r\n\r\n @property\r\n def defaults(self):\r\n return self.optim.defaults\r\n\r\n @defaults.setter\r\n def defaults(self, defaults):\r\n self.optim.defaults = defaults\r\n\r\n @torch.no_grad()\r\n def step(self, closure=None):\r\n self.optim.step(closure=closure)\r\n\r\n def __repr__(self):\r\n return \"%s(%r)\" % (self.__class__.__name__, self.optim)" ]
[ [ "torch.no_grad" ] ]
schmitse/zfit
[ "d42588f1d43532a34a81f31e602d2471780690e2" ]
[ "zfit/models/basic.py" ]
[ "\"\"\"Basic PDFs are provided here.\n\nGauss, exponential... that can be used together with Functors to build larger models.\n\"\"\"\n\n# Copyright (c) 2021 zfit\nimport contextlib\n\nimport numpy as np\nimport tensorflow as tf\n\nimport zfit.z.numpy as znp\nfrom zfit import z\n\nfrom ..core.basepdf import BasePDF\nfrom ..core.space import ANY_LOWER, ANY_UPPER, Space\nfrom ..util import ztyping\nfrom ..util.exception import (AnalyticIntegralNotImplemented,\n BreakingAPIChangeError)\nfrom ..util.warnings import warn_advanced_feature\n\n\nclass Exponential(BasePDF):\n _N_OBS = 1\n\n def __init__(self, lam=None, obs: ztyping.ObsTypeInput = None, name: str = \"Exponential\", lambda_=None):\n \"\"\"Exponential function exp(lambda * x).\n\n The function is normalized over a finite range and therefore a pdf. So the PDF is precisely\n defined as :math:`\\\\frac{ e^{\\\\lambda \\\\cdot x}}{ \\\\int_{lower}^{upper} e^{\\\\lambda \\\\cdot x} dx}`\n\n Args:\n lam: Accessed as parameter \"lambda\".\n obs: The :py:class:`~zfit.Space` the pdf is defined in.\n name: Name of the pdf.\n dtype:\n \"\"\"\n if lambda_ is not None:\n if lam is None:\n lam = lambda_\n else:\n raise BreakingAPIChangeError(\"The 'lambda' parameter has been renamed from 'lambda_' to 'lam'.\")\n params = {'lambda': lam}\n super().__init__(obs, name=name, params=params)\n\n self._calc_numerics_data_shift = lambda: z.constant(0.)\n\n if not self.space.has_limits:\n warn_advanced_feature(\"Exponential pdf relies on a shift of the input towards 0 to keep the numerical \"\n f\"stability high. The space {self.space} does not have limits set and no shift\"\n f\" will occure. To set it manually, set _numerics_data_shift to the expected\"\n f\" average values given to this function _in case you want things to be set_.\"\n f\"If this sounds unfamiliar, regard this as an error and use a normalization range.\",\n identifier='exp_shift')\n self._set_numerics_data_shift(self.space)\n\n def _unnormalized_pdf(self, x):\n lambda_ = self.params['lambda']\n x = x.unstack_x()\n probs = znp.exp(lambda_ * (self._shift_x(x)))\n tf.debugging.assert_all_finite(probs, f\"Exponential PDF {self} has non valid values. This is likely caused\"\n f\" by numerical problems: if the exponential is too steep, this will\"\n f\" yield NaNs or infs. Make sure that your lambda is small enough and/or\"\n f\" the initial space is in the same\"\n f\" region as your data (and norm_range, if explicitly set differently).\"\n f\" If this issue still persists, please oben an issue on Github:\"\n f\" https://github.com/zfit/zfit\")\n return probs # Don't use exp! will overflow.\n\n def _shift_x(self, x):\n return x - self._calc_numerics_data_shift()\n\n @contextlib.contextmanager\n def _set_numerics_data_shift(self, limits):\n if limits:\n def calc_numerics_data_shift():\n lower, upper = [], []\n for limit in limits:\n low, up = limit.rect_limits\n lower.append(z.convert_to_tensor(low[:, 0]))\n upper.append(z.convert_to_tensor(up[:, 0]))\n lower = z.convert_to_tensor(lower)\n upper = z.convert_to_tensor(upper)\n lower_val = znp.min(lower, axis=0)\n upper_val = znp.max(upper, axis=0)\n\n return (upper_val + lower_val) / 2\n\n old_value = self._calc_numerics_data_shift\n\n self._calc_numerics_data_shift = calc_numerics_data_shift\n yield\n self._calc_numerics_data_shift = old_value\n else:\n yield\n\n # All hooks are needed to set the right shift when \"entering\" the pdf. The norm range is taken where both are\n # available. 
No special need needs to be taken for sampling (it samples from the correct region, the limits, and\n # uses the predictions by the `unnormalized_prob` -> that is shifted correctly\n def _single_hook_integrate(self, limits, norm_range, x):\n with self._set_numerics_data_shift(norm_range):\n return super()._single_hook_integrate(limits, norm_range, x=x)\n\n def _single_hook_analytic_integrate(self, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_analytic_integrate(limits, norm_range)\n\n def _single_hook_numeric_integrate(self, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_numeric_integrate(limits, norm_range)\n\n def _single_hook_partial_integrate(self, x, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_partial_integrate(x, limits, norm_range)\n\n def _single_hook_partial_analytic_integrate(self, x, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_partial_analytic_integrate(x, limits, norm_range)\n\n def _single_hook_partial_numeric_integrate(self, x, limits, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_partial_numeric_integrate(x, limits, norm_range)\n\n # def _single_hook_normalization(self, limits):\n # with self._set_numerics_data_shift(limits=limits):\n # return super()._single_hook_normalization(limits)\n\n #\n # # TODO: remove component_norm_range? But needed for integral?\n # def _single_hook_unnormalized_pdf(self, x, name):\n # if component_norm_range.limits_are_false:\n # component_norm_range = self.space\n # if component_norm_range.limits_are_set:\n # with self._set_numerics_data_shift(limits=component_norm_range):\n # return super()._single_hook_unnormalized_pdf(x, name)\n # else:\n # return super()._single_hook_unnormalized_pdf(x, name)\n #\n def _single_hook_pdf(self, x, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_pdf(x, norm_range)\n\n #\n def _single_hook_log_pdf(self, x, norm_range):\n with self._set_numerics_data_shift(limits=norm_range):\n return super()._single_hook_log_pdf(x, norm_range)\n\n def _single_hook_sample(self, n, limits, x=None):\n with self._set_numerics_data_shift(limits=limits):\n return super()._single_hook_sample(n, limits, x)\n\n\ndef _exp_integral_from_any_to_any(limits, params, model):\n lambda_ = params['lambda']\n lower, upper = limits.rect_limits\n # if any(np.isinf([lower, upper])):\n # raise AnalyticIntegralNotImplemented\n\n integral = _exp_integral_func_shifting(lambd=lambda_, lower=lower, upper=upper, model=model)\n return integral[0]\n\n\ndef _exp_integral_func_shifting(lambd, lower, upper, model):\n def raw_integral(x):\n return z.exp(lambd * (model._shift_x(x))) / lambd # needed due to overflow in exp otherwise\n\n lower_int = raw_integral(x=lower)\n upper_int = raw_integral(x=upper)\n integral = (upper_int - lower_int)\n return integral\n\n\ndef exp_icdf(x, params, model):\n lambd = params['lambda']\n x = z.unstack_x(x)\n x = model._shift_x(x)\n return znp.log(lambd * x) / lambd\n\n\n# Exponential.register_inverse_analytic_integral(exp_icdf) # TODO: register icdf for exponential\n# TODO: cleanup, make cdf registrable _and_ inverse integral, but real\n\nlimits = Space(axes=0, limits=(ANY_LOWER, ANY_UPPER))\nExponential.register_analytic_integral(func=_exp_integral_from_any_to_any, 
limits=limits)\n" ]
[ [ "tensorflow.debugging.assert_all_finite" ] ]
bobub/distil_labse
[ "ad587d7e4e49101a22fb1459b724b25733715caa" ]
[ "distil_labse_repo/distil_funcs.py" ]
[ "# Imports\nimport torch\nfrom labml_nn.transformers.switch import SwitchTransformer, SwitchTransformerLayer, SwitchFeedForward\nfrom labml_nn.transformers import MultiHeadAttention\nfrom labml_nn.transformers.feed_forward import FeedForward\nimport numpy as np\nfrom transformers import AutoConfig, AutoModel\nimport torch.nn as nn\nimport math\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics import mean_squared_error\nfrom random import choice\nfrom sklearn.decomposition import PCA\nfrom copy import deepcopy\nfrom transformers import BertModel, BertConfig\n\n\n# Custom dataset function to store Open Subtitles data\nclass CustomDataset(torch.utils.data.Dataset):\n 'Characterizes a dataset for PyTorch'\n def __init__(self, input_ids, token_type_ids, attention_masks):\n 'Initialization'\n self.input_ids = input_ids\n self.token_type_ids = token_type_ids\n self.attention_masks = attention_masks\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.input_ids)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n\n input_id = self.input_ids[index]\n token_type_ID = self.token_type_ids[index]\n attention_mask = self.attention_masks[index]\n sample = {'input_ids':input_id, 'token_type_ids':token_type_ID , 'attention_mask':attention_mask}\n\n return sample\n\n# Weights init and switch init initialise the weights for the model as desribed in Switch Transformer paper\ndef weights_init(tensor: torch.Tensor):\n if isinstance(tensor, nn.Linear):\n switch_init(tensor.weight.data)\n torch.nn.init.zeros_(tensor.bias.data)\n if isinstance(tensor, nn.LayerNorm):\n torch.nn.init.zeros_(tensor.weight.data)\n torch.nn.init.zeros_(tensor.bias.data)\n\ndef switch_init(tensor: torch.Tensor, s: float = 0.1, mean: float=0) -> torch.Tensor:\n fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(tensor)\n std = math.sqrt(s/fan_in)\n\n return torch.nn.init.trunc_normal_(tensor=tensor, mean=mean, std=std)\n\n\nclass LaBSE_Switch(nn.Module):\n \"\"\"\n Torch module for to create a Switch Transformer for LaBSE. 
\n Can be used for other BERT based models too, just change the input_id\n tokenization and word_embedding module.\n\n Inputs:\n config = dictionary of configuration\n word_embeddings_module = torch module mapping token ids to word embeddings\n\n Forward:\n Input_ids = ids using labse tokenizer \n attention_mask = binary, indicates to model which tokens should be attended to,\n and which should not.\n\n Outputs:\n outputs = a dictionary containing x, counts, route_prob, n_dropped, logits, attention, values\n\n See Switch Transformer paper to understand all except:\n attention, values and logits, which are used during knowledge distillation.\n \n \"\"\"\n\n def __init__(self, config, word_embeddings_module):\n\n super().__init__()\n # set the switch transformer as the actual neural net\n self.switch_model = SwitchTransformer(\n \n SwitchTransformerLayer(\n d_model=config['d_model'],\n attn=MultiHeadAttention(config['heads'], config['d_model'], config['dropout']),\n\n feed_forward=SwitchFeedForward(\n capacity_factor=config['capacity_factor'],\n drop_tokens=config['drop_tokens'],\n is_scale_prob=config['is_scale_prob'],\n n_experts=config['n_experts'],\n expert=FeedForward(config['d_model'], config['d_ff'], config['dropout_ffn']),\n d_model=config['d_model']),\n dropout_prob=config['dropout']),\n config['n_layers'],\n d_out = int(768),\n dropout_prob = config['dropout'])\n # initialise weights\n # self.switch_model.apply(weights_init)\n \n # module that maps input tokens into embedding vectors\n self.word_embeddings = word_embeddings_module\n\n # get attention weights from teacher\n # self.weight_init_from_teacher(teacher_model=teacher_model, int_matches=int_matches)\n \n def weight_init_from_teacher(self, teacher_model, int_matches):\n \n \n \"\"\"\n Initialises attention modules of student with those of the teacher for the --- specific to LaBSE and DistilSwitch\n int_matches should be a list of tuples of [(teacher_layer, student_layer),...]\n e.g. 
int_matches = [(5,0),(11,2)] --> give attention weights of teacher layer 5 to student layer 0 \n \"\"\"\n # teacher_model=load_teacher(device=torch.device('cuda'))\n self.switch_model.layers[int_matches[1]].attn.query.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.query.weight\n self.switch_model.layers[int_matches[1]].attn.query.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.query.bias\n self.switch_model.layers[int_matches[1]].attn.key.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.key.weight\n self.switch_model.layers[int_matches[1]].attn.key.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.key.bias\n self.switch_model.layers[int_matches[1]].attn.value.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.value.weight\n self.switch_model.layers[int_matches[1]].attn.value.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.value.bias\n self.switch_model.layers[int_matches[1]].attn.output.weight = teacher_model.encoder.layer[int_matches[0]].attention.output.dense.weight\n self.switch_model.layers[int_matches[1]].attn.output.bias = teacher_model.encoder.layer[int_matches[0]].attention.output.dense.bias\n# self.switch_model.layers[int_matches[1]].norm_ff.weight = teacher_model.encoder.layer[int_matches[0]].output.LayerNorm.weight\n# self.switch_model.layers[int_matches[1]].norm_ff.bias = teacher_model.encoder.layer[int_matches[0]].output.LayerNorm.bias\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None):\n \n # masks and token type ids not used, as we're just creating sentence embeddings for classification tasks\n \n # word embeddings of shape [batch, seq_len, d_model]\n input_embeddings = self.word_embeddings(input_ids)\n\n # model input on shape [seq_len, batch, d_model] and mask\n _batch,_seq_len,_n_hid = input_embeddings.shape\n #print(_n_hid)\n\n # call switch transformer\n outputs = self.switch_model(torch.reshape(input_embeddings, (_seq_len, _batch, _n_hid)),\n attention_mask=None)\n \n return outputs\n\n# function to blackbox load the student for distillation - can be switch or bert based\ndef load_student(name, student_config, device, teacher_model, int_matches, N_LAYERS):\n\n if name!='switch':\n \n # for pretrained bert models - setup config\n student_config = BertConfig.from_pretrained(name)\n student_config.num_hidden_layers = N_LAYERS\n student_config.output_hidden_states = True\n student_config.output_attentions = True\n student_config.use_cache = True\n student_config.is_decoder = True\n \n # load model and set input embeddings\n student_model = BertModel.from_pretrained(name, config=student_config)\n student_model.set_input_embeddings(teacher_model.get_input_embeddings())\n student_model = student_model.float()\n student_model.to(device=device)\n \n return student_model\n\n if name=='switch':\n \n # create compressed word embeddings from those of the teacher\n word_embeddings = deepcopy(teacher_model.get_input_embeddings())\n compressed_word_embeddings = word_embedding_compression(word_embeddings, student_config['d_model'])\n \n # create student model\n student_model = LaBSE_Switch(config=student_config, word_embeddings_module=compressed_word_embeddings)\n \n # initialise weights\n student_model.switch_model.apply(weights_init)\n student_model.weight_init_from_teacher(teacher_model=teacher_model, int_matches=int_matches)\n \n # convert model to float32 and move to device\n student_model = 
student_model.float() \n student_model.to(device=device)\n \n return student_model\n\n# loads teacher model from Huggingface\ndef load_teacher(device):\n teacher_config = AutoConfig.from_pretrained('sentence-transformers/LaBSE')\n teacher_config.output_hidden_states = True\n teacher_config.output_attentions = True\n teacher_config.use_cache = True\n teacher_config.is_decoder = True\n teacher_model = AutoModel.from_pretrained('sentence-transformers/LaBSE', config=teacher_config)\n teacher_model.float() # needs to be 32 bit precision to get decent results from distillation\n teacher_model.to(device=device)\n \n return teacher_model\n\n# Adaptor for BERT based models\ndef simple_adaptor(batch, model_outputs):\n \n # values need to be reformatted from Huggingface 'past_key_values' output\n values = []\n for i in model_outputs['past_key_values']:\n values.append(i[1])\n values = torch.stack(values)\n \n attentions = []\n for j in model_outputs['attentions']:\n attentions.append(inv_softmax(j))\n attentions = torch.stack(attentions)\n \n # we use pooler output as logits\n return {'logits': model_outputs['pooler_output'],\n 'hidden': model_outputs['hidden_states'],\n #'attention': model_outputs['attentions'],\n 'attention':attentions,\n 'inputs_mask': batch['attention_mask'],\n 'value_relation': values,\n 'pooler_output':model_outputs['pooler_output']}\n\ndef inv_softmax(x,C=-50):\n # reverses softmax operation - used in teacher_adaptor\n # C variable sets the min value of the scores, -50 works well.\n result = torch.log(x)\n result = torch.where(result <= float('-inf'), torch.full_like(result,C), result)\n return result\n\ndef teacher_adaptor(batch, model_outputs):\n # selects relevant model and batch outputs used for distillation loss calculation\n values = []\n for i in model_outputs['past_key_values']:\n values.append(i[1])\n values = torch.stack(values)\n \n attentions = []\n for j in model_outputs['attentions']:\n attentions.append(inv_softmax(j))\n attentions = torch.stack(attentions)\n \n # print(model_outputs['pooler_output'].requires_grad)\n\n return {#'logits': model_outputs['last_hidden_state'],\n 'logits':model_outputs['pooler_output'],\n 'hidden': model_outputs['hidden_states'],\n #'attention': model_outputs['attentions'],\n 'attention': attentions,\n 'inputs_mask': batch['attention_mask'],\n 'value_relation': values,\n 'pooler_output':model_outputs['pooler_output']}\n\n# adaptor for switch model\ndef switch_student_adaptor(batch, model_outputs):\n # selects relevant model and batch outputs and reformats them\n # needs to have same shapes as teacher adaptor\n\n # reformat attention\n layers, len, len, batch_size, heads = model_outputs['attention'].shape\n attention = model_outputs['attention'].reshape(layers, batch_size, heads, len, len)\n\n # reformat logits\n len, batch_size, d_model = model_outputs['logits'].shape\n logits = model_outputs['logits'].reshape(batch_size, len, d_model)\n # print(model_outputs['pooler_output'].requires_grad)\n\n # reformat values\n layers, len, batch_size, heads, embedding_per_head = model_outputs['values'].shape\n values = model_outputs['values'].reshape(layers, batch_size, heads, len, embedding_per_head)\n\n return {#'logits': logits,\n 'logits':model_outputs['pooler_output'],\n 'counts': model_outputs['counts'],\n 'attention': attention,\n 'inputs_mask': batch['attention_mask'],\n 'route_prob': model_outputs['route_prob'],\n 'n_dropped': model_outputs['n_dropped'],\n 'value_relation': values}\n\n# Predict function evaluates model every epoch to 
show training progress\ndef predict(model, teacher_model, eval_dataset, step, device, STUDENT, BATCH_SIZE, eval_metric='cosine_similarity', feedback=True):\n '''\n model = student_model\n teacher_model = labse\n eval_dataset = num of dev set samples to test model on per callback\n device = cuda or cpu\n student = switch or !switch\n eval_metric = metric to evaluate the model - mse or cosine_similarity\n '''\n model.eval()\n student_logits = []\n teacher_logits =[]\n batch_counts = []\n batch_n_dropped = []\n batch_route_prob = []\n \n dataloader = DataLoader(eval_dataset,batch_size=BATCH_SIZE)\n print('Running callback function on {} dev set samples...'.format(len(eval_dataset)))\n for batch in dataloader:\n input_ids = batch['input_ids'].to(device)\n attention_mask = batch['attention_mask'].to(device)\n\n with torch.no_grad():\n model_outputs = model(input_ids=input_ids, attention_mask=attention_mask)\n logits_S = model_outputs['pooler_output']\n logits_T = teacher_model(input_ids=input_ids, attention_mask=attention_mask)['pooler_output']\n cpu_logits_S = logits_S.detach().cpu()\n cpu_logits_T = logits_T.detach().cpu()\n \n if STUDENT=='switch' and feedback==True:\n counts = model_outputs['counts'].detach().cpu()\n n_dropped = model_outputs['n_dropped']\n route_prob = model_outputs['route_prob'].detach().cpu()\n\n for i in range(len(cpu_logits_S)):\n student_logits.append(cpu_logits_S[i].numpy())\n teacher_logits.append(cpu_logits_T[i].numpy())\n \n if STUDENT=='switch' and feedback==True:\n for i in range(len(counts)):\n batch_counts.append(counts[i].numpy())\n batch_n_dropped.append(n_dropped[i])\n batch_route_prob.append(route_prob[i].numpy())\n \n model.train()\n student_logits = np.array(student_logits)\n teacher_logits = np.array(teacher_logits)\n\n if eval_metric=='cosine_similarity':\n \n similarities = np.diag(cosine_similarity(student_logits, teacher_logits))\n print (\"Average cosine similarity for these samples: \", np.mean(similarities))\n \n if eval_metric=='mse':\n mse_error = mean_squared_error(student_logits, teacher_logits)\n print (\"Average mean squared error for these samples: \", mse_error)\n \n if STUDENT=='switch' and feedback==True:\n switch_counts = np.array(batch_counts)\n switch_n_dropped = np.array(batch_n_dropped)\n switch_route_prob = np.array(batch_route_prob)\n print('SWITCH BEHAVIOUR:')\n print('Counts Shape: \\n', switch_counts.shape)\n print('Counts: \\n', switch_counts)\n print('N_dropped: \\n', switch_n_dropped)\n print('Route Prob: \\n', switch_route_prob)\n\n return torch.Tensor([np.mean(similarities)])\n\n# generates random parameters for hyperparam tuning\ndef generate_random_params(params):\n # input: params dictionary containing lists of possible values\n chosen_params = {}\n for param in params:\n chosen_params[param] = choice(params[param])\n return chosen_params\n\ndef word_embedding_compression(word_embedding_module, d_model):\n \n \"\"\"\n Compresses a given word_embedding_module (type torch.Embedding) into a module of d_model dimensionality.\n \"\"\"\n word_embedding_matrix = word_embedding_module.weight\n assert word_embedding_matrix.shape[1]>=d_model, 'The desired word embedding dimensionality is greater than the teacher word embeddings. That is not compression! 
Make d_model smaller.'\n # return the module if it's the same dimensionality\n if word_embedding_matrix.shape[1]==d_model:\n return word_embedding_module\n # else compress\n pca = PCA(n_components = d_model)\n compressed_word_embedding_matrix = pca.fit_transform(word_embedding_matrix.detach().cpu().numpy())\n compressed_word_embedding_matrix = torch.from_numpy(compressed_word_embedding_matrix)\n word_embedding_module.weight = torch.nn.parameter.Parameter(compressed_word_embedding_matrix)\n return word_embedding_module" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "sklearn.metrics.mean_squared_error", "numpy.mean", "torch.nn.init._calculate_fan_in_and_fan_out", "torch.no_grad", "torch.reshape", "torch.log", "torch.from_numpy", "torch.nn.init.trunc_normal_", "torch.nn.init.zeros_", "torch.full_like", "numpy.array", "sklearn.metrics.pairwise.cosine_similarity", "torch.nn.parameter.Parameter", "sklearn.decomposition.PCA" ] ]
StanfordVL/bullet3_ik
[ "52da668d60b32bfe6eea96d3ef3b9d442b2b8926" ]
[ "examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_logging.py" ]
[ "\"\"\"A proto buffer based logging system for minitaur experiments.\n\nThe logging system records the time since reset, base position, orientation,\nangular velocity and motor information (joint angle, speed, and torque) into a\nproto buffer. See minitaur_logging.proto for more details. The episode_proto is\nupdated per time step by the environment and saved onto disk for each episode.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\nimport time\n\nimport tensorflow as tf\nimport minitaur_logging_pb2\n\nNUM_MOTORS = 8\n\n\ndef _update_base_state(base_state, values):\n base_state.x = values[0]\n base_state.y = values[1]\n base_state.z = values[2]\n\n\ndef preallocate_episode_proto(episode_proto, max_num_steps):\n \"\"\"Preallocate the memory for proto buffer.\n\n Dynamically allocating memory as the protobuf expands causes unexpected delay\n that is not tolerable with locomotion control.\n\n Args:\n episode_proto: The proto that holds the state/action data for the current\n episode.\n max_num_steps: The max number of steps that will be recorded in the proto.\n The state/data over max_num_steps will not be stored in the proto.\n \"\"\"\n for _ in range(max_num_steps):\n step_log = episode_proto.state_action.add()\n step_log.info_valid = False\n step_log.time.seconds = 0\n step_log.time.nanos = 0\n for _ in range(NUM_MOTORS):\n motor_state = step_log.motor_states.add()\n motor_state.angle = 0\n motor_state.velocity = 0\n motor_state.torque = 0\n motor_state.action = 0\n _update_base_state(step_log.base_position, [0, 0, 0])\n _update_base_state(step_log.base_orientation, [0, 0, 0])\n _update_base_state(step_log.base_angular_vel, [0, 0, 0])\n\n\ndef update_episode_proto(episode_proto, minitaur, action, step):\n \"\"\"Update the episode proto by appending the states/action of the minitaur.\n\n Note that the state/data over max_num_steps preallocated\n (len(episode_proto.state_action)) will not be stored in the proto.\n Args:\n episode_proto: The proto that holds the state/action data for the current\n episode.\n minitaur: The minitaur instance. See envs.minitaur for details.\n action: The action applied at this time step. 
The action is an 8-element\n numpy floating-point array.\n step: The current step index.\n \"\"\"\n max_num_steps = len(episode_proto.state_action)\n if step >= max_num_steps:\n tf.logging.warning(\n \"{}th step is not recorded in the logging since only {} steps were \"\n \"pre-allocated.\".format(step, max_num_steps))\n return\n step_log = episode_proto.state_action[step]\n step_log.info_valid = minitaur.IsObservationValid()\n time_in_seconds = minitaur.GetTimeSinceReset()\n step_log.time.seconds = int(time_in_seconds)\n step_log.time.nanos = int((time_in_seconds - int(time_in_seconds)) * 1e9)\n\n motor_angles = minitaur.GetMotorAngles()\n motor_velocities = minitaur.GetMotorVelocities()\n motor_torques = minitaur.GetMotorTorques()\n for i in range(minitaur.num_motors):\n step_log.motor_states[i].angle = motor_angles[i]\n step_log.motor_states[i].velocity = motor_velocities[i]\n step_log.motor_states[i].torque = motor_torques[i]\n step_log.motor_states[i].action = action[i]\n\n _update_base_state(step_log.base_position, minitaur.GetBasePosition())\n _update_base_state(step_log.base_orientation, minitaur.GetBaseRollPitchYaw())\n _update_base_state(step_log.base_angular_vel,\n minitaur.GetBaseRollPitchYawRate())\n\n\nclass MinitaurLogging(object):\n \"\"\"A logging system that records the states/action of the minitaur.\"\"\"\n\n def __init__(self, log_path=None):\n self._log_path = log_path\n\n # TODO(jietan): Consider using recordio to write the logs.\n def save_episode(self, episode_proto):\n \"\"\"Save episode_proto to self._log_path.\n\n self._log_path is the directory name. A time stamp is the file name of the\n log file. For example, when self._log_path is \"/tmp/logs/\", the actual\n log file would be \"/tmp/logs/yyyy-mm-dd-hh:mm:ss\".\n\n Args:\n episode_proto: The proto that holds the states/action for the current\n episode that needs to be save to disk.\n Returns:\n The full log path, including the directory name and the file name.\n \"\"\"\n if not self._log_path or not episode_proto.state_action:\n return self._log_path\n if not tf.gfile.Exists(self._log_path):\n tf.gfile.MakeDirs(self._log_path)\n ts = time.time()\n time_stamp = datetime.datetime.fromtimestamp(ts).strftime(\n \"%Y-%m-%d-%H:%M:%S\")\n log_path = os.path.join(self._log_path,\n \"minitaur_log_{}\".format(time_stamp))\n with tf.gfile.Open(log_path, \"w\") as f:\n f.write(episode_proto.SerializeToString())\n return log_path\n\n def restore_episode(self, log_path):\n \"\"\"Restore the episodic proto from the log path.\n\n Args:\n log_path: The full path of the log file.\n Returns:\n The minitaur episode proto.\n \"\"\"\n with tf.gfile.Open(log_path) as f:\n content = f.read()\n episode_proto = minitaur_logging_pb2.MinitaurEpisode()\n episode_proto.ParseFromString(content)\n return episode_proto\n" ]
[ [ "tensorflow.gfile.MakeDirs", "tensorflow.gfile.Exists", "tensorflow.gfile.Open" ] ]
zacjohnston/pyburst
[ "f7d5ae9a229704d1cbcf656afb9fb3e29fb71c0c", "f7d5ae9a229704d1cbcf656afb9fb3e29fb71c0c" ]
[ "pyburst/mcmc/mcmc_plot.py", "pyburst/misc/alpha.py" ]
[ "import numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport chainconsumer\nfrom math import ceil\n\n# pyburst\nfrom . import mcmc_versions\nfrom . import mcmc_tools\nfrom . import burstfit\nfrom . import mcmc_params\nfrom pyburst.observations import obs_tools\nfrom pyburst.plotting import plot_tools\nfrom pyburst.grids.grid_strings import get_source_path, print_warning\nfrom pyburst.misc.pyprint import printv\n\nGRIDS_PATH = os.environ['KEPLER_GRIDS']\n\n\ndef default_plt_options():\n \"\"\"Initialise default plot parameters\"\"\"\n params = {'mathtext.default': 'regular',\n 'font.family': 'serif',\n 'text.usetex': False}\n plt.rcParams.update(params)\n\n\ndefault_plt_options()\n\n\ndef save_plot(fig, prefix, save, source, version, display, chain=None, n_dimensions=None,\n n_walkers=None, n_steps=None, label=None, extension='.png',\n enforce_chain_info=True):\n \"\"\"Handles saving/displaying of a figure passed to it\n \"\"\"\n if enforce_chain_info and (None in (n_dimensions, n_walkers, n_steps)):\n if chain is None:\n raise ValueError('Must provide chain, or specify each of '\n '(n_dimensions, n_walkers, n_steps)')\n else:\n n_walkers, n_steps, n_dimensions = chain.shape\n\n if save:\n filename = mcmc_tools.get_mcmc_string(source=source, version=version,\n n_walkers=n_walkers, n_steps=n_steps,\n prefix=prefix, label=label,\n extension=extension)\n source_path = get_source_path(source)\n filepath = os.path.join(source_path, 'plots', prefix, f'{filename}')\n fig.savefig(filepath)\n\n if display:\n plt.show(block=False)\n else:\n plt.close(fig)\n\n\ndef save_multiple_synth(series, source, version, n_steps, discard, n_walkers=960,\n walkers=True, posteriors=True, contours=False,\n display=False, mass_radius=True,\n synth=True, compressed=False):\n \"\"\"Save plots for multiple series in a synthetic data batch\n \"\"\"\n # TODO reuse max_lhood point\n default_plt_options()\n for ser in series:\n if synth:\n full_source = f'{source}_{ser}'\n else:\n full_source = source\n\n chain = mcmc_tools.load_chain(full_source, n_walkers=n_walkers, n_steps=n_steps,\n version=version, compressed=compressed)\n\n if walkers:\n plot_walkers(chain, source=full_source, save=True,\n display=display, version=version)\n\n if posteriors:\n plot_posteriors(chain, source=full_source, save=True, discard=discard,\n display=display, version=version)\n\n if contours:\n plot_contours(chain, source=full_source, save=True, discard=discard,\n display=display, version=version)\n\n if mass_radius:\n plot_mass_radius(chain, source=full_source, save=True, discard=discard,\n display=display, version=version)\n\n\ndef save_all_plots(source, version, discard, n_steps, n_walkers=1000, display=False,\n save=True, cap=None, posteriors=True, contours=True,\n redshift=True, mass_radius=True, verbose=True, compressed=False):\n \"\"\"Saves (and/or displays) main MCMC plots\n \"\"\"\n chain = mcmc_tools.load_chain(source, version=version, n_steps=n_steps,\n n_walkers=n_walkers, verbose=verbose,\n compressed=compressed)\n if posteriors:\n printv('Plotting posteriors', verbose=verbose)\n plot_posteriors(chain, source=source, save=save, discard=discard, cap=cap,\n display=display, version=version)\n\n if contours:\n printv('Plotting contours', verbose=verbose)\n plot_contours(chain, source=source, save=save, discard=discard, cap=cap,\n display=display, version=version)\n\n if mass_radius:\n printv('Plotting mass-radius', verbose=verbose)\n plot_mass_radius(chain, source=source, save=save, discard=discard, cap=cap,\n 
display=display, version=version)\n\n if redshift:\n printv('Plotting redshift', verbose=verbose)\n plot_redshift(chain, source=source, save=save, discard=discard, cap=cap,\n display=display, version=version)\n\n\ndef plot_contours(chain, discard, source, version, cap=None,\n display=True, save=False, truth_values=None, parameters=None,\n sigmas=np.linspace(0, 2, 5), cc=None, summary=False, fontsize=14,\n max_ticks=4):\n \"\"\"Plots posterior contours of mcmc chain\n\n parameters : [str]\n specify which parameters to plot\n \"\"\"\n default_plt_options()\n\n if cc is None:\n pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')\n pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)\n cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels,\n discard=discard, cap=cap, sigmas=sigmas,\n summary=summary, fontsize=fontsize,\n max_ticks=max_ticks)\n if parameters is not None:\n parameters = plot_tools.convert_mcmc_labels(param_keys=parameters)\n\n # TODO: figsize\n if truth_values is not None:\n fig = cc.plotter.plot(truth=truth_values, parameters=parameters)\n else:\n fig = cc.plotter.plot(parameters=parameters)\n\n save_plot(fig, prefix='contours', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_posteriors(chain, discard, source, version, cap=None,\n display=True, save=False, truth_values=None,\n cc=None):\n \"\"\"Plots posterior distributions of mcmc chain\n\n truth_values : list|dict\n Specify parameters of point (e.g. the true value) to draw on the distributions.\n \"\"\"\n default_plt_options()\n pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')\n pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)\n if cc is None:\n cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels,\n discard=discard, cap=cap)\n height = 3 * ceil(len(pkeys) / 4)\n\n if truth_values is not None:\n fig = cc.plotter.plot_distributions(figsize=[10, height],\n truth=truth_values)\n else:\n fig = cc.plotter.plot_distributions(figsize=[10, height])\n\n plt.tight_layout()\n save_plot(fig, prefix='posteriors', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_mass_radius(chain, discard, source, version, cap=None,\n display=True, save=False, summary=False,\n sigmas=np.linspace(0, 2, 5), fontsize=18, figsize='column'):\n \"\"\"Plots contours of mass versus radius from a given chain\n \"\"\"\n default_plt_options()\n mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version)\n mass_radius_chain = mcmc_params.get_mass_radius_chain(chain=chain, discard=discard,\n source=source, version=version,\n cap=cap, mass_nw=mass_nw,\n mass_gr=mass_gr)\n\n cc = mcmc_tools.setup_custom_chainconsumer(mass_radius_chain, parameters=['R', 'M'],\n sigmas=sigmas, summary=summary,\n fontsize=fontsize)\n fig = cc.plotter.plot(figsize=figsize)\n fig.subplots_adjust(left=0.16, bottom=0.15)\n\n save_plot(fig, prefix='mass-radius', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_redshift(chain, discard, source, version, cap=None, display=True, save=False):\n \"\"\"Plots posterior distribution of redshift given a chain\n \"\"\"\n mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version)\n redshift_chain = mcmc_params.get_redshift_chain(chain=chain, discard=discard,\n source=source, version=version,\n cap=cap, mass_nw=mass_nw,\n mass_gr=mass_gr)\n\n cc = 
mcmc_tools.setup_custom_chainconsumer(redshift_chain, parameters=['1+z'])\n fig = cc.plotter.plot_distributions(figsize=[5, 5])\n plt.tight_layout()\n\n save_plot(fig, prefix='redshift', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_gravitational_contours(chain, discard, source, version, cap=None, display=True,\n save=False, r_nw=10, sigmas=np.linspace(0, 2, 5),\n summary=False, unit_labels=True, fontsize=16,\n fixed_grav=False, figsize=None):\n \"\"\"Plots contours of gravitational parameters\n \"\"\"\n cc = mcmc_tools.setup_gravitational_chainconsumer(chain=chain, discard=discard,\n source=source, version=version,\n cap=cap, fixed_grav=fixed_grav,\n summary=summary, r_nw=r_nw,\n unit_labels=unit_labels,\n sigmas=sigmas, fontsize=fontsize)\n if fixed_grav:\n fig = cc.plotter.plot_distributions(figsize=figsize)\n plt.tight_layout()\n else:\n fig = cc.plotter.plot()\n\n save_plot(fig, prefix='gravitational', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_inclination(chain, discard, source, version, cap=None, display=True,\n save=False, disc_model='he16_a', sigmas=np.linspace(0, 2, 5),\n summary=False, unit_labels=True, figsize=(4, 4), fontsize=18):\n \"\"\"Plots contours of parameters derived using disc model\n \"\"\"\n disc_chain = mcmc_params.get_disc_chain(chain=chain, discard=discard, cap=cap,\n source=source, version=version,\n disc_model=disc_model)\n\n cc = mcmc_tools.setup_custom_chainconsumer(disc_chain, parameters=['d', 'i'],\n sigmas=sigmas, summary=summary,\n unit_labels=unit_labels, fontsize=fontsize)\n fig = cc.plotter.plot(figsize=figsize)\n fig.subplots_adjust(left=0.15, bottom=0.15)\n save_plot(fig, prefix='disc', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_distance_anisotropy(chain, discard, source, version, cap=None, display=True,\n save=False, sigmas=np.linspace(0, 2, 5), summary=False,\n figsize=(4, 4), unit_labels=True, fontsize=18):\n \"\"\"Plots contours of MCMC parameters d_b, xi_ratio\n \"\"\"\n d_b_chain = mcmc_params.get_param_chain(chain, param='d_b', discard=discard,\n source=source, version=version, cap=cap)\n xi_ratio_chain = mcmc_params.get_param_chain(chain, param='xi_ratio', discard=discard,\n source=source, version=version, cap=cap)\n\n flat_chain = np.column_stack([d_b_chain, xi_ratio_chain])\n cc = mcmc_tools.setup_custom_chainconsumer(flat_chain, parameters=['d_b', 'xi_ratio'],\n sigmas=sigmas, summary=summary,\n unit_labels=unit_labels, fontsize=fontsize)\n\n fig = cc.plotter.plot(figsize=figsize)\n fig.subplots_adjust(left=0.2, bottom=0.2)\n save_plot(fig, prefix='distance', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_xedd(chain, discard, source, version, cap=None, display=True,\n save=False, cloud=True, sigmas=np.linspace(0, 2, 10), figsize=(5, 5)):\n \"\"\"Plots posterior for Eddington hydrogen composition (X_Edd)\n \"\"\"\n default_plt_options()\n xedd_chain = mcmc_params.get_xedd_chain(chain=chain, discard=discard, source=source,\n version=version, cap=cap)\n\n label = plot_tools.quantity_label('xedd')\n cc = mcmc_tools.setup_custom_chainconsumer(xedd_chain, parameters=[label],\n sigmas=sigmas, cloud=cloud)\n fig = cc.plotter.plot(figsize=figsize)\n\n save_plot(fig, prefix='xedd', chain=chain, save=save, source=source,\n version=version, display=display)\n return fig\n\n\ndef plot_walkers(chain, source, version, 
params=None, n_lines=30, xlim=-1,\n display=True, save=False, label=''):\n \"\"\"Plots walkers vs steps (i.e. \"time\")\n\n Parameters\n ----------\n source : str\n version : int\n chain : np.array\n chain as returned by load_chain()\n params : [str]\n parameter(s) of which to plot walkers.\n n_lines : int\n approx number of lines/walkers to plot on parameter\n xlim : int\n x-axis limit to plot (n_steps), i.e. ax.set_xlim((0, xlim))\n label : str\n optional label to add to filename when saving\n display : bool\n save : bool\n \"\"\"\n default_plt_options()\n pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')\n\n # ===== Default to splitting all params into 2 plots =====\n if params is None:\n half = int(len(pkeys) / 2)\n for i, param_split in enumerate((pkeys[:half], pkeys[half:])):\n plot_walkers(chain=chain, source=source, version=version,\n params=param_split, n_lines=n_lines, xlim=xlim,\n display=display, save=save, label=f'P{i + 1}')\n return\n\n n_walkers, n_steps, n_dim = chain.shape\n n_params = len(params)\n\n jump_size = round(n_walkers / n_lines)\n steps = np.arange(n_steps)\n walker_idxs = np.arange(0, n_walkers, jump_size)\n\n # noinspection PyTypeChecker\n fig, ax = plt.subplots(n_params, 1, sharex=True, figsize=(10, 12))\n\n for i in range(n_params):\n p_idx = pkeys.index(params[i])\n\n for j in walker_idxs:\n walker = chain[j, :, p_idx]\n ax[i].plot(steps, walker, linewidth=0.5, color='black')\n ax[i].set_ylabel(params[i])\n\n if xlim == -1:\n xlim = n_steps\n\n ax[-1].set_xlabel('Step')\n ax[-1].set_xlim([0, xlim])\n plt.tight_layout()\n\n if display:\n plt.show(block=False)\n\n save_plot(fig, prefix='walkers', chain=chain, save=save, source=source,\n version=version, display=display,\n label=label, extension='.png')\n\n\ndef plot_qb_mdot(chain, source, version, discard, cap=None, display=True, save=False,\n figsize=(5, 5), fontsize=16, sigmas=(1, 2)):\n \"\"\"Plots 2D contours of Qb versus Mdot for each epoch (from multi-epoch chain)\n \"\"\"\n mv = mcmc_versions.McmcVersion(source=source, version=version)\n chain_flat = mcmc_tools.slice_chain(chain, discard=discard, cap=cap, flatten=True)\n\n system_table = obs_tools.load_summary(mv.system)\n epochs = list(system_table.epoch)\n cc = chainconsumer.ChainConsumer()\n\n param_labels = []\n for param in ['mdot', 'qb']:\n param_labels += [plot_tools.full_label(param)]\n\n for i, epoch in enumerate(epochs):\n mdot_idx = mv.param_keys.index(f'mdot{i + 1}')\n qb_idx = mv.param_keys.index(f'qb{i + 1}')\n param_idxs = [mdot_idx, qb_idx]\n\n cc.add_chain(chain_flat[:, param_idxs], parameters=param_labels,\n name=str(epoch))\n\n cc.configure(kde=False, smooth=0, label_font_size=fontsize,\n tick_font_size=fontsize-2, sigmas=sigmas)\n fig = cc.plotter.plot(display=False, figsize=figsize)\n fig.subplots_adjust(left=0.2, bottom=0.2)\n\n save_plot(fig, prefix='qb', save=save, source=source, version=version,\n display=display, chain=chain)\n return fig\n\n\ndef plot_epoch_posteriors(master_cc, source, version, display=True, save=False,\n col_wrap=None, alt_params=True, unit_labels=True,\n add_text=True, fontsize=16):\n \"\"\"Plot posteriors for multiiple epoch chains\n\n parameters\n ----------\n master_cc : ChainConsumer\n Contains the multi-epoch chain, created with setup_master_chainconsumer()\n source : str\n version : int\n display : bool (optional)\n save : bool (optional)\n col_wrap : int (optional)\n \"\"\"\n param_order = {\n 'grid5': ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'm_nw',\n 'm_gr', 
'd_b', 'xi_ratio'],\n 'he2': ['mdot1', 'mdot2', 'qb1', 'qb2', 'm_gr', 'd_b', 'xi_ratio'],\n }\n\n param_keys = param_order[source]\n\n # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n # quick and dirty patch!\n if alt_params:\n param_keys = ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'g',\n 'M', 'd_b', 'xi_ratio']\n # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n\n formatted_params = plot_tools.convert_mcmc_labels(param_keys, unit_labels=unit_labels)\n n_epochs = len(master_cc.chains) - 1\n\n if col_wrap is None:\n col_wrap = n_epochs\n\n height = 3 * ceil(len(param_keys) / n_epochs)\n fig = master_cc.plotter.plot_distributions(parameters=formatted_params,\n col_wrap=col_wrap,\n figsize=[8, height],\n display=False)\n if add_text:\n add_epoch_text(fig, fontsize=fontsize)\n\n plt.tight_layout()\n\n save_plot(fig, prefix='multi_posteriors', save=save, source=source, version=version,\n display=display, enforce_chain_info=False)\n return fig\n\n\ndef plot_max_lhood(source, version, n_walkers, n_steps, verbose=True, re_interp=False,\n display=True, save=False):\n default_plt_options()\n max_params, max_lhood = mcmc_tools.get_max_lhood_params(source, version=version,\n n_walkers=n_walkers,\n n_steps=n_steps,\n verbose=verbose,\n return_lhood=True)\n bfit = burstfit.BurstFit(source=source, version=version, verbose=False, re_interp=re_interp)\n lhood, fig = bfit.lhood(max_params, plot=True)\n\n if lhood != max_lhood:\n print_warning(f'lhoods do not match (original={max_lhood:.2f}, current={lhood:.2f}). '\n + 'BurstFit (e.g. lhood, lnhood) or interpolator may have changed')\n\n save_plot(fig, prefix='compare', n_dimensions=len(max_params),\n n_walkers=n_walkers, n_steps=n_steps, save=save, source=source,\n version=version, display=display)\n\n\ndef plot_bprop_sample(bp_sample, source, version, bprops=None, legend=True,\n subplot_figsize=(3, 2.5), bfit=None, fontsize=14,\n vlines=True):\n \"\"\"Plot burst properties from large sample against observations\n\n bprop_sample : np.array\n obtained using mcmc_tools.bprop_sample()\n \"\"\"\n if bfit is None:\n bfit = burstfit.BurstFit(source=source, version=version, verbose=False)\n\n if bprops is None:\n bprops = bfit.mcmc_version.bprops\n\n cc = mcmc_tools.setup_bprop_chainconsumer(chain=None, n=None, discard=None,\n source=source, version=version,\n bp_sample=bp_sample)\n bp_summary = mcmc_tools.extract_bprop_summary(cc, source=source, version=version)\n\n n_bprops = len(bprops)\n n_rows = int(np.ceil(n_bprops / 2))\n n_cols = {False: 1, True: 2}.get(n_bprops > 1)\n\n figsize = (n_cols * subplot_figsize[0], n_rows * subplot_figsize[1])\n fig, ax = plt.subplots(n_rows, n_cols, sharex=False, figsize=figsize)\n\n if n_bprops % 2 == 1 and n_bprops > 1: # blank odd-numbered subplot\n ax[-1, -1].axis('off')\n\n for i, bprop in enumerate(bprops):\n subplot_row = int(np.floor(i / 2))\n subplot_col = i % 2\n if n_cols > 1:\n axis = ax[subplot_row, subplot_col]\n else:\n axis = ax\n u_model = np.diff(bp_summary[:, :, i], axis=0)\n bfit.plot_compare(model=bp_summary[1, :, i], u_model=u_model,\n bprop=bprop, fontsize=fontsize,\n ax=axis, display=False, vlines=vlines,\n legend=True if (i == 0 and legend) else False,\n xlabel=True if (i in [n_bprops-1, ]) else False)\n\n fig.subplots_adjust(wspace=0.4)\n plt.show(block=False)\n return fig\n\n\ndef plot_autocorrelation(chain, source, version, n_points=10, load=True, save_tau=True,\n ylims=None):\n \"\"\"Plots estimated integrated autocorrelation time\n\n Note: Adapted from https://dfm.io/posts/autocorr/\n \"\"\"\n 
mv = mcmc_versions.McmcVersion(source=source, version=version)\n params_fmt = plot_tools.convert_mcmc_labels(mv.param_keys)\n\n if load:\n sample_steps, autoc = mcmc_tools.load_autocorrelation(source, version=version,\n n_steps=chain.shape[1])\n else:\n sample_steps, autoc = mcmc_tools.get_autocorrelation(chain, source=source,\n version=version,\n n_points=n_points,\n save=save_tau)\n fig, ax = plt.subplots()\n\n for i, param in enumerate(mv.param_keys):\n ax.loglog(sample_steps, autoc[i], \"o-\", label=rf\"{params_fmt[i]}\")\n\n ax.plot(sample_steps, sample_steps / 10.0, \"--k\", label=r\"$\\tau = N/10$\")\n\n if ylims is None:\n xlim = ax.get_xlim()\n ylims = [5, xlim[1] / 10]\n\n ax.set_ylim(ylims)\n ax.set_xlabel(\"N steps\")\n ax.set_ylabel(r\"$\\tau$ estimate (N)\")\n ax.legend(fontsize=14, ncol=2, labelspacing=0.3)\n plt.show(block=False)\n\n return fig\n\n\ndef add_epoch_text(fig, fontsize, epochs=(1998, 2000, 2007),\n colours=('C0', 'C2', 'C3')):\n \"\"\"Adds text of epoch to figure subplots\n \"\"\"\n for i, epoch in enumerate(epochs):\n ax = fig.axes[i]\n ax.text(0.95, 0.95, str(epoch), color=colours[i], fontsize=fontsize,\n transform=ax.transAxes, va='top', ha='right')\n", "import numpy as np\nfrom astropy import units\n\nfrom pyburst.grids import grid_analyser\nfrom pyburst.physics import gravity\n\n\ndef add_alpha(kgrid):\n \"\"\"Adds alpha column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n \"\"\"\n add_redshift(kgrid)\n add_phi(kgrid)\n add_lum_acc(kgrid)\n add_acc_energy(kgrid)\n\n summ = kgrid.summ\n kgrid.summ['alpha'] = summ.acc_energy / summ.fluence\n kgrid.summ['u_alpha'] = summ.alpha * np.sqrt((summ.u_acc_energy / summ.acc_energy)**2\n + (summ.u_fluence / summ.fluence)**2)\n\n\ndef add_lum_acc(kgrid):\n \"\"\"Adds accretion luminosity column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n \"\"\"\n mdot_edd = 1.75e-8 # M_sun / yr\n msunyr_to_gramsec = (units.M_sun / units.year).to(units.g / units.s)\n check_column(kgrid.params, column='phi', label='params', remedy='add_phi()')\n\n mdot = kgrid.params.accrate * mdot_edd * msunyr_to_gramsec\n lum_acc = -mdot * kgrid.params.phi\n kgrid.params['lum_acc'] = lum_acc\n\n\ndef add_acc_energy(kgrid):\n \"\"\"Adds accretion energy column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n \"\"\"\n check_column(kgrid.params, column='lum_acc', label='params', remedy='add_lum_acc()')\n kgrid.summ['acc_energy'] = kgrid.params.lum_acc * kgrid.summ.dt\n kgrid.summ['u_acc_energy'] = kgrid.params.lum_acc * kgrid.summ.u_dt\n\n\ndef add_redshift(kgrid, m_ratio=1.0):\n \"\"\"Adds redshift (1+z) column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n grid object containing model data\n m_ratio : flt (optional)\n mass ratio, M_gr / M_newton\n \"\"\"\n default_radius = 10\n\n if 'radius' not in kgrid.params.columns:\n print('Using default radius=10km')\n kgrid.params['radius'] = default_radius\n\n radii = np.array(kgrid.params.radius)\n masses = np.array(kgrid.params.mass)\n\n r_ratios, redshifts = gravity.gr_corrections(r=radii, m=masses, phi=m_ratio)\n kgrid.params['radius_gr'] = radii * r_ratios\n kgrid.params['mass_gr'] = masses * m_ratio\n kgrid.params['redshift'] = redshifts\n\n\ndef add_phi(kgrid):\n \"\"\"Adds phi (gravitational potential) column to given Kgrid\n\n parameters\n ----------\n kgrid : grid_analyser.Kgrid\n 
grid object containing model data\n \"\"\"\n check_column(kgrid.params, column='redshift', label='params', remedy='add_redshift()')\n\n phi = gravity.get_potential_gr(redshift=kgrid.params.redshift)\n kgrid.params['phi'] = phi\n\n\ndef check_column(table, column, label, remedy):\n \"\"\"Checks if column exists in table\n\n parameters\n ----------\n table : pd.DataFrame\n table to check for columns\n column : str\n name of column to check for\n label : str\n name of table\n remedy : str\n suggested function to use\n \"\"\"\n if column not in table.columns:\n raise ValueError(f'No {column} column in kgrid.{label}, try using {remedy}')\n" ]
[ [ "numpy.ceil", "numpy.diff", "matplotlib.pyplot.tight_layout", "numpy.floor", "numpy.column_stack", "matplotlib.pyplot.subplots", "matplotlib.pyplot.rcParams.update", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.close", "numpy.linspace" ], [ "numpy.array", "numpy.sqrt" ] ]
vishalbelsare/pyjanitor
[ "9c5ff2c4ad5969ee4bc683ba82010b55b55fd2bb" ]
[ "tests/functions/test_process_text.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\n\n@pytest.fixture\ndef process_test_df():\n \"Base DataFrame\"\n return pd.DataFrame(\n {\"text\": [\"a_b_c\", \"c_d_e\", np.nan, \"f_g_h\"], \"numbers\": range(1, 5)}\n )\n\n\n@pytest.fixture\ndef test_returns_dataframe():\n \"Base DataFrame\"\n return pd.DataFrame(\n {\"text\": [\"a1a2\", \"b1\", \"c1\"], \"numbers\": [1, 2, 3]},\n index=[\"A\", \"B\", \"C\"],\n )\n\n\ndef test_column_name_type(process_test_df):\n \"\"\"Raise TypeError if `column_name` type is not `str`.\"\"\"\n with pytest.raises(TypeError):\n process_test_df.process_text([\"text\"])\n\n\n@pytest.mark.xfail(reason=\"new_column_names is deprecated.\")\ndef test_new_column_names_type(process_test_df):\n \"\"\"Raise TypeError if `new_column_names` type is not string or list.\"\"\"\n with pytest.raises(TypeError):\n process_test_df.process_text(\n column_name=\"text\", new_column_names={\"nutext\": \"rar\"}\n )\n\n\ndef test_column_name_presence(process_test_df):\n \"\"\"Raise ValueError if `column_name` is not in dataframe.\"\"\"\n with pytest.raises(ValueError):\n process_test_df.process_text(\n column_name=\"Test\", string_function=\"lower\"\n )\n\n\n@pytest.mark.xfail(reason=\"new_column_names is deprecated.\")\ndef test_new_column_names_presence_str(test_returns_dataframe):\n \"\"\"\n Raise ValueError if `new_column_names` is a str\n and is in the dataframe.\n \"\"\"\n with pytest.raises(ValueError):\n test_returns_dataframe.process_text(\n column_name=\"text\",\n new_column_names=\"text\",\n string_function=\"extractall\",\n pat=r\"([ab])?(\\d)\",\n )\n\n\n@pytest.mark.xfail(reason=\"new_column_names is deprecated.\")\ndef test_new_column_names_presence_list(test_returns_dataframe):\n \"\"\"\n Raise ValueError if `new_column_names` is a list and at least\n one of the new names is in the dataframe.\n \"\"\"\n with pytest.raises(ValueError):\n test_returns_dataframe.process_text(\n column_name=\"text\",\n new_column_names=[\"numbers\", \"newtext\"],\n string_function=\"extractall\",\n pat=r\"([ab])?(\\d)\",\n )\n\n\n@pytest.mark.xfail(reason=\"merge_frame is deprecated.\")\ndef test_merge_frame_type(test_returns_dataframe):\n \"\"\"\n Raise TypeError if `merge_frame` type is not bool.\"\"\"\n with pytest.raises(TypeError):\n test_returns_dataframe.process_text(\n column_name=\"text\",\n new_column_names=[\"number\", \"newtext\"],\n string_function=\"extractall\",\n pat=r\"([ab])?(\\d)\",\n merge_frame=\"True\",\n )\n\n\n@pytest.mark.xfail(reason=\"string_function must be present.\")\ndef test_string_function_is_None(process_test_df):\n \"\"\"Test that dataframe is returned if string_function is None.\"\"\"\n result = process_test_df.process_text(column_name=\"text\")\n assert_frame_equal(result, process_test_df)\n\n\ndef test_str_split(process_test_df):\n \"\"\"Test wrapper for Pandas `str.split()` method.\"\"\"\n\n expected = process_test_df.assign(\n text=process_test_df[\"text\"].str.split(\"_\")\n )\n\n result = process_test_df.process_text(\n column_name=\"text\", string_function=\"split\", pat=\"_\"\n )\n\n assert_frame_equal(result, expected)\n\n\n@pytest.mark.xfail(reason=\"new_column_names is deprecated.\")\ndef test_new_column_names(process_test_df):\n \"\"\"\n Test that a new column name is created when\n `new_column_name` is not None.\n \"\"\"\n result = process_test_df.process_text(\n column_name=\"text\",\n new_column_names=\"new_text\",\n string_function=\"slice\",\n start=2,\n )\n expected = 
process_test_df.assign(\n new_text=process_test_df[\"text\"].str.slice(start=2)\n )\n assert_frame_equal(result, expected)\n\n\n@pytest.fixture\ndef no_nulls_df():\n return pd.DataFrame({\"text\": [\"a\", \"b\", \"c\", \"d\"], \"numbers\": range(1, 5)})\n\n\ndef test_str_cat(no_nulls_df):\n \"\"\"Test outcome for Pandas `.str.cat()` method.\"\"\"\n\n result = no_nulls_df.process_text(\n column_name=\"text\",\n string_function=\"cat\",\n others=[\"A\", \"B\", \"C\", \"D\"],\n )\n\n expected = no_nulls_df.assign(\n text=no_nulls_df[\"text\"].str.cat(others=[\"A\", \"B\", \"C\", \"D\"])\n )\n\n assert_frame_equal(result, expected)\n\n\ndef test_str_cat_result_is_a_string(no_nulls_df):\n \"\"\"\n Test wrapper for Pandas `.str.cat()` method\n when the outcome is a string.\n \"\"\"\n\n result = no_nulls_df.process_text(\n column_name=\"text\",\n string_function=\"cat\",\n )\n\n expected = no_nulls_df.assign(text=no_nulls_df[\"text\"].str.cat())\n\n assert_frame_equal(result, expected)\n\n\n@pytest.mark.xfail(reason=\"new_column_names is deprecated.\")\ndef test_str_cat_result_is_a_string_and_new_column_names(no_nulls_df):\n \"\"\"\n Test wrapper for Pandas `.str.cat()` method when the outcome is a string,\n and `new_column_names` is not None.\n \"\"\"\n\n result = no_nulls_df.process_text(\n column_name=\"text\", string_function=\"cat\", new_column_names=\"combined\"\n )\n\n expected = no_nulls_df.assign(combined=no_nulls_df[\"text\"].str.cat())\n\n assert_frame_equal(result, expected)\n\n\ndef test_str_get():\n \"\"\"Test outcome for Pandas `.str.get()` method.\"\"\"\n\n df = pd.DataFrame(\n {\"text\": [\"aA\", \"bB\", \"cC\", \"dD\"], \"numbers\": range(1, 5)}\n )\n\n expected = df.assign(text=df[\"text\"].str.get(1))\n\n result = df.process_text(column_name=\"text\", string_function=\"get\", i=-1)\n\n assert_frame_equal(result, expected)\n\n\ndef test_str_lower():\n \"\"\"Test string conversion to lowercase using `.str.lower()`.\"\"\"\n\n df = pd.DataFrame(\n {\n \"codes\": range(1, 7),\n \"names\": [\n \"Graham Chapman\",\n \"John Cleese\",\n \"Terry Gilliam\",\n \"Eric Idle\",\n \"Terry Jones\",\n \"Michael Palin\",\n ],\n }\n )\n\n expected = df.assign(names=df[\"names\"].str.lower())\n\n result = df.process_text(column_name=\"names\", string_function=\"lower\")\n\n assert_frame_equal(result, expected)\n\n\ndef test_str_wrong(process_test_df):\n \"\"\"Test that an invalid Pandas string method raises an exception.\"\"\"\n with pytest.raises(KeyError):\n process_test_df.process_text(\n column_name=\"text\", string_function=\"invalid_function\"\n )\n\n\ndef test_str_wrong_parameters(process_test_df):\n \"\"\"Test that invalid argument for Pandas string method raises an error.\"\"\"\n with pytest.raises(TypeError):\n process_test_df.process_text(\n column_name=\"text\", string_function=\"split\", pattern=\"_\"\n )\n\n\n@pytest.fixture\ndef returns_frame_1():\n return pd.DataFrame(\n {\n \"ticker\": [\n \"spx 5/25/2001 p500\",\n \"spx 5/25/2001 p600\",\n \"spx 5/25/2001 p700\",\n ]\n }\n )\n\n\n@pytest.mark.xfail(reason=\"merge_frame is deprecated.\")\ndef test_return_dataframe_merge_is_None(returns_frame_1):\n \"\"\"\n Test that the dataframe returned when `merge_frame` is None\n is the result of the text processing, and is not merged to\n the original dataframe.\n \"\"\"\n\n expected_output = returns_frame_1[\"ticker\"].str.split(\" \", expand=True)\n result = returns_frame_1.process_text(\n column_name=\"ticker\", string_function=\"split\", expand=True, pat=\" \"\n )\n 
assert_frame_equal(result, expected_output)\n\n\n@pytest.mark.xfail(reason=\"merge_frame is deprecated.\")\ndef test_return_dataframe_merge_is_not_None(returns_frame_1):\n \"\"\"\n Test that the dataframe returned when `merge_frame` is not None\n is a merger of the original dataframe, and the dataframe\n generated from the text processing.\n \"\"\"\n expected_output = pd.concat(\n [\n returns_frame_1,\n returns_frame_1[\"ticker\"]\n .str.split(\" \", expand=True)\n .add_prefix(\"new_\"),\n ],\n axis=\"columns\",\n )\n result = returns_frame_1.process_text(\n column_name=\"ticker\",\n new_column_names=\"new_\",\n merge_frame=True,\n string_function=\"split\",\n expand=True,\n pat=\" \",\n )\n assert_frame_equal(result, expected_output)\n\n\n@pytest.mark.xfail(reason=\"merge_frame is deprecated.\")\ndef test_return_dataframe_merge_is_not_None_new_column_names_is_a_list(\n returns_frame_1,\n):\n \"\"\"\n Test that the dataframe returned when `merge_frame` is not None\n is a merger of the original dataframe, and the dataframe\n generated from the text processing. Also, the `new_column_names`\n is a list.\n \"\"\"\n\n expected_output = pd.concat(\n [\n returns_frame_1,\n returns_frame_1[\"ticker\"]\n .str.split(\" \", expand=True)\n .set_axis([\"header1\", \"header2\", \"header3\"], axis=\"columns\"),\n ],\n axis=\"columns\",\n )\n result = returns_frame_1.process_text(\n column_name=\"ticker\",\n new_column_names=[\"header1\", \"header2\", \"header3\"],\n merge_frame=True,\n string_function=\"split\",\n expand=True,\n pat=\" \",\n )\n assert_frame_equal(result, expected_output)\n\n\n@pytest.mark.xfail(reason=\"new_column_names is deprecated.\")\ndef test_return_dataframe_new_column_names_is_a_list_len_unequal(\n returns_frame_1,\n):\n \"\"\"\n Raise error if text processing returns a dataframe,\n `new_column_names` is not None, and the length of\n `new_column_names` is not equal to the length of the\n new dataframe's columns.\n \"\"\"\n\n with pytest.raises(ValueError):\n returns_frame_1.process_text(\n column_name=\"ticker\",\n new_column_names=[\"header1\", \"header2\"],\n merge_frame=True,\n string_function=\"split\",\n expand=True,\n pat=\" \",\n )\n\n\ndef test_output_extractall(test_returns_dataframe):\n \"\"\"\n Raise ValueError if the output is a dataframe.\n \"\"\"\n with pytest.raises(ValueError):\n test_returns_dataframe.process_text(\n column_name=\"text\",\n string_function=\"extractall\",\n pat=r\"(?P<letter>[ab])?(?P<digit>\\d)\",\n )\n\n\n@pytest.mark.xfail(reason=\"merge_frame is deprecated.\")\ndef test_output_extractall_merge_frame_is_not_None(test_returns_dataframe):\n \"\"\"\n Test output when `string_function` is \"extractall\"\n and `merge_frame` is not None.\n \"\"\"\n expected_output = test_returns_dataframe[\"text\"].str.extractall(\n r\"(?P<letter>[ab])?(?P<digit>\\d)\"\n )\n expected_output = test_returns_dataframe.join(\n expected_output.reset_index(\"match\"), how=\"outer\"\n ).set_index(\"match\", append=True)\n result = test_returns_dataframe.process_text(\n column_name=\"text\",\n merge_frame=True,\n string_function=\"extractall\",\n pat=r\"(?P<letter>[ab])?(?P<digit>\\d)\",\n )\n assert_frame_equal(result, expected_output)\n" ]
[ [ "pandas.DataFrame", "pandas.testing.assert_frame_equal" ] ]
docileninja/Calvin-and-Hobbes-Viewer
[ "74ff2e090e040517e2b445432a5ea6c0f87df49b" ]
[ "comic_classify/comic_detect.py" ]
[ "from sklearn import svm, cluster\nfrom PIL import Image, ImageDraw\nimport os\nimport sys\nimport random\n\n\n\ndef load_images(dirname):\n\timages = []\n\tfor image_name in os.listdir(dirname):\n\t\tif image_name.startswith('.'):\n\t\t\tcontinue\n\t\timage = Image.open(dirname + '/' + image_name).convert('1')\n\t\tx, y = image.size\n\t\timage = image.resize((x, 280), Image.ANTIALIAS)\n\t\tdata = [0 if pixel == 0 else 1 for pixel in image.getdata()]\n\t\timages.append(data)\n\treturn images\n\nmin_len = 10000000\ndef normalize(X):\n\tglobal min_len\n\tmin_len = min(min_len, min(len(x) for x in X))\n\treturn [x[:min_len] for x in X]\n\ndef crossvalidate(edges, nonedges):\n\trandom.shuffle(edges)\n\trandom.shuffle(nonedges)\n\ttrain_edge_len, train_nonedge_len = len(edges) * 7 // 10, len(nonedges) * 7 // 10\n\tcross_edge_len, cross_nonedge_len = len(edges) - train_edge_len, len(nonedges) - train_nonedge_len\n\n\tX_train = normalize(nonedges[:train_nonedge_len] + \n\t\t\t\t\t\tedges[:train_edge_len])\n\ty_train = [0] * train_nonedge_len + [1] * train_edge_len\n\n\tX_cross = normalize(nonedges[train_nonedge_len:] + \n\t\t\t\t\t\tedges[train_edge_len:])\n\ty_cross = [0] * cross_nonedge_len + [1] * cross_edge_len\n\n\tclf = svm.SVC(gamma=.001, C=100.)\n\tclf.fit(X_train, y_train)\n\tprint(\"prediction: {}\".format(list(clf.predict(X_cross))))\n\tprint(\"actuallity: {}\".format(y_cross))\n\tprint(clf.score(X_cross, y_cross))\n\ndef get_column(img, i):\n\tw, h = img.size\n\tcolumn = []\n\tfor j in range(h):\n\t\t\tcolumn.append(0 if img.getpixel((i, j)) == 0 else 1)\n\treturn column\n\ndef search_picture(clf, image_name):\n\timage = Image.open(image_name).convert('1')\n\tx, y = image.size\n\timage = image.resize((x, 280), Image.ANTIALIAS)\n\tw, h = image.size\n\n\tcolumns = [get_column(image, i) for i in range(25)]\n\tdatas = []\n\tfor i in range(25, w):\n\t\tcolumns = columns[1:] + [get_column(image, i)]\n\t\tdata = [columns[i][j] for j in range(len(columns[0])) for i in range(len(columns))]\n\t\tdatas.append(data)\n\tdatas = normalize(datas)\n\tmatches = [[i] for i, m in enumerate(clf.predict(datas)) if m == 1]\n\tif len(matches) == 0:\n\t\treturn [], matches\n\tclst = cluster.DBSCAN(eps=20, min_samples=1)\n\tclst.fit(matches)\n\ttrimmed = [idx for idx in clst.components_ if idx > w // 6 and idx < w * 5 // 6]\n\tclst = cluster.KMeans(3, init='k-means++')\n\tclst.fit(trimmed)\n\tseps = list(sorted([int(v[0]) + 25//2 for v in clst.cluster_centers_]))\n\tfinal_seps = []\n\tfor start, end in zip(seps, seps[1:]):\n\t\tif (end - start) > w // 6:\n\t\t\tfinal_seps.append(start)\n\tfinal_seps.append(seps[-1])\n\treturn final_seps, matches\n\ndef train(edges, nonedges):\n\tclf = svm.SVC(gamma=.001, C=100.)\n\tX = normalize(nonedges + edges)\n\ty = [0] * len(nonedges) + [1] * len(edges)\n\tclf.fit(X, y)\n\treturn clf\n\n\ndef main(edge_dir, non_edge_dir):\n\tedges = load_images(edge_dir)\n\tnonedges = load_images(non_edge_dir)\n\n\tcrossvalidate(edges, nonedges)\n\n\tclf = train(edges, nonedges)\n\n\tfor comic in os.listdir('test'):\n\t\tprint(comic)\n\t\tpanels, matches = search_picture(clf, 'test/' + comic)\n\t\tprint(\"\\tpanels: {}\".format(panels))\n\t\timage = Image.open('test/' + comic).convert('RGBA')\n\t\tdraw = ImageDraw.Draw(image)\n\t\tw, h = image.size\n\t\tfor match in matches:\n\t\t\tmatch = match[0]\n\t\t\tdraw.line((match, 0) + (match, h), fill=(0,0,255,0))\n\t\tfor sep in panels:\n\t\t\tdraw.line((sep, 0) + (sep, h), fill=(255,0,0), width=3)\n\t\timage.show()\n\n\treturn clf\n\nif 
__name__ == '__main__':\n\tif len(sys.argv) != 3:\n\t\tprint('Usage: {} <edges-dir> <non-edges-dir>'.format(sys.argv[0]))\n\t\tsys.exit(1)\n\tedge_dir = sys.argv[1]\n\tnon_edge_dir = sys.argv[2]\n\tmain(edge_dir, non_edge_dir)\n\t" ]
[ [ "sklearn.cluster.KMeans", "sklearn.cluster.DBSCAN", "sklearn.svm.SVC" ] ]
UKPLab/cdcr-beyond-corpus-tailored
[ "52bf98692c7464f25628baea24addd1a988f9a1f" ]
[ "python/handwritten_baseline/pipeline/data/loader/ecb_reader_utils.py" ]
[ "import logging\nimport os\nimport re\nimport xml.etree.ElementTree as ET\nfrom pathlib import Path\nfrom typing import Any, Tuple, Optional\n\nimport pandas as pd\n\nfrom python import TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX, TOKEN_IDX_TO, \\\n TOKEN_IDX_FROM, TOKEN, MENTION_ID, EVENT, MENTION_TYPE, DESCRIPTION, MENTION_TYPES_ACTION\n\nlogger = logging.getLogger()\n\n\ndef read_xml(xml_path) -> Tuple[Any, Any, Any, Any, Any]:\n tree = ET.parse(xml_path)\n\n # 1: read document info\n root = tree.getroot()\n assert root.tag == \"Document\"\n doc_filename = root.attrib[\"doc_name\"]\n doc_id = root.attrib[\"doc_id\"]\n m = re.match(r\"(?P<topic_id>\\d+)_(?P<document_number>\\d+)(?P<subtopic>\\w+)\\.xml\", doc_filename)\n\n topic_id = m.group(\"topic_id\")\n subtopic = m.group(\"subtopic\")\n document_number = int(m.group(\"document_number\"))\n\n documents_index = pd.MultiIndex.from_tuples([(topic_id, subtopic, doc_id)],\n names=[TOPIC_ID, SUBTOPIC, DOCUMENT_ID])\n documents = pd.DataFrame({DOCUMENT_ID: pd.Series(doc_id, index=documents_index),\n DOCUMENT_NUMBER: pd.Series(document_number, index=documents_index)})\n\n # 2: read document content\n contents_rows = []\n contents_index = []\n for token_elmt in root.iter(\"token\"):\n # index content\n sentence_idx = int(token_elmt.attrib[\"sentence\"])\n token_idx = int(token_elmt.attrib[\"number\"])\n contents_index.append((doc_id, sentence_idx, token_idx))\n\n # content\n token = token_elmt.text\n contents_rows.append({TOKEN: token})\n contents_index = pd.MultiIndex.from_tuples(contents_index, names=[DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX])\n contents = pd.DataFrame(contents_rows, index=contents_index)\n\n # 3: read markables / mentions and entity/event descriptions\n mentions_rows = []\n mentions_index = []\n entities_events = []\n for markable in root.find(\"Markables\").getchildren():\n # Don't know what this is, skip it\n if markable.tag == \"UNKNOWN_INSTANCE_TAG\":\n continue\n\n mention_id = int(markable.attrib[\"m_id\"])\n\n # there are markables without spans, these are descriptions of entities / events which we want to keep\n if \"TAG_DESCRIPTOR\" in markable.attrib.keys():\n if \"instance_id\" in markable.attrib.keys():\n entities_events.append({\n EVENT: markable.attrib[\"instance_id\"],\n DESCRIPTION: markable.attrib[\"TAG_DESCRIPTOR\"]\n })\n continue\n\n token_ids = [int(anchor.attrib[\"t_id\"]) for anchor in markable.iter(\"token_anchor\")]\n token_ids_from, token_ids_to = min(token_ids), max(token_ids)\n\n # the token_ids are cumulative token indexes, remove their cumulative nature\n token_indexes = contents.index.get_level_values(TOKEN_IDX).values\n token_idx_from = token_indexes[\n token_ids_from - 1] # -1 because token_ids start at 1, so we need to access index 0 in the dataframe to find t_id 1\n token_idx_to = token_indexes[\n token_ids_to - 1] + 1 # additionally +1 here because we want mention spans represented as intervals [from, to[\n\n sentence_idx = contents.index.get_level_values(SENTENCE_IDX).values[token_ids_from - 1]\n\n # resolve non-contiguous mentions\n is_non_contiguous_mention = len(token_ids) < token_idx_from - token_idx_to\n if is_non_contiguous_mention:\n logger.info(\"Converted non-contiguous mention to contiguous mention.\")\n\n mentions_index.append((doc_id, mention_id))\n mentions_rows.append({SENTENCE_IDX: sentence_idx,\n TOKEN_IDX_FROM: token_idx_from,\n TOKEN_IDX_TO: token_idx_to,\n MENTION_TYPE: markable.tag})\n mentions_index = 
pd.MultiIndex.from_tuples(mentions_index, names=[DOCUMENT_ID, MENTION_ID])\n mentions = pd.DataFrame(mentions_rows, index=mentions_index)\n entities_events = pd.DataFrame(entities_events).set_index(EVENT)\n\n # 4. read relations (clusters)\n clusters_rows = []\n for relation in root.find(\"Relations\").getchildren():\n tags_of_interest = [\"CROSS_DOC_COREF\", \"INTRA_DOC_COREF\"]\n if not relation.tag in tags_of_interest:\n logger.info(\"Unexpected tag \" + relation.tag)\n raise NotImplementedError\n\n # There are relations with tags INTRA_DOC_COREF and CROSS_DOC_COREF. The cross-doc ones have a \"note\" attribute.\n if \"note\" in relation.attrib:\n # this is the case for CROSS_DOC_COREF tags\n relation_id = relation.attrib[\"note\"]\n else:\n # this is the case for INTRA_DOC_COREF tags\n relation_id = doc_id + \"_\" + relation.attrib[\"r_id\"]\n\n for mention in relation.iter(\"source\"):\n mention_id = int(mention.attrib[\"m_id\"])\n clusters_rows.append({EVENT: relation_id, DOCUMENT_ID: doc_id, MENTION_ID: mention_id})\n clusters = pd.DataFrame(clusters_rows)\n\n # 5. create relations for singletons\n # In ECB plus, there are ACTION_OCCURRENCE markables which are not assigned to a relation. These are singletons. We\n # add one entry for each singleton to `clusters` to ensure consistency. Note that the opposite also exists:\n # singleton mentions which are marked as participating in a cross-doc coref relation, but there is no second\n # mention for this relation.\n if clusters.empty:\n singletons = mentions.index.to_frame().reset_index(drop=True)\n else:\n # This can most likely be done in a nicer way using some index difference...\n outer = pd.merge(mentions, clusters, left_index=True, right_on=[DOCUMENT_ID, MENTION_ID], how=\"outer\")\n singletons = outer.loc[outer[EVENT].isna(), [DOCUMENT_ID, MENTION_ID]]\n singletons[EVENT] = \"SINGLETON_\" + singletons.astype(str).apply(\"_\".join, axis=1)\n clusters = clusters.append(singletons, sort=False).reset_index(drop=True)\n\n return documents, contents, mentions, clusters, entities_events\n\n\ndef read_split_data(root: Path, sentence_filter_csv: Optional[Path]):\n documents = []\n contents = []\n mentions = []\n clusters = []\n entities_events = []\n\n # enumerate files\n for root, dirs, files in os.walk(str(root.absolute())):\n for file in files:\n path = os.path.abspath(os.path.join(root, file))\n f_documents, f_contents, f_mentions, f_clusters, f_entities_events = read_xml(path)\n\n documents.append(f_documents)\n contents.append(f_contents)\n mentions.append(f_mentions)\n clusters.append(f_clusters)\n entities_events.append(f_entities_events)\n\n documents = pd.concat(documents).sort_index()\n contents = pd.concat(contents).sort_index()\n mentions = pd.concat(mentions).sort_index()\n clusters = pd.concat(clusters, sort=False)\n entities_events = pd.concat(entities_events).sort_index()\n\n # assert that every mention participates only in one cluster -> meaning we can just add an 'EVENT' column to each mention\n assert clusters.duplicated(subset=[DOCUMENT_ID, MENTION_ID]).value_counts().get(True, 0) == 0\n\n clusters = clusters.set_index([DOCUMENT_ID, MENTION_ID])\n mentions = pd.merge(mentions, clusters, left_index=True, right_index=True).sort_index()\n\n # read file which tells us from which sentences we should keep event mentions\n if sentence_filter_csv is not None:\n sent_filter = pd.read_csv(sentence_filter_csv)\n doc_number_and_subtopic = sent_filter[\"File\"].str.split(\"ecb\", expand=True)\n doc_number_and_subtopic.columns = 
[DOCUMENT_NUMBER, SUBTOPIC]\n doc_number_and_subtopic[DOCUMENT_NUMBER] = doc_number_and_subtopic[DOCUMENT_NUMBER].astype(int)\n doc_number_and_subtopic[SUBTOPIC].replace({\"plus\": \"ecbplus\", \"\": \"ecb\"}, inplace=True)\n sent_filter = pd.concat([sent_filter.drop(columns=\"File\"), doc_number_and_subtopic], axis=1)\n sent_filter.rename(columns={\"Topic\": TOPIC_ID, \"Sentence Number\": SENTENCE_IDX}, inplace=True)\n sent_filter[TOPIC_ID] = sent_filter[TOPIC_ID].astype(str)\n sent_filter = sent_filter[[TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, SENTENCE_IDX]]\n\n # the sentence filter file applies to all splits, remove those topics that we don't have in the split we're loading\n topics_in_split = documents.index.get_level_values(TOPIC_ID).unique()\n sent_filter = sent_filter.loc[sent_filter[TOPIC_ID].isin(topics_in_split)].copy()\n\n # obtain doc-id from topic+subtopic+document number\n documents_with_doc_number_in_index = documents.set_index(DOCUMENT_NUMBER, append=True).reset_index(level=DOCUMENT_ID, drop=True).sort_index()\n sent_filter[DOCUMENT_ID] = sent_filter[[TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER]].apply(lambda row: documents_with_doc_number_in_index[DOCUMENT_ID].loc[tuple(row.values)], axis=1)\n\n all_mentions_to_keep = []\n for doc_id, df in mentions.groupby(DOCUMENT_ID):\n sentences_to_keep = sent_filter.loc[sent_filter[DOCUMENT_ID] == doc_id]\n\n # we only remove action phrases and leave the other mentions in place, so that we can potentially mask them for\n # analysis, see python.handwritten_baseline.pipeline.data.processing.masking.MentionMaskingStage\n is_official_evaluation_sentence = df[SENTENCE_IDX].isin(sentences_to_keep[SENTENCE_IDX])\n is_action_mention = df[MENTION_TYPE].isin(MENTION_TYPES_ACTION)\n mentions_to_keep = df.loc[is_official_evaluation_sentence | (~is_action_mention)]\n all_mentions_to_keep.append(mentions_to_keep)\n mentions = pd.concat(all_mentions_to_keep).sort_index()\n\n return documents, contents, mentions, entities_events" ]
[ [ "pandas.Series", "pandas.read_csv", "pandas.DataFrame", "pandas.merge", "pandas.MultiIndex.from_tuples", "pandas.concat" ] ]
nbro/probability
[ "07a6378155f0ed720b5aaccf5387e3f9a432bd10" ]
[ "tensorflow_probability/python/distributions/generalized_pareto_test.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Generalized Pareto distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\n# Dependency imports\nimport hypothesis as hp\nimport hypothesis.strategies as hps\nimport numpy as np\nfrom scipy import stats as sp_stats\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps\nfrom tensorflow_probability.python.internal import test_util\n\ntfd = tfp.distributions\n\n\n# Pylint doesn't understand hps.composite.\n# pylint: disable=no-value-for-parameter\n\n\n@hps.composite\ndef generalized_paretos(draw, batch_shape=None):\n if batch_shape is None:\n batch_shape = draw(tfp_hps.shapes())\n\n constraints = dict(\n loc=tfp_hps.identity_fn,\n scale=tfp_hps.softplus_plus_eps(),\n concentration=lambda x: tf.math.tanh(x) * 0.24) # <.25==safe for variance\n\n params = draw(\n tfp_hps.broadcasting_params(\n batch_shape,\n params_event_ndims=dict(loc=0, scale=0, concentration=0),\n constraint_fn_for=constraints.get))\n dist = tfd.GeneralizedPareto(validate_args=draw(hps.booleans()), **params)\n if dist.batch_shape != batch_shape:\n raise AssertionError('batch_shape mismatch: expect {} but got {}'.format(\n batch_shape, dist))\n return dist\n\n\n@test_util.test_all_tf_execution_regimes\nclass GeneralizedParetoTest(test_util.TestCase):\n\n @hp.given(generalized_paretos())\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testShape(self, dist):\n # batch_shape == dist.batch_shape asserted in generalized_paretos()\n self.assertEqual(dist.batch_shape, self.evaluate(dist.batch_shape_tensor()))\n self.assertEqual(tf.TensorShape([]), dist.event_shape)\n self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testLogPDF(self, dist):\n xs = self.evaluate(dist.sample())\n\n logp = dist.log_prob(xs)\n self.assertEqual(dist.batch_shape, logp.shape)\n p = dist.prob(xs)\n self.assertEqual(dist.batch_shape, p.shape)\n\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n expected_logp = sp_stats.genpareto(conc, loc=loc, scale=scale).logpdf(xs)\n actual_logp = self.evaluate(logp)\n self.assertAllClose(expected_logp, actual_logp, rtol=1e-5)\n self.assertAllClose(np.exp(expected_logp), self.evaluate(p), rtol=1e-5)\n\n def testLogPDFBoundary(self):\n # When loc = concentration = 0, we have an exponential distribution. 
Check\n # that at 0 we have finite log prob.\n scale = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)\n dist = tfd.GeneralizedPareto(loc=0, scale=scale, concentration=0)\n log_pdf = dist.log_prob(0.)\n self.assertAllClose(-np.log(scale), self.evaluate(log_pdf), rtol=1e-5)\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testCDF(self, dist):\n xs = self.evaluate(dist.sample())\n cdf = dist.cdf(xs)\n self.assertEqual(dist.batch_shape, cdf.shape)\n\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n expected_cdf = sp_stats.genpareto(conc, loc=loc, scale=scale).cdf(xs)\n self.assertAllClose(expected_cdf, self.evaluate(cdf), rtol=5e-5)\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testMean(self, dist):\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n self.assertEqual(dist.batch_shape, dist.mean().shape)\n if np.abs(conc) < 1e-5 and conc != 0:\n return # scipy does badly at small nonzero concentrations.\n expected = sp_stats.genpareto(conc, loc=loc, scale=scale).mean()\n actual = self.evaluate(dist.mean())\n self.assertAllClose(expected, actual, rtol=5e-4)\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testVariance(self, dist):\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n self.assertEqual(dist.batch_shape, dist.variance().shape)\n expected = sp_stats.genpareto(conc, loc=loc, scale=scale).var()\n if np.abs(conc) < 1e-4 and conc != 0:\n return # scipy does badly at small nonzero concentrations.\n if expected <= 0:\n return # scipy sometimes returns nonsense zero or negative variances.\n actual = self.evaluate(dist.variance())\n print('var', loc, scale, conc, expected, actual, file=sys.stderr)\n self.assertAllClose(expected, actual, rtol=.01)\n\n @hp.given(generalized_paretos(batch_shape=[]))\n @tfp_hps.tfp_hp_settings(default_max_examples=5)\n def testEntropy(self, dist):\n loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])\n self.assertEqual(dist.batch_shape, dist.entropy().shape)\n expected = sp_stats.genpareto.entropy(conc, loc=loc, scale=scale)\n actual = self.evaluate(dist.entropy())\n self.assertAllClose(expected, actual)\n\n def testSample(self):\n loc = np.float32(-7.5)\n scale = np.float32(3.5)\n conc = np.float32(0.07)\n n = 100000\n dist = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc)\n samples = dist.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n,), samples.shape)\n self.assertEqual((n,), sample_values.shape)\n self.assertTrue(self._kstest(loc, scale, conc, sample_values))\n self.assertAllClose(\n sp_stats.genpareto.mean(conc, loc=loc, scale=scale),\n sample_values.mean(),\n rtol=.005)\n self.assertAllClose(\n sp_stats.genpareto.var(conc, loc=loc, scale=scale),\n sample_values.var(),\n rtol=.01)\n\n def testFullyReparameterized(self):\n loc = tf.constant(4.0)\n scale = tf.constant(3.0)\n conc = tf.constant(2.0)\n _, grads = tfp.math.value_and_gradient(\n lambda *args: tfd.GeneralizedPareto(*args).sample(100),\n [loc, scale, conc])\n self.assertLen(grads, 3)\n self.assertAllNotNone(grads)\n\n def testSampleKolmogorovSmirnovMultiDimensional(self):\n loc = np.linspace(-10, 10, 3).reshape(3, 1, 1)\n scale = np.linspace(1e-6, 7, 5).reshape(5, 1)\n conc = np.linspace(-1.3, 1.3, 7)\n\n dist = 
tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc)\n n = 10000\n samples = dist.sample(n, seed=test_util.test_seed())\n sample_values = self.evaluate(samples)\n self.assertEqual((n, 3, 5, 7), samples.shape)\n self.assertEqual((n, 3, 5, 7), sample_values.shape)\n\n fails = 0\n trials = 0\n for li, l in enumerate(loc.reshape(-1)):\n for si, s in enumerate(scale.reshape(-1)):\n for ci, c in enumerate(conc.reshape(-1)):\n samps = sample_values[:, li, si, ci]\n trials += 1\n fails += 0 if self._kstest(l, s, c, samps) else 1\n self.assertLess(fails, trials * 0.01)\n\n def _kstest(self, loc, scale, conc, samples):\n # Uses the Kolmogorov-Smirnov test for goodness of fit.\n ks, _ = sp_stats.kstest(samples,\n sp_stats.genpareto(conc, loc=loc, scale=scale).cdf)\n # Return True when the test passes.\n return ks < 0.02\n\n def testPdfOfSampleMultiDims(self):\n dist = tfd.GeneralizedPareto(\n loc=0, scale=[[2.], [3.]], concentration=[-.37, .11])\n num = 50000\n samples = dist.sample(num, seed=test_util.test_seed())\n pdfs = dist.prob(samples)\n sample_vals, pdf_vals = self.evaluate([samples, pdfs])\n self.assertEqual((num, 2, 2), samples.shape)\n self.assertEqual((num, 2, 2), pdfs.shape)\n self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)\n self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)\n self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)\n self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)\n\n def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):\n s_p = zip(sample_vals, pdf_vals)\n prev = (0, 0)\n total = 0\n for k in sorted(s_p, key=lambda x: x[0]):\n pair_pdf = (k[1] + prev[1]) / 2\n total += (k[0] - prev[0]) * pair_pdf\n prev = k\n self.assertNear(1., total, err=err)\n\n def testNonPositiveInitializationParamsRaises(self):\n scale = tf.constant(0.0, name='scale')\n with self.assertRaisesOpError('Argument `scale` must be positive.'):\n dist = tfd.GeneralizedPareto(\n loc=0, scale=scale, concentration=1, validate_args=True)\n self.evaluate(dist.mean())\n\n def testGradientThroughConcentration(self):\n concentration = tf.Variable(3.)\n d = tfd.GeneralizedPareto(loc=0, scale=1, concentration=concentration)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([1., 2., 4.])\n grad = tape.gradient(loss, d.trainable_variables)\n self.assertLen(grad, 1)\n self.assertAllNotNone(grad)\n\n def testAssertsPositiveScale(self):\n scale = tf.Variable([1., 2., -3.])\n self.evaluate(scale.initializer)\n with self.assertRaisesOpError('Argument `scale` must be positive.'):\n d = tfd.GeneralizedPareto(\n loc=0, scale=scale, concentration=1, validate_args=True)\n self.evaluate(d.sample())\n\n def testAssertsPositiveScaleAfterMutation(self):\n scale = tf.Variable([1., 2., 3.])\n self.evaluate(scale.initializer)\n d = tfd.GeneralizedPareto(\n loc=0, scale=scale, concentration=0.25, validate_args=True)\n self.evaluate(d.mean())\n with self.assertRaisesOpError('Argument `scale` must be positive.'):\n with tf.control_dependencies([scale.assign([1., 2., -3.])]):\n self.evaluate(d.sample())\n\n def testGradientThroughLocScale(self):\n loc = tf.Variable(1.)\n scale = tf.Variable(2.5)\n d = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=.15)\n with tf.GradientTape() as tape:\n loss = -d.log_prob([1., 2., 4.])\n grads = tape.gradient(loss, d.trainable_variables)\n self.assertLen(grads, 2)\n self.assertAllNotNone(grads)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "scipy.stats.genpareto.var", "scipy.stats.genpareto", "scipy.stats.genpareto.mean", "numpy.float32", "numpy.abs", "tensorflow.compat.v2.test.main", "numpy.exp", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.math.tanh", "numpy.log", "scipy.stats.genpareto.entropy", "tensorflow.compat.v2.TensorShape", "numpy.array", "tensorflow.compat.v2.constant", "numpy.linspace", "tensorflow.compat.v2.Variable" ] ]
Aaronga19/MachineLearning-A-Z
[ "e8e27f0a31ac3e3c05d4029e6e5e14ac8a911153" ]
[ "Apuntes/Preprocess/Missing Data.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 13 20:30:46 2020\n\n@author: Aaronga\n\"\"\"\n\n# Datos faltantes\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv(\"Data.csv\")\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 3].values\n\n# Tratamiento de los NaN \nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values=\"NaN\", strategy=\"mean\", axis = 0)\nimputer = imputer.fit(X[:, 1:3])\nX[:, 1:3]= imputer.transform(X[:,1:3])\nprint(X)" ]
[ [ "pandas.read_csv", "sklearn.preprocessing.Imputer" ] ]
anonyma2020/dagnn
[ "3191b2cc4f923d523ece3962c96a0e3dd54f1a0b" ]
[ "ogbg-code/model/asap.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nfrom torch_geometric.nn import (ASAPooling,\n GraphConv, global_mean_pool,\n JumpingKnowledge)\n\n\nclass ASAP(torch.nn.Module):\n def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim, num_layers, hidden, ratio=0.8, dropout=0, num_class=0):\n super(ASAP, self).__init__()\n\n self.num_class = num_class\n self.max_seq_len = max_seq_len\n self.node_encoder = node_encoder\n\n self.conv1 = GraphConv(emb_dim, hidden, aggr='mean')\n self.convs = torch.nn.ModuleList()\n self.pools = torch.nn.ModuleList()\n self.convs.extend([\n GraphConv(hidden, hidden, aggr='mean')\n for i in range(num_layers - 1)\n ])\n self.pools.extend([\n ASAPooling(hidden, ratio, dropout=dropout)\n for i in range((num_layers) // 2)\n ])\n self.jump = JumpingKnowledge(mode='cat')\n self.lin1 = Linear(num_layers * hidden, hidden)\n # self.lin2 = Linear(hidden, dataset.num_classes)\n\n if self.num_class > 0: # classification\n self.graph_pred_linear = torch.nn.Linear(hidden, self.num_class)\n else:\n self.graph_pred_linear_list = torch.nn.ModuleList()\n for i in range(max_seq_len):\n self.graph_pred_linear_list.append(torch.nn.Linear(hidden, num_vocab))\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n for pool in self.pools:\n pool.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, node_depth, batch = data.x, data.edge_index, data.node_depth, data.batch\n\n x = self.node_encoder(x, node_depth.view(-1, ))\n\n edge_weight = None\n x = F.relu(self.conv1(x, edge_index))\n xs = [global_mean_pool(x, batch)]\n for i, conv in enumerate(self.convs):\n x = conv(x=x, edge_index=edge_index, edge_weight=edge_weight)\n x = F.relu(x)\n xs += [global_mean_pool(x, batch)]\n if i % 2 == 0 and i < len(self.convs) - 1:\n pool = self.pools[i // 2]\n x, edge_index, edge_weight, batch, _ = pool(\n x=x, edge_index=edge_index, edge_weight=edge_weight,\n batch=batch)\n x = self.jump(xs)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n # x = self.lin2(x)\n # return F.log_softmax(x, dim=-1)\n\n if self.num_class > 0:\n return self.graph_pred_linear(x)\n\n pred_list = []\n for i in range(self.max_seq_len):\n pred_list.append(self.graph_pred_linear_list[i](x))\n return pred_list\n\n def __repr__(self):\n return self.__class__.__name__\n\n" ]
[ [ "torch.nn.Linear", "torch.nn.ModuleList", "torch.nn.functional.dropout", "torch.nn.functional.relu" ] ]
xgmiao/AutoDL-Projects
[ "0dbbc286c9f56136291590136fffd513af881c36" ]
[ "exps/LFNA/basic-same.py" ]
[ "#####################################################\n# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.04 #\n#####################################################\n# python exps/LFNA/basic-same.py --srange 1-999 --env_version v1 --hidden_dim 16\n# python exps/LFNA/basic-same.py --srange 1-999 --env_version v2 --hidden_dim\n#####################################################\nimport sys, time, copy, torch, random, argparse\nfrom tqdm import tqdm\nfrom copy import deepcopy\nfrom pathlib import Path\n\nlib_dir = (Path(__file__).parent / \"..\" / \"..\" / \"lib\").resolve()\nif str(lib_dir) not in sys.path:\n sys.path.insert(0, str(lib_dir))\nfrom procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint\nfrom log_utils import time_string\nfrom log_utils import AverageMeter, convert_secs2time\n\nfrom utils import split_str2indexes\n\nfrom procedures.advanced_main import basic_train_fn, basic_eval_fn\nfrom procedures.metric_utils import SaveMetric, MSEMetric, ComposeMetric\nfrom datasets.synthetic_core import get_synthetic_env\nfrom models.xcore import get_model\n\nfrom lfna_utils import lfna_setup\n\n\ndef subsample(historical_x, historical_y, maxn=10000):\n total = historical_x.size(0)\n if total <= maxn:\n return historical_x, historical_y\n else:\n indexes = torch.randint(low=0, high=total, size=[maxn])\n return historical_x[indexes], historical_y[indexes]\n\n\ndef main(args):\n logger, env_info, model_kwargs = lfna_setup(args)\n\n # check indexes to be evaluated\n to_evaluate_indexes = split_str2indexes(args.srange, env_info[\"total\"], None)\n logger.log(\n \"Evaluate {:}, which has {:} timestamps in total.\".format(\n args.srange, len(to_evaluate_indexes)\n )\n )\n\n w_container_per_epoch = dict()\n\n per_timestamp_time, start_time = AverageMeter(), time.time()\n for i, idx in enumerate(to_evaluate_indexes):\n\n need_time = \"Time Left: {:}\".format(\n convert_secs2time(\n per_timestamp_time.avg * (len(to_evaluate_indexes) - i), True\n )\n )\n logger.log(\n \"[{:}]\".format(time_string())\n + \" [{:04d}/{:04d}][{:04d}]\".format(i, len(to_evaluate_indexes), idx)\n + \" \"\n + need_time\n )\n # train the same data\n historical_x = env_info[\"{:}-x\".format(idx)]\n historical_y = env_info[\"{:}-y\".format(idx)]\n # build model\n model = get_model(dict(model_type=\"simple_mlp\"), **model_kwargs)\n # build optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True)\n criterion = torch.nn.MSELoss()\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer,\n milestones=[\n int(args.epochs * 0.25),\n int(args.epochs * 0.5),\n int(args.epochs * 0.75),\n ],\n gamma=0.3,\n )\n train_metric = MSEMetric()\n best_loss, best_param = None, None\n for _iepoch in range(args.epochs):\n preds = model(historical_x)\n optimizer.zero_grad()\n loss = criterion(preds, historical_y)\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n # save best\n if best_loss is None or best_loss > loss.item():\n best_loss = loss.item()\n best_param = copy.deepcopy(model.state_dict())\n model.load_state_dict(best_param)\n with torch.no_grad():\n train_metric(preds, historical_y)\n train_results = train_metric.get_info()\n\n metric = ComposeMetric(MSEMetric(), SaveMetric())\n eval_dataset = torch.utils.data.TensorDataset(\n env_info[\"{:}-x\".format(idx)], env_info[\"{:}-y\".format(idx)]\n )\n eval_loader = torch.utils.data.DataLoader(\n eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0\n )\n results = basic_eval_fn(eval_loader, 
model, metric, logger)\n log_str = (\n \"[{:}]\".format(time_string())\n + \" [{:04d}/{:04d}]\".format(idx, env_info[\"total\"])\n + \" train-mse: {:.5f}, eval-mse: {:.5f}\".format(\n train_results[\"mse\"], results[\"mse\"]\n )\n )\n logger.log(log_str)\n\n save_path = logger.path(None) / \"{:04d}-{:04d}.pth\".format(\n idx, env_info[\"total\"]\n )\n w_container_per_epoch[idx] = model.get_w_container().no_grad_clone()\n save_checkpoint(\n {\n \"model_state_dict\": model.state_dict(),\n \"model\": model,\n \"index\": idx,\n \"timestamp\": env_info[\"{:}-timestamp\".format(idx)],\n },\n save_path,\n logger,\n )\n logger.log(\"\")\n per_timestamp_time.update(time.time() - start_time)\n start_time = time.time()\n\n save_checkpoint(\n {\"w_container_per_epoch\": w_container_per_epoch},\n logger.path(None) / \"final-ckp.pth\",\n logger,\n )\n\n logger.log(\"-\" * 200 + \"\\n\")\n logger.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Use the data in the past.\")\n parser.add_argument(\n \"--save_dir\",\n type=str,\n default=\"./outputs/lfna-synthetic/use-same-timestamp\",\n help=\"The checkpoint directory.\",\n )\n parser.add_argument(\n \"--env_version\",\n type=str,\n required=True,\n help=\"The synthetic enviornment version.\",\n )\n parser.add_argument(\n \"--hidden_dim\",\n type=int,\n required=True,\n help=\"The hidden dimension.\",\n )\n parser.add_argument(\n \"--init_lr\",\n type=float,\n default=0.1,\n help=\"The initial learning rate for the optimizer (default is Adam)\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=512,\n help=\"The batch size\",\n )\n parser.add_argument(\n \"--epochs\",\n type=int,\n default=1000,\n help=\"The total number of epochs.\",\n )\n parser.add_argument(\n \"--srange\", type=str, required=True, help=\"The range of models to be evaluated\"\n )\n parser.add_argument(\n \"--workers\",\n type=int,\n default=4,\n help=\"The number of data loading workers (default: 4)\",\n )\n # Random Seed\n parser.add_argument(\"--rand_seed\", type=int, default=-1, help=\"manual seed\")\n args = parser.parse_args()\n if args.rand_seed is None or args.rand_seed < 0:\n args.rand_seed = random.randint(1, 100000)\n assert args.save_dir is not None, \"The save dir argument can not be None\"\n args.save_dir = \"{:}-{:}-d{:}\".format(\n args.save_dir, args.env_version, args.hidden_dim\n )\n main(args)\n" ]
[ [ "torch.nn.MSELoss", "torch.utils.data.DataLoader", "torch.no_grad", "torch.randint" ] ]
brenov/ip-usp
[ "06f9f16229a4587e38a3ae89fbe3394d5f1572fd" ]
[ "05-Image-Descriptors/solution.py" ]
[ "# Name: Breno Maurício de Freitas Viana\n# NUSP: 11920060\n# Course Code: SCC5830\n# Year/Semester: 2021/1\n# Assignment 5: Image Descriptors\n\n\nimport math\nimport numpy as np\nimport imageio\nfrom scipy import ndimage\n\n\nnp.seterr(divide='ignore', invalid='ignore')\n\nLEVELS = 256\n\n# ----- (1) Read Parameters\n\n# Get the location of the object image `f`\nf = input().rstrip()\n# Get the location of the large image `g`\ng = input().rstrip()\n# Get the quantisation parameter `b`\nb = int(input())\n\n\n# --- Load images\n\n# Object image `f`\nf = imageio.imread(f)\n# Large image `g`\ng = imageio.imread(g)\n\n\n\n# ----- (2) Preprocessing and Quantisation\n\ndef luminance(img):\n \"\"\"\n Get a RGB image as input and return a black&white image.\n \"\"\"\n N, M, _ = img.shape\n out = np.empty(img.shape)\n out = 0.299 * img[:,:,0] + 0.587 * img[:,:,1] + 0.114 * img[:,:,2]\n return out.astype(np.uint8)\n\n\n# --- Convert the images to black&white\nf = luminance(f)\ng = luminance(g)\n\n\n# --- Quantise the images to `b` bits\nB = 8 - b\nf = f >> B\ng = g >> B\n\n\n# ----- (3) Image Descriptors\n\ndef nh_descriptor(f):\n \"\"\"\n Return the normalized histogram descriptor.\n \"\"\"\n hist, _ = np.histogram(f, bins=[i for i in range(2 ** b + 1)])\n hist = hist / hist.sum()\n dc = hist / np.linalg.norm(hist)\n return dc\n\ndef ht_descriptor(f):\n \"\"\"\n Return the Haralick texture descriptors (intensity-level co-ocurrence matrix).\n \"\"\"\n # Calculate the co-occurence matrix\n N, M = f.shape\n C = np.zeros((LEVELS, LEVELS))\n for x in range(N - 1):\n for y in range(M - 1):\n i = f[x, y]\n j = f[x + 1, y + 1]\n C[i][j] += 1\n C = C / C.sum()\n #\n # Computing the descriptors\n N, M = C.shape\n #\n energy = np.power(C, 2).sum()\n #\n epsilon = 0.001\n entropy = - (C * np.log(C + epsilon)).sum()\n #\n A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int)\n contrast = (1 / math.pow(N, 2)) * (C * A).sum()\n #\n mu_i, si_i = 0, 0\n mu_j, si_j = 0, 0\n for k in range(N):\n a1 = C[k,:].sum()\n mu_i += k * a1\n si_i += math.pow(k - mu_i, 2) * a1\n #\n a2 = C[:,k].sum()\n mu_j += k * a2\n si_j += math.pow(k - mu_j, 2) * a2\n #\n A = np.fromfunction(lambda i, j: (i - j) ** 2, (N, M), dtype=int)\n correlation = (A * C).sum() - mu_i * mu_j\n correlation /= (si_i * si_j)\n #\n homogeneity = 0\n #\n A = np.fromfunction(lambda i, j: (1 + abs(i - j)), (N, M), dtype=int)\n homogeneity = (C * A).sum()\n #\n # Return the Haralick texture descriptors\n dt = np.array([energy, entropy, contrast, correlation, homogeneity])\n dt = dt / np.linalg.norm(dt)\n return dt\n\ndef hg_descriptor(f):\n \"\"\"\n Return the histogram of oriented gradients descriptor.\n \"\"\"\n wsx = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\n wsy = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n #\n f = f.astype(np.float64)\n fx = ndimage.convolve(f, wsx)\n fy = ndimage.convolve(f, wsy)\n #\n N, M = f.shape\n #\n div = np.sqrt(np.power(fx, 2) + np.power(fy, 2)).sum()\n Mg = np.sqrt(np.power(fx, 2) + np.power(fy, 2)) / div\n #\n sigma = np.zeros(f.shape)\n sigma = np.arctan(fy / fx) + np.pi / 2\n sigma = np.degrees(sigma)\n sigma = np.digitize(sigma, np.arange(0, 180, 20))\n sigma = sigma.astype(np.uint8)\n #\n dg = np.zeros(9)\n for x in range(N):\n for y in range(M):\n dg[sigma[x][y] - 1] += Mg[x][y]\n #\n dg = dg / np.linalg.norm(dg)\n return dg\n\n\n# --- Compute the image descriptors\n\n# Calculate the object image descriptors\ndc = nh_descriptor(f)\ndt = ht_descriptor(f)\ndg = hg_descriptor(f)\n\nd = 
np.concatenate((dc, dt, dg))\n\n\n\n# ----- (4) Finding Our Object\n\ndef distance(d, di):\n \"\"\"\n Calculate the distance of two descriptors.\n \"\"\"\n return math.sqrt(np.power(d - di, 2).sum())\n\n\n# --- Search for the object image location in the original image\n\nsize = f.shape[0]\nstep = size // 2\nN, M = g.shape\nN = N // step\nM = M // step\n\ndist = np.iinfo(np.uint8).max\n\npos_x = None\npos_y = None\n\nfor i in range(N - 1):\n for j in range(M - 1):\n # Calculate the window\n window = g[i*step:i*step+size, j*step:j*step+size]\n # Calculate the descriptors of the window\n window_dc = nh_descriptor(window)\n window_dt = ht_descriptor(window)\n window_dg = hg_descriptor(window)\n window_d = np.concatenate((window_dc, window_dt, window_dg))\n # Calculate the distance between the window and the object image\n ndist = distance(d, window_d)\n if dist > ndist:\n dist = ndist\n pos_x, pos_y = i, j\n\n\n# --- Print the found location\n\nprint(pos_x, pos_y)\n" ]
[ [ "numpy.fromfunction", "numpy.degrees", "numpy.empty", "numpy.zeros", "numpy.arctan", "scipy.ndimage.convolve", "numpy.seterr", "numpy.arange", "numpy.iinfo", "numpy.power", "numpy.log", "numpy.array", "numpy.concatenate", "numpy.linalg.norm" ] ]
CityU-AIM-Group/SFPolypDA
[ "3902577cf9549a65be7ba89e2c11a7115158b531" ]
[ "fcos_core/solver/build.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nimport logging\nfrom .lr_scheduler import WarmupMultiStepLR\n\n\ndef make_optimizer(cfg, model):\n logger = logging.getLogger(\"fcos_core.trainer\")\n params = []\n for key, value in model.named_parameters():\n if not value.requires_grad:\n continue\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n if \"bias\" in key:\n lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS\n if key.endswith(\".offset.weight\") or key.endswith(\".offset.bias\"):\n logger.info(\"set lr factor of {} as {}\".format(\n key, cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR\n ))\n lr *= cfg.SOLVER.DCONV_OFFSETS_LR_FACTOR\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)\n if cfg.SOLVER.ADAM:\n optimizer = torch.optim.Adam(params)\n return optimizer\n\n\ndef make_lr_scheduler(cfg, optimizer):\n return WarmupMultiStepLR(\n optimizer,\n cfg.SOLVER.STEPS,\n cfg.SOLVER.GAMMA,\n warmup_factor=cfg.SOLVER.WARMUP_FACTOR,\n warmup_iters=cfg.SOLVER.WARMUP_ITERS,\n warmup_method=cfg.SOLVER.WARMUP_METHOD,\n )\n" ]
[ [ "torch.optim.Adam", "torch.optim.SGD" ] ]
stevezheng23/fewshot_nlp_pt
[ "aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2", "aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2" ]
[ "src/transformersX/models/cutoffbert/modeling_cutoffbert.py", "src/transformersX/models/promptbert/modeling_promptbert.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch CUTOFFBERT model. \"\"\"\n\n\nimport math\nimport os\nimport warnings\nimport numpy as np\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.utils.checkpoint\nimport torch.nn.functional as F\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss\nfrom torch.distributions.beta import Beta\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n NextSentencePredictorOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n DualPassageEncoderModelOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_cutoffbert import CutoffBertConfig\nfrom ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings\nfrom ..bert.modeling_bert import BertEncoder as CutoffBertEncoder\nfrom ..bert.modeling_bert import BertPooler as CutoffBertPooler\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"bert-base-uncased\"\n_CONFIG_FOR_DOC = \"CutoffBertConfig\"\n_TOKENIZER_FOR_DOC = \"CutoffBertTokenizer\"\n\nCUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-base-cased\",\n \"bert-large-cased\",\n \"bert-base-multilingual-uncased\",\n \"bert-base-multilingual-cased\",\n # See all BERT models at https://huggingface.co/models?filter=bert\n]\n\n\ndef load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass CutoffBertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = CutoffBertConfig\n load_tf_weights = load_tf_weights_in_cutoffbert\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nCUTOFFBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nCUTOFFBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.\",\n CUTOFFBERT_START_DOCSTRING,\n)\nclass CutoffBertModel(CutoffBertPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = CutoffBertEmbeddings(config)\n self.encoder = CutoffBertEncoder(config)\n\n self.pooler = CutoffBertPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. 
Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we 
keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled\n output) + Cut-off data augmentation support.\n \"\"\",\n CUTOFFBERT_START_DOCSTRING,\n)\nclass CutoffBertForSequenceClassification(CutoffBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.cls_token_id = config.cls_token_id\n self.sep_token_id = config.sep_token_id\n self.mask_token_id = config.mask_token_id\n self.masking_prob = config.cutoff_masking_prob\n self.temperature = config.cutoff_temperature\n self.mask_loss_wgt = config.cutoff_mask_loss_wgt\n self.js_loss_wgt = config.cutoff_js_loss_wgt\n self.config = config\n\n self.bert = CutoffBertModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n \n def _apply_cutoff(self, inputs):\n masked_inputs = inputs.clone()\n valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id)\n random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool()\n masking_indices = random_masking_indices & valid_masking_indices\n masked_inputs[masking_indices] = self.mask_token_id\n return masked_inputs\n\n @add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, 
`optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is None: \n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = self.dropout(outputs[1])\n logits = self.classifier(pooled_output)\n\n if not return_dict:\n return (logits,) + outputs[2:]\n\n return SequenceClassifierOutput(\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n b, l = input_ids.size()\n masked_input_ids = self._apply_cutoff(input_ids.clone())\n flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l)\n flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None\n flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None\n flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None\n flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None\n\n flatten_outputs = self.bert(\n flatten_input_ids,\n attention_mask=flatten_attention_mask,\n token_type_ids=flatten_token_type_ids,\n position_ids=flatten_position_ids,\n head_mask=head_mask,\n inputs_embeds=flatten_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n flatten_pooled_output = self.dropout(flatten_outputs[1])\n flatten_logits = self.classifier(flatten_pooled_output)\n\n logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1)\n logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous()\n\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0:\n mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1))\n loss += mask_loss * self.mask_loss_wgt\n\n if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0:\n kl_loss_fct = KLDivLoss(reduction=\"batchmean\")\n src_logits, trg_logits = logits, masked_logits\n mean_logits = (src_logits + trg_logits) * 0.5\n src_loss = kl_loss_fct(\n F.log_softmax(src_logits / self.temperature, dim=-1),\n F.softmax(mean_logits / self.temperature, dim=-1)\n ) * (self.temperature ** 2)\n trg_loss = kl_loss_fct(\n F.log_softmax(trg_logits / self.temperature, dim=-1),\n F.softmax(mean_logits / self.temperature, dim=-1)\n ) * (self.temperature ** 2)\n js_loss = (src_loss + trg_loss) * 0.5\n loss += js_loss * self.js_loss_wgt\n\n if not return_dict:\n return (loss, logits)\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n )\n", "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. 
team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch PROMPTBERT model. \"\"\"\n\n\nimport math\nimport os\nimport warnings\nimport numpy as np\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.utils.checkpoint\nimport torch.nn.functional as F\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss\nfrom torch.distributions.beta import Beta\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n NextSentencePredictorOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n DualPassageEncoderModelOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_promptbert import PromptBertConfig\nfrom ..bert.modeling_bert import BertEmbeddings as PromptBertEmbeddings\nfrom ..bert.modeling_bert import BertEncoder as PromptBertEncoder\nfrom ..bert.modeling_bert import BertPooler as PromptBertPooler\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"bert-base-uncased\"\n_CONFIG_FOR_DOC = \"PromptBertConfig\"\n_TOKENIZER_FOR_DOC = \"PromptBertTokenizer\"\n\nPROMPTBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"bert-base-uncased\",\n \"bert-large-uncased\",\n \"bert-base-cased\",\n \"bert-large-cased\",\n \"bert-base-multilingual-uncased\",\n \"bert-base-multilingual-cased\",\n # See all BERT models at https://huggingface.co/models?filter=bert\n]\n\n\ndef load_tf_weights_in_promptbert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass PromptBertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = PromptBertConfig\n load_tf_weights = load_tf_weights_in_promptbert\n base_model_prefix = \"bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nPROMPTBERT_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nPROMPTBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare PromptBert Model transformer outputting raw hidden-states without any specific head on top.\",\n PROMPTBERT_START_DOCSTRING,\n)\nclass PromptBertModel(PromptBertPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = PromptBertEmbeddings(config)\n self.encoder = PromptBertEncoder(config)\n\n self.pooler = PromptBertPooler(config) if add_pooling_layer else None\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. 
Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we 
keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n PromptBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output).\n \"\"\",\n PROMPTBERT_START_DOCSTRING,\n)\nclass PromptBertForSequenceClassification(PromptBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.bert = PromptBertModel(config)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Bert Model with a dual encoder head on top for passage retrieval tasks (a linear layer on top of the pooled output\n for computing source-target similarity).\n \"\"\",\n PROMPTBERT_START_DOCSTRING,\n)\nclass PromptBertForDualPassageEncoder(PromptBertPreTrainedModel):\n def __init__(self, config, cls_loss_wgt=None):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.cls_loss_wgt = cls_loss_wgt\n\n self.bert = PromptBertModel(config)\n self.pooler = PromptBertPooler(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n if self.cls_loss_wgt is not None and cls_loss_wgt > 0.0:\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format(\"batch_size, 2, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=DualPassageEncoderModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. 
Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is None or len(input_ids.size()) < 3:\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = self.pooler(outputs[0])\n pooled_output = self.dropout(pooled_output)\n\n if not return_dict:\n return (pooled_output,) + outputs[2:]\n\n return DualPassageEncoderModelOutput(\n pooled_output=pooled_output,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n b, _, l = input_ids.size()\n flatten_input_ids = input_ids.reshape(-1, l)\n flatten_attention_mask = attention_mask.reshape(-1, l) if attention_mask is not None else None\n flatten_token_type_ids = token_type_ids.reshape(-1, l) if token_type_ids is not None else None\n flatten_position_ids = position_ids.reshape(-1, l) if position_ids is not None else None\n flatten_inputs_embeds = inputs_embeds.reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None\n\n flatten_outputs = self.bert(\n flatten_input_ids,\n attention_mask=flatten_attention_mask,\n token_type_ids=flatten_token_type_ids,\n position_ids=flatten_position_ids,\n head_mask=head_mask,\n inputs_embeds=flatten_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n flatten_pooled_output = self.pooler(flatten_outputs[0])\n src_pooled_output, trg_pooled_output = flatten_pooled_output.reshape(b, 2, self.config.hidden_size).chunk(2, dim=1)\n src_pooled_output, trg_pooled_output = src_pooled_output.squeeze(dim=1).contiguous(), trg_pooled_output.squeeze(dim=1).contiguous()\n\n mask = (labels.unsqueeze(-1).expand(-1, b) == labels.unsqueeze(0).expand(b, -1)) & (1 - torch.eye(b)).to(labels.device).bool()\n cl_logits = torch.einsum('ik,jk->ij', src_pooled_output, trg_pooled_output).masked_fill(mask, float('-inf'))\n cl_labels = torch.arange(b).to(labels.device)\n \n loss_fct = CrossEntropyLoss()\n cl_loss = loss_fct(cl_logits.view(-1, labels.size(0)), cl_labels.view(-1))\n\n if self.cls_loss_wgt is not None and self.cls_loss_wgt > 0.0:\n flatten_logits = self.classifier(self.dropout(flatten_outputs[1]))\n src_logits, trg_logits = flatten_logits.reshape(b, 2, self.num_labels).chunk(2, dim=1)\n src_logits, trg_logits = src_logits.squeeze(dim=1).contiguous(), trg_logits.squeeze(dim=1).contiguous()\n src_loss = loss_fct(src_logits.view(-1, self.num_labels), labels.view(-1))\n trg_loss = loss_fct(trg_logits.view(-1, self.num_labels), labels.view(-1))\n cls_loss = src_loss + trg_loss\n cls_logits = src_logits + trg_logits\n loss = cl_loss + cls_loss * self.cls_loss_wgt\n logits = cls_logits\n else:\n loss = cl_loss\n logits = cl_logits\n\n if not return_dict:\n return (loss, logits,)\n\n return DualPassageEncoderModelOutput(\n loss=loss,\n logits=logits,\n )\n" ]
[ [ "torch.ones", "torch.stack", "torch.nn.Linear", "numpy.transpose", "torch.nn.functional.log_softmax", "tensorflow.train.list_variables", "torch.nn.functional.softmax", "torch.full", "torch.nn.CrossEntropyLoss", "torch.nn.KLDivLoss", "torch.from_numpy", "torch.zeros", "tensorflow.train.load_variable", "torch.nn.Dropout" ], [ "torch.ones", "torch.nn.Linear", "numpy.transpose", "torch.nn.MSELoss", "torch.einsum", "tensorflow.train.list_variables", "torch.nn.CrossEntropyLoss", "torch.from_numpy", "torch.arange", "torch.nn.BCEWithLogitsLoss", "torch.zeros", "tensorflow.train.load_variable", "torch.eye", "torch.nn.Dropout" ] ]
grassofsky/modules
[ "fe51de837fed6887228f2d3f8a455d5f4602d786" ]
[ "misc/pythontools/processors/VolumeExtractChannel.py" ]
[ "# Name: VolumeExtractChannel \n\nimport inviwopy as ivw\nimport numpy as np\n\nclass VolumeExtractChannel(ivw.Processor):\n def __init__(self, id, name):\n ivw.Processor.__init__(self, id, name)\n self.inport = ivw.data.VolumeInport(\"inport\")\n self.addInport(self.inport, owner=False)\n self.outport = ivw.data.VolumeOutport(\"outport\")\n self.addOutport(self.outport, owner=False)\n\n self.channel = ivw.properties.IntProperty(\"channel\", \"channel\", 0, 0, 4, 1)\n self.addProperty(self.channel, owner=False)\n\n @staticmethod\n def processorInfo():\n return ivw.ProcessorInfo(\n \t\tclassIdentifier = \"org.inviwo.VolumeExtractChannel\", \n \t\tdisplayName = \"Volume Extract Channel\",\n \t\tcategory = \"Volume Operation\",\n \t\tcodeState = ivw.CodeState.Stable,\n \t\ttags = ivw.Tags.PY\n )\n\n def getProcessorInfo(self):\n return VolumeExtractChannel.processorInfo()\n\n def process(self):\n volume = self.inport.getData()\n if len(volume.data.shape) <= 3:\n self.outport.setData(volume)\n return\n\n channels = volume.data.shape[3]\n\n volumeSlice = volume.data[:,:,:, np.clip(self.channel.value, 0, channels-1)]\n newVolume = ivw.data.Volume(volumeSlice)\n newVolume.dataMap = volume.dataMap\n newVolume.modelMatrix = volume.modelMatrix\n newVolume.worldMatrix = volume.worldMatrix\n newVolume.copyMetaDataFrom(volume)\n newVolume.swizzlemask = volume.swizzlemask\n newVolume.interpolation = volume.interpolation\n newVolume.wrapping = volume.wrapping\n\n self.outport.setData(newVolume)\n" ]
[ [ "numpy.clip" ] ]
dertilo/speech-recognition
[ "32dfd0a05480ecb3a4ea3eb9e28628da976e7065" ]
[ "data_related/data_augmentation/signal_augment.py" ]
[ "import os\nimport subprocess\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom typing import Dict\n\nMAX_FREQ = 7999\n\n\ndef to_str(v):\n if isinstance(v, tuple):\n s = \" \".join(str(x) for x in v)\n elif isinstance(v, float) or isinstance(v, int):\n s = str(v)\n else:\n assert False\n\n return s\n\n\ndef build_sox_distortions(audio_file, params):\n param_str = \" \".join([k + \" \" + to_str(v) for k, v in params.items()])\n sox_params = \"sox {} -p {} \".format(audio_file, param_str)\n return sox_params\n\n\ndef build_sox_noise(\n audio_file,\n amod_lowpass_cutoff=0.1,\n lowpass_cutoff=MAX_FREQ,\n highpass_cutoff=1,\n noise_gain=-4,\n):\n \"\"\"\n play original.wav synth whitenoise lowpass 0.1 synth whitenoise amod gain -n 0 lowpass 100 highpass 1\n \"\"\"\n\n sox_params = \"sox {audio_file} -p synth whitenoise lowpass {amod_lowpass_cutoff} synth whitenoise amod gain -n {noise_gain} lowpass {lowpass_cutoff} highpass {highpass_cutoff}\".format(\n audio_file=audio_file,\n amod_lowpass_cutoff=amod_lowpass_cutoff,\n lowpass_cutoff=lowpass_cutoff,\n highpass_cutoff=highpass_cutoff,\n noise_gain=noise_gain,\n )\n return sox_params\n\n\ndef build_varying_amplitude_factor(audio_file, lowpass_cutoff=1, ac_gain=-9):\n ac = \"sox {} -p synth whitenoise lowpass {} gain -n {}\".format(\n audio_file, lowpass_cutoff, ac_gain\n )\n dc = \"sox {} -p gain -90 dcshift 0.5\".format(audio_file)\n return \"sox -m <({}) <({}) -p\".format(ac, dc)\n\n\ndef multiply_signals(signal_a, signal_b):\n return (\"sox -T <({signal_a}) <({signal_b}) -p\").format(\n signal_a=signal_a, signal_b=signal_b,\n )\n\n\ndef build_sox_interference(\n interfere_file, interfere_signal, lowpass_cutoff=1, ac_gain=-6\n):\n factor = build_varying_amplitude_factor(interfere_file, lowpass_cutoff, ac_gain)\n return multiply_signals(factor, interfere_signal)\n\n\ndef add_signals_trim_to_len(original, signals, augmented):\n signals_to_add = \" \".join([\"<(%s)\" % s for s in signals])\n sox_cmd = \"sox -m {signals} -b 16 {augmented} trim 0 $(soxi -D {original})\".format(\n signals=signals_to_add, original=original, augmented=augmented\n )\n return sox_cmd\n\n\ndef build_random_bandpass(min_low=50, min_band_width=100, max_high=1000) -> Dict:\n d = {}\n max_high_cutoff = MAX_FREQ\n if np.random.choice([True, False], p=[0.5, 0.5]):\n lowpass = int(round(np.random.uniform(low=min_low, high=MAX_FREQ)))\n d[\"lowpass\"] = lowpass\n max_high_cutoff = lowpass - min_band_width\n\n if np.random.choice([True, False], p=[0.5, 0.5]):\n highpass = int(\n round(np.random.uniform(low=1, high=min(max_high, max_high_cutoff)))\n )\n d[\"highpass\"] = highpass\n\n return d\n\n\ndef augment_with_sox(original_file, audio_files, augmented_file):\n interfere_file = np.random.choice(audio_files)\n min_SNR = 20 # normal:20, less:30, evenless:40\n min_SIR = 5 # normal:10, less:20, evenless:30\n\n signal_gain = round(np.random.uniform(low=-10, high=0), 2)\n signal_params = {\n \"tempo\": round(np.random.triangular(left=0.7, mode=1.0, right=1.3), 2),\n \"pitch\": int(\n round(np.random.triangular(left=-200, mode=0, right=200))\n ), # normal 100, less: 50, evenless: 30\n \"reverb\": (int(round(np.random.uniform(low=0, high=50))), 50, 100, 100, 0, 0,),\n \"gain -n\": signal_gain,\n }\n signal_params.update(build_random_bandpass(1000, 1000, 100))\n\n interfere_params = {\n \"tempo\": round(np.random.uniform(low=0.6, high=1.4), 2),\n \"pitch\": int(round(np.random.uniform(low=-500, high=500))),\n \"reverb\": (int(round(np.random.uniform(low=0, high=100))), 50, 100, 100, 
0, 0),\n \"gain -n\": round(np.random.uniform(low=-50, high=signal_gain - min_SIR), 2),\n }\n interfere_params.update(build_random_bandpass(50, 100, 1000))\n\n # params = {'signal_params':signal_params,'interfere_params':interfere_params,'noise_power':noise_power}\n # pprint(params)\n\n signal = build_sox_distortions(original_file, signal_params)\n interfere_signal = build_sox_distortions(interfere_file, interfere_params)\n\n noise_power = round(np.random.uniform(-60, signal_gain - min_SNR), 2)\n lowpass = int(round(np.random.uniform(low=100, high=MAX_FREQ)))\n highpass = int(round(np.random.uniform(low=1, high=lowpass)))\n noise = build_sox_noise(\n original_file, np.random.uniform(0.1, 2), lowpass, highpass, noise_power\n )\n\n interf = build_sox_interference(\n interfere_file,\n interfere_signal,\n lowpass_cutoff=np.random.uniform(0.5, 2),\n ac_gain=int(round(np.random.uniform(-9, -3))),\n )\n\n sox_cmd = add_signals_trim_to_len(\n original_file, [signal, noise, interf], augmented_file\n )\n FNULL = open(os.devnull, \"w\")\n subprocess.call([\"bash\", \"-c\", sox_cmd], stdout=FNULL, stderr=subprocess.STDOUT)\n # subprocess.call([\"bash\", \"-c\", sox_cmd])\n # output = subprocess.check_output([\"bash\", \"-c\", sox_cmd])\n # if len(output)>0 and 'FAIL' in output:\n # print(output)\n # return 1 if len(output)>0 else 0\n\n\ndef augment_with_specific_params():\n signal_gain = 0\n signal_params = dict(tempo=1.0, pitch=0, reverb=0)\n signal_params[\"gain -n\"] = 0\n signal = build_sox_distortions(original, signal_params)\n interfere_signal = build_sox_distortions(\n interfering, dict(gain=signal_gain - 10, tempo=0.8, pitch=100, reverb=50)\n )\n noise = build_sox_noise(\n original, noise_gain=signal_gain - 20, lowpass_cutoff=6000, highpass_cutoff=10\n )\n interf = build_sox_interference(interfering, interfere_signal)\n sox_cmd = add_signals_trim_to_len(original, [signal, noise, interf], augmented)\n subprocess.call([\"bash\", \"-c\", sox_cmd])\n\n\nif __name__ == \"__main__\":\n import librosa\n original = \"../../original.wav\"\n augmented = \"/tmp/augmented.wav\"\n interfering = \"../../interference.wav\"\n\n # augment_with_specific_params()\n\n for k in range(9):\n augment_with_sox(original, [interfering], \"/tmp/augmented_%d.wav\" % k)\n # assert False\n # path = os.environ['HOME']+\"/data/asr_data/SPANISH\"\n # audio_files = librosa.util.find_files(path)\n\n #\n # with open('spanish_train_manifest.csv') as f:\n # audio_text_files = f.readlines()\n # audio_files = [x.strip().split(\",\")[0] for x in audio_text_files]\n #\n # for k in tqdm(range(100000)):\n # original = np.random.choice(audio_files)\n # random_augmentation(original, audio_files, augmented)\n" ]
[ [ "numpy.random.uniform", "numpy.random.triangular", "numpy.random.choice" ] ]
marbre/mlir-npcomp
[ "30adf9e6b0c1e94db38050a9e143f20a5a461d17" ]
[ "frontends/pytorch/test/acap_regression/test_jit_add2.py" ]
[ "# -*- Python -*-\n# This file is licensed under a pytorch-style license\n# See frontends/pytorch/LICENSE for license information.\n\nimport torch\nimport npcomp.frontends.pytorch as torch_mlir\nimport npcomp.frontends.pytorch.test as test\n\n# RUN: %PYTHON %s | FileCheck %s\n\ndev = torch_mlir.mlir_device()\nt0 = torch.randn((4,4), device=dev)\nt1 = torch.randn((4,4), device=dev)\n\nt2 = t0 + t1\n\n#\n# Check the result tensor against the CPU\n#\nt0_cpu = t0.to('cpu')\nt1_cpu = t1.to('cpu')\nt2_cpu = t2.to('cpu')\n\nprint (t0_cpu, \" +\\n\", t1_cpu, \" =\\n\", t2_cpu)\n\n# CHECK: PASS! add2 check\ntest.compare(t2, t0_cpu + t1_cpu, \"add2\")\n" ]
[ [ "torch.randn" ] ]
GuoSuiming/mindspore
[ "665ec683d4af85c71b2a1f0d6829356f2bc0e1ff", "48afc4cfa53d970c0b20eedfb46e039db2a133d5", "59a277756eb4faad9ac9afcc7fd526e8277d4994", "59a277756eb4faad9ac9afcc7fd526e8277d4994", "48afc4cfa53d970c0b20eedfb46e039db2a133d5" ]
[ "tests/st/ops/gpu/test_tanh_grad_grad_op.py", "mindspore/ops/_grad/grad_nn_ops.py", "tests/ut/python/parallel/test_dropout_do_mask.py", "tests/st/ops/cpu/test_smooth_l1_loss_grad_op.py", "tests/ut/python/dataset/test_random_vertical_flip_with_bbox.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops.operations import _grad_ops as G\nfrom mindspore.ops import composite as C\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n\nclass NetTanhGrad(nn.Cell):\n def __init__(self):\n super(NetTanhGrad, self).__init__()\n self.tanh_grad = G.TanhGrad()\n\n def construct(self, y, grad):\n return self.tanh_grad(y, grad)\n\n\nclass NetTanhGradGrad(nn.Cell):\n def __init__(self, forward_net):\n super(NetTanhGradGrad, self).__init__()\n self.forward_net = forward_net\n self.gradOps = C.GradOperation(get_all=True, sens_param=True)\n\n def construct(self, y, grad, dout):\n backward_net = self.gradOps(self.forward_net)\n return backward_net(y, grad, dout)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu_training\n@pytest.mark.env_onecard\ndef tanh_grad_grad_base(dtype, loss):\n np.random.seed(1)\n shape = (4, 2)\n y_np = (np.random.rand(*shape) * 2 - 1).astype(dtype)\n grad_np = (np.random.rand(*shape) * 20 - 10).astype(dtype)\n dout_np = (np.random.rand(*shape) * 20 - 10).astype(dtype)\n\n y_np_32 = y_np.astype(np.float32)\n grad_np_32 = grad_np.astype(np.float32)\n dout_np_32 = dout_np.astype(np.float32)\n dy_np = (dout_np_32 * grad_np_32 * (-2.0) * y_np_32).astype(dtype)\n dgrad_np = (dout_np_32 * (1 - y_np_32 * y_np_32)).astype(dtype)\n\n y_ms = Tensor(y_np)\n grad_ms = Tensor(grad_np)\n dout_ms = Tensor(dout_np)\n forward_net = NetTanhGrad()\n net = NetTanhGradGrad(forward_net)\n dy_ms, dgrad_ms = net(y_ms, grad_ms, dout_ms)\n\n assert np.allclose(dy_ms.asnumpy(), dy_np, loss, loss)\n assert np.allclose(dgrad_ms.asnumpy(), dgrad_np, loss, loss)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu_training\n@pytest.mark.env_onecard\ndef test_tanh_grad_grad_float16():\n tanh_grad_grad_base(np.float16, 1e-3)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_gpu_training\n@pytest.mark.env_onecard\ndef test_tanh_grad_grad_float32():\n tanh_grad_grad_base(np.float32, 1e-4)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Define the grad rules of neural network related operations.\"\"\"\nimport os\nimport numpy 
as np\nfrom mindspore.ops import _selected_grad_ops as SG\nfrom mindspore.ops.primitive import constexpr\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.ops.operations import nn_ops as nps\nfrom .grad_base import bprop_getters\nfrom .. import functional as F\nfrom .. import operations as P\nfrom ...common import dtype as mstype\nfrom ..composite.multitype_ops.zeros_like_impl import zeros_like\nfrom ..operations import _grad_ops as G\nfrom ..operations import _inner_ops as inner\nfrom ... import context\n\nenv_force_bprop_seq = os.getenv(\"ENV_FORCE_BPROP_SEQ\")\n\n@bprop_getters.register(P.BiasAdd)\ndef get_bprop_bias_add(self):\n \"\"\"Grad definition for `BiasAdd` operation.\"\"\"\n bias_grad = SG.BiasAddGrad(self.data_format)\n\n def bprop(x, w, out, dout):\n return dout, bias_grad(dout)\n\n return bprop\n\n\n@bprop_getters.register(P.Conv2D)\ndef get_bprop_conv2d(self):\n \"\"\"Grad definition for `Conv2D` operation.\"\"\"\n input_grad = P.Conv2DBackpropInput(\n self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,\n dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format\n )\n filter_grad = G.Conv2DBackpropFilter(\n self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,\n dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format\n )\n get_shape = P.Shape()\n\n def bprop(x, w, out, dout):\n dx = input_grad(dout, w, get_shape(x))\n if env_force_bprop_seq == '1':\n x = F.depend(x, dx)\n dw = filter_grad(dout, x, get_shape(w))\n return dx, dw\n\n return bprop\n\n\n@bprop_getters.register(nps.Conv3D)\ndef get_bprop_conv3d(self):\n \"\"\"Grad definition for `Conv3D` operation.\"\"\"\n input_grad = nps.Conv3DBackpropInput(\n self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,\n pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format\n )\n filter_grad = G.Conv3DBackpropFilter(\n self.out_channel, self.kernel_size, self.mode, pad_mode=self.pad_mode,\n pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format\n )\n get_shape = P.Shape()\n\n def bprop(x, w, out, dout):\n dx = input_grad(w, dout, get_shape(x))\n dw = filter_grad(x, dout, get_shape(w))\n return dx, dw\n\n return bprop\n\n\n@bprop_getters.register(nps.Conv3DTranspose)\ndef get_bprop_conv3d_transpose(self):\n \"\"\"Grad definition for `Conv3DTranspose` operation.\"\"\"\n input_grad = nps.Conv3D(\n out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode=\"pad\",\n pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format\n )\n filter_grad = G.Conv3DBackpropFilter(\n out_channel=self.in_channel, kernel_size=self.kernel_size, mode=self.mode, pad_mode=\"pad\",\n pad=self.pad, stride=self.stride, dilation=self.dilation, group=self.group, data_format=self.data_format\n )\n input_size = self.input_size\n\n def bprop(x, w, out, dout):\n dx = input_grad(dout, w)\n dw = filter_grad(dout, x, F.shape(w))\n return dx, dw, zeros_like(input_size)\n\n return bprop\n\n\n@bprop_getters.register(inner.ExtractImagePatches)\ndef get_bprop_extract_image_patches(self):\n \"\"\"Grad definition for `ExtractImagePatches` operation.\"\"\"\n get_shape = P.Shape()\n reshape = P.Reshape()\n extract_image_patches = inner.ExtractImagePatches(ksizes=self.ksizes,\n strides=self.strides,\n rates=self.rates,\n padding=self.padding)\n 
concat = P.Concat(axis=-1)\n expand_dims = P.ExpandDims()\n scatter_nd = P.ScatterNd()\n dtype = P.DType()\n fill = P.Fill()\n slice_op = P.Slice()\n transpose = P.Transpose()\n cast = P.Cast()\n matmul = P.MatMul()\n\n _, _, ksizes_row, ksizes_col = self.ksizes\n\n def bprop(x, out, dout):\n x_shape = get_shape(x)\n x_batch, x_depth, x_row, x_col = x_shape\n x_indices_num = x_row * x_col + 1\n x_idx = cast(F.tuple_to_array(range(1, x_indices_num)), mstype.float32)\n x_idx = reshape(x_idx, (1, 1, x_row, x_col))\n x_idx_patch = cast(extract_image_patches(x_idx), mstype.int32)\n x_idx_patch = transpose(x_idx_patch, (0, 2, 3, 1))\n\n out_shape = get_shape(out)\n _, _, out_row, out_col = out_shape\n out_indices_num = out_row * out_col * ksizes_row * ksizes_col\n out_idx = F.tuple_to_array(range(out_indices_num))\n out_idx = reshape(out_idx, (1, out_row, out_col, ksizes_row * ksizes_col))\n\n idx_tensor = concat((expand_dims(x_idx_patch, -1), expand_dims(out_idx, -1)))\n idx_tensor = reshape(idx_tensor, (-1, 2))\n sp_shape = (x_indices_num, out_indices_num)\n sp_tensor = scatter_nd(idx_tensor, fill(dtype(dout), (out_indices_num,), 1), sp_shape)\n sp_tensor = slice_op(sp_tensor, (1, 0), (x_indices_num - 1, out_indices_num))\n\n grad = transpose(dout, (0, 2, 3, 1))\n grad = reshape(grad, (x_batch, out_row, out_col, ksizes_row, ksizes_col, x_depth))\n grad = transpose(grad, (1, 2, 3, 4, 0, 5))\n grad = reshape(grad, (-1, x_batch * x_depth))\n\n jac = matmul(sp_tensor, grad)\n dx = reshape(jac, (x_row, x_col, x_batch, x_depth))\n dx = transpose(dx, (2, 3, 0, 1))\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.DepthwiseConv2dNative)\ndef get_bprop_depthwise_conv2d_native(self):\n \"\"\"Grad definition for `DepthwiseConv2dNative` operation.\"\"\"\n input_grad = G.DepthwiseConv2dNativeBackpropInput(\n self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,\n self.dilation, self.group\n )\n filter_grad = G.DepthwiseConv2dNativeBackpropFilter(\n self.channel_multiplier, self.kernel_size, self.pad_mode, self.pad, self.pad_list, self.mode, self.stride,\n self.dilation, self.group\n )\n get_shape = P.Shape()\n\n def bprop(x, w, out, dout):\n dx = input_grad(get_shape(x), w, dout)\n if env_force_bprop_seq == '1':\n x = F.depend(x, dx)\n dw = filter_grad(x, get_shape(w), dout)\n return dx, dw\n\n return bprop\n\n\n@bprop_getters.register(P.MaxPoolWithArgmax)\ndef get_bprop_max_pool_with_argmax(self):\n \"\"\"Grad definition for `MaxPoolWithArgmax` operation.\"\"\"\n maxpool_grad = G.MaxPoolGradWithArgmax(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n\n def bprop(x, out, dout):\n dx = maxpool_grad(x, dout[0], out[1])\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(G.MaxPoolGrad)\ndef get_bprop_max_pool_grad_grad(self):\n \"\"\"Grad definition for `MaxPoolGrad` operation.\"\"\"\n maxpool_grad_grad = G.MaxPoolGradGrad(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n\n def bprop(x1, x2, grad, out, dout):\n dx1 = zeros_like(x1)\n dx2 = zeros_like(x2)\n dgrad = maxpool_grad_grad(x1, x2, dout)\n return (dx1, dx2, dgrad)\n\n return bprop\n\n\n@bprop_getters.register(G.MaxPoolGradGrad)\ndef get_bprop_max_pool_grad_grad_grad(self):\n \"\"\"Grad definition for `MaxPoolGradGrad` operation.\"\"\"\n maxpool_grad = G.MaxPoolGrad(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n\n def bprop(x1, x2, grad, out, dout):\n dx1 = 
zeros_like(x1)\n dx2 = zeros_like(x2)\n dgrad = maxpool_grad(x1, x2, dout)\n return (dx1, dx2, dgrad)\n\n return bprop\n\n\n@bprop_getters.register(P.MaxPool)\ndef get_bprop_max_pool_grad(self):\n \"\"\"Grad definition for `MaxPool` operation.\"\"\"\n maxpool_grad = G.MaxPoolGrad(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode,\n data_format=self.format)\n\n def bprop(x, out, dout):\n dx = maxpool_grad(x, out, dout)\n return (dx,)\n\n return bprop\n\n\ndef _windowed_output_size(input_size, ksize, stride, pad_mode):\n \"\"\"\n helper func for AvgPoolGrad\n \"\"\"\n\n tmp_output = 0\n tmp_pad_need = 0\n tmp_pad_before = 0\n tmp_pad_after = 0\n if pad_mode == 'VALID':\n tmp_output = (input_size - ksize + stride) // stride\n tmp_pad_before = 0\n tmp_pad_after = 0\n elif pad_mode == 'SAME':\n tmp_output = (input_size + stride - 1) // stride\n tmp_pad_need = max(0, (tmp_output - 1) * stride + ksize - input_size)\n tmp_pad_before = tmp_pad_need // 2\n tmp_pad_after = tmp_pad_need - tmp_pad_before\n return tmp_output, tmp_pad_before, tmp_pad_after\n\n\n@constexpr\ndef _get_mean_matrix(x_shape, ksize, stride, pad_mode, x_dtype):\n \"\"\"\n helper func for AvgPoolGrad.\n\n `assist_input_matrix` is a 2d matrix with input_shape after padding,\n the value of element which is padded is 0, else are 1.\n For each element of output, it is mapped for slide window: `[h*h_stride : h*h_stride + h_ksize,\n w*w_stride : w*w_stride + w_ksize]` of `assist_input_matrix`, so the sum of slide window is the\n number of input that associate with output element.\n \"\"\"\n\n n_input, c_input, h_input, w_input = x_shape\n h_ksize, w_ksize = ksize[2], ksize[3]\n h_stride, w_stride = stride[2], stride[3]\n n_output = n_input\n c_output = c_input\n h_output, w_output = 0, 0\n pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0\n h_output, pad_top, pad_bottom = _windowed_output_size(h_input, h_ksize,\n h_stride, pad_mode)\n w_output, pad_left, pad_right = _windowed_output_size(w_input, w_ksize,\n w_stride, pad_mode)\n\n output_size = n_output * c_output * h_output * w_output\n output_shape = (n_output, c_output, h_output, w_output)\n output = np.array([0.0] * output_size)\n output = np.reshape(output, output_shape)\n\n in_shape_after_padding_2d = (h_input + pad_top + pad_bottom, w_input + pad_left + pad_right)\n assist_input_matrix = np.ones(in_shape_after_padding_2d).astype(np.float32)\n if pad_top > 0:\n assist_input_matrix[:pad_top, :] = 0\n if pad_bottom > 0:\n assist_input_matrix[-pad_bottom:, :] = 0\n if pad_left > 0:\n assist_input_matrix[:, :pad_left] = 0\n if pad_right > 0:\n assist_input_matrix[:, -pad_right:] = 0\n\n for h in range(h_output):\n for w in range(w_output):\n curr_input = assist_input_matrix[h*h_stride : h*h_stride + h_ksize, w*w_stride : w*w_stride + w_ksize]\n curr_sum = np.sum(curr_input)\n if curr_sum > 0:\n output[:, :, h, w] = 1. 
/ curr_sum\n return Tensor(output, x_dtype)\n\n\n@constexpr\ndef _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype):\n kernel_matrix = np.ones(kernel_matrix_shape)\n return Tensor(kernel_matrix, x_dtype)\n\n\n@bprop_getters.register(P.AvgPool)\ndef get_bprop_avg_pool_grad(self):\n \"\"\"Grad definition for `AvgPool` operation.\"\"\"\n\n # the parameter of AvgPoolGrad in GPU and TBE/CPU is not same\n if self.target == \"GPU\":\n avgpool_grad_gpu = G.AvgPoolGradGpu(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode,\n data_format=self.format)\n\n def bprop_gpu(x, out, dout):\n dx = avgpool_grad_gpu(x, out, dout)\n return (dx,)\n\n bprop_fn = bprop_gpu\n\n elif self.target == \"CPU\":\n avgpool_grad_cpu = G.AvgPoolGradCpu(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode,\n data_format=self.format)\n\n def bprop_cpu(x, out, dout):\n dx = avgpool_grad_cpu(x, out, dout)\n return (dx,)\n\n bprop_fn = bprop_cpu\n\n elif self.target == \"GE\":\n avgpool_grad_ge = G.AvgPoolGrad(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n shape_op = P.Shape()\n\n def bprop_ge(x, out, dout):\n dx = avgpool_grad_ge(shape_op(x), dout)\n return (dx,)\n\n bprop_fn = bprop_ge\n\n else:\n avgpool_grad_vm = G.AvgPoolGradVm(\n kernel_size=self.kernel_size,\n strides=self.strides,\n pad_mode=self.pad_mode)\n k_size_nchw = avgpool_grad_vm.kernel_size\n stride_nchw = avgpool_grad_vm.strides\n pad_mode = self.pad_mode\n\n def bprop_vm(x, out, dout):\n x_shape_nchw = F.shape(x)\n x_dtype = F.dtype(x)\n kernel_matrix_shape = (1, x_shape_nchw[1],\n k_size_nchw[2],\n k_size_nchw[3])\n mean_matrix = _get_mean_matrix(x_shape_nchw, k_size_nchw, stride_nchw, pad_mode, x_dtype)\n kernel_matrix = _get_kernel_matrix(x_shape_nchw, kernel_matrix_shape, pad_mode, x_dtype)\n dx = avgpool_grad_vm(x_shape_nchw, dout, mean_matrix, kernel_matrix)\n return (dx,)\n\n bprop_fn = bprop_vm\n\n return bprop_fn\n\n\n@bprop_getters.register(P.DropoutGenMask)\ndef get_bprop_dropout_gen_mask(self):\n \"\"\"Grad definition for `DropoutGenMask` operation.\"\"\"\n\n def bprop(shape, keep_prob, out, dout):\n return (zeros_like(shape), zeros_like(keep_prob))\n\n return bprop\n\n\n@bprop_getters.register(P.DropoutDoMask)\ndef get_bprop_dropout_do_mask(self):\n \"\"\"Grad definition for `DropoutDoMask` operation.\"\"\"\n do_mask = P.DropoutDoMask()\n\n def bprop(x, y, keep_prob, out, dout):\n return (do_mask(dout, y, keep_prob), zeros_like(y), zeros_like(keep_prob))\n\n return bprop\n\n\n@bprop_getters.register(P.Mish)\ndef get_bprop_mish(self):\n \"\"\"Grad definition for `Mish` operation.\"\"\"\n tanh = P.Tanh()\n tanh_grad = SG.TanhGrad()\n softplus = P.Softplus()\n softplus_grad = G.SoftplusGrad()\n\n def bprop(x, out, dout):\n dx1 = tanh(softplus(x))\n dx2 = softplus_grad(tanh_grad(dx1, x * dout), x)\n dx = (dx1 * dout + dx2)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.SeLU)\ndef get_bprop_selu(self):\n \"\"\"Grad definition for `SeLU` operation.\"\"\"\n scale = 1.0507009873554804934193349852946\n elu_grad = G.EluGrad()\n\n def bprop(x, out, dout):\n dx = elu_grad(dout, out) * scale\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.MulNoNan)\ndef get_bprop_mul_no_nan(self):\n \"\"\"Grad definition for `MulNoNan` operation.\"\"\"\n mul_no_nan = P.MulNoNan()\n reduce_sum = P.ReduceSum()\n reshape = P.Reshape()\n\n def bprop(x, y, out, dout):\n x_shape = F.shape(x)\n y_shape = F.shape(y)\n dx = 
mul_no_nan(dout, y)\n dy = mul_no_nan(x, dout)\n broadcast_x, broadcast_y = F.broadcast_gradient_args(x_shape, y_shape)\n if broadcast_x != ():\n dx = reshape(reduce_sum(dx, broadcast_x), x_shape)\n if broadcast_y != ():\n dy = reshape(reduce_sum(dy, broadcast_y), y_shape)\n return dx, dy\n\n return bprop\n\n\n@bprop_getters.register(P.ReLU)\ndef get_bprop_relu(self):\n \"\"\"Grad definition for `ReLU` operation.\"\"\"\n input_grad = G.ReluGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, out)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(G.ReluGrad)\ndef get_bprop_relu_grad(self):\n \"\"\"Grad definition for `ReLUGrad` operation.\"\"\"\n input_grad = G.ReluGrad()\n\n def bprop(grad, y, out, dout):\n dgrad = input_grad(dout, y)\n return dgrad, zeros_like(y)\n\n return bprop\n\n\n@bprop_getters.register(P.ReLU6)\ndef get_bprop_relu6(self):\n \"\"\"Grad definition for `ReLU6` operation.\"\"\"\n input_grad = G.ReLU6Grad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.ReLUV2)\ndef get_bprop_relu_v2(self):\n \"\"\"Grad definition for `ReLUV2` operation.\"\"\"\n input_grad = G.ReluGradV2()\n\n def bprop(x, out, dout):\n mask = out[1]\n dx = input_grad(dout[0], mask)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.HSwish)\ndef get_bprop_hswish(self):\n \"\"\"Grad definition for `HSwish` operation.\"\"\"\n input_grad = G.HSwishGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.HSigmoid)\ndef get_bprop_hsigmoid(self):\n \"\"\"Grad definition for `HSigmoid` operation.\"\"\"\n input_grad = G.HSigmoidGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Elu)\ndef get_bprop_elu(self):\n \"\"\"Grad definition for `Elu` operation.\"\"\"\n input_grad = G.EluGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, out)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Sigmoid)\ndef get_bprop_sigmoid(self):\n \"\"\"Grad definition for `Sigmoid` operation.\"\"\"\n input_grad = G.SigmoidGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(out, dout)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(G.SigmoidGrad)\ndef get_bprop_sigmoid_grad(self):\n \"\"\"Grad definition for `SigmoidGrad` operation.\"\"\"\n sigmoid_grad = G.SigmoidGrad()\n\n def bprop(y, grad, out, dout):\n dy = dout * grad * (1. 
- 2 * y)\n dgrad = sigmoid_grad(y, dout)\n return dy, dgrad\n\n return bprop\n\n\n@constexpr\ndef _get_transpose_axis(x_shp, axis):\n rank = len(x_shp)\n if axis < 0:\n axis += rank\n reverse_axis = [i for i in range(rank)]\n reverse_axis[axis] = rank - 1\n reverse_axis[rank - 1] = axis\n return tuple(reverse_axis)\n\n\n@bprop_getters.register(P.Softmax)\ndef get_bprop_softmax(self):\n \"\"\"Grad definition for `Softmax` operation.\"\"\"\n sum_func = P.ReduceSum(keep_dims=True)\n sub = P.Sub()\n mul = P.Mul()\n get_shape = P.Shape()\n transpose = P.Transpose()\n axis = self.axis\n if not isinstance(axis, int):\n axis = axis[0]\n\n def bprop(x, out, dout):\n # dx = (dout - sum(dout * out)) * out\n # This formula is correct only when the `axis` is the last dimension.\n # In order to support the scenario where the `axis` is other values,\n # we transpose the data of the `axis` dimension to the last dimension for calculation,\n # and then transpose it back after the calculation.\n reverse_axis = _get_transpose_axis(get_shape(x), axis)\n out = transpose(out, reverse_axis)\n dout = transpose(dout, reverse_axis)\n dx = mul(out, sub(dout, sum_func(mul(out, dout), -1)))\n dx = transpose(dx, reverse_axis)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.LogSoftmax)\ndef get_bprop_log_softmax(self):\n \"\"\"Grad definition for `LogSoftmax` operation.\"\"\"\n logsoftmax_grad = G.LogSoftmaxGrad(self.axis)\n\n def bprop(x, out, dout):\n dx = logsoftmax_grad(out, dout)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Softplus)\ndef get_bprop_softplus(self):\n \"\"\"Grad definition for `Softplus` operation.\"\"\"\n softplus_grad = G.SoftplusGrad()\n\n def bprop(x, out, dout):\n dx = softplus_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Softsign)\ndef get_bprop_softsign(self):\n \"\"\"Grad definition for `Softsign` operation.\"\"\"\n mul = P.Mul()\n absolute = P.Abs()\n div = P.Div()\n square = P.Square()\n\n def bprop(x, out, dout):\n dx = mul(dout, div(1, square(1 + absolute(x))))\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.Tanh)\ndef get_bprop_tanh(self):\n \"\"\"Grad definition for `Tanh` operation.\"\"\"\n tanh_grad = SG.TanhGrad()\n\n def bprop(x, out, dout):\n dx = tanh_grad(out, dout)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(G.TanhGrad)\ndef get_bprop_tanh_grad(self):\n \"\"\"Grad definition for `TanhGrad` operation.\"\"\"\n tanh_grad = G.TanhGrad()\n\n def bprop(y, grad, out, dout):\n dy = dout * -2.0 * grad * y\n dgrad = tanh_grad(y, dout)\n return dy, dgrad\n\n return bprop\n\n\n@bprop_getters.register(P.Gelu)\ndef get_bprop_gelu(self):\n \"\"\"Grad definition for `Gelu` operation.\"\"\"\n input_grad = G.GeluGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x, out)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.FastGelu)\ndef get_bprop_fast_gelu(self):\n \"\"\"Grad definition for `FastGelu` operation.\"\"\"\n input_grad = G.FastGeluGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.FusedBatchNorm)\ndef get_bprop_fused_batch_norm(self):\n \"\"\"Grad definition for `FusedBatchNorm` operation.\"\"\"\n input_grad = G.FusedBatchNormGrad(self.epsilon, self.momentum)\n target_cpu = False\n if self.target == \"CPU\":\n input_grad = G.FusedBatchNormGradCPU(self.epsilon, self.momentum)\n target_cpu = True\n def bprop(x, scale, b, mean, variance, out, dout):\n saved_mean = out[3]\n saved_variance = out[4]\n if 
target_cpu:\n out = input_grad(dout[0], x, scale, b, saved_mean, saved_variance)\n else:\n out = input_grad(dout[0], x, scale, saved_mean, saved_variance)\n dx = out[0]\n dscale = out[1]\n dbias = out[2]\n return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)\n\n return bprop\n\n\n@bprop_getters.register(P.FusedBatchNormEx)\ndef get_bprop_fused_batch_norm_ex(self):\n \"\"\"Grad definition for `FusedBatchNormEx` operation.\"\"\"\n input_grad = G.FusedBatchNormGradEx(self.epsilon, self.momentum, self.format)\n\n def bprop(x, scale, b, mean, variance, out, dout):\n saved_mean = out[3]\n saved_variance = out[4]\n reserve = out[5]\n out = input_grad(dout[0], x, scale, saved_mean, saved_variance, reserve)\n dx = out[0]\n dscale = out[1]\n dbias = out[2]\n return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)\n\n return bprop\n\n\n@bprop_getters.register(P.InstanceNorm)\ndef get_bprop_instance_norm(self):\n \"\"\"Grad definition for `InstanceNorm` operation.\"\"\"\n is_training = self.is_training\n input_grad = G.InstanceNormGrad(is_training, self.epsilon, self.momentum)\n\n def bprop(x, gamma, beta, mean, variance, out, dout):\n saved_mean = out[1]\n saved_variance = out[2]\n out = input_grad(dout[0], x, gamma, saved_mean, saved_variance)\n dx = out[0]\n dgamma = out[1]\n dbeta = out[2]\n return dx, dgamma, dbeta, zeros_like(mean), zeros_like(variance)\n\n return bprop\n\n\n@bprop_getters.register(P.BatchNorm)\ndef get_bprop_batch_norm(self):\n \"\"\"Grad definition for `BatchNorm` operation.\"\"\"\n is_training = self.is_training\n input_grad = G.BatchNormGrad(is_training, self.epsilon)\n\n def bprop(x, scale, b, mean, variance, out, dout):\n if is_training:\n saved_reserve_1 = out[3]\n saved_reserve_2 = out[4]\n else:\n saved_reserve_1 = mean\n saved_reserve_2 = variance\n out = input_grad(dout[0], x, scale, saved_reserve_1, saved_reserve_2)\n dx = out[0]\n dscale = out[1]\n dbias = out[2]\n return dx, dscale, dbias, zeros_like(mean), zeros_like(variance)\n\n return bprop\n\n\n@bprop_getters.register(P.LayerNorm)\ndef get_bprop_layer_norm(self):\n \"\"\"Grad definition for `LayerNorm` operation.\"\"\"\n layer_norm_grad = G.LayerNormGrad(self.begin_norm_axis, self.begin_params_axis)\n\n def bprop(x, gamma, beta, out, dout):\n dx, d_gamma, d_beta = layer_norm_grad(\n x, dout[0], out[2], out[1], gamma)\n return dx, d_gamma, d_beta\n\n return bprop\n\n\n@bprop_getters.register(G.LayerNormGrad)\ndef get_bprop_layer_norm_grad(self):\n \"\"\"Grad definition for `LayerNormGrad` operation.\"\"\"\n layer_norm_grad_grad = G.LayerNormGradGrad(self.begin_norm_axis, self.begin_params_axis)\n\n def bprop(x, dy, variance, mean, gamma, out, dout):\n d_x, d_dy, d_gamma = layer_norm_grad_grad(\n x, dy, variance, mean, gamma, dout[0], dout[1], dout[2])\n return d_x, d_dy, zeros_like(variance), zeros_like(mean), d_gamma\n\n return bprop\n\n\n@bprop_getters.register(P.L2Normalize)\ndef get_bprop_l2normalize(self):\n \"\"\"Grad definition for `L2Normalize` operation.\"\"\"\n input_grad = G.L2NormalizeGrad(self.axis, self.epsilon)\n\n def bprop(x, out, dout):\n dx = input_grad(x, out, dout)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.SoftmaxCrossEntropyWithLogits)\ndef get_bprop_softmax_cross_entropy_with_logits(self):\n \"\"\"Grad definition for `SoftmaxCrossEntropyWithLogits` operation.\"\"\"\n expand = P.ExpandDims()\n\n def bprop(logits, labels, out, dout):\n grad = out[1]\n grad = grad * expand(dout[0], -1)\n return grad, zeros_like(labels)\n\n return 
bprop\n\n\n@bprop_getters.register(P.NLLLoss)\ndef get_bprop_nll_loss(self):\n \"\"\"Grad definition for `NLLLoss` operation.\"\"\"\n nll_loss_grad = G.NLLLossGrad(reduction=self.reduction)\n\n def bprop(x, target, weight, out, dout):\n total_weight = out[1]\n dout_x = dout[0]\n dx = nll_loss_grad(x, dout_x, target, weight, total_weight)\n return dx, zeros_like(target), zeros_like(weight)\n\n return bprop\n\n\n@bprop_getters.register(P.SparseSoftmaxCrossEntropyWithLogits)\ndef get_bprop_sparse_softmax_cross_entropy_with_logits(self):\n \"\"\"Grad definition for `SparseSoftmaxCrossEntropyWithLogits` operation.\"\"\"\n is_grad = self.is_grad\n grad_op = P.SparseSoftmaxCrossEntropyWithLogits(is_grad=True)\n\n def bprop(logits, labels, out, dout):\n grad = out[0]\n if not is_grad:\n # if construct use loss\n grad = grad_op(logits, labels)\n grad = F.depend(grad, out)\n grad = grad * dout\n return grad, zeros_like(labels)\n\n return bprop\n\n\n@bprop_getters.register(P.ResizeBilinear)\ndef get_bprop_resize_bilinear(self):\n \"\"\"Grad definition for `ResizeBilinear` operation.\"\"\"\n resize_grad = G.ResizeBilinearGrad(self.align_corners)\n\n def bprop(x, out, dout):\n dx = resize_grad(dout, x)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.OneHot)\ndef get_bprop_onehot(self):\n \"\"\"Grad definition for `OneHot` operation.\"\"\"\n\n def bprop(indices, depth, on_value, off_value, out, dout):\n return zeros_like(indices), zeros_like(depth), zeros_like(on_value), zeros_like(off_value)\n\n return bprop\n\n\n@constexpr\ndef _range_op(start, limit, delta, dtype):\n \"\"\"helper function for Grad TopK\"\"\"\n output_tensor = Tensor(list(range(start, limit, delta)), dtype)\n return output_tensor\n\n@constexpr\ndef _get_1d_shape(in_shape):\n \"\"\"helper function for Grad TopK\"\"\"\n out_shape = 1\n for i in in_shape:\n out_shape *= i\n return (out_shape,)\n\n@bprop_getters.register(P.TopK)\ndef get_bprop_top_kv2(self):\n \"\"\"Grad definition for `TopK` operation.\"\"\"\n scatter = P.ScatterNd()\n expand_dims = P.ExpandDims()\n shape_op = P.Shape()\n reshape_op = P.Reshape()\n dtype = P.DType()\n\n def bprop(input_x, k, out, dout):\n\n in_shape = shape_op(input_x)\n in_lastdim = in_shape[-1]\n\n indices = out[1]\n ind_shape = shape_op(indices)\n ind_lastdim = ind_shape[-1]\n\n ind_2d = reshape_op(indices, (-1, ind_lastdim))\n outerdim = shape_op(ind_2d)[0]\n\n # [0, outterdim, 2*outerdim, ..., (k-1)*outerdim]\n indices_dtype = dtype(indices)\n range_flatten_index = _range_op(0, outerdim * in_lastdim, in_lastdim, indices_dtype)\n\n # expand_dims to (k, 1), then broadcast\n ind = reshape_op(ind_2d + expand_dims(range_flatten_index, -1), (-1,))\n in_shape_1d = _get_1d_shape(in_shape)\n\n out_grad = reshape_op(\n scatter(\n expand_dims(ind, -1),\n reshape_op(dout[0], (-1,)),\n in_shape_1d),\n in_shape)\n return out_grad, zeros_like(k)\n\n return bprop\n\n\n@bprop_getters.register(P.SmoothL1Loss)\ndef get_bprop_smooth_l1_loss(self):\n \"\"\"Grad definition for `SmoothL1Loss` operation.\"\"\"\n grad = G.SmoothL1LossGrad(self.beta)\n\n def bprop(prediction, target, out, dout):\n dx = grad(prediction, target, dout)\n dy = grad(target, prediction, dout)\n return dx, dy\n\n return bprop\n\n\n@bprop_getters.register(P.L2Loss)\ndef get_bprop_l2_loss(self):\n \"\"\"Grad definition for `L2Loss` operation.\"\"\"\n\n def bprop(x, out, dout):\n dx = x * dout\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.RNNTLoss)\ndef get_bprop_rnnt_loss(self):\n \"\"\"Grad definition for `RNNTLoss` 
operation.\"\"\"\n\n def bprop(acts, labels, act_lens, label_lens, out, dout):\n grad = out[1]\n return grad, zeros_like(labels), zeros_like(act_lens), zeros_like(label_lens)\n return bprop\n\n\n@bprop_getters.register(P.PReLU)\ndef get_bprop_prelu(self):\n \"\"\"Grad definition for `PReLU` operation.\"\"\"\n grad = G.PReLUGrad()\n\n def bprop(x, w, out, dout):\n dx, dw = grad(dout, x, w)\n return dx, dw\n\n return bprop\n\n\n@bprop_getters.register(P.LSTM)\ndef get_bprop_lstm(self):\n \"\"\"Grad definition for `LSTM` operation.\"\"\"\n lstm_grad_data = G.LSTMGradData(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout\n )\n\n lstm_grad_weight = G.LSTMGradWeight(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout\n )\n lstm_grad = G.LSTMGrad(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n has_bias=self.has_bias,\n bidirectional=self.bidirectional,\n dropout=self.dropout\n )\n\n def bprop(x, hx, cx, w, out, dout):\n y, _, _, reserve, state = out\n dy, dhy, dcy, _, _ = dout\n dx, dhx, dcx = lstm_grad_data(y, dy, dhy, dcy, w, hx, cx, reserve, state)\n dw = lstm_grad_weight(F.depend(x, dx), hx, y, reserve, state)\n return dx, dhx, dcx, dw\n\n #\n def bprop_cpu(x, hx, cx, w, out, dout):\n y, hy, cy, reserve, _ = out\n dy, dhy, dcy, _, _ = dout\n dx, dhx, dcx, dw = lstm_grad(x, hx, cx, w, y, hy, cy, dy, dhy, dcy, reserve)\n return dx, dhx, dcx, dw\n\n if context.get_context('device_target') == \"CPU\":\n return bprop_cpu\n\n return bprop\n\n\n@bprop_getters.register(P.DynamicRNN)\ndef get_bprop_dynamic_rnn(self):\n \"\"\"Grad definition for `DynamicRNN` operation.\"\"\"\n dynamic_rnn_grad = G.DynamicRNNGrad(cell_type=self.cell_type,\n direction=self.direction,\n cell_depth=self.cell_depth,\n use_peephole=self.use_peephole,\n keep_prob=self.keep_prob,\n cell_clip=self.cell_clip,\n num_proj=self.num_proj,\n time_major=self.time_major,\n forget_bias=self.forget_bias)\n expand_dims = P.ExpandDims()\n\n def bprop(x, w, b, seq_length, init_h, init_c, out, dout):\n dy, dh, dc, _, _, _, _, _, = dout\n dh = dh[-1]\n dc = dc[-1]\n y, h, c, i, j, f, o, tanhct = out\n dw, db, dx, dh_prev, dc_prev = dynamic_rnn_grad(x, w, b, y, init_h[0], init_c[0], h,\n c, dy, dh, dc, i, j, f, o, tanhct)\n dh_prev = expand_dims(dh_prev, 0)\n dc_prev = expand_dims(dc_prev, 0)\n return dx, dw, db, (0), dh_prev, dc_prev\n return bprop\n\n\n@bprop_getters.register(P.DynamicGRUV2)\ndef get_bprop_dynamic_gru_v2(self):\n \"\"\"Grad definition for `DynamicGRUV2` operation.\"\"\"\n dynamic_gru_v2_grad = G.DynamicGRUV2Grad(self.direction, self.cell_depth, self.keep_prob, self.cell_clip,\n self.num_proj, self.time_major, self.gate_order,\n self.reset_after)\n\n def bprop(x, winput, whidden, binput, bhidden, seq, init_h, out, dout):\n y, out_h, update, reset, new, hidden_new = out\n dy, dout_h, _, _, _, _ = dout\n\n dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev = dynamic_gru_v2_grad(x, winput, whidden, y, init_h,\n out_h, dy, dout_h[-1], update,\n reset, new, hidden_new, None, None)\n return dx, dw_input, dw_hidden, db_input, db_hidden, (0), dh_prev\n return bprop\n\n\n@bprop_getters.register(P.SigmoidCrossEntropyWithLogits)\ndef get_bprop_sigmoid_crossentropy_with_logits(self):\n \"\"\"Grad definition for 
`SigmoidCrossEntropyWithLogits` operation.\"\"\"\n op = G.SigmoidCrossEntropyWithLogitsGrad()\n\n def bprop(x, y, out, dout):\n dx = op(x, y, dout)\n return (dx, zeros_like(y))\n\n return bprop\n\n\n@bprop_getters.register(P.Pad)\ndef get_bprop_pad(self):\n \"\"\"Grad definition for `Pad` operation.\"\"\"\n shape_op = P.Shape()\n paddings = self.paddings\n\n def bprop(x, out, dout):\n begin = ()\n for item in paddings:\n begin += (item[0],)\n shp = shape_op(x)\n dx = P.Slice()(dout, begin, shp)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.MirrorPad)\ndef get_bprop_mirror_pad(self):\n \"\"\"Grad definition for `MirrorPad` operation.\"\"\"\n mirror_pad_grad = G.MirrorPadGrad(self.mode)\n\n def bprop(x, paddings, out, dout):\n dx = mirror_pad_grad(dout, paddings)\n return (dx, zeros_like(paddings))\n\n return bprop\n\n\n@bprop_getters.register(P.ROIAlign)\ndef get_bprop_roi_align(self):\n \"\"\"Grad definition for `ROIAlign` operation.\"\"\"\n shape_op = P.Shape()\n pooled_height = self.pooled_height\n pooled_width = self.pooled_width\n spatial_scale = self.spatial_scale\n sample_num = self.sample_num\n\n def bprop(inputs, rois, out, dout):\n inputs_shape = shape_op(inputs)\n dx = G.ROIAlignGrad(inputs_shape,\n pooled_height,\n pooled_width,\n spatial_scale,\n sample_num,\n )(dout, rois)\n return dx, zeros_like(rois)\n\n return bprop\n\n\n@bprop_getters.register(P.Conv2DBackpropInput)\ndef get_bprop_conv2d_backprop_input(self):\n \"\"\"Grad definition for `Conv2DBackpropInput` operation.\"\"\"\n filter_grad = G.Conv2DBackpropFilter(\n self.out_channel, self.kernel_size, self.pad_mode, self.pad, self.pad_list, mode=self.mode,\n dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format\n )\n input_grad = P.Conv2D(\n self.out_channel, self.kernel_size, pad_mode=self.pad_mode.lower(), pad=self.pad,\n dilation=self.dilation, stride=self.stride, group=self.group, data_format=self.format\n )\n\n def bprop(x, w, f_sizes, out, dout):\n dx = input_grad(dout, w)\n if env_force_bprop_seq == '1':\n x = F.depend(x, dx)\n dw = filter_grad(x, dout, F.shape(w))\n return dx, dw, zeros_like(f_sizes)\n\n return bprop\n\n\n@bprop_getters.register(P.BinaryCrossEntropy)\ndef get_bprop_binary_cross_entropy(self):\n \"\"\"Grad definition for `BinaryCrossEntropy` operation.\"\"\"\n grad = G.BinaryCrossEntropyGrad(self.reduction)\n\n def bprop(x, y, weight, out, dout):\n dx = grad(x, y, dout, weight)\n return dx, zeros_like(y), zeros_like(weight)\n\n return bprop\n\n@bprop_getters.register(P.KLDivLoss)\ndef get_bprop_kl_div_loss(self):\n \"\"\"Grad definition for `KLDivLoss` operation.\"\"\"\n grad = G.KLDivLossGrad(self.reduction)\n\n def bprop(x, y, out, dout):\n dx, dy = grad(x, y, dout)\n return dx, dy\n\n return bprop\n\n\n@bprop_getters.register(P.Dropout)\ndef get_bprop_dropout(self):\n \"\"\"Grad definition for `Dropout` operation.\"\"\"\n grad = G.DropoutGrad(self.keep_prob)\n\n def bprop(x, out, dout):\n _, mask = out\n dy, _ = dout\n dx = grad(dy, mask)\n return (dx,)\n\n return bprop\n\n\n@bprop_getters.register(P.CTCLoss)\ndef get_bprop_ctc_loss(self):\n \"\"\"Grad definition for `CTCLoss` operation\"\"\"\n expand = P.ExpandDims()\n\n def bprop(inputs, labels_indices, labels_values, sequence_length, out, dout):\n grad_loss = out[1]\n grad = grad_loss * expand(dout[0], -1)\n return grad, zeros_like(labels_indices), zeros_like(labels_values), zeros_like(sequence_length)\n\n return bprop\n\n\n@bprop_getters.register(P.BasicLSTMCell)\ndef 
get_bprop_basic_lstm_cell(self):\n \"\"\"Grad definition for `BasicLSTMCell` operation.\"\"\"\n basic_lstm_cell_cstate_grad = G.BasicLSTMCellCStateGrad(\n forget_bias=self.forget_bias,\n activation=self.activation\n )\n\n basic_lstm_cell_weight_grad = G.BasicLSTMCellWeightGrad()\n\n basic_lstm_cell_input_grad = G.BasicLSTMCellInputGrad(keep_prob=self.keep_prob)\n\n def bprop(x, h, c, w, b, out, dout):\n _, _, it, jt, ft, ot, tanhct = out\n dct, dht, _, _, _, _, _ = dout\n dgate, dct_1 = basic_lstm_cell_cstate_grad(c, dht, dct, it, jt, ft, ot, tanhct)\n dxt, dht = basic_lstm_cell_input_grad(dgate, w)\n dw, db = basic_lstm_cell_weight_grad(F.depend(x, dxt), h, dgate)\n return dxt, dht, dct_1, dw, db\n return bprop\n\n\n@bprop_getters.register(P.LRN)\ndef get_bprop_lrn(self):\n \"\"\"Grad definition for `LRN` operation.\"\"\"\n grad = G.LRNGrad(self.depth_radius, self.bias, self.alpha, self.beta)\n\n def bprop(x, out, dout):\n dx = grad(dout, x, out)\n return (dx,)\n\n return bprop\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import context, Tensor, Parameter\nfrom mindspore.common.api import _executor\nfrom mindspore.nn import Cell, TrainOneStepCell, Momentum\nfrom mindspore.ops import operations as P\n\n\nclass Net(Cell):\n def __init__(self, mul_weight, strategy1=None, strategy2=None):\n super().__init__()\n self.mul = P.Mul().shard(strategy1)\n self.mul2 = P.Mul().shard(strategy1)\n self.dropout_do_mask = P.DropoutDoMask().shard(strategy2)\n self.dropout_gen_mask = P.DropoutGenMask()\n self.get_shape = P.Shape()\n self.cast = P.Cast()\n self.mul_weight = Parameter(mul_weight, \"w1\")\n self.mul_weight2 = Parameter(mul_weight, \"w2\")\n self.keep_prob = Tensor(0.9)\n\n def construct(self, x, b):\n out = self.mul(x, self.mul_weight)\n shape = self.get_shape(out)\n dtype = P.DType()(out)\n keep_prob = self.cast(self.keep_prob, dtype)\n mask = self.dropout_gen_mask(shape, keep_prob)\n out = self.dropout_do_mask(out, mask, keep_prob)\n out = self.mul2(out, self.mul_weight2)\n return out\n\n\n_x = Tensor(np.ones([128, 64]), dtype=ms.float32)\n_w1 = Tensor(np.ones([128, 64]), dtype=ms.float32)\n_b = Tensor(np.ones([128, 64]), dtype=ms.float32)\n\n\ndef compile_net(net):\n optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)\n train_net = TrainOneStepCell(net, optimizer)\n train_net.set_auto_parallel()\n train_net.set_train()\n _executor.compile(train_net, _x, _b)\n context.reset_auto_parallel_context()\n\n\ndef test_dropout_do_mask_data_parallel():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((16, 1), (16, 1))\n strategy2 = ((16, 1),)\n net = Net(_w1, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_dropout_do_mask_model_parallel():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((1, 16), (1, 16))\n 
strategy2 = ((1, 16),)\n net = Net(_w1, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_dropout_do_mask_hybrid_parallel():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((4, 4), (4, 4))\n strategy2 = ((4, 4),)\n net = Net(_w1, strategy1, strategy2)\n compile_net(net)\n\n\ndef test_dropout_do_mask_auto_parallel():\n context.set_auto_parallel_context(parallel_mode=\"auto_parallel\", device_num=16, global_rank=0)\n net = Net(_w1)\n compile_net(net)\n\n\ndef test_dropout_do_mask_repeat_calc():\n context.set_auto_parallel_context(parallel_mode=\"semi_auto_parallel\", device_num=16, global_rank=0)\n strategy1 = ((4, 4), (4, 4))\n strategy2 = ((2, 4),)\n net = Net(_w1, strategy1, strategy2)\n compile_net(net)\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\n\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\nfrom mindspore.ops.composite import GradOperation\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"CPU\")\n\n\nclass Net(nn.Cell):\n def __init__(self, sigma=1.0):\n super(Net, self).__init__()\n self.SmoothL1Loss = P.SmoothL1Loss(sigma)\n\n def construct(self, pred, gt):\n return self.SmoothL1Loss(pred, gt)\n\n\nclass Grad(nn.Cell):\n def __init__(self, network):\n super(Grad, self).__init__()\n self.grad = GradOperation(get_all=True, sens_param=True)\n self.network = network\n\n def construct(self, pred, gt, dout):\n return self.grad(self.network)(pred, gt, dout)\n\n\n@pytest.mark.level0\n@pytest.mark.platform_x86_cpu\n@pytest.mark.env_onecard\ndef test_net():\n pred = np.random.randn(2, 4).astype(np.float32)\n gt = np.random.randn(2, 4).astype(np.float32)\n dout = np.random.randn(2, 4).astype(np.float32)\n smooth_l1_loss_grad = Grad(Net())\n output = smooth_l1_loss_grad(Tensor(pred), Tensor(gt), Tensor(dout))\n print(\"------------- input ---------------\")\n print(\"predict:\\n\", pred)\n print(\"grount truth:\\n\", gt)\n print(\"dout:\\n\", dout)\n print(\"------------- output ---------------\")\n print(\"predict grad:\\n\", output[0].asnumpy())\n", "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting 
RandomVerticalFlipWithBBox op in DE\n\"\"\"\nimport numpy as np\nimport mindspore.dataset as ds\nimport mindspore.dataset.vision.c_transforms as c_vision\n\nfrom mindspore import log as logger\nfrom util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \\\n config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5\n\nGENERATE_GOLDEN = False\n\n# Updated VOC dataset with correct annotations - DATA_DIR\nDATA_DIR_VOC = \"../data/dataset/testVOC2012_2\"\n# COCO dataset - DATA_DIR, ANNOTATION_DIR\nDATA_DIR_COCO = [\"../data/dataset/testCOCO/train/\", \"../data/dataset/testCOCO/annotations/train.json\"]\n\n\ndef test_random_vertical_flip_with_bbox_op_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomVerticalFlipWithBBox Op applied\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_c\")\n # Load dataset\n dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n # map to apply ops\n dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp)\n\n\ndef test_random_vertical_flip_with_bbox_op_coco_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomVerticalFlipWithBBox Op applied,\n Testing with Coco dataset\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_coco_c\")\n # load dataset\n dataCoco1 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task=\"Detection\",\n decode=True, shuffle=False)\n\n dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task=\"Detection\",\n decode=True, shuffle=False)\n\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataCoco1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataCoco2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp, \"bbox\")\n\n\ndef test_random_vertical_flip_with_bbox_op_rand_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomVerticalFlipWithBBox Op applied,\n tests with MD5 check, expected to pass\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_rand_c\")\n original_seed = config_get_set_seed(29847)\n original_num_parallel_workers = config_get_set_num_parallel_workers(1)\n\n # Load dataset\n dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n test_op = c_vision.RandomVerticalFlipWithBBox(0.8)\n\n # map to apply ops\n dataVoc2 = 
dataVoc2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n filename = \"random_vertical_flip_with_bbox_01_c_result.npz\"\n save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp)\n\n # Restore config setting\n ds.config.set_seed(original_seed)\n ds.config.set_num_parallel_workers(original_num_parallel_workers)\n\n\ndef test_random_vertical_flip_with_bbox_op_edge_c(plot_vis=False):\n \"\"\"\n Prints images and bboxes side by side with and without RandomVerticalFlipWithBBox Op applied,\n applied on dynamically generated edge case, expected to pass\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_edge_c\")\n dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n # maps to convert data into valid edge case data\n dataVoc1 = dataVoc1.map(\n operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype))],\n input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n # Test Op added to list of Operations here\n dataVoc2 = dataVoc2.map(\n operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype)),\n test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n unaugSamp, augSamp = [], []\n\n for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n unaugSamp.append(unAug)\n augSamp.append(Aug)\n\n if plot_vis:\n visualize_with_bounding_boxes(unaugSamp, augSamp)\n\n\ndef test_random_vertical_flip_with_bbox_op_invalid_c():\n \"\"\"\n Test RandomVerticalFlipWithBBox Op on invalid constructor parameters, expected to raise ValueError\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_invalid_c\")\n dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n\n try:\n test_op = c_vision.RandomVerticalFlipWithBBox(2)\n\n # map to apply ops\n dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=[\"image\", \"bbox\"],\n output_columns=[\"image\", \"bbox\"],\n column_order=[\"image\", \"bbox\"])\n\n for _ in dataVoc2.create_dict_iterator(num_epochs=1):\n break\n\n except ValueError as err:\n logger.info(\"Got an exception in DE: {}\".format(str(err)))\n assert \"Input prob is not within the required interval of (0.0 to 1.0).\" in str(err)\n\n\ndef test_random_vertical_flip_with_bbox_op_bad_c():\n \"\"\"\n Tests RandomVerticalFlipWithBBox Op with invalid bounding boxes, expected to catch multiple errors\n \"\"\"\n logger.info(\"test_random_vertical_flip_with_bbox_op_bad_c\")\n test_op = c_vision.RandomVerticalFlipWithBBox(1)\n\n data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, 
\"bounding boxes is out of bounds of the image\")\n data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.HeightOverflow, \"bounding boxes is out of bounds of the image\")\n data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.NegativeXY, \"negative value\")\n data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task=\"Detection\", usage=\"train\", shuffle=False, decode=True)\n check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WrongShape, \"4 features\")\n\n\nif __name__ == \"__main__\":\n test_random_vertical_flip_with_bbox_op_c(plot_vis=True)\n test_random_vertical_flip_with_bbox_op_coco_c(plot_vis=True)\n test_random_vertical_flip_with_bbox_op_rand_c(plot_vis=True)\n test_random_vertical_flip_with_bbox_op_edge_c(plot_vis=True)\n test_random_vertical_flip_with_bbox_op_invalid_c()\n test_random_vertical_flip_with_bbox_op_bad_c()\n" ]
[ [ "numpy.random.seed", "numpy.random.rand" ], [ "numpy.array", "numpy.ones", "numpy.reshape", "numpy.sum" ], [ "numpy.ones" ], [ "numpy.random.randn" ], [ "numpy.array" ] ]
abishekganesh72/koalas
[ "40c2e209384d078ee75d08c7681d2e6a276ab834" ]
[ "databricks/koalas/frame.py" ]
[ "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA wrapper class for Spark DataFrame to behave similar to pandas DataFrame.\n\"\"\"\nimport re\nimport warnings\nfrom functools import partial, reduce\nfrom typing import Any, Optional, List, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like, \\\n is_dict_like\nfrom pyspark import sql as spark\nfrom pyspark.sql import functions as F, Column\nfrom pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,\n IntegerType, LongType, ShortType, StructField, StructType,\n to_arrow_type)\nfrom pyspark.sql.utils import AnalysisException\n\nfrom databricks import koalas as ks # For running doctests and reference resolution in PyCharm.\nfrom databricks.koalas.utils import default_session, validate_arguments_and_invoke_function\nfrom databricks.koalas.generic import _Frame, max_display_count\nfrom databricks.koalas.metadata import Metadata\nfrom databricks.koalas.missing.frame import _MissingPandasLikeDataFrame\nfrom databricks.koalas.ml import corr\nfrom databricks.koalas.typedef import infer_pd_series_spark_type\n\n\n# These regular expression patterns are complied and defined here to avoid to compile the same\n# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.\n# Two patterns basically seek the footer string from Pandas'\nREPR_PATTERN = re.compile(r\"\\n\\n\\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\\]$\")\nREPR_HTML_PATTERN = re.compile(\n r\"\\n\\<p\\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\\<\\/p\\>\\n\\<\\/div\\>$\")\n\n\nclass DataFrame(_Frame):\n \"\"\"\n Koala DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame\n internally.\n\n :ivar _sdf: Spark Column instance\n :ivar _metadata: Metadata related to column names and index information.\n\n Parameters\n ----------\n data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame or Spark DataFrame\n Dict can contain Series, arrays, constants, or list-like objects\n If data is a dict, argument order is maintained for Python 3.6\n and later.\n Note that if `data` is a Pandas DataFrame, other arguments should not be used.\n If `data` is a Spark DataFrame, all other arguments except `index` should not be used.\n index : Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n no indexing information part of input data and no index provided\n If `data` is a Spark DataFrame, `index` is expected to be `Metadata`.\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided\n dtype : dtype, default None\n Data type to force. Only a single dtype is allowed. If None, infer\n copy : boolean, default False\n Copy data from inputs. 
Only affects DataFrame / 2d ndarray input\n\n Examples\n --------\n Constructing DataFrame from a dictionary.\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Constructing DataFrame from Pandas DataFrame\n\n >>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Notice that the inferred dtype is int64.\n\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n To enforce a single dtype:\n\n >>> df = ks.DataFrame(data=d, dtype=np.int8)\n >>> df.dtypes\n col1 int8\n col2 int8\n dtype: object\n\n Constructing DataFrame from numpy ndarray:\n\n >>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df2 # doctest: +SKIP\n a b c d e\n 0 3 1 4 9 8\n 1 4 8 4 8 4\n 2 7 6 5 6 7\n 3 8 7 9 1 0\n 4 2 5 4 3 9\n \"\"\"\n def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):\n if isinstance(data, pd.DataFrame):\n assert index is None\n assert columns is None\n assert dtype is None\n assert not copy\n self._init_from_pandas(data)\n elif isinstance(data, spark.DataFrame):\n assert columns is None\n assert dtype is None\n assert not copy\n self._init_from_spark(data, index)\n else:\n pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)\n self._init_from_pandas(pdf)\n\n def _init_from_pandas(self, pdf):\n metadata = Metadata.from_pandas(pdf)\n reset_index = pdf.reset_index()\n reset_index.columns = metadata.columns\n schema = StructType([StructField(name, infer_pd_series_spark_type(col),\n nullable=bool(col.isnull().any()))\n for name, col in reset_index.iteritems()])\n for name, col in reset_index.iteritems():\n dt = col.dtype\n if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):\n continue\n reset_index[name] = col.replace({np.nan: None})\n self._init_from_spark(default_session().createDataFrame(reset_index, schema=schema),\n metadata)\n\n def _init_from_spark(self, sdf, metadata=None):\n self._sdf = sdf\n if metadata is None:\n self._metadata = Metadata(data_columns=self._sdf.schema.fieldNames())\n else:\n self._metadata = metadata\n\n @property\n def _index_columns(self):\n return [self._sdf.__getitem__(field)\n for field in self._metadata.index_columns]\n\n def _reduce_for_stat_function(self, sfun):\n \"\"\"\n Applies sfun to each column and returns a pd.Series where the number of rows equal the\n number of columns.\n\n :param sfun: either an 1-arg function that takes a Column and returns a Column, or\n a 2-arg function that takes a Column and its DataType and returns a Column.\n \"\"\"\n from inspect import signature\n exprs = []\n num_args = len(signature(sfun).parameters)\n for col in self.columns:\n col_sdf = self._sdf[col]\n col_type = self._sdf.schema[col].dataType\n if isinstance(col_type, BooleanType) and sfun.__name__ not in ('min', 'max'):\n # Stat functions cannot be used with boolean values by default\n # Thus, cast to integer (true to 1 and false to 0)\n # Exclude the min and max methods though since those work with booleans\n col_sdf = col_sdf.cast('integer')\n if num_args == 1:\n # Only pass in the column if sfun accepts only one arg\n col_sdf = sfun(col_sdf)\n else: # must be 2\n assert num_args == 2\n # Pass in both the column and its data type if sfun accepts two args\n col_sdf = sfun(col_sdf, col_type)\n exprs.append(col_sdf.alias(col))\n\n sdf = self._sdf.select(*exprs)\n pdf = sdf.toPandas()\n assert 
len(pdf) == 1, (sdf, pdf)\n row = pdf.iloc[0]\n row.name = None\n return row # Return first row as a Series\n\n def corr(self, method='pearson'):\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'spearman'}\n * pearson : standard correlation coefficient\n * spearman : Spearman rank correlation\n\n Returns\n -------\n y : pandas.DataFrame\n\n See Also\n --------\n Series.corr\n\n Examples\n --------\n >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... columns=['dogs', 'cats'])\n >>> df.corr('pearson')\n dogs cats\n dogs 1.000000 -0.851064\n cats -0.851064 1.000000\n\n >>> df.corr('spearman')\n dogs cats\n dogs 1.000000 -0.948683\n cats -0.948683 1.000000\n\n Notes\n -----\n There are behavior differences between Koalas and pandas.\n\n * the `method` argument only accepts 'pearson', 'spearman'\n * the data should not contain NaNs. Koalas will return an error.\n * Koalas doesn't support the following argument(s).\n\n * `min_periods` argument is not supported\n \"\"\"\n return corr(self, method)\n\n def iteritems(self):\n \"\"\"\n Iterator over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Returns\n -------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n Examples\n --------\n >>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'],\n ... columns=['species', 'population'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n\n >>> for label, content in df.iteritems():\n ... print('label:', label)\n ... print('content:', content.to_string())\n ...\n label: species\n content: panda bear\n polar bear\n koala marsupial\n label: population\n content: panda 1864\n polar 22000\n koala 80000\n \"\"\"\n cols = list(self.columns)\n return list((col_name, self[col_name]) for col_name in cols)\n\n def to_clipboard(self, excel=True, sep=None, **kwargs):\n \"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n .. note:: This method should only be used if the resulting DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n excel : bool, default True\n - True, use the provided separator, writing in a csv format for\n allowing easy pasting into excel.\n - False, write a string representation of the object to the\n clipboard.\n\n sep : str, default ``'\\\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... 
# A,B,C\n ... # 1,2,3\n ... # 4,5,6\n\n This function also works for Series:\n\n >>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP\n >>> df.to_clipboard(sep=',') # doctest: +SKIP\n ... # Wrote the following to the system clipboard:\n ... # 0, 1\n ... # 1, 2\n ... # 2, 3\n ... # 3, 4\n ... # 4, 5\n ... # 5, 6\n ... # 6, 7\n \"\"\"\n\n args = locals()\n kdf = self\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)\n\n def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,\n na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,\n justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',\n bold_rows=True, classes=None, escape=True, notebook=False, border=None,\n table_id=None, render_links=False):\n \"\"\"\n Render a DataFrame as an HTML table.\n\n .. note:: This method should only be used if the resulting Pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. 
Default ``pd.options.html.border``.\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links (only works with Pandas 0.24+).\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_string : Convert DataFrame to a string.\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n kdf = self.head(max_rows)\n else:\n kdf = self\n\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)\n\n def to_string(self, buf=None, columns=None, col_space=None, header=True,\n index=True, na_rep='NaN', formatters=None, float_format=None,\n sparsify=None, index_names=True, justify=None,\n max_rows=None, max_cols=None, show_dimensions=False,\n decimal='.', line_width=None):\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n\n .. note:: This method should only be used if the resulting Pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, set max_rows parameter.\n\n Parameters\n ----------\n buf : StringIO-like, optional\n Buffer to write to.\n columns : sequence, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool, optional\n Write out the column names. If a list of strings is given, it\n is assumed to be aliases for the column names\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of NAN to use.\n formatters : list or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. The result of this function must be a unicode string.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. 
',' in Europe.\n line_width : int, optional\n Width to wrap a line in characters.\n\n Returns\n -------\n str (or unicode, depending on data and options)\n String representation of the dataframe.\n\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> print(df.to_string(max_rows=2))\n col1 col2\n 0 1 4\n 1 2 5\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n if max_rows is not None:\n kdf = self.head(max_rows)\n else:\n kdf = self\n\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)\n\n def to_dict(self, orient='dict', into=dict):\n \"\"\"\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n .. note:: This method should only be used if the resulting Pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n Examples\n --------\n >>> df = ks.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... index=['row1', 'row2'],\n ... 
columns=['col1', 'col2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n >>> df_dict = df.to_dict()\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]\n\n You can specify the return orientation.\n\n >>> df_dict = df.to_dict('series')\n >>> sorted(df_dict.items())\n [('col1', row1 1\n row2 2\n Name: col1, dtype: int64), ('col2', row1 0.50\n row2 0.75\n Name: col2, dtype: float64)]\n >>> df_dict = df.to_dict('split')\n >>> sorted(df_dict.items()) # doctest: +ELLIPSIS\n [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]\n\n >>> df_dict = df.to_dict('records')\n >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS\n [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]\n\n >>> df_dict = df.to_dict('index')\n >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])\n [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \\\n('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS\n [defaultdict(<class 'list'>, {'col..., 'col...}), \\\ndefaultdict(<class 'list'>, {'col..., 'col...})]\n \"\"\"\n # Make sure locals() call is at the top of the function so we don't capture local variables.\n args = locals()\n kdf = self\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)\n\n def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,\n na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,\n bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,\n decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):\n r\"\"\"\n Render an object to a LaTeX tabular environment table.\n\n Render an object to a tabular environment table. You can splice this into a LaTeX\n document. Requires usepackage{booktabs}.\n\n .. note:: This method should only be used if the resulting Pandas object is expected\n to be small, as all the data is loaded into the driver's memory. If the input\n is large, consider alternative formats.\n\n Parameters\n ----------\n buf : file descriptor or None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given, it is assumed to be aliases\n for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default ‘NaN’\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns’ elements by position or name. The result of\n each function must be a unicode string. 
List must be of length equal to the number of\n columns.\n float_format : str, optional\n Format string for floating point numbers.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print every multiindex key at\n each row. By default, the value will be read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By\n default, ‘l’ will be used for all columns except columns of numbers, which default\n to ‘r’.\n longtable : bool, optional\n By default, the value will be read from the pandas config module. Use a longtable\n environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX\n preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config module. When set to False\n prevents from escaping latex special characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file, defaults to ‘ascii’ on\n Python 2 and ‘utf-8’ on Python 3.\n decimal : str, default ‘.’\n Character recognized as decimal separator, e.g. ‘,’ in Europe.\n multicolumn : bool, default True\n Use multicolumn to enhance MultiIndex columns. The default will be read from the config\n module.\n multicolumn_format : str, default ‘l’\n The alignment for multicolumns, similar to column_format The default will be read from\n the config module.\n multirow : bool, default False\n Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your\n LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read from the pandas config\n module.\n\n Returns\n -------\n str or None\n If buf is None, returns the resulting LateX format as a string. Otherwise returns None.\n\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']},\n ... columns=['name', 'mask', 'weapon'])\n >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE\n '\\\\begin{tabular}{lll}\\n\\\\toprule\\n name & mask & weapon\n \\\\\\\\\\n\\\\midrule\\n Raphael & red & sai \\\\\\\\\\n Donatello &\n purple & bo staff \\\\\\\\\\n\\\\bottomrule\\n\\\\end{tabular}\\n'\n \"\"\"\n\n args = locals()\n kdf = self\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args)\n\n @property\n def index(self):\n \"\"\"The index (row labels) Column of the DataFrame.\n\n Currently supported only when the DataFrame has a single index.\n \"\"\"\n from databricks.koalas.series import Series\n if len(self._metadata.index_map) != 1:\n raise KeyError('Currently supported only when the DataFrame has a single index.')\n return Series(self._index_columns[0], anchor=self, index=[])\n\n def set_index(self, keys, drop=True, append=False, inplace=False):\n \"\"\"Set the DataFrame index (row labels) using one or more existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). 
The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. Here, \"array\"\n encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n\n Returns\n -------\n DataFrame\n Changed row labels.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n\n Examples\n --------\n >>> df = ks.DataFrame({'month': [1, 4, 7, 10],\n ... 'year': [2012, 2014, 2013, 2014],\n ... 'sale': [55, 40, 84, 31]},\n ... columns=['month', 'year', 'sale'])\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the 'month' column:\n\n >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns 'year' and 'month':\n\n >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n \"\"\"\n if isinstance(keys, str):\n keys = [keys]\n else:\n keys = list(keys)\n for key in keys:\n if key not in self.columns:\n raise KeyError(key)\n\n if drop:\n data_columns = [column for column in self._metadata.data_columns if column not in keys]\n else:\n data_columns = self._metadata.data_columns\n if append:\n index_map = self._metadata.index_map + [(column, column) for column in keys]\n else:\n index_map = [(column, column) for column in keys]\n\n metadata = self._metadata.copy(data_columns=data_columns, index_map=index_map)\n\n # Sync Spark's columns as well.\n sdf = self._sdf.select(['`{}`'.format(name) for name in metadata.columns])\n\n if inplace:\n self._metadata = metadata\n self._sdf = sdf\n else:\n kdf = self.copy()\n kdf._metadata = metadata\n kdf._sdf = sdf\n return kdf\n\n def reset_index(self, level=None, drop=False, inplace=False):\n \"\"\"Reset the index, or a level of it.\n\n For DataFrame with multi-level index, return new DataFrame with labeling information in\n the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.\n For a standard index, the index name will be used (if set), otherwise a default 'index' or\n 'level_0' (if 'index' is already taken) will be used.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n\n Returns\n -------\n DataFrame\n DataFrame with the new index.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n\n Examples\n --------\n >>> df = ks.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... 
columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column. Unlike pandas, Koalas\n does not automatically add a sequential index. The following 0, 1, 2, 3 are only\n there when we display the DataFrame.\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n \"\"\"\n # TODO: add example of MultiIndex back. See https://github.com/databricks/koalas/issues/301\n if len(self._metadata.index_map) == 0:\n raise NotImplementedError('Can\\'t reset index because there is no index.')\n\n multi_index = len(self._metadata.index_map) > 1\n\n def rename(index):\n if multi_index:\n return 'level_{}'.format(index)\n else:\n if 'index' not in self._metadata.data_columns:\n return 'index'\n else:\n return 'level_{}'.format(index)\n\n if level is None:\n new_index_map = [(column, name if name is not None else rename(i))\n for i, (column, name) in enumerate(self._metadata.index_map)]\n index_map = []\n else:\n if isinstance(level, (int, str)):\n level = [level]\n level = list(level)\n\n if all(isinstance(l, int) for l in level):\n for lev in level:\n if lev >= len(self._metadata.index_map):\n raise IndexError('Too many levels: Index has only {} level, not {}'\n .format(len(self._metadata.index_map), lev + 1))\n idx = level\n elif all(isinstance(lev, str) for lev in level):\n idx = []\n for l in level:\n try:\n i = self._metadata.index_columns.index(l)\n idx.append(i)\n except ValueError:\n if multi_index:\n raise KeyError('Level unknown not found')\n else:\n raise KeyError('Level unknown must be same as name ({})'\n .format(self._metadata.index_columns[0]))\n else:\n raise ValueError('Level should be all int or all string.')\n idx.sort()\n\n new_index_map = []\n index_map = self._metadata.index_map.copy()\n for i in idx:\n info = self._metadata.index_map[i]\n index_column, index_name = info\n new_index_map.append(\n (index_column,\n index_name if index_name is not None else rename(index_name)))\n index_map.remove(info)\n\n if drop:\n new_index_map = []\n\n metadata = self._metadata.copy(\n data_columns=[column for column, _ in new_index_map] + self._metadata.data_columns,\n index_map=index_map)\n columns = [name for _, name in new_index_map] + self._metadata.data_columns\n if inplace:\n self._metadata = metadata\n self.columns = columns\n else:\n kdf = self.copy()\n kdf._metadata = metadata\n kdf.columns = columns\n return kdf\n\n def isnull(self):\n \"\"\"\n Detects missing values for items in the current Dataframe.\n\n Return a boolean same-sized Dataframe indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values.\n\n See Also\n --------\n Dataframe.notnull\n\n Examples\n --------\n >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.isnull()\n 0 1\n 0 False False\n 1 False True\n 2 False True\n 3 False False\n\n >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])\n >>> df.isnull()\n 0 1 2\n 0 True False True\n 1 False True False\n \"\"\"\n kdf = self.copy()\n for name, ks in kdf.iteritems():\n kdf[name] = ks.isnull()\n return kdf\n\n isna = isnull\n\n def 
notnull(self):\n \"\"\"\n Detects non-missing values for items in the current Dataframe.\n\n This function takes a dataframe and indicates whether it's\n values are valid (not missing, which is ``NaN`` in numeric\n datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).\n\n See Also\n --------\n Dataframe.isnull\n\n Examples\n --------\n >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])\n >>> df.notnull()\n 0 1\n 0 True True\n 1 True False\n 2 True False\n 3 True True\n\n >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])\n >>> df.notnull()\n 0 1 2\n 0 True True True\n 1 True False True\n \"\"\"\n kdf = self.copy()\n for name, ks in kdf.iteritems():\n kdf[name] = ks.notnull()\n return kdf\n\n notna = notnull\n\n def to_koalas(self):\n \"\"\"\n Converts the existing DataFrame into a Koalas DataFrame.\n\n This method is monkey-patched into Spark's DataFrame and can be used\n to convert a Spark DataFrame into a Koalas DataFrame. If running on\n an existing Koalas DataFrame, the method returns itself.\n\n If a Koalas DataFrame is converted to a Spark DataFrame and then back\n to Koalas, it will lose the index information and the original index\n will be turned into a normal column.\n\n See Also\n --------\n DataFrame.to_spark\n\n Examples\n --------\n >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> spark_df = df.to_spark()\n >>> spark_df\n DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]\n\n >>> kdf = spark_df.to_koalas()\n >>> kdf\n __index_level_0__ col1 col2\n 0 0 1 3\n 1 1 2 4\n\n Calling to_koalas on a Koalas DataFrame simply returns itself.\n\n >>> df.to_koalas()\n col1 col2\n 0 1 3\n 1 2 4\n \"\"\"\n if isinstance(self, DataFrame):\n return self\n else:\n return DataFrame(self)\n\n def to_spark(self):\n \"\"\"\n Return the current DataFrame as a Spark DataFrame.\n\n See Also\n --------\n DataFrame.to_koalas\n \"\"\"\n return self._sdf\n\n def to_pandas(self):\n \"\"\"\n Return a Pandas DataFrame.\n\n .. note:: This method should only be used if the resulting Pandas DataFrame is expected\n to be small, as all the data is loaded into the driver's memory.\n\n Examples\n --------\n >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... 
columns=['dogs', 'cats'])\n >>> df.to_pandas()\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.6\n 2 0.6 0.0\n 3 0.2 0.1\n \"\"\"\n sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.columns])\n pdf = sdf.toPandas()\n if len(pdf) == 0 and len(sdf.schema) > 0:\n # TODO: push to OSS\n pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype()\n for field in sdf.schema})\n\n index_columns = self._metadata.index_columns\n if len(index_columns) > 0:\n append = False\n for index_field in index_columns:\n drop = index_field not in self._metadata.data_columns\n pdf = pdf.set_index(index_field, drop=drop, append=append)\n append = True\n pdf = pdf[self._metadata.data_columns]\n\n index_names = self._metadata.index_names\n if len(index_names) > 0:\n if isinstance(pdf.index, pd.MultiIndex):\n pdf.index.names = index_names\n else:\n pdf.index.name = index_names[0]\n return pdf\n\n # Alias to maintain backward compatibility with Spark\n toPandas = to_pandas\n\n def assign(self, **kwargs):\n \"\"\"\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though Koalas doesn't check it).\n If the values are not callable, (e.g. a Series or a literal),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Examples\n --------\n >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence and you can also\n create multiple columns within the same assign.\n\n >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,\n ... temp_k=df['temp_c'] + 273.15)\n >>> assigned[['temp_c', 'temp_f', 'temp_k']]\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible\n but you cannot refer to newly created or modified columns. This\n feature is supported in pandas for Python 3.6 and later but not in\n Koalas. 
In Koalas, all items are computed first, and then assigned.\n \"\"\"\n from databricks.koalas.series import Series\n for k, v in kwargs.items():\n if not (isinstance(v, (Series, spark.Column)) or\n callable(v) or pd.api.types.is_scalar(v)):\n raise TypeError(\"Column assignment doesn't support type \"\n \"{0}\".format(type(v).__name__))\n if callable(v):\n kwargs[k] = v(self)\n\n pairs = list(kwargs.items())\n sdf = self._sdf\n for (name, c) in pairs:\n if isinstance(c, Series):\n sdf = sdf.withColumn(name, c._scol)\n elif isinstance(c, Column):\n sdf = sdf.withColumn(name, c)\n else:\n sdf = sdf.withColumn(name, F.lit(c))\n\n data_columns = self._metadata.data_columns\n metadata = self._metadata.copy(\n data_columns=(data_columns +\n [name for name, _ in pairs if name not in data_columns]))\n return DataFrame(sdf, metadata)\n\n def to_records(self, index=True, convert_datetime64=None,\n column_dtypes=None, index_dtypes=None):\n \"\"\"\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n .. note:: This method should only be used if the resulting NumPy ndarray is\n expected to be small, as all the data is loaded into the driver's memory.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in 'index'\n field or using the index label, if set.\n convert_datetime64 : bool, default None\n Whether to convert the index to datetime.datetime if it is a\n DatetimeIndex.\n column_dtypes : str, type, dict, default None\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n If a string or type, the data type to store all index levels. If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},\n ... 
index=['a', 'b'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n\n >>> df.to_records() # doctest: +SKIP\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False) # doctest: +SKIP\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[('A', '<i8'), ('B', '<f8')])\n\n Specification of dtype for columns is new in Pandas 0.24.0.\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={\"A\": \"int32\"}) # doctest: +SKIP\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])\n\n Specification of dtype for index is new in Pandas 0.24.0.\n Data types can also be specified for the index:\n\n >>> df.to_records(index_dtypes=\"<S2\") # doctest: +SKIP\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])\n \"\"\"\n args = locals()\n kdf = self\n\n return validate_arguments_and_invoke_function(\n kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args)\n\n def copy(self) -> 'DataFrame':\n \"\"\"\n Make a copy of this object's indices and data.\n\n Returns\n -------\n copy : DataFrame\n \"\"\"\n return DataFrame(self._sdf, self._metadata.copy())\n\n def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):\n \"\"\"\n Remove missing values.\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or 'index' : Drop rows which contain missing values.\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * 'any' : If any NA values are present, drop that row or column.\n * 'all' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries dropped from it.\n\n See Also\n --------\n DataFrame.drop : Drop specified labels from columns.\n DataFrame.isnull: Indicate missing values.\n DataFrame.notnull : Indicate existing (non-missing) values.\n\n Examples\n --------\n >>> df = ks.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [None, 'Batmobile', 'Bullwhip'],\n ... \"born\": [None, \"1940-04-25\", None]},\n ... 
columns=['name', 'toy', 'born'])\n >>> df\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how='all')\n name toy born\n 0 Alfred None None\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip None\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=['name', 'born'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n \"\"\"\n if axis == 0 or axis == 'index':\n if subset is not None:\n if isinstance(subset, str):\n columns = [subset]\n else:\n columns = list(subset)\n invalids = [column for column in columns\n if column not in self._metadata.data_columns]\n if len(invalids) > 0:\n raise KeyError(invalids)\n else:\n columns = list(self.columns)\n\n cnt = reduce(lambda x, y: x + y,\n [F.when(self[column].notna()._scol, 1).otherwise(0)\n for column in columns],\n F.lit(0))\n if thresh is not None:\n pred = cnt >= F.lit(int(thresh))\n elif how == 'any':\n pred = cnt == F.lit(len(columns))\n elif how == 'all':\n pred = cnt > F.lit(0)\n else:\n if how is not None:\n raise ValueError('invalid how option: {h}'.format(h=how))\n else:\n raise TypeError('must specify how or thresh')\n\n sdf = self._sdf.filter(pred)\n if inplace:\n self._sdf = sdf\n else:\n return DataFrame(sdf, self._metadata.copy())\n\n else:\n raise NotImplementedError(\"dropna currently only works for axis=0 or axis='index'\")\n\n def fillna(self, value=None, axis=None, inplace=False):\n \"\"\"Fill NA/NaN values.\n\n Parameters\n ----------\n value : scalar, dict, Series\n Value to use to fill holes. alternately a dict/Series of values\n specifying which value to use for each column.\n DataFrame is not supported.\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n\n Returns\n -------\n DataFrame\n DataFrame with NA entries filled.\n\n Examples\n --------\n >>> df = ks.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... 
columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 1.0 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 1.0 4\n \"\"\"\n if axis is None:\n axis = 0\n if not (axis == 0 or axis == \"index\"):\n raise NotImplementedError(\"fillna currently only works for axis=0 or axis='index'\")\n\n if value is None:\n raise ValueError('Currently must specify value')\n if not isinstance(value, (float, int, str, bool, dict, pd.Series)):\n raise TypeError(\"Unsupported type %s\" % type(value))\n if isinstance(value, pd.Series):\n value = value.to_dict()\n if isinstance(value, dict):\n for v in value.values():\n if not isinstance(v, (float, int, str, bool)):\n raise TypeError(\"Unsupported type %s\" % type(v))\n\n sdf = self._sdf.fillna(value)\n if inplace:\n self._sdf = sdf\n else:\n return DataFrame(sdf, self._metadata.copy())\n\n def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \\\n -> 'DataFrame':\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values.\n\n Parameters\n ----------\n lower : float or int, default None\n Minimum threshold value. All values below this threshold will be set to it.\n upper : float or int, default None\n Maximum threshold value. All values above this threshold will be set to it.\n\n Returns\n -------\n DataFrame\n DataFrame with the values outside the clip boundaries replaced.\n\n Examples\n --------\n >>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)\n A\n 0 1\n 1 2\n 2 3\n\n Notes\n -----\n One difference between this implementation and pandas is that running\n pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with \"TypeError: '<=' not supported\n between instances of 'str' and 'int'\" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)\n will output the original DataFrame, simply ignoring the incompatible types.\n \"\"\"\n if is_list_like(lower) or is_list_like(upper):\n raise ValueError(\"List-like value are not supported for 'lower' and 'upper' at the \" +\n \"moment\")\n\n if lower is None and upper is None:\n return self\n\n sdf = self._sdf\n\n numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,\n ShortType)\n numeric_columns = [c for c in self.columns\n if isinstance(sdf.schema[c].dataType, numeric_types)]\n nonnumeric_columns = [c for c in self.columns\n if not isinstance(sdf.schema[c].dataType, numeric_types)]\n\n if lower is not None:\n sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c)\n for c in numeric_columns] + nonnumeric_columns)\n if upper is not None:\n sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c)\n for c in numeric_columns] + nonnumeric_columns)\n\n # Restore initial column order\n sdf = sdf.select(list(self.columns))\n\n return ks.DataFrame(sdf)\n\n def head(self, n=5):\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. 
It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n \"\"\"\n\n return DataFrame(self._sdf.limit(n), self._metadata.copy())\n\n @property\n def columns(self):\n \"\"\"The column labels of the DataFrame.\"\"\"\n return pd.Index(self._metadata.data_columns)\n\n @columns.setter\n def columns(self, names):\n old_names = self._metadata.data_columns\n if len(old_names) != len(names):\n raise ValueError(\n \"Length mismatch: Expected axis has %d elements, new values have %d elements\"\n % (len(old_names), len(names)))\n sdf = self._sdf.select(self._metadata.index_columns +\n [self[old_name]._scol.alias(new_name)\n for (old_name, new_name) in zip(old_names, names)])\n self._sdf = sdf\n self._metadata = self._metadata.copy(data_columns=names)\n\n @property\n def dtypes(self):\n \"\"\"Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column. The result's index is the original\n DataFrame's columns. Columns with mixed types are stored with the object dtype.\n\n Returns\n -------\n pd.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = ks.DataFrame({'a': list('abc'),\n ... 'b': list(range(1, 4)),\n ... 'c': np.arange(3, 6).astype('i1'),\n ... 'd': np.arange(4.0, 7.0, dtype='float64'),\n ... 'e': [True, False, True],\n ... 'f': pd.date_range('20130101', periods=3)},\n ... columns=['a', 'b', 'c', 'd', 'e', 'f'])\n >>> df.dtypes\n a object\n b int64\n c int8\n d float64\n e bool\n f datetime64[ns]\n dtype: object\n \"\"\"\n return pd.Series([self[col].dtype for col in self._metadata.data_columns],\n index=self._metadata.data_columns)\n\n def count(self):\n \"\"\"\n Count non-NA cells for each column.\n\n The values `None`, `NaN` are considered NA.\n\n Returns\n -------\n pandas.Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = ks.DataFrame({\"Person\":\n ... [\"John\", \"Myla\", \"Lewis\", \"John\", \"Myla\"],\n ... \"Age\": [24., np.nan, 21., 33, 26],\n ... \"Single\": [False, True, True, True, False]},\n ... 
columns=[\"Person\", \"Age\", \"Single\"])\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n \"\"\"\n return self._reduce_for_stat_function(_Frame._count_expr)\n\n def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):\n \"\"\"\n Drop specified labels from columns.\n\n Remove columns by specifying label names and axis=1 or columns.\n When specifying both labels and columns, only labels will be dropped.\n Removing rows is yet to be implemented.\n\n Parameters\n ----------\n labels : single label or list-like\n Column labels to drop.\n axis : {1 or 'columns'}, default 1\n .. dropna currently only works for axis=1 'columns'\n axis=0 is yet to be implemented.\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n\n Returns\n -------\n dropped : DataFrame\n\n See Also\n --------\n Series.dropna\n\n Examples\n --------\n >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},\n ... columns=['x', 'y', 'z', 'w'])\n >>> df\n x y z w\n 0 1 3 5 7\n 1 2 4 6 8\n\n >>> df.drop('x', axis=1)\n y z w\n 0 3 5 7\n 1 4 6 8\n\n >>> df.drop(['y', 'z'], axis=1)\n x w\n 0 1 7\n 1 2 8\n\n >>> df.drop(columns=['y', 'z'])\n x w\n 0 1 7\n 1 2 8\n\n Notes\n -----\n Currently only axis = 1 is supported in this function,\n axis = 0 is yet to be implemented.\n \"\"\"\n if labels is not None:\n axis = self._validate_axis(axis)\n if axis == 1:\n return self.drop(columns=labels)\n raise NotImplementedError(\"Drop currently only works for axis=1\")\n elif columns is not None:\n if isinstance(columns, str):\n columns = [columns]\n sdf = self._sdf.drop(*columns)\n metadata = self._metadata.copy(\n data_columns=[column for column in self.columns if column not in columns]\n )\n return DataFrame(sdf, metadata)\n else:\n raise ValueError(\"Need to specify at least one of 'labels' or 'columns'\")\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n\n Examples\n --------\n >>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},\n ... columns=['x', 'y', 'z'])\n >>> df\n x y z\n 0 0 a a\n 1 1 b b\n 2 2 b b\n\n >>> df.get('x')\n 0 0\n 1 1\n 2 2\n Name: x, dtype: int64\n\n >>> df.get(['x', 'y'])\n x y\n 0 0 a\n 1 1 b\n 2 2 b\n \"\"\"\n try:\n return self._pd_getitem(key)\n except (KeyError, ValueError, IndexError):\n return default\n\n def sort_values(self, by, ascending=True, inplace=False, na_position='last'):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------\n by : str or list of str\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n if True, perform operation in-place\n na_position : {'first', 'last'}, default 'last'\n `first` puts NaNs at the beginning, `last` puts NaNs at the end\n\n Returns\n -------\n sorted_obj : DataFrame\n\n Examples\n --------\n >>> df = ks.DataFrame({\n ... 'col1': ['A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 9, 8, 7, 4],\n ... 'col3': [0, 9, 4, 2, 3],\n ... },\n ... 
columns=['col1', 'col2', 'col3'])\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 B 9 9\n 2 None 8 4\n 3 D 7 2\n 4 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 B 9 9\n 4 C 4 3\n 3 D 7 2\n 2 None 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 3 D 7 2\n 4 C 4 3\n 1 B 9 9\n 0 A 2 0\n 2 None 8 4\n\n Sort by multiple columns\n\n >>> df = ks.DataFrame({\n ... 'col1': ['A', 'A', 'B', None, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... },\n ... columns=['col1', 'col2', 'col3'])\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 None 8 4\n \"\"\"\n if isinstance(by, str):\n by = [by]\n if isinstance(ascending, bool):\n ascending = [ascending] * len(by)\n if len(ascending) != len(by):\n raise ValueError('Length of ascending ({}) != length of by ({})'\n .format(len(ascending), len(by)))\n if na_position not in ('first', 'last'):\n raise ValueError(\"invalid na_position: '{}'\".format(na_position))\n\n # Mapper: Get a spark column function for (ascending, na_position) combination\n # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.\n mapper = {\n (True, 'first'): lambda x: Column(getattr(x._jc, \"asc_nulls_first\")()),\n (True, 'last'): lambda x: Column(getattr(x._jc, \"asc_nulls_last\")()),\n (False, 'first'): lambda x: Column(getattr(x._jc, \"desc_nulls_first\")()),\n (False, 'last'): lambda x: Column(getattr(x._jc, \"desc_nulls_last\")()),\n }\n by = [mapper[(asc, na_position)](self[colname]._scol)\n for colname, asc in zip(by, ascending)]\n kdf = DataFrame(self._sdf.sort(*by), self._metadata.copy())\n if inplace:\n self._sdf = kdf._sdf\n self._metadata = kdf._metadata\n else:\n return kdf\n\n # TODO: add keep = First\n def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant in Pandas.\n In Koalas, thanks to Spark's lazy execution and query optimizer,\n the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],\n ... 
'Y': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column \"population\".\n\n >>> df.nlargest(n=3, columns='X')\n X Y\n 5 7.0 11\n 4 6.0 10\n 3 5.0 9\n\n >>> df.nlargest(n=3, columns=['Y', 'X'])\n X Y\n 6 NaN 12\n 5 7.0 11\n 4 6.0 10\n\n \"\"\"\n return self.sort_values(by=columns, ascending=False).head(n=n)\n\n # TODO: add keep = First\n def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=True).head(n)``, but more\n performant.\n In Koalas, thanks to Spark's lazy execution and query optimizer,\n the two would have same performance.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],\n ... 'Y': [6, 7, 8, 9, 10, 11, 12]})\n >>> df\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n 3 5.0 9\n 4 6.0 10\n 5 7.0 11\n 6 NaN 12\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column \"a\".\n\n >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n\n To order by the largest values in column \"a\" and then \"c\", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE\n X Y\n 0 1.0 6\n 1 2.0 7\n 2 3.0 8\n \"\"\"\n return self.sort_values(by=columns, ascending=True).head(n=n)\n\n def isin(self, values):\n \"\"\"\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable or dict\n The sequence of values to test. If values is a dict,\n the keys must be the column names, which must match.\n Series and DataFrame are not supported.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n Examples\n --------\n >>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'],\n ... 
columns=['num_legs', 'num_wings'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n \"\"\"\n if isinstance(values, (pd.DataFrame, pd.Series)):\n raise NotImplementedError(\"DataFrame and Series are not supported\")\n if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):\n raise AttributeError(\n \"'DataFrame' object has no attribute %s\"\n % (set(values.keys()).difference(self.columns)))\n\n _select_columns = self._metadata.index_columns\n if isinstance(values, dict):\n for col in self.columns:\n if col in values:\n _select_columns.append(self[col]._scol.isin(values[col]).alias(col))\n else:\n _select_columns.append(F.lit(False).alias(col))\n elif is_list_like(values):\n _select_columns += [\n self[col]._scol.isin(list(values)).alias(col) for col in self.columns]\n else:\n raise TypeError('Values should be iterable, Series, DataFrame or dict.')\n\n return DataFrame(self._sdf.select(_select_columns), self._metadata.copy())\n\n def pipe(self, func, *args, **kwargs):\n r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n function to apply to the DataFrame.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the DataFrames.\n args : iterable, optional\n positional arguments passed into ``func``.\n kwargs : mapping, optional\n a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. For example, given\n\n >>> df = ks.DataFrame({'category': ['A', 'A', 'B'],\n ... 'col1': [1, 2, 3],\n ... 'col2': [4, 5, 6]},\n ... columns=['category', 'col1', 'col2'])\n >>> def keep_category_a(df):\n ... return df[df['category'] == 'A']\n >>> def add_one(df, column):\n ... return df.assign(col3=df[column] + 1)\n >>> def multiply(df, column1, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n instead of writing\n\n >>> multiply(add_one(keep_category_a(df), column=\"col1\"), column1=\"col2\", column2=\"col3\")\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n You can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column=\"col1\")\n ... .pipe(multiply, column1=\"col2\", column2=\"col3\")\n ... )\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``df``:\n\n >>> def multiply_2(column1, df, column2):\n ... return df.assign(col4=df[column1] * df[column2])\n\n\n Then you can write\n\n >>> (df.pipe(keep_category_a)\n ... .pipe(add_one, column=\"col1\")\n ... .pipe((multiply_2, 'df'), column1=\"col2\", column2=\"col3\")\n ... 
)\n category col1 col2 col3 col4\n 0 A 1 4 2 8\n 1 A 2 5 3 15\n \"\"\"\n\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError('%s is both the pipe target and a keyword '\n 'argument' % target)\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n Examples\n --------\n >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n \"\"\"\n return len(self), len(self.columns)\n\n def merge(self, right: 'DataFrame', how: str = 'inner', on: str = None,\n left_index: bool = False, right_index: bool = False,\n suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':\n \"\"\"\n Merge DataFrame objects with a database-style join.\n\n Parameters\n ----------\n right: Object to merge with.\n how: Type of merge to be performed.\n {‘left’, ‘right’, ‘outer’, ‘inner’}, default ‘inner’\n\n left: use only keys from left frame, similar to a SQL left outer join; preserve key\n order.\n right: use only keys from right frame, similar to a SQL right outer join; preserve key\n order.\n outer: use union of keys from both frames, similar to a SQL full outer join; sort keys\n lexicographically.\n inner: use intersection of keys from both frames, similar to a SQL inner join;\n preserve the order of the left keys.\n on: Column or index level names to join on. These must be found in both DataFrames. If on\n is None and not merging on indexes then this defaults to the intersection of the\n columns in both DataFrames.\n left_index: Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index or a number of\n columns) must match the number of levels.\n right_index: Use the index from the right DataFrame as the join key. 
Same caveats as\n left_index.\n suffixes: Suffix to apply to overlapping column names in the left and right side,\n respectively.\n\n Returns\n -------\n DataFrame\n A DataFrame of the two merged objects.\n\n Examples\n --------\n >>> left_kdf = ks.DataFrame({'A': [1, 2]})\n >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])\n\n >>> left_kdf.merge(right_kdf, left_index=True, right_index=True)\n A B\n 0 2 x\n\n >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')\n A B\n 0 1 None\n 1 2 x\n\n >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')\n A B\n 0 2.0 x\n 1 NaN y\n\n >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')\n A B\n 0 1.0 None\n 1 2.0 x\n 2 NaN y\n\n Notes\n -----\n As described in #263, joining string columns currently returns None for missing values\n instead of NaN.\n \"\"\"\n if on is None and not left_index and not right_index:\n raise ValueError(\"At least 'on' or 'left_index' and 'right_index' have to be set\")\n if on is not None and (left_index or right_index):\n raise ValueError(\"Only 'on' or 'left_index' and 'right_index' can be set\")\n\n if how == 'full':\n warnings.warn(\"Warning: While Koalas will accept 'full', you should use 'outer' \" +\n \"instead to be compatible with the pandas merge API\", UserWarning)\n if how == 'outer':\n # 'outer' in pandas equals 'full' in Spark\n how = 'full'\n if how not in ('inner', 'left', 'right', 'full'):\n raise ValueError(\"The 'how' parameter has to be amongst the following values: \",\n \"['inner', 'left', 'right', 'outer']\")\n\n if on is None:\n # FIXME Move index string to constant?\n on = '__index_level_0__'\n\n left_table = self._sdf.alias('left_table')\n right_table = right._sdf.alias('right_table')\n\n # Unpack suffixes tuple for convenience\n left_suffix = suffixes[0]\n right_suffix = suffixes[1]\n\n # Append suffixes to columns with the same name to avoid conflicts later\n duplicate_columns = list(self.columns & right.columns)\n if duplicate_columns:\n for duplicate_column_name in duplicate_columns:\n left_table = left_table.withColumnRenamed(duplicate_column_name,\n duplicate_column_name + left_suffix)\n right_table = right_table.withColumnRenamed(duplicate_column_name,\n duplicate_column_name + right_suffix)\n\n join_condition = (left_table[on] == right_table[on] if on not in duplicate_columns\n else left_table[on + left_suffix] == right_table[on + right_suffix])\n joined_table = left_table.join(right_table, join_condition, how=how)\n\n if on in duplicate_columns:\n # Merge duplicate key columns\n joined_table = joined_table.withColumnRenamed(on + left_suffix, on)\n joined_table = joined_table.drop(on + right_suffix)\n\n # Remove auxiliary index\n # FIXME Move index string to constant?\n joined_table = joined_table.drop('__index_level_0__')\n\n kdf = DataFrame(joined_table)\n return kdf\n\n def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,\n random_state: Optional[int] = None) -> 'DataFrame':\n \"\"\"\n Return a random sample of items from an axis of object.\n\n Please call this function using named argument by specifing the ``frac`` argument.\n\n You can use `random_state` for reproducibility. However, note that different from pandas,\n specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. 
The\n result set depends on not only the seed, but also how the data is distributed across\n machines and to some extent network randomness when shuffle operations are involved. Even\n in the simplest case, the result set will depend on the system's CPU core count.\n\n Parameters\n ----------\n n : int, optional\n Number of items to return. This is currently NOT supported. Use frac instead.\n frac : float, optional\n Fraction of axis items to return.\n replace : bool, default False\n Sample with or without replacement.\n random_state : int, optional\n Seed for the random number generator (if int).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing the sampled items.\n\n Examples\n --------\n >>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... index=['falcon', 'dog', 'spider', 'fish'],\n ... columns=['num_legs', 'num_wings', 'num_specimen_seen'])\n >>> df # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n A random 25% sample of the ``DataFrame``.\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n\n Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,\n so the same items could appear more than once.\n\n >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP\n falcon 2\n spider 8\n spider 8\n Name: num_legs, dtype: int64\n\n Specifying the exact number of items to return is not supported at the moment.\n\n >>> df.sample(n=5) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n NotImplementedError: Function sample currently does not support specifying ...\n \"\"\"\n # Note: we don't run any of the doctests because the result can change depending on the\n # system's core count.\n if n is not None:\n raise NotImplementedError(\"Function sample currently does not support specifying \"\n \"exact number of items to return. Use frac instead.\")\n\n if frac is None:\n raise ValueError(\"frac must be specified.\")\n\n sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)\n return DataFrame(sdf, self._metadata.copy())\n\n def astype(self, dtype) -> 'DataFrame':\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. 
Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')\n >>> df\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert to float type:\n\n >>> df.astype('float')\n a b\n 0 1.0 1.0\n 1 2.0 2.0\n 2 3.0 3.0\n\n Convert to int64 type back:\n\n >>> df.astype('int64')\n a b\n 0 1 1\n 1 2 2\n 2 3 3\n\n Convert column a to float type:\n\n >>> df.astype({'a': float})\n a b\n 0 1.0 1\n 1 2.0 2\n 2 3.0 3\n\n \"\"\"\n results = []\n if is_dict_like(dtype):\n for col_name in dtype.keys():\n if col_name not in self.columns:\n raise KeyError('Only a column name can be used for the '\n 'key in a dtype mappings argument.')\n for col_name, col in self.iteritems():\n if col_name in dtype:\n results.append(col.astype(dtype=dtype[col_name]))\n else:\n results.append(col)\n else:\n for col_name, col in self.iteritems():\n results.append(col.astype(dtype=dtype))\n sdf = self._sdf.select(\n self._metadata.index_columns + list(map(lambda ser: ser._scol, results)))\n return DataFrame(sdf, self._metadata.copy())\n\n def _pd_getitem(self, key):\n from databricks.koalas.series import Series\n if key is None:\n raise KeyError(\"none key\")\n if isinstance(key, str):\n try:\n return Series(self._sdf.__getitem__(key), anchor=self,\n index=self._metadata.index_map)\n except AnalysisException:\n raise KeyError(key)\n if np.isscalar(key) or isinstance(key, (tuple, str)):\n raise NotImplementedError(key)\n elif isinstance(key, slice):\n return self.loc[key]\n\n if isinstance(key, (pd.Series, np.ndarray, pd.Index)):\n raise NotImplementedError(key)\n if isinstance(key, list):\n return self.loc[:, key]\n if isinstance(key, DataFrame):\n # TODO Should not implement alignment, too dangerous?\n return Series(self._sdf.__getitem__(key), anchor=self, index=self._metadata.index_map)\n if isinstance(key, Series):\n # TODO Should not implement alignment, too dangerous?\n # It is assumed to be only a filter, otherwise .loc should be used.\n bcol = key._scol.cast(\"boolean\")\n return DataFrame(self._sdf.filter(bcol), self._metadata.copy())\n raise NotImplementedError(key)\n\n def __repr__(self):\n pdf = self.head(max_display_count + 1).to_pandas()\n pdf_length = len(pdf)\n repr_string = repr(pdf.iloc[:max_display_count])\n if pdf_length > max_display_count:\n match = REPR_PATTERN.search(repr_string)\n if match is not None:\n nrows = match.group(\"rows\")\n ncols = match.group(\"columns\")\n footer = (\"\\n\\n[Showing only the first {nrows} rows x {ncols} columns]\"\n .format(nrows=nrows, ncols=ncols))\n return REPR_PATTERN.sub(footer, repr_string)\n return repr_string\n\n def _repr_html_(self):\n pdf = self.head(max_display_count + 1).to_pandas()\n pdf_length = len(pdf)\n repr_html = pdf[:max_display_count]._repr_html_()\n if pdf_length > max_display_count:\n match = REPR_HTML_PATTERN.search(repr_html)\n if match is not None:\n nrows = match.group(\"rows\")\n ncols = match.group(\"columns\")\n by = chr(215)\n footer = ('\\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\\n</div>'\n .format(rows=nrows,\n by=by,\n cols=ncols))\n return REPR_HTML_PATTERN.sub(footer, repr_html)\n return repr_html\n\n def __getitem__(self, key):\n return self._pd_getitem(key)\n\n def __setitem__(self, key, 
value):\n from databricks.koalas.series import Series\n # For now, we don't support realignment against different dataframes.\n # This is too expensive in Spark.\n # Are we assigning against a column?\n if isinstance(value, Series):\n assert value._kdf is self, \\\n \"Cannot combine column argument because it comes from a different dataframe\"\n if isinstance(key, (tuple, list)):\n assert isinstance(value.schema, StructType)\n field_names = value.schema.fieldNames()\n kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})\n else:\n kdf = self.assign(**{key: value})\n\n self._sdf = kdf._sdf\n self._metadata = kdf._metadata\n\n def __getattr__(self, key: str) -> Any:\n from databricks.koalas.series import Series\n if key.startswith(\"__\") or key.startswith(\"_pandas_\") or key.startswith(\"_spark_\"):\n raise AttributeError(key)\n if hasattr(_MissingPandasLikeDataFrame, key):\n property_or_func = getattr(_MissingPandasLikeDataFrame, key)\n if isinstance(property_or_func, property):\n return property_or_func.fget(self) # type: ignore\n else:\n return partial(property_or_func, self)\n return Series(self._sdf.__getattr__(key), anchor=self, index=self._metadata.index_map)\n\n def __len__(self):\n return self._sdf.count()\n\n def __dir__(self):\n fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]\n return super(DataFrame, self).__dir__() + fields\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, 1, 'index', 'columns', None):\n raise ValueError('No axis named {0}'.format(axis))\n # convert to numeric axis\n return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)\n\n\ndef _reduce_spark_multi(sdf, aggs):\n \"\"\"\n Performs a reduction on a dataframe, the functions being known sql aggregate functions.\n \"\"\"\n assert isinstance(sdf, spark.DataFrame)\n sdf0 = sdf.agg(*aggs)\n l = sdf0.head(2)\n assert len(l) == 1, (sdf, l)\n row = l[0]\n l2 = list(row)\n assert len(l2) == len(aggs), (row, l2)\n return l2\n" ]
[ [ "pandas.Series", "pandas.api.types.is_list_like", "pandas.DataFrame", "numpy.isscalar", "pandas.api.types.is_datetime64tz_dtype", "pandas.api.types.is_dict_like", "pandas.api.types.is_scalar", "pandas.api.types.is_datetime64_dtype", "pandas.Index" ] ]
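For reference, a minimal usage sketch of the sample() and astype() methods documented in the record above; it assumes the databricks.koalas package imported as ks (as in the doctests) and an active Spark session, and any sampled rows are illustrative only, since results vary with how the data is distributed.

import databricks.koalas as ks  # assumed available, as in the doctests above

kdf = ks.DataFrame({'num_legs': [2, 4, 8, 0], 'num_wings': [2, 0, 0, 0]},
                   index=['falcon', 'dog', 'spider', 'fish'])

# Fraction-based sampling; passing n= raises NotImplementedError, as documented above.
sampled = kdf.sample(frac=0.25, random_state=1)

# Cast every column to one dtype, or individual columns via a dict mapping.
all_float = kdf.astype('float')
legs_float = kdf.astype({'num_legs': float})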
paalge/scikit-image
[ "f3c4b88b0610242b033449fd38c1118475f96a73" ]
[ "doc/examples/transform/plot_pyramid.py" ]
[ "\"\"\"\n====================\nBuild image pyramids\n====================\n\nThe ``pyramid_gaussian`` function takes an image and yields successive images\nshrunk by a constant scale factor. Image pyramids are often used, e.g., to\nimplement algorithms for denoising, texture discrimination, and scale-\ninvariant detection.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.transform import pyramid_gaussian\n\n\nimage = data.astronaut()\nrows, cols, dim = image.shape\npyramid = tuple(pyramid_gaussian(image, downscale=2))\n\ncomposite_image = np.zeros((rows, cols + cols // 2, 3), dtype=np.double)\n\ncomposite_image[:rows, :cols, :] = pyramid[0]\n\ni_row = 0\nfor p in pyramid[1:]:\n n_rows, n_cols = p.shape[:2]\n composite_image[i_row:i_row + n_rows, cols:cols + n_cols] = p\n i_row += n_rows\n\nfig, ax = plt.subplots()\nax.imshow(composite_image)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "numpy.zeros" ] ]
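A short companion sketch (not part of the gallery example above) that prints the shape of each pyramid level, showing that pyramid_gaussian with downscale=2 roughly halves the spatial size at every step. It assumes a recent scikit-image release, where the colour axis must be flagged explicitly; the call in the record above predates that keyword.

from skimage import data
from skimage.transform import pyramid_gaussian

image = data.astronaut()
# channel_axis=-1 (multichannel=True on older releases) keeps the colour axis
# intact so only the two spatial axes are downscaled.
pyramid = tuple(pyramid_gaussian(image, downscale=2, channel_axis=-1))

for level, layer in enumerate(pyramid):
    print(level, layer.shape)   # spatial size roughly halves at every level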
RajArPatra/Improvement-semantic-segmentation-using-clustring-and-class-voating
[ "1e4b5fa5ccc462d88a68f3c88c8af31fa3f14b8b" ]
[ "deeplabv3/datahandler.py" ]
[ "from torch.utils.data import Dataset, DataLoader\nimport glob\nimport os\nimport numpy as np\nimport cv2\nimport torch\nfrom torchvision import transforms, utils\nfrom skimage.transform import resize\n\n\nclass SegDataset(Dataset):\n \"\"\"Segmentation Dataset\"\"\"\n\n def __init__(self, root_dir, imageFolder, maskFolder, transform=None, seed=None, fraction=None, subset=None, imagecolormode='rgb', maskcolormode='grayscale'):\n \"\"\"\n Args:\n root_dir (string): Directory with all the images and should have the following structure.\n root\n --Images\n -----Img 1\n -----Img N\n --Mask\n -----Mask 1\n -----Mask N\n imageFolder (string) = 'Images' : Name of the folder which contains the Images.\n maskFolder (string) = 'Masks : Name of the folder which contains the Masks.\n transform (callable, optional): Optional transform to be applied on a sample.\n seed: Specify a seed for the train and test split\n fraction: A float value from 0 to 1 which specifies the validation split fraction\n subset: 'Train' or 'Test' to select the appropriate set.\n imagecolormode: 'rgb' or 'grayscale'\n maskcolormode: 'rgb' or 'grayscale'\n \"\"\"\n self.color_dict = {'rgb': 1, 'grayscale': 0}\n assert(imagecolormode in ['rgb', 'grayscale'])\n assert(maskcolormode in ['rgb', 'grayscale'])\n\n self.imagecolorflag = self.color_dict[imagecolormode]\n self.maskcolorflag = self.color_dict[maskcolormode]\n self.root_dir = root_dir\n self.transform = transform\n if not fraction:\n self.image_names = sorted(\n glob.glob(os.path.join(self.root_dir, imageFolder, '*')))\n self.mask_names = sorted(\n glob.glob(os.path.join(self.root_dir, maskFolder, '*')))\n else:\n assert(subset in ['Train', 'Test'])\n self.fraction = fraction\n self.image_list = np.array(\n sorted(glob.glob(os.path.join(self.root_dir, imageFolder, '*'))))\n self.mask_list = np.array(\n sorted(glob.glob(os.path.join(self.root_dir, maskFolder, '*'))))\n if seed:\n np.random.seed(seed)\n indices = np.arange(len(self.image_list))\n np.random.shuffle(indices)\n self.image_list = self.image_list[indices]\n self.mask_list = self.mask_list[indices]\n if subset == 'Train':\n self.image_names = self.image_list[:int(\n np.ceil(len(self.image_list)*(1-self.fraction)))]\n self.mask_names = self.mask_list[:int(\n np.ceil(len(self.mask_list)*(1-self.fraction)))]\n else:\n self.image_names = self.image_list[int(\n np.ceil(len(self.image_list)*(1-self.fraction))):]\n self.mask_names = self.mask_list[int(\n np.ceil(len(self.mask_list)*(1-self.fraction))):]\n\n def __len__(self):\n return len(self.image_names)\n\n def __getitem__(self, idx):\n img_name = self.image_names[idx]\n if self.imagecolorflag:\n image = cv2.imread(\n img_name, self.imagecolorflag).transpose(2, 0, 1)\n else:\n image = cv2.imread(img_name, self.imagecolorflag)\n msk_name = self.mask_names[idx]\n if self.maskcolorflag:\n mask = cv2.imread(msk_name, self.maskcolorflag).transpose(2, 0, 1)\n else:\n mask = cv2.imread(msk_name, self.maskcolorflag)\n sample = {'image': image, 'mask': mask}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n# Define few transformations for the Segmentation Dataloader\n\n\nclass Resize(object):\n \"\"\"Resize image and/or masks.\"\"\"\n\n def __init__(self, imageresize, maskresize):\n self.imageresize = imageresize\n self.maskresize = maskresize\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n if len(image.shape) == 3:\n image = image.transpose(1, 2, 0)\n if len(mask.shape) == 3:\n mask = mask.transpose(1, 2, 0)\n 
mask = cv2.resize(mask, self.maskresize, interpolation=cv2.INTER_AREA)\n #mask = 256 * resize(mask, (256, 256), anti_aliasing = True)\n image = cv2.resize(image, self.imageresize, interpolation=cv2.INTER_AREA)\n #image = 256 * resize(image, (256, 256), anti_aliasing = True)\n if len(image.shape) == 3:\n image = image.transpose(2, 0, 1)\n if len(mask.shape) == 3:\n mask = mask.transpose(2, 0, 1)\n\n return {'image': image,\n 'mask': mask}\n\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample, maskresize=None, imageresize=None):\n image, mask = sample['image'], sample['mask']\n if len(mask.shape) == 2:\n mask = mask.reshape((1,)+mask.shape)\n if len(image.shape) == 2:\n image = image.reshape((1,)+image.shape)\n return {'image': torch.from_numpy(image),\n 'mask': torch.from_numpy(mask)}\n\n\nclass Normalize(object):\n '''Normalize image'''\n\n def __call__(self, sample):\n image, mask = sample['image'], sample['mask']\n return {'image': image.type(torch.FloatTensor)/255,\n 'mask': mask.type(torch.FloatTensor)/255}\n\n\n\n\n\ndef get_dataloader_single_folder(data_dir, imageFolder='Images', maskFolder='Masks', fraction=0.2, batch_size=4):\n \"\"\"\n Create training and testing dataloaders from a single folder.\n \"\"\"\n data_transforms = {\n 'Train': transforms.Compose([Resize((256, 256), (256, 256)), ToTensor(), Normalize()]),\n 'Test': transforms.Compose([Resize((256,256), (256, 256)), ToTensor(), Normalize()]),\n }\n\n image_datasets = {x: SegDataset(data_dir, imageFolder=imageFolder, maskFolder=maskFolder, seed=100, fraction=fraction, subset=x, transform=data_transforms[x])\n for x in ['Train', 'Test']}\n dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size,\n shuffle=True, num_workers=8)\n for x in ['Train', 'Test']}\n return dataloaders\n" ]
[ [ "torch.utils.data.DataLoader", "torch.from_numpy", "numpy.random.seed", "numpy.random.shuffle" ] ]
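A hypothetical way to drive the loader defined in the record above; the 'path/to/data' directory with Images/ and Masks/ subfolders follows the layout described in the SegDataset docstring, and the import path is a guess based on the record's file location (deeplabv3/datahandler.py).

from deeplabv3.datahandler import get_dataloader_single_folder  # import path assumed

# Build Train/Test dataloaders with a 20% validation split, batches of 4.
dataloaders = get_dataloader_single_folder('path/to/data', fraction=0.2, batch_size=4)

batch = next(iter(dataloaders['Train']))
images, masks = batch['image'], batch['mask']   # float tensors scaled to [0, 1] by Normalize
print(images.shape, masks.shape)                # e.g. (4, 3, 256, 256) and (4, 1, 256, 256)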
UCL/scikit-surgeryvtk
[ "75a2cb15f976348b844fea165bddf187efa722f0" ]
[ "tests/utils/test_polydata_utils.py" ]
[ "# -*- coding: utf-8 -*-\n\n# -*- coding: utf-8 -*-\n\nimport pytest\nimport vtk\nimport numpy as np\nimport sksurgeryvtk.utils.polydata_utils as pdu\nimport sksurgeryvtk.models.vtk_surface_model as vbs\n\ndef test_overlapping_bounds():\n radius_0=10.0\n radius_1=7.0\n centre_1=5.0\n radius_2=4.0\n centre_2=15.0\n radius_3=4.0\n centre_3=0.0\n sphere_0 = vtk.vtkSphereSource()\n sphere_0.SetRadius(radius_0)\n sphere_0.SetPhiResolution(12)\n sphere_0.SetThetaResolution(12)\n sphere_0.SetCenter(0.0, 0.0, 0.0)\n sphere_0.Update()\n vtk_model_0 = sphere_0.GetOutput()\n\n sphere_1 = vtk.vtkSphereSource()\n sphere_1.SetRadius(radius_1)\n sphere_1.SetPhiResolution(12)\n sphere_1.SetThetaResolution(21)\n sphere_1.SetCenter(centre_1, 0.0, 0.0)\n sphere_1.Update()\n vtk_model_1 = sphere_1.GetOutput()\n \n sphere_2 = vtk.vtkSphereSource()\n sphere_2.SetRadius(radius_2)\n sphere_2.SetPhiResolution(12)\n sphere_2.SetThetaResolution(21)\n sphere_2.SetCenter(centre_2, 0.0, 0.0)\n sphere_2.Update()\n vtk_model_2 = sphere_2.GetOutput()\n\n sphere_3 = vtk.vtkSphereSource()\n sphere_3.SetRadius(radius_3)\n sphere_3.SetPhiResolution(12)\n sphere_3.SetThetaResolution(21)\n sphere_3.SetCenter(centre_3, 0.0, 0.0)\n sphere_3.Update()\n vtk_model_3 = sphere_3.GetOutput()\n \n assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_1))\n assert (pdu.check_overlapping_bounds( vtk_model_1, vtk_model_0))\n assert (not pdu.check_overlapping_bounds( vtk_model_0, vtk_model_2))\n assert (not pdu.check_overlapping_bounds( vtk_model_2, vtk_model_0))\n assert (pdu.check_overlapping_bounds( vtk_model_0, vtk_model_3))\n assert (pdu.check_overlapping_bounds( vtk_model_3, vtk_model_0))\n\ndef test_dice_overlap():\n\n radius_0=10.0\n radius_1=7.0\n centre_1=5.0\n sphere_0 = vtk.vtkSphereSource()\n sphere_0.SetRadius(radius_0)\n sphere_0.SetPhiResolution(60)\n sphere_0.SetThetaResolution(60)\n sphere_0.SetCenter(0.0, 0.0, 0.0)\n sphere_0.Update()\n vtk_model_0 = sphere_0.GetOutput()\n\n sphere_1 = vtk.vtkSphereSource()\n sphere_1.SetRadius(radius_1)\n sphere_1.SetPhiResolution(60)\n sphere_1.SetThetaResolution(60)\n sphere_1.SetCenter(centre_1, 0.0, 0.0)\n sphere_1.Update()\n vtk_model_1 = sphere_1.GetOutput()\n\n dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1)\n\n np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2)\n np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2)\n\n #from http://mathworld.wolfram.com/Sphere-SphereIntersection.html\n cap_height_0 = ( radius_1 - radius_0 + centre_1) * ( radius_1 + radius_0 - centre_1) / (2 * centre_1)\n cap_height_1 = ( radius_0 - radius_1 + centre_1) * ( radius_0 + radius_1 - centre_1) / (2 * centre_1)\n cap_vol_0 = np.pi * cap_height_0**2 * ( 3 * radius_0 - cap_height_0) / 3\n cap_vol_1 = np.pi * cap_height_1**2 * ( 3 * radius_1 - cap_height_1) / 3\n\n analytic = cap_vol_0 + cap_vol_1\n np.testing.assert_approx_equal(volume_01, analytic, significant=2)\n\n np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10)\n\ndef test_dice_no_overlap():\n\n radius_0=5.5\n radius_1=4.3\n centre_1=12.0\n sphere_0 = vtk.vtkSphereSource()\n sphere_0.SetRadius(radius_0)\n sphere_0.SetPhiResolution(60)\n sphere_0.SetThetaResolution(60)\n sphere_0.SetCenter(0.0, 0.0, 0.0)\n sphere_0.Update()\n vtk_model_0 = sphere_0.GetOutput()\n\n sphere_1 = vtk.vtkSphereSource()\n sphere_1.SetRadius(radius_1)\n sphere_1.SetPhiResolution(60)\n 
sphere_1.SetThetaResolution(60)\n sphere_1.SetCenter(centre_1, 0.0, 0.0)\n sphere_1.Update()\n vtk_model_1 = sphere_1.GetOutput()\n\n dice, volume_0, volume_1, volume_01 = pdu.two_polydata_dice(vtk_model_0, vtk_model_1)\n\n np.testing.assert_approx_equal(volume_0, 4.0 * np.pi * radius_0**3.0 / 3.0, significant=2)\n np.testing.assert_approx_equal(volume_1, 4.0 * np.pi * radius_1**3.0 / 3.0, significant=2)\n\n analytic = 0.0\n np.testing.assert_approx_equal(volume_01, analytic, significant=2)\n\n np.testing.assert_approx_equal(dice, 2*volume_01 / ( volume_0 + volume_1) , significant=10)\n" ]
[ [ "numpy.testing.assert_approx_equal" ] ]
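The analytic value the dice tests above compare against is the sphere-sphere intersection (lens) volume from the MathWorld reference cited in the test. A standalone helper using the same cap-height formula (handling only the disjoint and partially overlapping cases exercised by the tests) might look like this.

import numpy as np

def sphere_intersection_volume(r0, r1, d):
    """Lens volume of two spheres of radii r0, r1 whose centres are d apart."""
    if d >= r0 + r1:        # disjoint spheres, as in test_dice_no_overlap
        return 0.0
    cap_h0 = (r1 - r0 + d) * (r1 + r0 - d) / (2.0 * d)
    cap_h1 = (r0 - r1 + d) * (r0 + r1 - d) / (2.0 * d)
    cap_v0 = np.pi * cap_h0 ** 2 * (3.0 * r0 - cap_h0) / 3.0
    cap_v1 = np.pi * cap_h1 ** 2 * (3.0 * r1 - cap_h1) / 3.0
    return cap_v0 + cap_v1

print(sphere_intersection_volume(10.0, 7.0, 5.0))   # the volume test_dice_overlap approximates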
YuHsin1998/EllSeg
[ "ff56b255f8e650856aec9af23792e105897eba5c", "ff56b255f8e650856aec9af23792e105897eba5c" ]
[ "dataset_generation/ExtractSantini.py", "extern/locating-objects-without-bboxes/object-locator/models/utils.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 29 16:16:57 2019\n\n@author: rakshit\n\"\"\"\nimport os\nimport cv2\nimport argparse\nimport matplotlib\nimport numpy as np\nimport deepdish as dd\nimport scipy.io as scio\n\nprint('Extracting Santini')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--noDisp', help='Specify flag to display labelled images', type=int)\nparser.add_argument('--path2ds', help='Path to dataset', type=str)\nargs = parser.parse_args()\nif args.noDisp:\n noDisp = True\n print('No graphics')\nelse:\n noDisp = False\n print('Showing figures')\n\ngui_env = ['Qt5Agg','WXAgg','TKAgg','GTKAgg']\nfor gui in gui_env:\n try:\n print(\"testing: {}\".format(gui))\n matplotlib.use(gui,warn=False, force=True)\n from matplotlib import pyplot as plt\n break\n except:\n continue\n\nprint(\"Using: {}\".format(matplotlib.get_backend()))\nplt.ion()\n\nargs.path2ds = '/media/rakshit/tank/Dataset'\nPATH_DIR = os.path.join(args.path2ds, 'Santini')\nPATH_DS = os.path.join(args.path2ds, 'All')\nPATH_MASTER = os.path.join(args.path2ds, 'MasterKey')\nlist_ds = ['1', '2', '3', '4', '5', '6']\n\nsc = (640.0/384.0)\nImage_counter = 0.0\nds_num = 24\n\ndef mypause(interval):\n backend = plt.rcParams['backend']\n if backend in matplotlib.rcsetup.interactive_bk:\n figManager = matplotlib._pylab_helpers.Gcf.get_active()\n if figManager is not None:\n canvas = figManager.canvas\n if canvas.figure.stale:\n canvas.draw()\n canvas.start_event_loop(interval)\n return\n\ndef fix_pupil_loc(p, res):\n # res: [H, W]\n p[0] = 0.5*p[0]\n p[1] = res[0] - 0.5*p[1]\n return p\n\ndef readFormattedText(path2file, ignoreLines):\n data = []\n count = 0\n f = open(path2file, 'r')\n for line in f:\n d = [int(d) for d in line.split() if d.isdigit()]\n count = count + 1\n if d and count > ignoreLines:\n data.append(d)\n f.close()\n return data\n\nfor name in list_ds:\n # Ignore the first row and column.\n # Columns: [index, p_x, p_y]\n opts = os.listdir(os.path.join(PATH_DIR, name))\n for subdir in opts:\n PATH_DATA = os.path.join(PATH_DIR, name, subdir)\n\n # Read pupil data\n Path2text = os.path.join(PATH_DATA, 'journal-{:04d}.txt'.format(int(subdir)-1))\n Path2vid = os.path.join(PATH_DATA, 'eye-{:04d}-0000.avi'.format(int(subdir)-1))\n PupilData = np.array(readFormattedText(Path2text, 2))\n VidObj = cv2.VideoCapture(Path2vid)\n\n keydict = {k:[] for k in ['pupil_loc', 'archive', 'data_type', 'resolution', 'dataset', 'subset']}\n\n # Generate empty dictionaries\n keydict['data_type'] = 0 # Only pupil center available\n keydict['resolution'] = []\n keydict['dataset'] = 'Santini'\n keydict['subset'] = '{}-{}'.format(name, subdir)\n\n # Create an empty dictionary as per agreed structure\n Data = {k:[] for k in ['Images', 'Info', 'Masks', 'Masks_noSkin', 'Fits', 'pupil_loc']}\n Data['Fits'] = {k:[] for k in ['pupil', 'pupil_norm', 'pupil_phi', 'iris', 'iris_norm', 'iris_phi']}\n\n if not noDisp:\n fig, plts = plt.subplots(1,1)\n fr_num = 0\n while(VidObj.isOpened()):\n ret, I = VidObj.read()\n if ret == True:\n\n I = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)\n I = cv2.resize(I, (640, 480), cv2.INTER_LANCZOS4)\n\n Data['Images'].append(I)\n keydict['resolution'].append(I.shape)\n keydict['archive'].append(ds_num)\n\n pupil_loc = fix_pupil_loc(PupilData[fr_num, 10:12]*sc, I.shape)\n\n keydict['pupil_loc'].append(pupil_loc)\n Data['pupil_loc'].append(pupil_loc)\n Data['Info'].append(str(fr_num))\n fr_num+=1\n Image_counter+=1\n if not noDisp:\n if fr_num == 1:\n cI = plts.imshow(I)\n cX = 
plts.scatter(pupil_loc[0], pupil_loc[1])\n plt.show()\n plt.pause(.01)\n else:\n newLoc = np.array([pupil_loc[0], pupil_loc[1]])\n cI.set_data(I)\n cX.set_offsets(newLoc)\n mypause(0.01)\n else: # No more frames to load\n break\n\n Data['Images'] = np.stack(Data['Images'], axis=0)\n Data['pupil_loc'] = np.stack(Data['pupil_loc'], axis=0)\n keydict['pupil_loc'] = np.stack(keydict['pupil_loc'], axis=0)\n keydict['resolution'] = np.stack(keydict['resolution'], axis=0)\n keydict['archive'] = np.stack(keydict['archive'], axis=0)\n\n # Save out data\n dd.io.save(os.path.join(PATH_DS, str(ds_num)+'.h5'), Data)\n scio.savemat(os.path.join(PATH_MASTER, str(ds_num)), keydict, appendmat=True)\n ds_num=ds_num+1", "__copyright__ = \\\n\"\"\"\nCopyright &copyright © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.\nAll rights reserved.\n\nThis software is covered by US patents and copyright.\nThis source code is to be used for academic research purposes only, and no commercial use is allowed.\n\nFor any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.\n\nLast Modified: 10/02/2019 \n\"\"\"\n__license__ = \"CC BY-NC-SA 4.0\"\n__authors__ = \"Javier Ribera, David Guera, Yuhao Chen, Edward J. Delp\"\n__version__ = \"1.6.0\"\n\n\nimport h5py\nimport torch\nimport shutil\nimport numpy as np # needed by load_net (np.asarray)\n\ndef save_net(fname, net):\n with h5py.File(fname, 'w') as h5f:\n for k, v in net.state_dict().items():\n h5f.create_dataset(k, data=v.cpu().numpy())\ndef load_net(fname, net):\n with h5py.File(fname, 'r') as h5f:\n for k, v in net.state_dict().items(): \n param = torch.from_numpy(np.asarray(h5f[k])) \n v.copy_(param)\n \ndef save_checkpoint(state, is_best,task_id, filename='checkpoint.pth.tar'):\n torch.save(state, task_id+filename)\n if is_best:\n shutil.copyfile(task_id+filename, task_id+'model_best.pth.tar') \n\n\n\"\"\"\nCopyright &copyright © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.\nAll rights reserved.\n\nThis software is covered by US patents and copyright.\nThis source code is to be used for academic research purposes only, and no commercial use is allowed.\n\nFor any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.\n\nLast Modified: 10/02/2019 \n\"\"\"\n" ]
[ [ "matplotlib.pyplot.pause", "matplotlib.get_backend", "numpy.stack", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.use", "matplotlib.pyplot.ion", "matplotlib._pylab_helpers.Gcf.get_active", "numpy.array" ], [ "torch.save" ] ]
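A hedged sketch of how the checkpoint helper in object-locator/models/utils.py above could be called; the toy model, optimizer, and the 'ellseg_' task id are illustrative assumptions, and the import path depends on how the vendored package is laid out.

import torch
import torch.nn as nn
from models.utils import save_checkpoint   # path assumed; adjust to the vendored layout

model = nn.Linear(4, 2)                    # stand-in network for illustration
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

state = {'epoch': 1,
         'state_dict': model.state_dict(),
         'optimizer': optimizer.state_dict()}

# Writes 'ellseg_checkpoint.pth.tar' and, because is_best=True, copies it
# to 'ellseg_model_best.pth.tar' (see save_checkpoint in the record above).
save_checkpoint(state, True, 'ellseg_')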
anaheino/Ufo-sightings-map
[ "64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc", "64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc" ]
[ "app/venv/lib/python2.7/site-packages/numpy/lib/arraypad.py", "app/venv/lib/python2.7/site-packages/numpy/distutils/mingw32ccompiler.py" ]
[ "\"\"\"\nThe arraypad module contains a group of functions to pad values onto the edges\nof an n-dimensional array.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\n\n__all__ = ['pad']\n\n\n###############################################################################\n# Private utility functions.\n\n\ndef _arange_ndarray(arr, shape, axis, reverse=False):\n \"\"\"\n Create an ndarray of `shape` with increments along specified `axis`\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n shape : tuple of ints\n Shape of desired array. Should be equivalent to `arr.shape` except\n `shape[axis]` which may have any positive value.\n axis : int\n Axis to increment along.\n reverse : bool\n If False, increment in a positive fashion from 1 to `shape[axis]`,\n inclusive. If True, the bounds are the same but the order reversed.\n\n Returns\n -------\n padarr : ndarray\n Output array sized to pad `arr` along `axis`, with linear range from\n 1 to `shape[axis]` along specified `axis`.\n\n Notes\n -----\n The range is deliberately 1-indexed for this specific use case. Think of\n this algorithm as broadcasting `np.arange` to a single `axis` of an\n arbitrarily shaped ndarray.\n\n \"\"\"\n initshape = tuple(1 if i != axis else shape[axis]\n for (i, x) in enumerate(arr.shape))\n if not reverse:\n padarr = np.arange(1, shape[axis] + 1)\n else:\n padarr = np.arange(shape[axis], 0, -1)\n padarr = padarr.reshape(initshape)\n for i, dim in enumerate(shape):\n if padarr.shape[i] != dim:\n padarr = padarr.repeat(dim, axis=i)\n return padarr\n\n\ndef _round_ifneeded(arr, dtype):\n \"\"\"\n Rounds arr inplace if destination dtype is integer.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n dtype : dtype\n The dtype of the destination array.\n\n \"\"\"\n if np.issubdtype(dtype, np.integer):\n arr.round(out=arr)\n\n\ndef _prepend_const(arr, pad_amt, val, axis=-1):\n \"\"\"\n Prepend constant `val` along `axis` of `arr`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n val : scalar\n Constant value to use. For best results should be of type `arr.dtype`;\n if not `arr.dtype` will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` constant `val` prepended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n if val == 0:\n return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),\n axis=axis)\n else:\n return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),\n arr), axis=axis)\n\n\ndef _append_const(arr, pad_amt, val, axis=-1):\n \"\"\"\n Append constant `val` along `axis` of `arr`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n val : scalar\n Constant value to use. 
For best results should be of type `arr.dtype`;\n if not `arr.dtype` will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` constant `val` appended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n if val == 0:\n return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),\n axis=axis)\n else:\n return np.concatenate(\n (arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)\n\n\ndef _prepend_edge(arr, pad_amt, axis=-1):\n \"\"\"\n Prepend `pad_amt` to `arr` along `axis` by extending edge values.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, extended by `pad_amt` edge values appended along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n edge_slice = tuple(slice(None) if i != axis else 0\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n edge_arr = arr[edge_slice].reshape(pad_singleton)\n return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),\n axis=axis)\n\n\ndef _append_edge(arr, pad_amt, axis=-1):\n \"\"\"\n Append `pad_amt` to `arr` along `axis` by extending edge values.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, extended by `pad_amt` edge values prepended along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n edge_arr = arr[edge_slice].reshape(pad_singleton)\n return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),\n axis=axis)\n\n\ndef _prepend_ramp(arr, pad_amt, end, axis=-1):\n \"\"\"\n Prepend linear ramp along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n end : scalar\n Constal value to use. For best results should be of type `arr.dtype`;\n if not `arr.dtype` will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region ramps linearly from the edge value to `end`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Generate shape for final concatenated array\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n\n # Generate an n-dimensional array incrementing along `axis`\n ramp_arr = _arange_ndarray(arr, padshape, axis,\n reverse=True).astype(np.float64)\n\n # Appropriate slicing to extract n-dimensional edge along `axis`\n edge_slice = tuple(slice(None) if i != axis else 0\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract edge, reshape to original rank, and extend along `axis`\n edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)\n\n # Linear ramp\n slope = (end - edge_pad) / float(pad_amt)\n ramp_arr = ramp_arr * slope\n ramp_arr += edge_pad\n _round_ifneeded(ramp_arr, arr.dtype)\n\n # Ramp values will most likely be float, cast them to the same type as arr\n return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)\n\n\ndef _append_ramp(arr, pad_amt, end, axis=-1):\n \"\"\"\n Append linear ramp along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n end : scalar\n Constal value to use. For best results should be of type `arr.dtype`;\n if not `arr.dtype` will be cast to `arr.dtype`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region ramps linearly from the edge value to `end`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Generate shape for final concatenated array\n padshape = tuple(x if i != axis else pad_amt\n for (i, x) in enumerate(arr.shape))\n\n # Generate an n-dimensional array incrementing along `axis`\n ramp_arr = _arange_ndarray(arr, padshape, axis,\n reverse=False).astype(np.float64)\n\n # Slice a chunk from the edge to calculate stats on\n edge_slice = tuple(slice(None) if i != axis else -1\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract edge, reshape to original rank, and extend along `axis`\n edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)\n\n # Linear ramp\n slope = (end - edge_pad) / float(pad_amt)\n ramp_arr = ramp_arr * slope\n ramp_arr += edge_pad\n _round_ifneeded(ramp_arr, arr.dtype)\n\n # Ramp values will most likely be float, cast them to the same type as arr\n return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)\n\n\ndef _prepend_max(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` maximum values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate maximum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. 
The\n prepended region is the maximum of the first `num` values along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n max_slice = tuple(slice(None) if i != axis else slice(num)\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate max, reshape to add singleton dimension back\n max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)\n\n # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),\n axis=axis)\n\n\ndef _append_max(arr, pad_amt, num, axis=-1):\n \"\"\"\n Pad one `axis` of `arr` with the maximum of the last `num` elements.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate maximum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region is the maximum of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n end = arr.shape[axis] - 1\n if num is not None:\n max_slice = tuple(\n slice(None) if i != axis else slice(end, end - num, -1)\n for (i, x) in enumerate(arr.shape))\n else:\n max_slice = tuple(slice(None) for x in arr.shape)\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate max, reshape to add singleton dimension back\n max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)\n\n # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),\n axis=axis)\n\n\ndef _prepend_mean(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` mean values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate mean.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region is the mean of the first `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n mean_slice = tuple(slice(None) if i != axis else slice(num)\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate mean, reshape to add singleton dimension back\n mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)\n _round_ifneeded(mean_chunk, arr.dtype)\n\n # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),\n arr), axis=axis)\n\n\ndef _append_mean(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` mean values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate mean.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region is the maximum of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n end = arr.shape[axis] - 1\n if num is not None:\n mean_slice = tuple(\n slice(None) if i != axis else slice(end, end - num, -1)\n for (i, x) in enumerate(arr.shape))\n else:\n mean_slice = tuple(slice(None) for x in arr.shape)\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate mean, reshape to add singleton dimension back\n mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)\n _round_ifneeded(mean_chunk, arr.dtype)\n\n # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate(\n (arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)\n\n\ndef _prepend_med(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` median values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate median.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region is the median of the first `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n med_slice = tuple(slice(None) if i != axis else slice(num)\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate median, reshape to add singleton dimension back\n med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)\n _round_ifneeded(med_chunk, arr.dtype)\n\n # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate(\n (med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)\n\n\ndef _append_med(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` median values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate median.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region is the median of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n end = arr.shape[axis] - 1\n if num is not None:\n med_slice = tuple(\n slice(None) if i != axis else slice(end, end - num, -1)\n for (i, x) in enumerate(arr.shape))\n else:\n med_slice = tuple(slice(None) for x in arr.shape)\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate median, reshape to add singleton dimension back\n med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)\n _round_ifneeded(med_chunk, arr.dtype)\n\n # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate(\n (arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)\n\n\ndef _prepend_min(arr, pad_amt, num, axis=-1):\n \"\"\"\n Prepend `pad_amt` minimum values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to prepend.\n num : int\n Depth into `arr` along `axis` to calculate minimum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values prepended along `axis`. 
The\n prepended region is the minimum of the first `num` values along\n `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _prepend_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n min_slice = tuple(slice(None) if i != axis else slice(num)\n for (i, x) in enumerate(arr.shape))\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate min, reshape to add singleton dimension back\n min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)\n\n # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),\n axis=axis)\n\n\ndef _append_min(arr, pad_amt, num, axis=-1):\n \"\"\"\n Append `pad_amt` median values along `axis`.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : int\n Amount of padding to append.\n num : int\n Depth into `arr` along `axis` to calculate minimum.\n Range: [1, `arr.shape[axis]`] or None (entire axis)\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt` values appended along `axis`. The\n appended region is the minimum of the final `num` values along `axis`.\n\n \"\"\"\n if pad_amt == 0:\n return arr\n\n # Equivalent to edge padding for single value, so do that instead\n if num == 1:\n return _append_edge(arr, pad_amt, axis)\n\n # Use entire array if `num` is too large\n if num is not None:\n if num >= arr.shape[axis]:\n num = None\n\n # Slice a chunk from the edge to calculate stats on\n end = arr.shape[axis] - 1\n if num is not None:\n min_slice = tuple(\n slice(None) if i != axis else slice(end, end - num, -1)\n for (i, x) in enumerate(arr.shape))\n else:\n min_slice = tuple(slice(None) for x in arr.shape)\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n\n # Extract slice, calculate min, reshape to add singleton dimension back\n min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)\n\n # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`\n return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),\n axis=axis)\n\n\ndef _pad_ref(arr, pad_amt, method, axis=-1):\n \"\"\"\n Pad `axis` of `arr` by reflection.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n method : str\n Controls method of reflection; options are 'even' or 'odd'.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. Both regions are padded with reflected\n values from the original array.\n\n Notes\n -----\n This algorithm does not pad with repetition, i.e. the edges are not\n repeated in the reflection. 
For that behavior, use `mode='symmetric'`.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)\n for (i, x) in enumerate(arr.shape))\n\n ref_chunk1 = arr[ref_slice]\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n if pad_amt[0] == 1:\n ref_chunk1 = ref_chunk1.reshape(pad_singleton)\n\n # Memory/computationally more expensive, only do this if `method='odd'`\n if 'odd' in method and pad_amt[0] > 0:\n edge_slice1 = tuple(slice(None) if i != axis else 0\n for (i, x) in enumerate(arr.shape))\n edge_chunk = arr[edge_slice1].reshape(pad_singleton)\n ref_chunk1 = 2 * edge_chunk - ref_chunk1\n del edge_chunk\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n start = arr.shape[axis] - pad_amt[1] - 1\n end = arr.shape[axis] - 1\n ref_slice = tuple(slice(None) if i != axis else slice(start, end)\n for (i, x) in enumerate(arr.shape))\n rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)\n for (i, x) in enumerate(arr.shape))\n ref_chunk2 = arr[ref_slice][rev_idx]\n\n if pad_amt[1] == 1:\n ref_chunk2 = ref_chunk2.reshape(pad_singleton)\n\n if 'odd' in method:\n edge_slice2 = tuple(slice(None) if i != axis else -1\n for (i, x) in enumerate(arr.shape))\n edge_chunk = arr[edge_slice2].reshape(pad_singleton)\n ref_chunk2 = 2 * edge_chunk - ref_chunk2\n del edge_chunk\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)\n\n\ndef _pad_sym(arr, pad_amt, method, axis=-1):\n \"\"\"\n Pad `axis` of `arr` by symmetry.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n method : str\n Controls method of symmetry; options are 'even' or 'odd'.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. Both regions are padded with symmetric\n values from the original array.\n\n Notes\n -----\n This algorithm DOES pad with repetition, i.e. 
the edges are repeated.\n For padding without repeated edges, use `mode='reflect'`.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])\n for (i, x) in enumerate(arr.shape))\n rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)\n for (i, x) in enumerate(arr.shape))\n sym_chunk1 = arr[sym_slice][rev_idx]\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n if pad_amt[0] == 1:\n sym_chunk1 = sym_chunk1.reshape(pad_singleton)\n\n # Memory/computationally more expensive, only do this if `method='odd'`\n if 'odd' in method and pad_amt[0] > 0:\n edge_slice1 = tuple(slice(None) if i != axis else 0\n for (i, x) in enumerate(arr.shape))\n edge_chunk = arr[edge_slice1].reshape(pad_singleton)\n sym_chunk1 = 2 * edge_chunk - sym_chunk1\n del edge_chunk\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n start = arr.shape[axis] - pad_amt[1]\n end = arr.shape[axis]\n sym_slice = tuple(slice(None) if i != axis else slice(start, end)\n for (i, x) in enumerate(arr.shape))\n sym_chunk2 = arr[sym_slice][rev_idx]\n\n if pad_amt[1] == 1:\n sym_chunk2 = sym_chunk2.reshape(pad_singleton)\n\n if 'odd' in method:\n edge_slice2 = tuple(slice(None) if i != axis else -1\n for (i, x) in enumerate(arr.shape))\n edge_chunk = arr[edge_slice2].reshape(pad_singleton)\n sym_chunk2 = 2 * edge_chunk - sym_chunk2\n del edge_chunk\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)\n\n\ndef _pad_wrap(arr, pad_amt, axis=-1):\n \"\"\"\n Pad `axis` of `arr` via wrapping.\n\n Parameters\n ----------\n arr : ndarray\n Input array of arbitrary shape.\n pad_amt : tuple of ints, length 2\n Padding to (prepend, append) along `axis`.\n axis : int\n Axis along which to pad `arr`.\n\n Returns\n -------\n padarr : ndarray\n Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`\n values appended along `axis`. 
Both regions are padded wrapped values\n from the opposite end of `axis`.\n\n Notes\n -----\n This method of padding is also known as 'tile' or 'tiling'.\n\n The modes 'reflect', 'symmetric', and 'wrap' must be padded with a\n single function, lest the indexing tricks in non-integer multiples of the\n original shape would violate repetition in the final iteration.\n\n \"\"\"\n # Implicit booleanness to test for zero (or None) in any scalar type\n if pad_amt[0] == 0 and pad_amt[1] == 0:\n return arr\n\n ##########################################################################\n # Prepended region\n\n # Slice off a reverse indexed chunk from near edge to pad `arr` before\n start = arr.shape[axis] - pad_amt[0]\n end = arr.shape[axis]\n wrap_slice = tuple(slice(None) if i != axis else slice(start, end)\n for (i, x) in enumerate(arr.shape))\n wrap_chunk1 = arr[wrap_slice]\n\n # Shape to restore singleton dimension after slicing\n pad_singleton = tuple(x if i != axis else 1\n for (i, x) in enumerate(arr.shape))\n if pad_amt[0] == 1:\n wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)\n\n ##########################################################################\n # Appended region\n\n # Slice off a reverse indexed chunk from far edge to pad `arr` after\n wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])\n for (i, x) in enumerate(arr.shape))\n wrap_chunk2 = arr[wrap_slice]\n\n if pad_amt[1] == 1:\n wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)\n\n # Concatenate `arr` with both chunks, extending along `axis`\n return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)\n\n\ndef _normalize_shape(ndarray, shape, cast_to_int=True):\n \"\"\"\n Private function which does some checks and normalizes the possibly\n much simpler representations of 'pad_width', 'stat_length',\n 'constant_values', 'end_values'.\n\n Parameters\n ----------\n narray : ndarray\n Input ndarray\n shape : {sequence, array_like, float, int}, optional\n The width of padding (pad_width), the number of elements on the\n edge of the narray used for statistics (stat_length), the constant\n value(s) to use when filling padded regions (constant_values), or the\n endpoint target(s) for linear ramps (end_values).\n ((before_1, after_1), ... (before_N, after_N)) unique number of\n elements for each axis where `N` is rank of `narray`.\n ((before, after),) yields same before and after constants for each\n axis.\n (constant,) or val is a shortcut for before = after = constant for\n all axes.\n cast_to_int : bool, optional\n Controls if values in ``shape`` will be rounded and cast to int\n before being returned.\n\n Returns\n -------\n normalized_shape : tuple of tuples\n val => ((val, val), (val, val), ...)\n [[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...)\n ((val1, val2), (val3, val4), ...) 
=> no change\n [[val1, val2], ] => ((val1, val2), (val1, val2), ...)\n ((val1, val2), ) => ((val1, val2), (val1, val2), ...)\n [[val , ], ] => ((val, val), (val, val), ...)\n ((val , ), ) => ((val, val), (val, val), ...)\n\n \"\"\"\n ndims = ndarray.ndim\n\n # Shortcut shape=None\n if shape is None:\n return ((None, None), ) * ndims\n\n # Convert any input `info` to a NumPy array\n arr = np.asarray(shape)\n\n # Switch based on what input looks like\n if arr.ndim <= 1:\n if arr.shape == () or arr.shape == (1,):\n # Single scalar input\n # Create new array of ones, multiply by the scalar\n arr = np.ones((ndims, 2), dtype=ndarray.dtype) * arr\n elif arr.shape == (2,):\n # Apply padding (before, after) each axis\n # Create new axis 0, repeat along it for every axis\n arr = arr[np.newaxis, :].repeat(ndims, axis=0)\n else:\n fmt = \"Unable to create correctly shaped tuple from %s\"\n raise ValueError(fmt % (shape,))\n\n elif arr.ndim == 2:\n if arr.shape[1] == 1 and arr.shape[0] == ndims:\n # Padded before and after by the same amount\n arr = arr.repeat(2, axis=1)\n elif arr.shape[0] == ndims:\n # Input correctly formatted, pass it on as `arr`\n arr = shape\n else:\n fmt = \"Unable to create correctly shaped tuple from %s\"\n raise ValueError(fmt % (shape,))\n\n else:\n fmt = \"Unable to create correctly shaped tuple from %s\"\n raise ValueError(fmt % (shape,))\n\n # Cast if necessary\n if cast_to_int is True:\n arr = np.round(arr).astype(int)\n\n # Convert list of lists to tuple of tuples\n return tuple(tuple(axis) for axis in arr.tolist())\n\n\ndef _validate_lengths(narray, number_elements):\n \"\"\"\n Private function which does some checks and reformats pad_width and\n stat_length using _normalize_shape.\n\n Parameters\n ----------\n narray : ndarray\n Input ndarray\n number_elements : {sequence, int}, optional\n The width of padding (pad_width) or the number of elements on the edge\n of the narray used for statistics (stat_length).\n ((before_1, after_1), ... (before_N, after_N)) unique number of\n elements for each axis.\n ((before, after),) yields same before and after constants for each\n axis.\n (constant,) or int is a shortcut for before = after = constant for all\n axes.\n\n Returns\n -------\n _validate_lengths : tuple of tuples\n int => ((int, int), (int, int), ...)\n [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)\n ((int1, int2), (int3, int4), ...) => no change\n [[int1, int2], ] => ((int1, int2), (int1, int2), ...)\n ((int1, int2), ) => ((int1, int2), (int1, int2), ...)\n [[int , ], ] => ((int, int), (int, int), ...)\n ((int , ), ) => ((int, int), (int, int), ...)\n\n \"\"\"\n normshp = _normalize_shape(narray, number_elements)\n for i in normshp:\n chk = [1 if x is None else x for x in i]\n chk = [1 if x >= 0 else -1 for x in chk]\n if (chk[0] < 0) or (chk[1] < 0):\n fmt = \"%s cannot contain negative values.\"\n raise ValueError(fmt % (number_elements,))\n return normshp\n\n\n###############################################################################\n# Public functions\n\n\ndef pad(array, pad_width, mode=None, **kwargs):\n \"\"\"\n Pads an array.\n\n Parameters\n ----------\n array : array_like of rank N\n Input array\n pad_width : {sequence, array_like, int}\n Number of values padded to the edges of each axis.\n ((before_1, after_1), ... 
(before_N, after_N)) unique pad widths\n for each axis.\n ((before, after),) yields same before and after pad for each axis.\n (pad,) or int is a shortcut for before = after = pad width for all\n axes.\n mode : str or function\n One of the following string values or a user supplied function.\n\n 'constant'\n Pads with a constant value.\n 'edge'\n Pads with the edge values of array.\n 'linear_ramp'\n Pads with the linear ramp between end_value and the\n array edge value.\n 'maximum'\n Pads with the maximum value of all or part of the\n vector along each axis.\n 'mean'\n Pads with the mean value of all or part of the\n vector along each axis.\n 'median'\n Pads with the median value of all or part of the\n vector along each axis.\n 'minimum'\n Pads with the minimum value of all or part of the\n vector along each axis.\n 'reflect'\n Pads with the reflection of the vector mirrored on\n the first and last values of the vector along each\n axis.\n 'symmetric'\n Pads with the reflection of the vector mirrored\n along the edge of the array.\n 'wrap'\n Pads with the wrap of the vector along the axis.\n The first values are used to pad the end and the\n end values are used to pad the beginning.\n <function>\n Padding function, see Notes.\n stat_length : sequence or int, optional\n Used in 'maximum', 'mean', 'median', and 'minimum'. Number of\n values at edge of each axis used to calculate the statistic value.\n\n ((before_1, after_1), ... (before_N, after_N)) unique statistic\n lengths for each axis.\n\n ((before, after),) yields same before and after statistic lengths\n for each axis.\n\n (stat_length,) or int is a shortcut for before = after = statistic\n length for all axes.\n\n Default is ``None``, to use the entire axis.\n constant_values : sequence or int, optional\n Used in 'constant'. The values to set the padded values for each\n axis.\n\n ((before_1, after_1), ... (before_N, after_N)) unique pad constants\n for each axis.\n\n ((before, after),) yields same before and after constants for each\n axis.\n\n (constant,) or int is a shortcut for before = after = constant for\n all axes.\n\n Default is 0.\n end_values : sequence or int, optional\n Used in 'linear_ramp'. The values used for the ending value of the\n linear_ramp and that will form the edge of the padded array.\n\n ((before_1, after_1), ... (before_N, after_N)) unique end values\n for each axis.\n\n ((before, after),) yields same before and after end values for each\n axis.\n\n (constant,) or int is a shortcut for before = after = end value for\n all axes.\n\n Default is 0.\n reflect_type : {'even', 'odd'}, optional\n Used in 'reflect', and 'symmetric'. The 'even' style is the\n default with an unaltered reflection around the edge value. For\n the 'odd' style, the extented part of the array is created by\n subtracting the reflected values from two times the edge value.\n\n Returns\n -------\n pad : ndarray\n Padded array of rank equal to `array` with shape increased\n according to `pad_width`.\n\n Notes\n -----\n .. versionadded:: 1.7.0\n\n For an array with rank greater than 1, some of the padding of later\n axes is calculated from padding of previous axes. This is easiest to\n think about with a rank 2 array where the corners of the padded array\n are calculated by using padded values from the first axis.\n\n The padding function, if used, should return a rank 1 array equal in\n length to the vector argument with padded values replaced. 
It has the\n following signature::\n\n padding_func(vector, iaxis_pad_width, iaxis, **kwargs)\n\n where\n\n vector : ndarray\n A rank 1 array already padded with zeros. Padded values are\n vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].\n iaxis_pad_width : tuple\n A 2-tuple of ints, iaxis_pad_width[0] represents the number of\n values padded at the beginning of vector where\n iaxis_pad_width[1] represents the number of values padded at\n the end of vector.\n iaxis : int\n The axis currently being calculated.\n kwargs : misc\n Any keyword arguments the function requires.\n\n Examples\n --------\n >>> a = [1, 2, 3, 4, 5]\n >>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6))\n array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])\n\n >>> np.lib.pad(a, (2, 3), 'edge')\n array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])\n\n >>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))\n array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])\n\n >>> np.lib.pad(a, (2,), 'maximum')\n array([5, 5, 1, 2, 3, 4, 5, 5, 5])\n\n >>> np.lib.pad(a, (2,), 'mean')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n\n >>> np.lib.pad(a, (2,), 'median')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n\n >>> a = [[1, 2], [3, 4]]\n >>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum')\n array([[1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [3, 3, 3, 4, 3, 3, 3],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1]])\n\n >>> a = [1, 2, 3, 4, 5]\n >>> np.lib.pad(a, (2, 3), 'reflect')\n array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])\n\n >>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd')\n array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])\n\n >>> np.lib.pad(a, (2, 3), 'symmetric')\n array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])\n\n >>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd')\n array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])\n\n >>> np.lib.pad(a, (2, 3), 'wrap')\n array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])\n\n >>> def padwithtens(vector, pad_width, iaxis, kwargs):\n ... vector[:pad_width[0]] = 10\n ... vector[-pad_width[1]:] = 10\n ... 
return vector\n\n >>> a = np.arange(6)\n >>> a = a.reshape((2, 3))\n\n >>> np.lib.pad(a, 2, padwithtens)\n array([[10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 0, 1, 2, 10, 10],\n [10, 10, 3, 4, 5, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10]])\n \"\"\"\n if not np.asarray(pad_width).dtype.kind == 'i':\n raise TypeError('`pad_width` must be of integral type.')\n\n narray = np.array(array)\n pad_width = _validate_lengths(narray, pad_width)\n\n allowedkwargs = {\n 'constant': ['constant_values'],\n 'edge': [],\n 'linear_ramp': ['end_values'],\n 'maximum': ['stat_length'],\n 'mean': ['stat_length'],\n 'median': ['stat_length'],\n 'minimum': ['stat_length'],\n 'reflect': ['reflect_type'],\n 'symmetric': ['reflect_type'],\n 'wrap': [],\n }\n\n kwdefaults = {\n 'stat_length': None,\n 'constant_values': 0,\n 'end_values': 0,\n 'reflect_type': 'even',\n }\n\n if isinstance(mode, str):\n # Make sure have allowed kwargs appropriate for mode\n for key in kwargs:\n if key not in allowedkwargs[mode]:\n raise ValueError('%s keyword not in allowed keywords %s' %\n (key, allowedkwargs[mode]))\n\n # Set kwarg defaults\n for kw in allowedkwargs[mode]:\n kwargs.setdefault(kw, kwdefaults[kw])\n\n # Need to only normalize particular keywords.\n for i in kwargs:\n if i == 'stat_length':\n kwargs[i] = _validate_lengths(narray, kwargs[i])\n if i in ['end_values', 'constant_values']:\n kwargs[i] = _normalize_shape(narray, kwargs[i],\n cast_to_int=False)\n elif mode is None:\n raise ValueError('Keyword \"mode\" must be a function or one of %s.' %\n (list(allowedkwargs.keys()),))\n else:\n # Drop back to old, slower np.apply_along_axis mode for user-supplied\n # vector function\n function = mode\n\n # Create a new padded array\n rank = list(range(len(narray.shape)))\n total_dim_increase = [np.sum(pad_width[i]) for i in rank]\n offset_slices = [slice(pad_width[i][0],\n pad_width[i][0] + narray.shape[i])\n for i in rank]\n new_shape = np.array(narray.shape) + total_dim_increase\n newmat = np.zeros(new_shape, narray.dtype)\n\n # Insert the original array into the padded array\n newmat[offset_slices] = narray\n\n # This is the core of pad ...\n for iaxis in rank:\n np.apply_along_axis(function,\n iaxis,\n newmat,\n pad_width[iaxis],\n iaxis,\n kwargs)\n return newmat\n\n # If we get here, use new padding method\n newmat = narray.copy()\n\n # API preserved, but completely new algorithm which pads by building the\n # entire block to pad before/after `arr` with in one step, for each axis.\n if mode == 'constant':\n for axis, ((pad_before, pad_after), (before_val, after_val)) \\\n in enumerate(zip(pad_width, kwargs['constant_values'])):\n newmat = _prepend_const(newmat, pad_before, before_val, axis)\n newmat = _append_const(newmat, pad_after, after_val, axis)\n\n elif mode == 'edge':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n newmat = _prepend_edge(newmat, pad_before, axis)\n newmat = _append_edge(newmat, pad_after, axis)\n\n elif mode == 'linear_ramp':\n for axis, ((pad_before, pad_after), (before_val, after_val)) \\\n in enumerate(zip(pad_width, kwargs['end_values'])):\n newmat = _prepend_ramp(newmat, pad_before, before_val, axis)\n newmat = _append_ramp(newmat, pad_after, after_val, axis)\n\n elif mode == 'maximum':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_max(newmat, pad_before, chunk_before, axis)\n newmat = _append_max(newmat, pad_after, 
chunk_after, axis)\n\n elif mode == 'mean':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)\n newmat = _append_mean(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'median':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_med(newmat, pad_before, chunk_before, axis)\n newmat = _append_med(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'minimum':\n for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \\\n in enumerate(zip(pad_width, kwargs['stat_length'])):\n newmat = _prepend_min(newmat, pad_before, chunk_before, axis)\n newmat = _append_min(newmat, pad_after, chunk_after, axis)\n\n elif mode == 'reflect':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n if ((pad_before > 0) or\n (pad_after > 0)) and newmat.shape[axis] == 1:\n # Extending singleton dimension for 'reflect' is legacy\n # behavior; it really should raise an error.\n newmat = _prepend_edge(newmat, pad_before, axis)\n newmat = _append_edge(newmat, pad_after, axis)\n continue\n\n method = kwargs['reflect_type']\n safe_pad = newmat.shape[axis] - 1\n while ((pad_before > safe_pad) or (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_ref(newmat, (pad_iter_b,\n pad_iter_a), method, axis)\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)\n\n elif mode == 'symmetric':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n method = kwargs['reflect_type']\n safe_pad = newmat.shape[axis]\n while ((pad_before > safe_pad) or\n (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_sym(newmat, (pad_iter_b,\n pad_iter_a), method, axis)\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)\n\n elif mode == 'wrap':\n for axis, (pad_before, pad_after) in enumerate(pad_width):\n # Recursive padding along any axis where `pad_amt` is too large\n # for indexing tricks. We can only safely pad the original axis\n # length, to keep the period of the reflections consistent.\n safe_pad = newmat.shape[axis]\n while ((pad_before > safe_pad) or\n (pad_after > safe_pad)):\n pad_iter_b = min(safe_pad,\n safe_pad * (pad_before // safe_pad))\n pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))\n newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)\n\n pad_before -= pad_iter_b\n pad_after -= pad_iter_a\n safe_pad += pad_iter_b + pad_iter_a\n newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)\n\n return newmat\n", "\"\"\"\nSupport code for building Python extensions on Windows.\n\n # NT stuff\n # 1. 
Make sure libpython<version>.a exists for gcc. If not, build it.\n # 2. Force windows to use gcc (we're struggling with MSVC and g77 support)\n # 3. Force windows to use g77\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport subprocess\nimport re\n\n# Overwrite certain distutils.ccompiler functions:\nimport numpy.distutils.ccompiler\n\nif sys.version_info[0] < 3:\n from . import log\nelse:\n from numpy.distutils import log\n# NT stuff\n# 1. Make sure libpython<version>.a exists for gcc. If not, build it.\n# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)\n# --> this is done in numpy/distutils/ccompiler.py\n# 3. Force windows to use g77\n\nimport distutils.cygwinccompiler\nfrom distutils.version import StrictVersion\nfrom numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options\nfrom distutils.unixccompiler import UnixCCompiler\nfrom distutils.msvccompiler import get_build_version as get_build_msvc_version\nfrom distutils.errors import (DistutilsExecError, CompileError,\n UnknownFileError)\nfrom numpy.distutils.misc_util import (msvc_runtime_library,\n get_build_architecture)\n\n# Useful to generate table of symbols from a dll\n_START = re.compile(r'\\[Ordinal/Name Pointer\\] Table')\n_TABLE = re.compile(r'^\\s+\\[([\\s*[0-9]*)\\] ([a-zA-Z0-9_]*)')\n\n# the same as cygwin plus some additional parameters\nclass Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):\n \"\"\" A modified MingW32 compiler compatible with an MSVC built Python.\n\n \"\"\"\n\n compiler_type = 'mingw32'\n\n def __init__ (self,\n verbose=0,\n dry_run=0,\n force=0):\n\n distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,\n dry_run, force)\n\n # we need to support 3.2 which doesn't match the standard\n # get_versions methods regex\n if self.gcc_version is None:\n import re\n p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,\n stdout=subprocess.PIPE)\n out_string = p.stdout.read()\n p.stdout.close()\n result = re.search('(\\d+\\.\\d+)', out_string)\n if result:\n self.gcc_version = StrictVersion(result.group(1))\n\n # A real mingw32 doesn't need to specify a different entry point,\n # but cygwin 2.91.57 in no-cygwin-mode needs it.\n if self.gcc_version <= \"2.91.57\":\n entry_point = '--entry _DllMain@12'\n else:\n entry_point = ''\n\n if self.linker_dll == 'dllwrap':\n # Commented out '--driver-name g++' part that fixes weird\n # g++.exe: g++: No such file or directory\n # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).\n # If the --driver-name part is required for some environment\n # then make the inclusion of this part specific to that\n # environment.\n self.linker = 'dllwrap' # --driver-name g++'\n elif self.linker_dll == 'gcc':\n self.linker = 'g++'\n\n p = subprocess.Popen(['gcc', '--version'], shell=True,\n stdout=subprocess.PIPE)\n out_string = p.stdout.read()\n p.stdout.close()\n\n # Before build with MinGW-W64 generate the python import library\n # with gendef and dlltool according to the MingW-W64 FAQ.\n # Use the MinGW-W64 provided msvc runtime import libraries.\n # Don't call build_import_library() and build_msvcr_library.\n\n if 'MinGW-W64' not in str(out_string):\n\n # **changes: eric jones 4/11/01\n # 1. Check for import library on Windows. Build if it doesn't\n # exist.\n build_import_library()\n\n # Check for custom msvc runtime library on Windows. 
Build if it\n # doesn't exist.\n msvcr_success = build_msvcr_library()\n msvcr_dbg_success = build_msvcr_library(debug=True)\n if msvcr_success or msvcr_dbg_success:\n # add preprocessor statement for using customized msvcr lib\n self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')\n\n # Define the MSVC version as hint for MinGW\n msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr'))\n self.define_macro('__MSVCRT_VERSION__', msvcr_version)\n\n # MS_WIN64 should be defined when building for amd64 on windows,\n # but python headers define it only for MS compilers, which has all\n # kind of bad consequences, like using Py_ModuleInit4 instead of\n # Py_ModuleInit4_64, etc... So we add it here\n if get_build_architecture() == 'AMD64':\n if self.gcc_version < \"4.0\":\n self.set_executables(\n compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',\n compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0'\n ' -Wall -Wstrict-prototypes',\n linker_exe='gcc -g -mno-cygwin',\n linker_so='gcc -g -mno-cygwin -shared')\n else:\n # gcc-4 series releases do not support -mno-cygwin option\n self.set_executables(\n compiler='gcc -march=x86-64 -mtune=generic -DMS_WIN64'\n ' -O2 -msse2 -Wall',\n compiler_so='gcc -march=x86-64 -mtune=generic -DMS_WIN64'\n ' -O2 -msse2 -Wall -Wstrict-prototypes',\n linker_exe='gcc',\n linker_so='gcc -shared -Wl,-gc-sections -Wl,-s')\n else:\n if self.gcc_version <= \"3.0.0\":\n self.set_executables(\n compiler='gcc -mno-cygwin -O2 -w',\n compiler_so='gcc -mno-cygwin -mdll -O2 -w'\n ' -Wstrict-prototypes',\n linker_exe='g++ -mno-cygwin',\n linker_so='%s -mno-cygwin -mdll -static %s' %\n (self.linker, entry_point))\n elif self.gcc_version < \"4.0\":\n self.set_executables(\n compiler='gcc -mno-cygwin -O2 -Wall',\n compiler_so='gcc -mno-cygwin -O2 -Wall'\n ' -Wstrict-prototypes',\n linker_exe='g++ -mno-cygwin',\n linker_so='g++ -mno-cygwin -shared')\n else:\n # gcc-4 series releases do not support -mno-cygwin option i686\n # build needs '-mincoming-stack-boundary=2' due to ABI\n # incompatibility to Win32 ABI\n self.set_executables(\n compiler='gcc -O2 -march=core2 -mtune=generic'\n ' -mfpmath=sse -msse2'\n ' -mincoming-stack-boundary=2 -Wall',\n compiler_so='gcc -O2 -march=core2 -mtune=generic'\n ' -mfpmath=sse -msse2'\n ' -mincoming-stack-boundary=2 -Wall'\n ' -Wstrict-prototypes',\n linker_exe='g++ ',\n linker_so='g++ -shared -Wl,-gc-sections -Wl,-s')\n # added for python2.3 support we can't pass it through set_executables\n # because pre 2.2 would fail\n self.compiler_cxx = ['g++']\n\n # Maybe we should also append -mthreads, but then the finished dlls\n # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support\n # thread-safe exception handling on `Mingw32')\n\n # no additional libraries needed\n #self.dll_libraries=[]\n return\n\n # __init__ ()\n\n def link(self,\n target_desc,\n objects,\n output_filename,\n output_dir,\n libraries,\n library_dirs,\n runtime_library_dirs,\n export_symbols = None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n build_temp=None,\n target_lang=None):\n # Include the appropiate MSVC runtime library if Python was built\n # with MSVC >= 7.0 (MinGW standard is msvcrt)\n runtime_library = msvc_runtime_library()\n if runtime_library:\n if not libraries:\n libraries = []\n libraries.append(runtime_library)\n args = (self,\n target_desc,\n objects,\n output_filename,\n output_dir,\n libraries,\n library_dirs,\n runtime_library_dirs,\n None, #export_symbols, we do this in our def-file\n debug,\n extra_preargs,\n 
extra_postargs,\n build_temp,\n target_lang)\n if self.gcc_version < \"3.0.0\":\n func = distutils.cygwinccompiler.CygwinCCompiler.link\n else:\n func = UnixCCompiler.link\n func(*args[:func.__code__.co_argcount])\n return\n\n def object_filenames (self,\n source_filenames,\n strip_dir=0,\n output_dir=''):\n if output_dir is None: output_dir = ''\n obj_names = []\n for src_name in source_filenames:\n # use normcase to make sure '.rc' is really '.rc' and not '.RC'\n (base, ext) = os.path.splitext (os.path.normcase(src_name))\n\n # added these lines to strip off windows drive letters\n # without it, .o files are placed next to .c files\n # instead of the build directory\n drv, base = os.path.splitdrive(base)\n if drv:\n base = base[1:]\n\n if ext not in (self.src_extensions + ['.rc', '.res']):\n raise UnknownFileError(\n \"unknown file type '%s' (from '%s')\" % \\\n (ext, src_name))\n if strip_dir:\n base = os.path.basename (base)\n if ext == '.res' or ext == '.rc':\n # these need to be compiled to object files\n obj_names.append (os.path.join (output_dir,\n base + ext + self.obj_extension))\n else:\n obj_names.append (os.path.join (output_dir,\n base + self.obj_extension))\n return obj_names\n\n # object_filenames ()\n\n\ndef find_python_dll():\n maj, min, micro = [int(i) for i in sys.version_info[:3]]\n dllname = 'python%d%d.dll' % (maj, min)\n print(\"Looking for %s\" % dllname)\n\n # We can't do much here:\n # - find it in python main dir\n # - in system32,\n # - ortherwise (Sxs), I don't know how to get it.\n lib_dirs = []\n lib_dirs.append(sys.prefix)\n lib_dirs.append(os.path.join(sys.prefix, 'lib'))\n try:\n lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32'))\n except KeyError:\n pass\n\n for d in lib_dirs:\n dll = os.path.join(d, dllname)\n if os.path.exists(dll):\n return dll\n\n raise ValueError(\"%s not found in %s\" % (dllname, lib_dirs))\n\ndef dump_table(dll):\n st = subprocess.Popen([\"objdump.exe\", \"-p\", dll], stdout=subprocess.PIPE)\n return st.stdout.readlines()\n\ndef generate_def(dll, dfile):\n \"\"\"Given a dll file location, get all its exported symbols and dump them\n into the given def file.\n\n The .def file will be overwritten\"\"\"\n dump = dump_table(dll)\n for i in range(len(dump)):\n if _START.match(dump[i].decode()):\n break\n else:\n raise ValueError(\"Symbol table not found\")\n\n syms = []\n for j in range(i+1, len(dump)):\n m = _TABLE.match(dump[j].decode())\n if m:\n syms.append((int(m.group(1).strip()), m.group(2)))\n else:\n break\n\n if len(syms) == 0:\n log.warn('No symbols found in %s' % dll)\n\n d = open(dfile, 'w')\n d.write('LIBRARY %s\\n' % os.path.basename(dll))\n d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\\n')\n d.write(';DATA PRELOAD SINGLE\\n')\n d.write('\\nEXPORTS\\n')\n for s in syms:\n #d.write('@%d %s\\n' % (s[0], s[1]))\n d.write('%s\\n' % s[1])\n d.close()\n\ndef find_dll(dll_name):\n\n arch = {'AMD64' : 'amd64',\n 'Intel' : 'x86'}[get_build_architecture()]\n\n def _find_dll_in_winsxs(dll_name):\n # Walk through the WinSxS directory to find the dll.\n winsxs_path = os.path.join(os.environ['WINDIR'], 'winsxs')\n if not os.path.exists(winsxs_path):\n return None\n for root, dirs, files in os.walk(winsxs_path):\n if dll_name in files and arch in root:\n return os.path.join(root, dll_name)\n return None\n\n def _find_dll_in_path(dll_name):\n # First, look in the Python directory, then scan PATH for\n # the given dll name.\n for path in [sys.prefix] + os.environ['PATH'].split(';'):\n filepath = os.path.join(path, 
dll_name)\n if os.path.exists(filepath):\n return os.path.abspath(filepath)\n\n return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name)\n\ndef build_msvcr_library(debug=False):\n if os.name != 'nt':\n return False\n\n msvcr_name = msvc_runtime_library()\n\n # Skip using a custom library for versions < MSVC 8.0\n if int(msvcr_name.lstrip('msvcr')) < 80:\n log.debug('Skip building msvcr library:'\n ' custom functionality not present')\n return False\n\n if debug:\n msvcr_name += 'd'\n\n # Skip if custom library already exists\n out_name = \"lib%s.a\" % msvcr_name\n out_file = os.path.join(sys.prefix, 'libs', out_name)\n if os.path.isfile(out_file):\n log.debug('Skip building msvcr library: \"%s\" exists' %\n (out_file,))\n return True\n\n # Find the msvcr dll\n msvcr_dll_name = msvcr_name + '.dll'\n dll_file = find_dll(msvcr_dll_name)\n if not dll_file:\n log.warn('Cannot build msvcr library: \"%s\" not found' %\n msvcr_dll_name)\n return False\n\n def_name = \"lib%s.def\" % msvcr_name\n def_file = os.path.join(sys.prefix, 'libs', def_name)\n\n log.info('Building msvcr library: \"%s\" (from %s)' \\\n % (out_file, dll_file))\n\n # Generate a symbol definition file from the msvcr dll\n generate_def(dll_file, def_file)\n\n # Create a custom mingw library for the given symbol definitions\n cmd = ['dlltool', '-d', def_file, '-l', out_file]\n retcode = subprocess.call(cmd)\n\n # Clean up symbol definitions\n os.remove(def_file)\n\n return (not retcode)\n\ndef build_import_library():\n if os.name != 'nt':\n return\n\n arch = get_build_architecture()\n if arch == 'AMD64':\n return _build_import_library_amd64()\n elif arch == 'Intel':\n return _build_import_library_x86()\n else:\n raise ValueError(\"Unhandled arch %s\" % arch)\n\ndef _build_import_library_amd64():\n dll_file = find_python_dll()\n\n out_name = \"libpython%d%d.a\" % tuple(sys.version_info[:2])\n out_file = os.path.join(sys.prefix, 'libs', out_name)\n if os.path.isfile(out_file):\n log.debug('Skip building import library: \"%s\" exists' %\n (out_file))\n return\n\n def_name = \"python%d%d.def\" % tuple(sys.version_info[:2])\n def_file = os.path.join(sys.prefix, 'libs', def_name)\n\n log.info('Building import library (arch=AMD64): \"%s\" (from %s)' %\n (out_file, dll_file))\n\n generate_def(dll_file, def_file)\n\n cmd = ['dlltool', '-d', def_file, '-l', out_file]\n subprocess.Popen(cmd)\n\ndef _build_import_library_x86():\n \"\"\" Build the import libraries for Mingw32-gcc on Windows\n \"\"\"\n lib_name = \"python%d%d.lib\" % tuple(sys.version_info[:2])\n lib_file = os.path.join(sys.prefix, 'libs', lib_name)\n out_name = \"libpython%d%d.a\" % tuple(sys.version_info[:2])\n out_file = os.path.join(sys.prefix, 'libs', out_name)\n if not os.path.isfile(lib_file):\n log.warn('Cannot build import library: \"%s\" not found' % (lib_file))\n return\n if os.path.isfile(out_file):\n log.debug('Skip building import library: \"%s\" exists' % (out_file))\n return\n log.info('Building import library (ARCH=x86): \"%s\"' % (out_file))\n\n from numpy.distutils import lib2def\n\n def_name = \"python%d%d.def\" % tuple(sys.version_info[:2])\n def_file = os.path.join(sys.prefix, 'libs', def_name)\n nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)\n nm_output = lib2def.getnm(nm_cmd)\n dlist, flist = lib2def.parse_nm(nm_output)\n lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))\n\n dll_name = \"python%d%d.dll\" % tuple(sys.version_info[:2])\n args = (dll_name, def_file, out_file)\n cmd = 'dlltool --dllname %s --def %s 
--output-lib %s' % args\n status = os.system(cmd)\n # for now, fail silently\n if status:\n log.warn('Failed to build import library for gcc. Linking will fail.')\n return\n\n#=====================================\n# Dealing with Visual Studio MANIFESTS\n#=====================================\n\n# Functions to deal with visual studio manifests. Manifest are a mechanism to\n# enforce strong DLL versioning on windows, and has nothing to do with\n# distutils MANIFEST. manifests are XML files with version info, and used by\n# the OS loader; they are necessary when linking against a DLL not in the\n# system path; in particular, official python 2.6 binary is built against the\n# MS runtime 9 (the one from VS 2008), which is not available on most windows\n# systems; python 2.6 installer does install it in the Win SxS (Side by side)\n# directory, but this requires the manifest for this to work. This is a big\n# mess, thanks MS for a wonderful system.\n\n# XXX: ideally, we should use exactly the same version as used by python. I\n# submitted a patch to get this version, but it was only included for python\n# 2.6.1 and above. So for versions below, we use a \"best guess\".\n_MSVCRVER_TO_FULLVER = {}\nif sys.platform == 'win32':\n try:\n import msvcrt\n # I took one version in my SxS directory: no idea if it is the good\n # one, and we can't retrieve it from python\n _MSVCRVER_TO_FULLVER['80'] = \"8.0.50727.42\"\n _MSVCRVER_TO_FULLVER['90'] = \"9.0.21022.8\"\n # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0\n # on Windows XP:\n _MSVCRVER_TO_FULLVER['100'] = \"10.0.30319.460\"\n if hasattr(msvcrt, \"CRT_ASSEMBLY_VERSION\"):\n major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(\".\", 2)\n _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION\n del major, minor, rest\n except ImportError:\n # If we are here, means python was not built with MSVC. Not sure what\n # to do in that case: manifest building will fail, but it should not be\n # used in that case anyway\n log.warn('Cannot import msvcrt: using manifest will not be possible')\n\ndef msvc_manifest_xml(maj, min):\n \"\"\"Given a major and minor version of the MSVCR, returns the\n corresponding XML file.\"\"\"\n try:\n fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]\n except KeyError:\n raise ValueError(\"Version %d,%d of MSVCRT not supported yet\" %\n (maj, min))\n # Don't be fooled, it looks like an XML, but it is not. 
In particular, it\n # should not have any space before starting, and its size should be\n # divisible by 4, most likely for alignement constraints when the xml is\n # embedded in the binary...\n # This template was copied directly from the python 2.6 binary (using\n # strings.exe from mingw on python.exe).\n template = \"\"\"\\\n<assembly xmlns=\"urn:schemas-microsoft-com:asm.v1\" manifestVersion=\"1.0\">\n <trustInfo xmlns=\"urn:schemas-microsoft-com:asm.v3\">\n <security>\n <requestedPrivileges>\n <requestedExecutionLevel level=\"asInvoker\" uiAccess=\"false\"></requestedExecutionLevel>\n </requestedPrivileges>\n </security>\n </trustInfo>\n <dependency>\n <dependentAssembly>\n <assemblyIdentity type=\"win32\" name=\"Microsoft.VC%(maj)d%(min)d.CRT\" version=\"%(fullver)s\" processorArchitecture=\"*\" publicKeyToken=\"1fc8b3b9a1e18e3b\"></assemblyIdentity>\n </dependentAssembly>\n </dependency>\n</assembly>\"\"\"\n\n return template % {'fullver': fullver, 'maj': maj, 'min': min}\n\ndef manifest_rc(name, type='dll'):\n \"\"\"Return the rc file used to generate the res file which will be embedded\n as manifest for given manifest file name, of given type ('dll' or\n 'exe').\n\n Parameters\n ----------\n name : str\n name of the manifest file to embed\n type : str {'dll', 'exe'}\n type of the binary which will embed the manifest\n\n \"\"\"\n if type == 'dll':\n rctype = 2\n elif type == 'exe':\n rctype = 1\n else:\n raise ValueError(\"Type %s not supported\" % type)\n\n return \"\"\"\\\n#include \"winuser.h\"\n%d RT_MANIFEST %s\"\"\" % (rctype, name)\n\ndef check_embedded_msvcr_match_linked(msver):\n \"\"\"msver is the ms runtime version used for the MANIFEST.\"\"\"\n # check msvcr major version are the same for linking and\n # embedding\n msvcv = msvc_runtime_library()\n if msvcv:\n assert msvcv.startswith(\"msvcr\"), msvcv\n # Dealing with something like \"mscvr90\" or \"mscvr100\", the last\n # last digit is the minor release, want int(\"9\") or int(\"10\"):\n maj = int(msvcv[5:-1])\n if not maj == int(msver):\n raise ValueError(\n \"Discrepancy between linked msvcr \" \\\n \"(%d) and the one about to be embedded \" \\\n \"(%d)\" % (int(msver), maj))\n\ndef configtest_name(config):\n base = os.path.basename(config._gen_temp_sourcefile(\"yo\", [], \"c\"))\n return os.path.splitext(base)[0]\n\ndef manifest_name(config):\n # Get configest name (including suffix)\n root = configtest_name(config)\n exext = config.compiler.exe_extension\n return root + exext + \".manifest\"\n\ndef rc_name(config):\n # Get configtest name (including suffix)\n root = configtest_name(config)\n return root + \".rc\"\n\ndef generate_manifest(config):\n msver = get_build_msvc_version()\n if msver is not None:\n if msver >= 8:\n check_embedded_msvcr_match_linked(msver)\n ma = int(msver)\n mi = int((msver - ma) * 10)\n # Write the manifest file\n manxml = msvc_manifest_xml(ma, mi)\n man = open(manifest_name(config), \"w\")\n config.temp_files.append(manifest_name(config))\n man.write(manxml)\n man.close()\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.zeros", "numpy.issubdtype", "numpy.asarray", "numpy.median", "numpy.arange", "numpy.apply_along_axis", "numpy.array", "numpy.concatenate", "numpy.round" ], [ "numpy.distutils.log.info", "numpy.distutils.lib2def.getnm", "numpy.distutils.log.debug", "numpy.distutils.log.warn", "numpy.distutils.misc_util.get_build_architecture", "numpy.distutils.lib2def.parse_nm", "numpy.distutils.misc_util.msvc_runtime_library" ] ]
arcada-uas/doccano
[ "c29aece3dd4504eeaaa3466af0663bfe18b90dc1" ]
[ "backend/data_export/tests/test_dataset.py" ]
[ "import unittest\nfrom unittest.mock import MagicMock\n\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\n\nfrom data_export.pipeline.dataset import Dataset\n\n\nclass TestDataset(unittest.TestCase):\n def setUp(self):\n example = MagicMock()\n example.to_dict.return_value = {\"data\": \"example\"}\n self.examples = MagicMock()\n self.examples.__iter__.return_value = [example]\n label = MagicMock()\n label.find_by.return_value = {\"labels\": [\"label\"]}\n self.labels = MagicMock()\n self.labels.__iter__.return_value = [label]\n\n def test_to_dataframe(self):\n dataset = Dataset(self.examples, self.labels)\n df = dataset.to_dataframe()\n expected = pd.DataFrame([{\"data\": \"example\", \"labels\": [\"label\"]}])\n assert_frame_equal(df, expected)\n" ]
[ [ "pandas.DataFrame", "pandas.testing.assert_frame_equal" ] ]
kiritigowda/Anakin
[ "4ba2329153163590e11875dc6b4150031066915d" ]
[ "tools/external_converter_v2/parser/kill_fluid/fluid_helper.py" ]
[ "from ..proto import *\nfrom ..graph_io import *\nimport paddle.fluid as fluid\nimport numpy as np\nfrom paddle.fluid.core import VarDesc, AttrType\n\n\ndef union(list_a, list_b):\n\treturn list(set(list_a).union(set(list_b)))\n\ndef difference(list_a, list_b):\n\treturn list(set(list_a).difference(set(list_b)))\n\n\nclass Edge_for_fluid:\n\n\tdef __init__(self, param, target, var):\n\t\tself.param = param\n\t\tself.target = target\n\t\tself.var = var\n\n\nclass Fluid_edger:\n\n\tdef __init__(self, param = None, target = None, var = None):\n\t\tself.edges = []\n\t\tif param is not None and target is not None:\n\t\t\tedge = Edge_for_fluid(param, target, var)\n\t\t\tself.edges.append(edge)\n\n\tdef __call__(self):\n\t\treturn self.all_targets()\n\n\tdef add(self, param, target, var = None):\n\t\tedge = Edge_for_fluid(param, target, var)\n\t\tself.edges.append(edge)\n\n\tdef rm_edges_by_param(self, param):\n\t\tfor edge in self.edges:\n\t\t\tif edge.param == param:\n\t\t\t\tedge_idx = self.edges.index(edge)\n\t\t\t\tdel self.edges[edge_idx]\n\n\tdef rm(self, target):\n\t\tres = -1\n\t\tfor edge in self.edges:\n\t\t\tif target == edge.target:\n\t\t\t\tedge_idx = self.edges.index(edge)\n\t\t\t\tdel self.edges[edge_idx]\n\t\t\t\tres = res + 1\n\t\tif res != 0:\n\t\t\tpass\n\n\tdef mv(self, old_target, new_target):\n\t\tres = -1\n\t\tfor edge in self.edges:\n\t\t\tif old_target == edge.target:\n\t\t\t\tedge.target = new_target\n\t\t\t\tres = res + 1\n\t\tif res != 0:\n\t\t\tpass\n\n\tdef all_params(self):\n\t\tparams = []\n\t\tfor edge in self.edges:\n\t\t\tif edge.param not in params:\n\t\t\t\tparams.append(edge.param)\n\t\treturn params\n\n\tdef all_targets(self):\n\t\ttargets = []\n\t\tfor edge in self.edges:\n\t\t\ttargets.append(edge.target)\n\t\treturn targets\n\n\tdef targets(self, param):\n\t\ttargets = []\n\t\tfor edge in self.edges:\n\t\t\tif edge.param == param:\n\t\t\t\ttargets.append(edge.target)\n\t\treturn targets\n\n\tdef target(self, param, idx = 0):\n\t\treturn self.targets(param)[idx]\n\n\tdef clear(self):\n\t\ttargets_list = self.all_targets()\n\t\tfor target in targets_list:\n\t\t\tself.rm(target)\n\n\tdef targets_with_params(self):\n\t\tlist_of_targets_and_params = []\n\t\tfor edge in self.edges:\n\t\t\ttarget_and_param = [edge.target, edge.param]\n\t\t\tlist_of_targets_and_params.append(target_and_param)\n\t\treturn list_of_targets_and_params\n\n\tdef vars_by_target(self, target):\n\t\tvars = []\n\t\tfor edge in self.edges:\n\t\t\tif edge.target == target and edge.var is not None:\n\t\t\t\tvars.append(edge.var)\n\t\treturn vars\n\n\tdef __getitem__(self, idx):\n\t\tif idx < len(self.edges):\n\t\t\treturn self.edges[idx]\n\t\treturn None\n\n\nclass Fluid_helper:\n\n\tdef __init__(self, scope, block):\n\t\tself.scope = scope\n\t\tself.block = block\n\n\tdef args_by_input_param(self, op, param_name):\n\t\tif param_name in op.input_names:\n\t\t\treturn op.input(param_name)\n\t\telse:\n\t\t\traise NameError('ERROR: param_name %s is not exists.' % ( param_name ) )\n\n\tdef args_by_output_param(self, op, param_name):\n\t\tif param_name in op.output_names:\n\t\t\treturn op.output(param_name)\n\t\telse:\n\t\t\traise NameError('ERROR: param_name %s is not exists.' 
% ( param_name ) )\n\n\tdef var_by_input_param(self, op, param_name, var_idx = 0):\n\t\tvar_name = self.args_by_input_param(op, param_name)[var_idx]\n\t\tvar = self.block.var(var_name)\n\t\treturn var\n\n\tdef var_by_output_param(self, op, param_name, var_idx = 0):\n\t\tvar_name = self.args_by_output_param(op, param_name)[var_idx]\n\t\tvar = self.block.var(var_name)\n\t\treturn var\n\n\tdef var_name_by_param(self, op, param_name, var_idx = 0):\n\t\tif param_name not in op.input_names + op.output_names:\n\t\t\traise NameError('ERROR: param_name %s is not exists.' % ( param_name ) )\n\t\telif param_name in op.input_names:\n\t\t\tif len(op.input(param_name)) > 0:\n\t\t\t\tvar_name_unicode = op.input(param_name)[var_idx]\n\t\t\telse:\n\t\t\t\traise NameError('ERROR: param %s has not var.' % ( param_name ) )\n\t\telif param_name in op.output_names:\n\t\t\tif len(op.output(param_name)) > 0:\n\t\t\t\tvar_name_unicode = op.output(param_name)[var_idx]\n\t\t\telse:\n\t\t\t\traise NameError('ERROR: param %s has not var.' % ( param_name ) )\n\t\tvar = self.block.var(var_name_unicode)\n\t\tvar_name = var.name\n\t\treturn var_name\n\n\tdef var_by_param(self, op, param_name, var_idx = 0):\n\t\tvar_name = self.var_name_by_param(op, param_name, var_idx)\n\t\tvar = self.block.var(var_name)\n\t\treturn var\n\n\tdef shape_by_var_name(self, var_name, layout = 'NCHW'):\n\t\tvar = self.block.var(var_name)\n\t\tlong_tuple = var.shape\n\t\tlong_list = list(long_tuple)\n\t\tif layout == 'NCHW':\n\t\t\tint_list_4d = map(int, [1]*(4-len(long_list)) + long_list)\n\t\t\treturn int_list_4d\n\t\telif layout == 'UNMODIFIED':\n\t\t\treturn long_list\n\t\telse:\n\t\t\traise NameError('ERROR: layout %s is not implemented yet.' % ( layout ) )\n\n\tdef np_data_by_var_name(self, var_name):\n\t\tnumpy_array = fluid.executor.fetch_var(var_name, self.scope, True)\n\t\treturn numpy_array\n\n\tdef dtype_by_var_name(self, var_name):\n\t\tvar = self.block.var(var_name)\n\t\tfluid_var_type = var.dtype\n\t\tdtype = ANAKIN_TENSOR_DTYPE[fluid_var_type]\n\t\treturn dtype\n\n\tdef is_persistable_param(self, op, param_name, var_idx = 0):\n\t\tvar = self.var_by_param(op, param_name, var_idx)\n\t\tis_persistable_var = var.persistable\n\t\treturn is_persistable_var\n\n\tdef var_shape_by_param(self, transpose, op, param_name, var_idx = 0, layout = 'NCHW'):\n\t\tif transpose is True:\n\t\t\traise NameError('ERROR: var_shape transpose is not implemented yet.')\n\t\telse:\n\t\t\tvar_name = self.var_name_by_param(op, param_name, var_idx)\n\t\t\tshape = self.shape_by_var_name(var_name, layout)\n\t\t\treturn shape\n\n\tdef data_with_shape_by_param(self,\n\t\t\t\t\t\t\t\t op,\n\t\t\t\t\t\t\t\t param_name,\n\t\t\t\t\t\t\t\t transpose = False,\n\t\t\t\t\t\t\t\t axes = None,\n\t\t\t\t\t\t\t\t var_idx = 0,\n\t\t\t\t\t\t\t\t is_flat_list = True,\n\t\t\t\t\t\t\t\t layout = 'NCHW'):\n\n\t\tnp.set_printoptions(threshold=np.inf, suppress=True)\n\n\t\tvar_name = self.var_name_by_param(op, param_name, var_idx)\n\t\tnp_array = self.np_data_by_var_name(var_name)\n\t\tif transpose is True:\n\t\t\tnp_array = np.transpose(np_array, axes)\n\t\tnp_shape = np.shape(np_array)\n\t\tif layout == 'NCHW':\n\t\t\tnp_shape = map(int, [1]*(4-len(np_shape)) + list(np_shape))\n\t\tif is_flat_list is True:\n\t\t\tflat_list = list(np_array.flatten())\n\t\t\treturn [flat_list, np_shape]\n\t\telse:\n\t\t\treturn [np_array, np_shape]\n\n\tdef np_param(self,\n\t\t\t\t op,\n\t\t\t\t param_name,\n\t\t\t\t transpose = False,\n\t\t\t\t axes = None,\n\t\t\t\t var_idx = 0):\n\n\t\t[data, 
np_shape] = self.data_with_shape_by_param(op, param_name, transpose, \\\n\t\t\taxes, var_idx, False)\n\t\treturn data\n\n\tdef dtype_by_param(self, op, param_name, var_idx = 0):\n\t\tvar_name = self.var_name_by_param(op, param_name, var_idx)\n\t\tdtype = self.dtype_by_var_name(var_name)\n\t\treturn dtype\n\n\tdef is_list_type(self, op, attr_name):\n\t\tif op.has_attr(attr_name):\n\t\t\tfluid_attr_type = op.attr_type(attr_name)\n\t\t\tif fluid_attr_type in ANAKIN_ATTR_IS_LIST.keys():\n\t\t\t\treturn ANAKIN_ATTR_IS_LIST[fluid_attr_type]\n\t\t\telse:\n\t\t\t\treturn False # AttrType.LONG\n\t\telse:\n\t\t\traise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) )\n\n\tdef dtype_of_attr(self, op, attr_name):\n\t\tif op.has_attr(attr_name):\n\t\t\tfluid_attr_type = op.attr_type(attr_name)\n\t\t\tif fluid_attr_type in ANAKIN_ATTR_DTYPE.keys():\n\t\t\t\treturn ANAKIN_ATTR_DTYPE[fluid_attr_type]\n\t\t\telse:\n\t\t\t\treturn INT32 # AttrType.LONG\n\t\telse:\n\t\t\traise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) )\n\n\tdef attr_data_required(self, op, attr_name):\n\t\tdata = op.attr(attr_name)\n\t\tis_list = self.is_list_type(op, attr_name)\n\t\tdtype = self.dtype_of_attr(op, attr_name)\n\t\tif dtype not in [INT32, FLOAT, STR]:\n\t\t\treturn data\n\t\telif dtype == INT32:\n\t\t\treturn map(int, data) if is_list else int(data)\n\t\telif dtype == FLOAT:\n\t\t\treturn map(float, data) if is_list else float(data)\n\t\telif dtype == STR:\n\t\t\treturn bytes(data)\n\n\tdef attr_data(self, op, attr_name, default_value = 0, type = None):\n\t\tif op.has_attr(attr_name):\n\t\t\treturn self.attr_data_required(op, attr_name)\n\t\telse:\n\t\t\t#raise NameError('ERROR: attr_name %s is not exists.' % ( attr_name ) )\n\t\t\treturn default_value\n\n\tdef param_tensor_sh(self,\n\t\t\t\t\t\top,\n\t\t\t\t\t\tparam_name,\n\t\t\t\t\t\ttranspose = False,\n\t\t\t\t\t\taxes = None,\n\t\t\t\t\t\treshape = None,\n\t\t\t\t\t\tvar_idx = 0,\n\t\t\t\t\t\tlayout = 'NCHW'):\n\n\t\ttensor = TensorProtoIO()\n\t\t[flat_data, shape] = self.data_with_shape_by_param(op, param_name, transpose, \\\n\t\t\taxes, var_idx, True, layout)\n\t\tdtype = self.dtype_by_param(op, param_name, var_idx)\n\t\ttensor.set_data_type(dtype)\n\t\tif dtype in ANAKIN_TENSOR_DTYPESTR.keys():\n\t\t\ttensor.set_data(flat_data, ANAKIN_TENSOR_DTYPESTR[dtype])\n\t\t\t#pass #debug\n\t\telse:\n\t\t\traise NameError('ERROR: Unknown data type (%s)' % ( dtype ) )\n\t\tif reshape is not None:\n\t\t\ttensor.set_shape(reshape)\n\t\telse:\n\t\t\ttensor.set_shape(shape)\n\t\treturn [tensor, shape]\n\n\tdef param_tensor(self,\n\t\t\t\t\t op,\n\t\t\t\t\t param_name,\n\t\t\t\t\t transpose = False,\n\t\t\t\t\t axes = None,\n\t\t\t\t\t reshape = None,\n\t\t\t\t\t var_idx = 0,\n\t\t\t\t\t layout = 'NCHW'):\n\n\t\t[tensor, shape] = self.param_tensor_sh(op, param_name, transpose, axes, \\\n\t\t\treshape, var_idx, layout)\n\t\treturn tensor\n\n\tdef create_tensor(self, data_list, data_shape, dtype):\n\t\ttensor = TensorProtoIO()\n\t\ttensor.set_data_type(dtype)\n\t\ttensor.set_data(data_list, ANAKIN_TENSOR_DTYPESTR[dtype])\n\t\ttensor.set_shape(data_shape)\n\t\treturn tensor\n\n\tdef gru_tensor_convert(self, origin_h2h, origin_i2h, origin_b, offset=[2, 1, 0]):\n\t\thidden_size = int(origin_b.size // 3)\n\t\tword_size = int(origin_i2h.size // hidden_size // 
3)\n\t\ttar_h2h=np.array(origin_h2h.flatten().tolist()[2*hidden_size*hidden_size:]\\\n\t\t\t+np.array(origin_h2h.flatten().tolist()[:2*hidden_size*hidden_size])\\\n\t\t\t.reshape(hidden_size,2,hidden_size)[:,[1,0],:].flatten().tolist())\\\n\t\t.reshape(1,1,hidden_size,3*hidden_size)\n\t\ttar_i2h=origin_i2h.reshape(word_size,3,hidden_size)[:,offset,:]\\\n\t\t.reshape(1,1,word_size,3*hidden_size)\n\t\ttar_b=origin_b.reshape(3, hidden_size)[offset, :].reshape(1,1,1,3 * hidden_size)\n\t\ttar_i2h_h2h=np.concatenate([tar_i2h.flatten(),tar_h2h.flatten()])\\\n\t\t.reshape(1,1,1,3*hidden_size*hidden_size+3*word_size*hidden_size)\n\t\treturn tar_i2h_h2h, tar_b\n\n\tdef lstm_fc_tensor_merge_convert(self, origin_hidden_size, origin_lstm_w, origin_lstm_b, origin_fc_w, origin_fc_b):\n\n\t\tlayer_size = int (origin_hidden_size // 4)\n\t\tinput_size = int (origin_fc_w.size // origin_hidden_size)\n\t\tlstm_bias_num = int (origin_lstm_b.size // layer_size)\n\t\ttar_w = np.vstack((np.hstack((origin_fc_w[:, 1 * layer_size : 2 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_fc_w[:, 2 * layer_size : 3 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_fc_w[:, : 1 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_fc_w[:, 3 * layer_size :])),\n\t\t\t\t\t\t np.hstack((origin_lstm_w[:, 1 * layer_size : 2 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_lstm_w[:, 2 * layer_size : 3 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_lstm_w[:, : 1 * layer_size],\n\t\t\t\t\t\t\t\t\t origin_lstm_w[:, 3 * layer_size : ]))))\n\n\t\tif origin_fc_b is not None:\n\t\t\tsplit_fc_bc = origin_fc_b.flatten()[: 1 * layer_size]\n\t\t\tsplit_fc_bi = origin_fc_b.flatten()[1 * layer_size : 2 * layer_size]\n\t\t\tsplit_fc_bf = origin_fc_b.flatten()[2 * layer_size : 3 * layer_size]\n\t\t\tsplit_fc_bo = origin_fc_b.flatten()[3 * layer_size : 4 * layer_size]\n\t\telse:\n\t\t\tsplit_fc_bc = np.zeros(layer_size)\n\t\t\tsplit_fc_bi = np.zeros(layer_size)\n\t\t\tsplit_fc_bf = np.zeros(layer_size)\n\t\t\tsplit_fc_bo = np.zeros(layer_size)\n\n\t\tsplit_lstm_bc = origin_lstm_b.flatten()[: 1 * layer_size]\n\t\tsplit_lstm_bi = origin_lstm_b.flatten()[1 * layer_size: 2 * layer_size]\n\t\tsplit_lstm_bf = origin_lstm_b.flatten()[2 * layer_size: 3 * layer_size]\n\t\tsplit_lstm_bo = origin_lstm_b.flatten()[3 * layer_size: 4 * layer_size]\n\t\tsplit_lstm_bc = np.add(split_lstm_bc, split_fc_bc)\n\t\tsplit_lstm_bi = np.add(split_lstm_bi, split_fc_bi)\n\t\tsplit_lstm_bf = np.add(split_lstm_bf, split_fc_bf)\n\t\tsplit_lstm_bo = np.add(split_lstm_bo, split_fc_bo)\n\n\t\tif lstm_bias_num == 4:\n\t\t\ttar_b = np.array(split_lstm_bi.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bf.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bc.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bo.flatten().tolist())\n\t\telse:\n\t\t\tsplit_lstm_wic = origin_lstm_b.flatten()[4 * layer_size : 5 * layer_size]\n\t\t\tsplit_lstm_wfc = origin_lstm_b.flatten()[5 * layer_size : 6 * layer_size]\n\t\t\tsplit_lstm_woc = origin_lstm_b.flatten()[6 * layer_size :]\n\t\t\ttar_b = np.array(split_lstm_bi.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bf.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bc.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_bo.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_wic.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_wfc.flatten().tolist()\n\t\t\t\t\t\t\t + split_lstm_woc.flatten().tolist())\n\t\treturn tar_w.reshape(input_size+ layer_size, 4 * layer_size, 1, 1),\\\n\t\t\t tar_b.reshape(1, origin_lstm_b.size, 1, 1)\n\n\nclass Fluid_comparator:\n\n\tdef __init__(self, helper):\n\t\tself.helper 
= helper\n\t\tself.only_list = ['feed', 'fetch']\n\n\tdef compare_by_param(self, op_a, op_b, param):\n\t\tis_weight_a = self.helper.is_persistable_param(op_a, param)\n\t\tis_weight_b = self.helper.is_persistable_param(op_b, param)\n\t\tif is_weight_a and is_weight_b:\n\t\t\tnp_a = self.helper.np_param(op_a, param)\n\t\t\tnp_b = self.helper.np_param(op_b, param)\n\t\t\tif (np_a == np_b).all() == True:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telif is_weight_a is is_weight_b:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef have_same_weights(self, op_a, op_b):\n\t\tis_same = True\n\t\tif op_a.input_names == op_b.input_names:\n\t\t\tparams = op_a.input_names\n\t\t\tfor param in params:\n\t\t\t\tif self.compare_by_param(op_a, op_b, param) is False:\n\t\t\t\t\tis_same = False\n\t\t\treturn is_same\n\t\telse:\n\t\t\treturn False\n\n\tdef compare_by_attr(self, op_a, op_b, attr_name):\n\t\tdata_a = self.helper.attr_data(op_a, attr_name)\n\t\tdata_b = self.helper.attr_data(op_b, attr_name)\n\t\treturn data_a == data_b\n\n\tdef have_same_attrs(self, op_a, op_b):\n\t\tis_same = True\n\t\tif op_a.attr_names == op_b.attr_names:\n\t\t\tattrs = op_a.attr_names\n\t\t\tfor attr in attrs:\n\t\t\t\tif self.compare_by_attr(op_a, op_b, attr) is False:\n\t\t\t\t\tis_same = False\n\t\t\treturn is_same\n\t\telse:\n\t\t\treturn False\n\n\tdef brothers(self, op_list):\n\t\tis_same = True\n\t\tif len(op_list) > 1:\n\t\t\tidx = 0\n\t\t\tfor op_b in op_list[1:]:\n\t\t\t\tif op_b.type not in self.only_list:\n\t\t\t\t\tidx = op_list.index(op_b)\n\t\t\t\t\top_a = op_list[idx - 1]\n\t\t\t\t\tif op_a.type not in self.only_list:\n\t\t\t\t\t\tsame_weights = self.have_same_weights(op_a, op_b)\n\t\t\t\t\t\tsame_attrs = self.have_same_attrs(op_a, op_b)\n\t\t\t\t\t\tif (same_weights and same_attrs) is False:\n\t\t\t\t\t\t\tis_same = False\n\t\t\t\t\telse:\n\t\t\t\t\t\traise NameError('ERROR: %s is in only_list.' % ( op_a.type ))\n\t\t\t\telse:\n\t\t\t\t\traise NameError('ERROR: %s is in only_list.' % ( op_b.type ))\n\t\t\treturn is_same\n\t\telse:\n\t\t\traise NameError('ERROR: Members of op_list must be greater than 2.')\n\n\nANAKIN_TENSOR_DTYPE = {\n\tVarDesc.VarType.BOOL: BOOLEN,\n\tVarDesc.VarType.INT32: INT32,\n\tVarDesc.VarType.FP16: FLOAT16,\n\tVarDesc.VarType.FP32: FLOAT,\n\tVarDesc.VarType.FP64: DOUBLE,\n}\n\nANAKIN_TENSOR_DTYPESTR = {\n\tSTR: \"string\",\n\tINT32: \"int\",\n\tFLOAT: \"float\",\n\tBOOLEN: \"bool\",\n}\n\nANAKIN_ATTR_DTYPE = {\n\tAttrType.INT: INT32,\n\tAttrType.INTS: INT32,\n\tAttrType.FLOAT: FLOAT,\n\tAttrType.FLOATS: FLOAT,\n\tAttrType.STRING: STR,\n\tAttrType.STRINGS: STR,\n\tAttrType.BOOL: BOOLEN,\n\tAttrType.BOOLS: BOOLEN,\n}\n\nANAKIN_ATTR_IS_LIST = {\n\tAttrType.INT: False,\n\tAttrType.INTS: True,\n\tAttrType.FLOAT: False,\n\tAttrType.FLOATS: True,\n\tAttrType.STRING: False,\n\tAttrType.STRINGS: True,\n\tAttrType.BOOL: False,\n\tAttrType.BOOLS: True,\n}\n\nAPPEND_BIAS_OP_TYPE = [\n\t'FC',\n\t'mul',\n\t'sequence_conv',\n\t'conv2d',\n\t'conv2d_transpose',\n\t'depthwise_conv2d',\n\t'elementwise_mul',\n]\n\nAPPEND_ACT_OP_TYPE = [\n\t'FC',\n\t'mul',\n\t'sequence_conv',\n\t'conv2d',\n\t'conv2d_transpose',\n\t'batch_norm',\n\t'layer_norm',\n\t'row_conv',\n\t'reshape',\n]\n" ]
[ [ "numpy.transpose", "numpy.zeros", "numpy.set_printoptions", "numpy.add", "numpy.hstack", "numpy.shape" ] ]
Fengdalu/LEARN-AN-EFFECTIVE-LIP-READING-MODEL-WITHOUT-PAINS
[ "8d5eef415c19b4c5e161259b1222fbfec6a5edb0" ]
[ "model/video_cnn.py" ]
[ "# coding: utf-8\nimport math\nimport numpy as np\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=1)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, se=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n self.se = se\n \n if(self.se):\n self.gap = nn.AdaptiveAvgPool2d(1)\n self.conv3 = conv1x1(planes, planes//16)\n self.conv4 = conv1x1(planes//16, planes)\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n \n if self.downsample is not None:\n residual = self.downsample(x)\n \n if(self.se):\n w = self.gap(out)\n w = self.conv3(w)\n w = self.relu(w)\n w = self.conv4(w).sigmoid()\n \n out = out * w\n \n out = out + residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, se=False):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.se = se\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n \n \n self.avgpool = nn.AdaptiveAvgPool2d(1)\n \n self.bn = nn.BatchNorm1d(512)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, se=self.se))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, se=self.se))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.bn(x)\n return x \n\n\nclass VideoCNN(nn.Module):\n def __init__(self, se=False):\n super(VideoCNN, self).__init__()\n \n # frontend3D\n self.frontend3D = nn.Sequential(\n nn.Conv3d(1, 64, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False),\n nn.BatchNorm3d(64),\n nn.ReLU(True),\n nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))\n )\n # resnet\n self.resnet18 = ResNet(BasicBlock, [2, 2, 2, 2], se=se)\n self.dropout = nn.Dropout(p=0.5)\n\n # backend_gru\n # initialize\n self._initialize_weights()\n \n def visual_frontend_forward(self, x):\n x = x.transpose(1, 2)\n x = self.frontend3D(x)\n x = x.transpose(1, 2)\n x = x.contiguous()\n x = x.view(-1, 64, x.size(3), x.size(4))\n x = self.resnet18(x)\n return x \n \n def forward(self, x):\n b, t = x.size()[:2]\n\n x = self.visual_frontend_forward(x)\n \n #x = self.dropout(x)\n feat = x.view(b, -1, 512)\n\n x = x.view(b, -1, 512) \n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n\n elif isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n\n elif isinstance(m, nn.Conv1d):\n n = m.kernel_size[0] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool3d", "torch.nn.BatchNorm3d", "torch.nn.BatchNorm1d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.Conv3d", "torch.nn.ReLU", "torch.nn.Dropout" ] ]
DavidIbarr/nmma
[ "109fdd57add52cfea3553df8346981d6a117a7e7" ]
[ "nmma/em/create_injection_slurm.py" ]
[ "import os\nimport argparse\nimport json\nimport pandas as pd\n\nimport bilby\nfrom bilby_pipe.create_injections import InjectionCreator\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Slurm files from nmma injection file\")\n parser.add_argument(\n \"--prior-file\",\n type=str,\n required=True,\n help=\"The prior file from which to generate injections\",\n )\n parser.add_argument(\n \"--injection-file\",\n type=str,\n required=True,\n help=\"The bilby injection json file to be used\",\n )\n parser.add_argument(\n \"--analysis-file\",\n type=str,\n required=True,\n help=\"The analysis bash script to be replicated\",\n )\n parser.add_argument(\"-o\", \"--outdir\", type=str, default=\"outdir\")\n args = parser.parse_args()\n\n # load the injection json file\n if args.injection_file:\n if args.injection_file.endswith(\".json\"):\n with open(args.injection_file, \"rb\") as f:\n injection_data = json.load(f)\n datadict = injection_data[\"injections\"][\"content\"]\n dataframe_from_inj = pd.DataFrame.from_dict(datadict)\n else:\n print(\"Only json supported.\")\n exit(1)\n\n if len(dataframe_from_inj) > 0:\n args.n_injection = len(dataframe_from_inj)\n\n # create the injection dataframe from the prior_file\n injection_creator = InjectionCreator(\n prior_file=args.prior_file,\n prior_dict=None,\n n_injection=args.n_injection,\n default_prior=\"PriorDict\",\n gps_file=None,\n trigger_time=0,\n generation_seed=0,\n )\n dataframe_from_prior = injection_creator.get_injection_dataframe()\n\n # combine the dataframes\n dataframe = pd.DataFrame.merge(\n dataframe_from_inj,\n dataframe_from_prior,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n for index, row in dataframe.iterrows():\n with open(args.analysis_file, \"r\") as file:\n analysis = file.read()\n\n outdir = os.path.join(args.outdir, str(index))\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n\n priors = bilby.gw.prior.PriorDict(args.prior_file)\n priors.to_file(outdir, label=\"injection\")\n priorfile = os.path.join(outdir, \"injection.prior\")\n injfile = os.path.join(outdir, \"lc.csv\")\n\n analysis = analysis.replace(\"PRIOR\", priorfile)\n analysis = analysis.replace(\"OUTDIR\", outdir)\n analysis = analysis.replace(\"INJOUT\", injfile)\n analysis = analysis.replace(\"INJNUM\", str(index))\n analysis_file = os.path.join(outdir, \"inference.sh\")\n\n fid = open(analysis_file, \"w\")\n fid.write(analysis)\n fid.close()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame.merge", "pandas.DataFrame.from_dict" ] ]
dkswxd/unetpp_pytorch_qiu
[ "7f139d0c71110052399f0a93b55a39ba85897561" ]
[ "tool/metric.py" ]
[ "import numpy as np\nfrom sklearn import metrics\nfrom PIL import Image\n\ndef get_metrics(pred, logits, gt):\n if isinstance(logits, list):\n logits = logits[-1]\n result = {'confusion_matrix': metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0]),\n 'auc': roc(gt, logits)}\n return result\n\ndef get_metrics_without_roc(pred, gt):\n result = {'confusion_matrix': metrics.confusion_matrix(gt.flatten(), pred.flatten(), labels=[1, 0])}\n return result\n\ndef show_metrics(metrics):\n con_mat = np.zeros((2,2))\n auc = 0.0\n for m in metrics:\n con_mat += m['confusion_matrix']\n auc += m['auc']\n auc /= len(metrics)\n result = {'confusion_matrix': con_mat.tolist(),\n 'accuracy': accuracy(con_mat),\n 'kappa': kappa(con_mat),\n 'precision': precision(con_mat),\n 'sensitivity': sensitivity(con_mat),\n 'specificity': specificity(con_mat),\n 'auc': auc,\n }\n return result\n\ndef show_metrics_without_roc(metrics):\n con_mat = np.zeros((2,2))\n for m in metrics:\n con_mat += m['confusion_matrix']\n result = {'confusion_matrix': con_mat,\n 'accuracy': accuracy(con_mat),\n 'kappa': kappa(con_mat),\n 'precision': precision(con_mat),\n 'sensitivity': sensitivity(con_mat),\n 'specificity': specificity(con_mat),\n }\n return result\n\ndef show_metrics_from_save_image(data):\n pred = data[:,:,0] // 255\n gt = data[:,:,1] // 255\n metrics = [get_metrics_without_roc(pred, gt)]\n return show_metrics_without_roc(metrics)\n\ndef kappa(matrix):\n matrix = np.array(matrix)\n n = np.sum(matrix)\n sum_po = 0\n sum_pe = 0\n for i in range(len(matrix[0])):\n sum_po += matrix[i][i]\n row = np.sum(matrix[i, :])\n col = np.sum(matrix[:, i])\n sum_pe += row * col\n po = sum_po / n\n pe = sum_pe / (n * n)\n # print(po, pe)\n return (po - pe) / (1 - pe)\n\n\ndef sensitivity(matrix):\n return matrix[0][0]/(matrix[0][0]+matrix[1][0])\n\n\ndef specificity(matrix):\n return matrix[1][1]/(matrix[1][1]+matrix[0][1])\n\n\ndef precision(matrix):\n return matrix[0][0]/(matrix[0][0]+matrix[0][1])\n\ndef roc(gt, logits):\n gtlist = gt.flatten()\n predlist = logits.detach().cpu().numpy()[0, 1, ...].flatten()\n\n fpr, tpr, thresholds = metrics.roc_curve(gtlist, predlist, pos_label=1)\n roc_auc = metrics.auc(fpr, tpr) # auc为Roc曲线下的面积\n return roc_auc\n\n\ndef accuracy(matrix):\n return (matrix[0][0]+matrix[1][1])/(matrix[0][0]+matrix[0][1]+matrix[1][0]+matrix[1][1])\n\ndef error_rate(predictions, labels):\n \"\"\"\n Return the error rate based on dense predictions and 1-hot labels.\n \"\"\"\n return 100.0 - (\n 100.0 *\n np.sum(np.argmin(predictions, 3) == np.argmin(labels, 3)) /\n (predictions.shape[0] * predictions.shape[1] * predictions.shape[2]))\n\ndef save_predict(filename, data, gt, pred):\n pred = pred * 255\n gt = gt[0, 1, :, :]\n gt = np.where(gt > 0.5, 255, 0)\n differ = np.stack([np.zeros_like(pred), gt, pred], -1)\n pred = np.stack([pred, pred, pred], -1)\n gt = np.stack([gt, gt, gt], -1)\n data = np.transpose(data, (0, 2, 3, 1))[0,...]\n if data.shape[2] == 60:\n data = data[:, :, 10:40:10]\n elif data.shape[2] == 1:\n data = np.concatenate([data, data, data], -1)\n elif data.shape[2] == 15:\n data = data[:, :, 0:15:5]\n data -= np.min(data, axis=(0,1))\n data /= (np.max(data, axis=(0,1))/255)\n data = data.astype(np.uint8)\n img = Image.fromarray(np.concatenate([data, pred, gt, differ], axis=1).astype(np.uint8))\n img.save(filename)\n\ndef save_logits(filename, pred):\n pred = pred * 255\n pred = np.stack([pred, pred, pred], -1)\n img = Image.fromarray(pred.astype(np.uint8))\n img.save(filename)\n" ]
[ [ "numpy.sum", "numpy.zeros_like", "numpy.transpose", "numpy.zeros", "sklearn.metrics.roc_curve", "numpy.stack", "sklearn.metrics.auc", "numpy.concatenate", "numpy.argmin", "numpy.max", "numpy.min", "numpy.array", "numpy.where" ] ]
cnheider/pyro
[ "60bcab73ada30c2b3f05d525690c9664ff6fc22e" ]
[ "pyro/infer/trace_elbo.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport numbers\nimport warnings\n\nimport torch\nfrom torch.autograd import Variable\n\nimport pyro\nimport pyro.poutine as poutine\nfrom pyro.distributions.util import is_identically_zero\nfrom pyro.infer.elbo import ELBO\nfrom pyro.infer.enum import iter_discrete_traces\nfrom pyro.infer.util import torch_backward, torch_data_sum, torch_sum\nfrom pyro.poutine.util import prune_subsample_sites\nfrom pyro.util import check_model_guide_match, is_nan\n\n\ndef check_enum_discrete_can_run(model_trace, guide_trace):\n \"\"\"\n Checks whether `enum_discrete` is supported for the given (model, guide) pair.\n\n :param Trace model: A model trace.\n :param Trace guide: A guide trace.\n :raises: NotImplementedError\n \"\"\"\n # Check that all batch_log_pdf shapes are the same,\n # since we currently do not correctly handle broadcasting.\n model_trace.compute_batch_log_pdf()\n guide_trace.compute_batch_log_pdf()\n shapes = {}\n for source, trace in [(\"model\", model_trace), (\"guide\", guide_trace)]:\n for name, site in trace.nodes.items():\n if site[\"type\"] == \"sample\":\n shapes[site[\"batch_log_pdf\"].size()] = (source, name)\n if len(shapes) > 1:\n raise NotImplementedError(\n \"enum_discrete does not support mixture of batched and un-batched variables. \"\n \"Try rewriting your model to avoid batching or running with enum_discrete=False. \"\n \"Found the following variables of different batch shapes:\\n{}\".format(\n \"\\n\".join([\"{} {}: shape = {}\".format(source, name, tuple(shape))\n for shape, (source, name) in sorted(shapes.items())])))\n\n\nclass Trace_ELBO(ELBO):\n \"\"\"\n A trace implementation of ELBO-based SVI\n \"\"\"\n\n def _get_traces(self, model, guide, *args, **kwargs):\n \"\"\"\n runs the guide and runs the model against the guide with\n the result packaged as a trace generator\n \"\"\"\n\n for i in range(self.num_particles):\n if self.enum_discrete:\n # This iterates over a bag of traces, for each particle.\n for scale, guide_trace in iter_discrete_traces(\"flat\", guide, *args, **kwargs):\n model_trace = poutine.trace(poutine.replay(model, guide_trace),\n graph_type=\"flat\").get_trace(*args, **kwargs)\n\n check_model_guide_match(model_trace, guide_trace)\n guide_trace = prune_subsample_sites(guide_trace)\n model_trace = prune_subsample_sites(model_trace)\n check_enum_discrete_can_run(model_trace, guide_trace)\n\n guide_trace.compute_score_parts()\n log_r = model_trace.batch_log_pdf() - guide_trace.batch_log_pdf()\n weight = scale / self.num_particles\n yield weight, model_trace, guide_trace, log_r\n continue\n\n guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)\n model_trace = poutine.trace(poutine.replay(model, guide_trace)).get_trace(*args, **kwargs)\n\n check_model_guide_match(model_trace, guide_trace)\n guide_trace = prune_subsample_sites(guide_trace)\n model_trace = prune_subsample_sites(model_trace)\n\n guide_trace.compute_score_parts()\n log_r = model_trace.log_pdf() - guide_trace.log_pdf()\n weight = 1.0 / self.num_particles\n yield weight, model_trace, guide_trace, log_r\n\n def _is_batched(self, weight):\n return self.enum_discrete and \\\n isinstance(weight, Variable) and \\\n weight.dim() > 0 and \\\n weight.size(0) > 1\n\n def loss(self, model, guide, *args, **kwargs):\n \"\"\"\n :returns: returns an estimate of the ELBO\n :rtype: float\n\n Evaluates the ELBO with an estimator that uses num_particles many samples/particles.\n \"\"\"\n elbo = 0.0\n for weight, model_trace, 
guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs):\n elbo_particle = weight * 0\n\n if self._is_batched(weight):\n log_pdf = \"batch_log_pdf\"\n else:\n log_pdf = \"log_pdf\"\n for name in model_trace.nodes.keys():\n if model_trace.nodes[name][\"type\"] == \"sample\":\n if model_trace.nodes[name][\"is_observed\"]:\n elbo_particle += model_trace.nodes[name][log_pdf]\n else:\n elbo_particle += model_trace.nodes[name][log_pdf]\n elbo_particle -= guide_trace.nodes[name][log_pdf]\n\n # drop terms of weight zero to avoid nans\n if isinstance(weight, numbers.Number):\n if weight == 0.0:\n elbo_particle = torch.zeros_like(elbo_particle)\n else:\n elbo_particle[weight == 0] = 0.0\n\n elbo += torch_data_sum(weight * elbo_particle)\n\n loss = -elbo\n if is_nan(loss):\n warnings.warn('Encountered NAN loss')\n return loss\n\n def loss_and_grads(self, model, guide, *args, **kwargs):\n \"\"\"\n :returns: returns an estimate of the ELBO\n :rtype: float\n\n Computes the ELBO as well as the surrogate ELBO that is used to form the gradient estimator.\n Performs backward on the latter. Num_particle many samples are used to form the estimators.\n \"\"\"\n elbo = 0.0\n # grab a trace from the generator\n for weight, model_trace, guide_trace, log_r in self._get_traces(model, guide, *args, **kwargs):\n elbo_particle = weight * 0\n surrogate_elbo_particle = weight * 0\n batched = self._is_batched(weight)\n # compute elbo and surrogate elbo\n if batched:\n log_pdf = \"batch_log_pdf\"\n else:\n log_pdf = \"log_pdf\"\n for name, model_site in model_trace.nodes.items():\n if model_site[\"type\"] == \"sample\":\n model_log_pdf = model_site[log_pdf]\n if model_site[\"is_observed\"]:\n elbo_particle += model_log_pdf\n surrogate_elbo_particle += model_log_pdf\n else:\n guide_site = guide_trace.nodes[name]\n guide_log_pdf, score_function_term, entropy_term = guide_site[\"score_parts\"]\n\n if not batched:\n guide_log_pdf = guide_log_pdf.sum()\n elbo_particle += model_log_pdf - guide_log_pdf\n surrogate_elbo_particle += model_log_pdf\n\n if not is_identically_zero(entropy_term):\n if not batched:\n entropy_term = entropy_term.sum()\n surrogate_elbo_particle -= entropy_term\n\n if not is_identically_zero(score_function_term):\n if not batched:\n score_function_term = score_function_term.sum()\n surrogate_elbo_particle += log_r.detach() * score_function_term\n\n # drop terms of weight zero to avoid nans\n if isinstance(weight, numbers.Number):\n if weight == 0.0:\n elbo_particle = torch.zeros_like(elbo_particle)\n surrogate_elbo_particle = torch.zeros_like(surrogate_elbo_particle)\n else:\n weight_eq_zero = (weight == 0)\n elbo_particle[weight_eq_zero] = 0.0\n surrogate_elbo_particle[weight_eq_zero] = 0.0\n\n elbo += torch_data_sum(weight * elbo_particle)\n surrogate_elbo_particle = torch_sum(weight * surrogate_elbo_particle)\n\n # collect parameters to train from model and guide\n trainable_params = set(site[\"value\"]\n for trace in (model_trace, guide_trace)\n for site in trace.nodes.values()\n if site[\"type\"] == \"param\")\n\n if trainable_params:\n surrogate_loss_particle = -surrogate_elbo_particle\n torch_backward(surrogate_loss_particle)\n pyro.get_param_store().mark_params_active(trainable_params)\n\n loss = -elbo\n if is_nan(loss):\n warnings.warn('Encountered NAN loss')\n return loss\n" ]
[ [ "torch.zeros_like" ] ]
Sriram-Ravula/ncsnv2
[ "f610b59441a34063fae1c02aa06837b7eec95c03" ]
[ "models/__init__.py" ]
[ "import torch\nimport numpy as np\n\ndef get_sigmas(config):\n if config.model.sigma_dist == 'geometric':\n sigmas = torch.tensor(\n np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end),\n config.model.num_classes))).float().to(config.device)\n elif config.model.sigma_dist == 'uniform':\n sigmas = torch.tensor(\n np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes)\n ).float().to(config.device)\n\n else:\n raise NotImplementedError('sigma distribution not supported')\n\n return sigmas\n\n@torch.no_grad()\ndef anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,\n final_only=False, verbose=False, denoise=True, add_noise=True):\n images = []\n\n with torch.no_grad():\n for c, sigma in enumerate(sigmas):\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c #dummy target 1...T depending on iteration\n labels = labels.long()\n step_size = step_lr * (sigma / sigmas[-1]) ** 2\n for s in range(n_steps_each):\n grad = scorenet(x_mod, labels)\n\n #choose whether to add random noise during each gradient ascent step\n if add_noise:\n noise = torch.randn_like(x_mod) \n else:\n noise = torch.zeros_like(x_mod)\n\n #calculate l2 norms of gradient (score) and the additive noise for logging\n grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()\n\n x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step\n\n #calc l2 norm of iterate variable for logging\n image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n\n #calc snr as scaled version of [||s(x, \\sigma_i)|| / ||z_t||] and mean of score for logging\n snr = np.sqrt(step_size / 2.) 
* grad_norm / noise_norm\n grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2\n\n if not final_only:\n images.append(x_mod.to('cpu'))\n if verbose:\n print(\"level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}\".format(\n c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item()))\n\n #final denoising step if desired - removes the very last additive z_L \n if denoise:\n last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)\n last_noise = last_noise.long()\n x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)\n images.append(x_mod.to('cpu'))\n\n if final_only:\n return [x_mod.to('cpu')]\n else:\n return images\n\n@torch.no_grad()\ndef langevin_Inverse(x_mod, y, A, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,\n final_only=False, verbose=False, denoise=True, add_noise=True, \n decimate_sigma=None, mode=None, true_x=None):\n images = []\n\n #if desired, decimate the number of noise scales to speed up inference\n if decimate_sigma is not None:\n sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() #grab every decimate_sigma'th value except the last one\n sigmas_temp.append(sigmas[-1]) #add the last sigma value back to the list\n # num_sigmas = sigmas.shape[0] // decimate_sigma\n # sigmas_temp = []\n # for i in range(num_sigmas):\n # sigmas_temp.append(sigmas[-1])\n sigmas = sigmas_temp #swap the new decimated sigma list for the main one\n\n mse = torch.nn.MSELoss()\n\n N, C, H, W = x_mod.shape\n\n steps = np.geomspace(start=5, stop=1, num=len(sigmas))\n\n c2 = 1\n\n with torch.no_grad():\n #outer loop over noise scales\n for c, sigma in enumerate(sigmas):\n #dummy target 1...T depending on iteration\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c \n labels = labels.long()\n\n #step_size = step_lr * (sigma / sigmas[-1]) ** 2\n step_size = steps[c]\n\n #Inner loop over T\n for s in range(n_steps_each):\n #s(x_t) ~= \\grad_x log p(x) -- THE PRIOR\n grad = scorenet(x_mod, labels)\n\n prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n #prior_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2\n\n #calculate the maximum likelihood gradient - i.e. 
MSE gradient\n #A should be [N, m, C * H * W], x should be [N, C, H, W], y should be [N, m, 1]\n if mode=='denoising':\n Axt = x_mod \n mle_grad = (Axt - y) * (1 / N) #for denoising, y has same dimension as x\n else:\n Axt = torch.matmul(A, x_mod.view(N, -1, 1))\n mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 #MSE gradient\n #mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * (1 / N) #L1 error gradient\n\n likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()\n #likelihood_mean_norm = torch.norm(mle_grad.mean(dim=0).view(-1)) ** 2\n\n if c == 0 and s == 0:\n c2 = prior_norm.item() / likelihood_norm.item()\n mle_grad = mle_grad * c2 #MSE gradient\n likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()\n\n\n #The final gradient\n grad = grad - mle_grad\n\n grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n #grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2\n\n #choose whether to add random noise during each gradient ascent step\n if add_noise:\n noise = torch.randn_like(x_mod) \n else:\n noise = torch.zeros_like(x_mod)\n\n x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step\n\n #calc l2 norm of iterate variable for logging\n image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()\n snr = np.sqrt(step_size / 2.) * prior_norm / noise_norm\n mse_iter = mse(Axt, y)\n if true_x is not None:\n mse_true = mse(true_x, x_mod)\n\n if not final_only:\n images.append(x_mod.to('cpu'))\n if verbose:\n print(\"\\nlevel: {}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \\\n image_norm: {:.4f}, train_mse: {:.4f}\".format( \\\n c, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \\\n mse_iter.item()))\n \n if true_x is not None:\n print(\"true_mse: {:.4f}\".format(mse_true.item()))\n\n #final denoising step if desired - removes the very last additive z_L \n if denoise:\n last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)\n last_noise = last_noise.long()\n x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)\n images.append(x_mod.to('cpu'))\n\n if final_only:\n return [x_mod.to('cpu')]\n else:\n return images\n\n@torch.no_grad()\ndef inverse_solver(x_mod, y, A, scorenet, sigmas, lr = [5, 1], c1=1, c2=1, auto_c2=True,\n final_only=False, verbose=False, likelihood_every=1,\n decimate_sigma=None, mode=None, true_x=None, sigma_type = 'subsample', likelihood_type=\"l2\"):\n images = []\n\n #if desired, decimate the number of noise scales to speed up inference\n if decimate_sigma is not None:\n if sigma_type == 'subsample': #grab equally-spaced sigma values\n sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() \n sigmas_temp.append(sigmas[-1]) \n\n elif sigma_type == 'last': #grab just the last sigma value multiple times\n num_sigmas = sigmas.shape[0] // decimate_sigma\n sigmas_temp = []\n for i in range(num_sigmas):\n sigmas_temp.append(sigmas[-1])\n\n else:\n sigmas_temp = sigmas\n\n sigmas = sigmas_temp \n\n mse = torch.nn.MSELoss()\n\n N, C, H, W = x_mod.shape\n\n steps = np.geomspace(start=lr[0], stop=lr[1], num=len(sigmas))\n\n likelihood_norm = 0\n\n with torch.no_grad():\n if sigma_type == 'last':\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * 1099 \n labels = labels.long()\n for c, sigma in 
enumerate(sigmas):\n if sigma_type == 'subsample':\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * decimate_sigma * c\n labels = labels.long()\n elif sigma_type != 'last':\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n labels = labels.long()\n\n step_size = steps[c]\n\n #s(x_t) ~= \\grad_x log p(x) -- THE PRIOR\n grad = scorenet(x_mod, labels) * c1\n\n prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n\n if c % likelihood_every == 0:\n #\\grad_x log p(y | x) -- LIKELIHOOD\n if mode=='denoising':\n Axt = x_mod\n if likelihood_type == \"l2\":\n mle_grad = (Axt - y) * c2 \n elif likelihood_type == \"l1\":\n mle_grad = torch.sign(Axt - y) * c2 \n else:\n Axt = torch.matmul(A, x_mod.view(N, -1, 1)) \n if likelihood_type == \"l2\":\n mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 \n elif likelihood_type == \"l1\":\n mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * c2 \n\n likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()\n\n if auto_c2 and c == 0:\n c2 = prior_norm.item() / likelihood_norm.item()\n mle_grad = mle_grad * c2 #MSE gradient\n likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()\n\n grad = grad - mle_grad\n\n grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n\n x_mod = x_mod + step_size * grad\n #x_mod = torch.clamp(x_mod, 0.0, 1.0)\n\n #calc l2 norm of iterate variable for logging\n image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n mse_iter = mse(Axt, y)\n if true_x is not None:\n mse_true = mse(true_x, x_mod)\n\n if not final_only:\n images.append(x_mod.cpu())\n if verbose:\n print(\"\\n iteration: {}, sigma: {:.4f}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \\\n image_norm: {:.4f}, train_mse: {:.4f}\".format( \\\n c, sigma, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \\\n mse_iter.item()))\n \n if true_x is not None:\n print(\"true_mse: {:.4f}\".format(mse_true.item()))\n\n if final_only:\n return [x_mod.to('cpu')]\n else:\n return images\n\n@torch.no_grad()\ndef anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,\n n_steps_each=100, step_lr=0.000008):\n \"\"\"\n Currently only good for 32x32 images. 
Assuming the right half is missing.\n \"\"\"\n\n images = []\n\n #refer_image is the untainted x (?)\n #right now this only works with 3-channel images\n refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)\n refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)\n\n \n x_mod = x_mod.view(-1, 3, image_size, image_size)\n cols = image_size // 2\n half_refer_image = refer_image[..., :cols]\n with torch.no_grad():\n for c, sigma in enumerate(sigmas):\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n labels = labels.long()\n step_size = step_lr * (sigma / sigmas[-1]) ** 2\n\n for s in range(n_steps_each):\n images.append(x_mod.to('cpu'))\n corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma\n x_mod[:, :, :, :cols] = corrupted_half_image\n noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)\n grad = scorenet(x_mod, labels)\n x_mod = x_mod + step_size * grad + noise\n print(\"class: {}, step_size: {}, mean {}, max {}\".format(c, step_size, grad.abs().mean(),\n grad.abs().max()))\n\n return images\n\n@torch.no_grad()\ndef anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,\n final_only=False, verbose=False):\n images = []\n\n n_rows = x_mod.shape[0]\n\n x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)\n x_mod = x_mod.reshape(-1, *x_mod.shape[2:])\n\n for c, sigma in enumerate(sigmas):\n labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c\n labels = labels.long()\n step_size = step_lr * (sigma / sigmas[-1]) ** 2\n for s in range(n_steps_each):\n grad = scorenet(x_mod, labels)\n\n noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],\n device=x_mod.device)\n noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],\n device=x_mod.device)\n angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)\n\n noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \\\n noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]\n\n noise = noise.reshape(-1, *noise.shape[2:])\n grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()\n noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()\n image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()\n\n x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)\n\n snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm\n\n if not final_only:\n images.append(x_mod.to('cpu'))\n if verbose:\n print(\n \"level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}\".format(\n c, step_size, image_norm.item(), grad_norm.item(), snr.item()))\n\n\n if final_only:\n return [x_mod.to('cpu')]\n else:\n return images" ]
[ [ "torch.ones", "torch.randn_like", "torch.nn.MSELoss", "torch.cos", "torch.randn", "torch.linspace", "torch.no_grad", "torch.zeros_like", "torch.sin", "torch.sign", "numpy.log", "numpy.sqrt", "numpy.linspace", "torch.transpose" ] ]
WBobby/pytorch
[ "655960460ccca936fa5c06df6bbafd25b5582115" ]
[ "torch/ao/quantization/fuse_modules.py" ]
[ "\nimport copy\n\nimport torch.nn as nn\n\nfrom torch.quantization.fuser_method_mappings import get_fuser_method\n# for backward compatiblity\nfrom torch.quantization.fuser_method_mappings import fuse_conv_bn # noqa: F401\nfrom torch.quantization.fuser_method_mappings import fuse_conv_bn_relu # noqa: F401\n\nfrom typing import List, Optional\n\n# Generalization of getattr\ndef _get_module(model, submodule_key):\n tokens = submodule_key.split('.')\n cur_mod = model\n for s in tokens:\n cur_mod = getattr(cur_mod, s)\n return cur_mod\n\n# Generalization of setattr\ndef _set_module(model, submodule_key, module):\n tokens = submodule_key.split('.')\n sub_tokens = tokens[:-1]\n cur_mod = model\n for s in sub_tokens:\n cur_mod = getattr(cur_mod, s)\n\n setattr(cur_mod, tokens[-1], module)\n\ndef fuse_known_modules(mod_list, additional_fuser_method_mapping=None):\n r\"\"\"Returns a list of modules that fuses the operations specified\n in the input module list.\n\n Fuses only the following sequence of modules:\n conv, bn\n conv, bn, relu\n conv, relu\n linear, bn\n linear, relu\n For these sequences, the first element in the output module list performs\n the fused operation. The rest of the elements are set to nn.Identity()\n \"\"\"\n types = tuple(type(m) for m in mod_list)\n fuser_method = get_fuser_method(types, additional_fuser_method_mapping)\n if fuser_method is None:\n raise NotImplementedError(\"Cannot fuse modules: {}\".format(types))\n new_mod : List[Optional[nn.Module]] = [None] * len(mod_list)\n fused = fuser_method(*mod_list)\n # NOTE: forward hooks not processed in the two following for loops will be lost after the fusion\n # Move pre forward hooks of the base module to resulting fused module\n for handle_id, pre_hook_fn in mod_list[0]._forward_pre_hooks.items():\n fused.register_forward_pre_hook(pre_hook_fn)\n del mod_list[0]._forward_pre_hooks[handle_id]\n # Move post forward hooks of the last module to resulting fused module\n for handle_id, hook_fn in mod_list[-1]._forward_hooks.items():\n fused.register_forward_hook(hook_fn)\n del mod_list[-1]._forward_hooks[handle_id]\n new_mod[0] = fused\n\n for i in range(1, len(mod_list)):\n identity = nn.Identity()\n identity.training = mod_list[0].training\n new_mod[i] = identity\n\n return new_mod\n\ndef _fuse_modules(model, modules_to_fuse, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):\n if fuse_custom_config_dict is None:\n fuse_custom_config_dict = {}\n additional_fuser_method_mapping = fuse_custom_config_dict.get(\"additional_fuser_method_mapping\", {})\n mod_list = []\n for item in modules_to_fuse:\n mod_list.append(_get_module(model, item))\n\n # Fuse list of modules\n new_mod_list = fuser_func(mod_list, additional_fuser_method_mapping)\n\n # Replace original module list with fused module list\n for i, item in enumerate(modules_to_fuse):\n _set_module(model, item, new_mod_list[i])\n\ndef fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules, fuse_custom_config_dict=None):\n r\"\"\"Fuses a list of modules into a single module\n\n Fuses only the following sequence of modules:\n conv, bn\n conv, bn, relu\n conv, relu\n linear, relu\n bn, relu\n All other sequences are left unchanged.\n For these sequences, replaces the first item in the list\n with the fused module, replacing the rest of the modules\n with identity.\n\n Args:\n model: Model containing the modules to be fused\n modules_to_fuse: list of list of module names to fuse. 
Can also be a list\n of strings if there is only a single list of modules to fuse.\n inplace: bool specifying if fusion happens in place on the model, by default\n a new model is returned\n fuser_func: Function that takes in a list of modules and outputs a list of fused modules\n of the same length. For example,\n fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()]\n Defaults to torch.quantization.fuse_known_modules\n `fuse_custom_config_dict`: custom configuration for fusion\n\n .. code-block:: python\n\n # Example of fuse_custom_config_dict\n fuse_custom_config_dict = {\n # Additional fuser_method mapping\n \"additional_fuser_method_mapping\": {\n (torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn\n },\n }\n\n Returns:\n model with fused modules. A new copy is created if inplace=True.\n\n Examples::\n\n >>> m = myModel()\n >>> # m is a module containing the sub-modules below\n >>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']]\n >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)\n >>> output = fused_m(input)\n\n >>> m = myModel()\n >>> # Alternately provide a single list of modules to fuse\n >>> modules_to_fuse = ['conv1', 'bn1', 'relu1']\n >>> fused_m = torch.ao.quantization.fuse_modules(m, modules_to_fuse)\n >>> output = fused_m(input)\n\n \"\"\"\n if not inplace:\n model = copy.deepcopy(model)\n\n if all(isinstance(module_element, str) for module_element in modules_to_fuse):\n # Handle case of modules_to_fuse being a list\n _fuse_modules(model, modules_to_fuse, fuser_func, fuse_custom_config_dict)\n else:\n # Handle case of modules_to_fuse being a list of lists\n for module_list in modules_to_fuse:\n _fuse_modules(model, module_list, fuser_func, fuse_custom_config_dict)\n return model\n" ]
[ [ "torch.nn.Identity", "torch.quantization.fuser_method_mappings.get_fuser_method" ] ]
SmolakK/HuMobi
[ "67b40f839a843123093582935e89f91e16bc4374" ]
[ "tools/processing.py" ]
[ "import pandas as pd\nimport numpy as np\n\n\ndef top_time(ind=None, gs=None):\n\t\"\"\"\n\tSelects the location (by coordinates) which was visited for the longest period during given time interval\n\t:param ind: user id\n\t:param gs: GeoDataFrame from groupby execution containing all the data in the given time interval\n\t:return: user id (if given) and the data for the longest visited location\n\t\"\"\"\n\taggregated = []\n\tfor tstamp, g in gs: # for each record in the GeoDataFrame\n\t\tif len(g) > 1: # if there is more than one record\n\t\t\tdiff_places = (g['geometry'].shift(-1) != g['geometry']).iloc[:-1] # checks when coordinates change\n\t\t\tif diff_places.any(): # if there is change in locations\n\t\t\t\tg_res = g.reset_index() # drop index\n\t\t\t\tdiffs = g_res.shift(-1)['datetime'] - g_res['datetime'] # find time differences (spent in location)\n\t\t\t\tjoined_dfs = g_res.join(diffs, rsuffix='a') # add them to locations\n\t\t\t\tjoined_dfs['geometry'] = g_res['geometry'].astype(str) # copy geometry as string\n\t\t\t\tpoint_max = joined_dfs.groupby('geometry')['datetimea'].sum().idxmax() # grouping locations find the longest time sum\n\t\t\t\tselected = g[g['geometry'].astype(str) == point_max] # select the location with the highest total time\n\t\t\telse:\n\t\t\t\tselected = g # if one location visited - copy GeoDataFrame\n\t\telse:\n\t\t\tselected = g\n\t\taggregated.append(selected)\n\tif ind is None:\n\t\treturn pd.concat(aggregated)\n\telse:\n\t\treturn ind, pd.concat(aggregated)\n\n\ndef mode_geoseries(ind, gs):\n\t\"\"\"\n\tCalculates mode for GeoSeries\n\t:param ind: identifier\n\t:param gs: GeoSeries\n\t:return: identifier and a mode for GeoSeries\n\t\"\"\"\n\taggregated = []\n\tfor g in gs:\n\t\tif g[1].empty:\n\t\t\taggregated.append(None)\n\t\telse:\n\t\t\tselected = g[1].mode()\n\t\t\tselected = selected.set_index(g[1].index)\n\t\t\taggregated.append(selected)\n\treturn ind, pd.concat(aggregated)\n\n\ndef rowwise_average(gs, row_count=None):\n\t\"\"\"\n\tCalculates an average for each row in each group - rowwise.\n\t:param gs: GeoSeries\n\t:param row_count: defines how much rows should be considered\n\t:return: averaged GeoSeries rowwise\n\t\"\"\"\n\tif row_count is None:\n\t\trow_count = gs.groupby(level=0).size().max()\n\treturn pd.Series([gs.groupby(level=0).nth(n).mean() for n in range(row_count)])\n\n\ndef groupwise_average(gs):\n\t\"\"\"\n\tCalculates an average from each group of GeoSeries\n\t:param gs: GeoSeries\n\t:return: averaged GeoSeries\n\t\"\"\"\n\treturn gs.groupby(level=0).mean()\n\n\ndef groupwise_normalise(gs):\n\t\"\"\"\n\tNormalises each group of GeoSeries\n\t:param gs: GeoSeries\n\t:return: normalised GeoSeries\n\t\"\"\"\n\treturn gs.groupby(level=0).apply(lambda x: x / x.sum())\n\n\ndef groupwise_expansion(gs):\n\t\"\"\"\n\tCalculates expanding mean for each group of GeoSeries\n\t:param gs: GeoSeries\n\t:return: averaged GeoSeries\n\t\"\"\"\n\treturn gs.groupby(level=0).expanding().mean()\n\n\ndef total_normalise(gs):\n\t\"\"\"\n\tPerforms complete normalisation of GeoSeries\n\t:param gs: GeoSeries\n\t:return: normalised GeoSeries\n\t\"\"\"\n\treturn gs / gs.sum()\n\n\ndef start_end(trajectories_frame):\n\t\"\"\"\n\tCompresses stops in TrajectoriesFrame by adding start and end of visits in locations\n\t:param trajectories_frame: TrajectoriesFrame object class\n\t:return: compressed TrajectoriesFrame\n\t\"\"\"\n\tto_concat = []\n\tif 'date' not in trajectories_frame.columns:\n\t\ttrajectories_frame['date'] = 
trajectories_frame.index.get_level_values(1)\n\tfor gs in trajectories_frame.groupby(level=0):\n\t\tfirsts = gs[1][gs[1]['geometry'].shift() != gs[1]['geometry']]\n\t\tlasts = gs[1][gs[1]['geometry'].shift(-1) != gs[1]['geometry']]\n\t\tfirsts.loc[:, 'start'] = firsts['date']\n\t\tlasts = lasts.set_index(firsts.index)\n\t\tfirsts.loc[:, 'end'] = lasts['date']\n\t\tfirsts = firsts[firsts['start'] != firsts['end']]\n\t\tto_concat.append(firsts)\n\treturn pd.concat(to_concat)\n" ]
[ [ "pandas.concat" ] ]
temper8/MatBench
[ "1ea24d18af35b57ef2d61148709eb6d49835fe97" ]
[ "show_config.py" ]
[ "import numpy as np \nnp.show_config()" ]
[ [ "numpy.show_config" ] ]
clsteel/DeepPostures
[ "8a7bed8f1e47e4a502080bf6edd513b822ea0bdf" ]
[ "MSSE-2021/train_model.py" ]
[ "# Copyright 2021 Supun Nakandala. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport random\nimport math\nimport argparse\n\nsys.path.append('./')\nfrom commons import cnn_bi_lstm_model, input_iterator\n\n# Setting random seeds\ntf.random.set_random_seed(2019)\nrandom.seed(2019)\nnp.random.seed(2019)\n\ndef get_train_ops(y, logits, learning_rate, n_classes, class_weights):\n y = tf.reshape(y, [-1])\n logits = tf.reshape(logits, [-1, n_classes])\n balanced_accuracy, update_op = tf.metrics.mean_per_class_accuracy(y, tf.argmax(logits, 1), n_classes)\n y = tf.reshape(tf.one_hot(y, depth=n_classes, axis=1), [-1, n_classes])\n\n loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y) * tf.reduce_sum(tf.constant(class_weights, dtype=tf.float32) * y, axis=1))\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss)\n\n return train_op, update_op, balanced_accuracy, loss\n\n\ndef window_generator(data_root, win_size_10s, subject_ids):\n x_segments = []; y_segments = []\n for subject_id in subject_ids:\n for x_seq, _, y_seq in input_iterator(data_root, subject_id, train=True):\n x_window = []; y_window = []\n for x,y in zip(x_seq, y_seq):\n x_window.append(x)\n y_window.append(y)\n\n if len(y_window) == win_size_10s:\n yield np.stack(x_window, axis=0), np.stack(y_window, axis=0)\n x_window = []; y_window = []\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Argument parser for training CNN model.')\n optional_arguments = parser._action_groups.pop()\n required_arguments = parser.add_argument_group('required arguments')\n required_arguments.add_argument('--pre-processed-dir', help='Pre-processed data directory', required=True)\n\n optional_arguments.add_argument('--transfer-learning-model', help='Transfer learning model name (default: CHAP_ALL_ADULTS)', default=None, required=False, choices=['CHAP_ALL_ADULTS'])\n optional_arguments.add_argument('--learning-rate', help='Learning rate for training the model (default: 0.0001)', default=1e-4, type=float, required=False)\n optional_arguments.add_argument('--num-epochs', help='Number of epochs to train the model (default: 15)', default=15, type=int, required=False)\n optional_arguments.add_argument('--batch-size', help='Training batch size (default: 16)', default=16, type=int, required=False)\n \n optional_arguments.add_argument('--amp-factor', help='Factor to increase the number of neurons in the CNN layers (default: 2)', default=2, type=int, required=False)\n optional_arguments.add_argument('--cnn-window-size', help='CNN window size in seconds on which the predictions to be made (default: 10)', default=10, type=int, required=False)\n optional_arguments.add_argument('--bi-lstm-window-size', help='BiLSTM window size in minutes on which the 
predictions to be smoothed (default: 7)', default=7, type=int, required=False)\n \n optional_arguments.add_argument('--shuffle-buffer-size', help='Training data shuffle buffer size in terms of number of records (default: 10000)', default=10000, type=int, required=False)\n optional_arguments.add_argument('--training-data-fraction', help='Percentage of subjects to be used for training (default: 60)', default=60, type=int, required=False)\n optional_arguments.add_argument('--validation-data-fraction', help='Percentage of subjects to be used for validation (default: 20)', default=20, type=int, required=False)\n optional_arguments.add_argument('--testing-data-fraction', help='Percentage of subjects to be used for testing (default: 20)', default=20, type=int, required=False)\n optional_arguments.add_argument('--model-checkpoint-path', help='Path where the trained model will be saved (default: ./model-checkpoint)', default='./model-checkpoint', required=False)\n \n optional_arguments.add_argument('--num-classes', help='Number of classes in the training dataset (default: 2)', default=2, type=int, required=False)\n optional_arguments.add_argument('--class-weights', help='Class weights for loss aggregation (default: [1.0, 1.0])', default='[1.0, 1.0]', required=False)\n optional_arguments.add_argument('--down-sample-frequency', help='Downsample frequency in Hz for GT3X data (default: 10)', default=10, type=int, required=False)\n optional_arguments.add_argument('--silent', help='Whether to hide info messages', default=False, required=False, action='store_true')\n parser._action_groups.append(optional_arguments)\n args = parser.parse_args()\n\n if os.path.exists(args.model_checkpoint_path):\n raise Exception('Model checkpoint: {} already exists.'.format(args.model_checkpoint_path))\n\n if args.transfer_learning_model:\n if args.transfer_learning_model == 'CHAP_ALL_ADULTS':\n args.amp_factor = 2\n args.cnn_window_size = 10\n args.bi_lstm_win_size = 7\n else:\n raise Exception('Unsupported transfer learning model: {}'.format(args.transfer_learning_model))\n \n assert (args.training_data_fraction + args.validation_data_fraction + args.testing_data_fraction) == 100, 'Train, validation,test split fractions should add up to 100%'\n \n subject_ids = [fname.split('.')[0] for fname in os.listdir(args.pre_processed_dir)]\n random.shuffle(subject_ids)\n\n n_train_subjects = int(math.ceil(len(subject_ids) * args.training_data_fraction / 100.))\n train_subjects = subject_ids[:n_train_subjects]\n subject_ids = subject_ids[n_train_subjects:]\n\n test_frac = args.testing_data_fraction / (100.0 - args.training_data_fraction) * 100\n n_test_subjects = int(math.ceil(len(subject_ids) * test_frac / 100.))\n test_subjects = subject_ids[:n_test_subjects]\n valid_subjects = subject_ids[n_test_subjects:] \n\n output_shapes = ((args.bi_lstm_window_size*(60//args.cnn_window_size), args.cnn_window_size*args.down_sample_frequency, 3), (args.bi_lstm_window_size*(60//args.cnn_window_size)))\n bi_lstm_win_size = 60//args.down_sample_frequency * args.bi_lstm_window_size\n train_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, train_subjects),output_types=(tf.float32, tf.int32),\n output_shapes=output_shapes).shuffle(args.shuffle_buffer_size).batch(args.batch_size).prefetch(10)\n valid_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, valid_subjects),output_types=(tf.float32, tf.int32),\n 
output_shapes=output_shapes).batch(args.batch_size).prefetch(10)\n test_dataset = tf.data.Dataset.from_generator(lambda: window_generator(args.pre_processed_dir, bi_lstm_win_size, test_subjects),output_types=(tf.float32, tf.int32),\n output_shapes=output_shapes).batch(args.batch_size).prefetch(10)\n \n iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n\n train_init_op = iterator.make_initializer(train_dataset)\n valid_init_op = iterator.make_initializer(valid_dataset)\n test_init_op = iterator.make_initializer(test_dataset)\n x, y = iterator.get_next()\n \n x = tf.reshape(x, [-1, args.cnn_window_size*args.down_sample_frequency, 3, 1])\n x = tf.identity(x, name='input')\n y = tf.reshape(y, [-1, bi_lstm_win_size])\n\n learning_rate = tf.placeholder(tf.float32)\n logits = cnn_bi_lstm_model(x, args.amp_factor, bi_lstm_win_size, args.num_classes)\n output = tf.argmax(tf.reshape(logits, [-1, args.num_classes]), axis=1, name='output')\n prediction = tf.identity(tf.argmax(logits, axis=1), name='prediction')\n\n class_weights = eval(args.class_weights) \n train_op, update_op, balanced_accuracy, loss = get_train_ops(y, logits, learning_rate, args.num_classes, class_weights)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n if args.transfer_learning_model:\n ckpt_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pre-trained-models', '{}_CKPT'.format(args.transfer_learning_model), 'model')\n # Weights for the final classification layer (dense) are ignored\n variables = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if not v.name.startswith('dense/')]\n restorer = tf.train.Saver(variables)\n restorer.restore(sess, ckpt_path)\n \n if not args.silent:\n print('Training subjects: {}'.format(train_subjects))\n print('Validation subjects: {}'.format(valid_subjects))\n print('Testing subjects: {}'.format(test_subjects))\n\n for epoch in range(args.num_epochs):\n for label, init_op, subjects in zip([\"Train\", \"Validation\", \"Test\"],\n [train_init_op, valid_init_op, test_init_op], [train_subjects, valid_subjects, test_subjects]):\n sess.run(tf.local_variables_initializer())\n sess.run(init_op)\n losses = []\n while True:\n try:\n if label == \"Train\":\n _, _, l = sess.run([train_op, update_op, loss], feed_dict={learning_rate: args.learning_rate})\n elif label == \"Validation\":\n _, l = sess.run([update_op, loss])\n elif label == \"Test\":\n _, l = sess.run([update_op, loss])\n losses.append(l)\n except tf.errors.OutOfRangeError:\n if not args.silent:\n ba = sess.run(balanced_accuracy)\n print(\"Epoch: %d, %s Loss: %f, Balanced Accuracy: %f\" %(epoch, label, sum(losses), ba))\n break\n\n if not os.path.exists(args.model_checkpoint_path):\n os.makedirs(args.model_checkpoint_path)\n\n tf.saved_model.simple_save(sess, os.path.join(args.model_checkpoint_path, 'CUSTOM_MODEL'), inputs={\"input\": x}, outputs={\"output\": output})\n\n if not args.silent:\n print('Model saved in path: {}'.format(args.model_checkpoint_path)) \n" ]
[ [ "tensorflow.placeholder", "tensorflow.reshape", "tensorflow.global_variables_initializer", "numpy.stack", "tensorflow.get_collection", "tensorflow.train.AdamOptimizer", "numpy.random.seed", "tensorflow.one_hot", "tensorflow.random.set_random_seed", "tensorflow.argmax", "tensorflow.Session", "tensorflow.identity", "tensorflow.data.Iterator.from_structure", "tensorflow.nn.softmax_cross_entropy_with_logits_v2", "tensorflow.train.Saver", "tensorflow.local_variables_initializer", "tensorflow.constant" ] ]
fchouteau/imgaug
[ "b282b97c13a27a32f91c2e2666db1e128e00cfde", "b282b97c13a27a32f91c2e2666db1e128e00cfde" ]
[ "imgaug/augmenters/size.py", "imgaug/imgaug.py" ]
[ "\"\"\"\nAugmenters that somehow change the size of the images.\n\nList of augmenters:\n\n * :class:`Resize`\n * :class:`CropAndPad`\n * :class:`Crop`\n * :class:`Pad`\n * :class:`PadToFixedSize`\n * :class:`CenterPadToFixedSize`\n * :class:`CropToFixedSize`\n * :class:`CenterCropToFixedSize`\n * :class:`CropToMultiplesOf`\n * :class:`CenterCropToMultiplesOf`\n * :class:`PadToMultiplesOf`\n * :class:`CenterPadToMultiplesOf`\n * :class:`CropToPowersOf`\n * :class:`CenterCropToPowersOf`\n * :class:`PadToPowersOf`\n * :class:`CenterPadToPowersOf`\n * :class:`CropToAspectRatio`\n * :class:`CenterCropToAspectRatio`\n * :class:`PadToAspectRatio`\n * :class:`CenterPadToAspectRatio`\n * :class:`CropToSquare`\n * :class:`CenterCropToSquare`\n * :class:`PadToSquare`\n * :class:`CenterPadToSquare`\n * :class:`KeepSizeByResize`\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport re\nimport functools\n\nimport numpy as np\nimport cv2\n\nimport imgaug as ia\nfrom imgaug.imgaug import _normalize_cv2_input_arr_\nfrom . import meta\nfrom .. import parameters as iap\n\n\ndef _crop_trbl_to_xyxy(shape, top, right, bottom, left, prevent_zero_size=True):\n if prevent_zero_size:\n top, right, bottom, left = _crop_prevent_zero_size(\n shape[0], shape[1], top, right, bottom, left)\n\n height, width = shape[0:2]\n x1 = left\n x2 = width - right\n y1 = top\n y2 = height - bottom\n\n # these steps prevent negative sizes\n # if x2==x1 or y2==y1 then the output arr has size 0 for the respective axis\n # note that if height/width of arr is zero, then y2==y1 or x2==x1, which\n # is still valid, even if height/width is zero and results in a zero-sized\n # axis\n x2 = max(x2, x1)\n y2 = max(y2, y1)\n\n return x1, y1, x2, y2\n\n\ndef _crop_arr_(arr, top, right, bottom, left, prevent_zero_size=True):\n x1, y1, x2, y2 = _crop_trbl_to_xyxy(arr.shape, top, right, bottom, left,\n prevent_zero_size=prevent_zero_size)\n return arr[y1:y2, x1:x2, ...]\n\n\ndef _crop_and_pad_arr(arr, croppings, paddings, pad_mode=\"constant\",\n pad_cval=0, keep_size=False):\n height, width = arr.shape[0:2]\n\n image_cr = _crop_arr_(arr, *croppings)\n\n image_cr_pa = pad(\n image_cr,\n top=paddings[0], right=paddings[1],\n bottom=paddings[2], left=paddings[3],\n mode=pad_mode, cval=pad_cval)\n\n if keep_size:\n image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width))\n\n return image_cr_pa\n\n\ndef _crop_and_pad_heatmap_(heatmap, croppings_img, paddings_img,\n pad_mode=\"constant\", pad_cval=0.0, keep_size=False):\n return _crop_and_pad_hms_or_segmaps_(heatmap, croppings_img,\n paddings_img, pad_mode, pad_cval,\n keep_size)\n\n\ndef _crop_and_pad_segmap_(segmap, croppings_img, paddings_img,\n pad_mode=\"constant\", pad_cval=0, keep_size=False):\n return _crop_and_pad_hms_or_segmaps_(segmap, croppings_img,\n paddings_img, pad_mode, pad_cval,\n keep_size)\n\n\ndef _crop_and_pad_hms_or_segmaps_(augmentable, croppings_img,\n paddings_img, pad_mode=\"constant\",\n pad_cval=None, keep_size=False):\n if isinstance(augmentable, ia.HeatmapsOnImage):\n arr_attr_name = \"arr_0to1\"\n pad_cval = pad_cval if pad_cval is not None else 0.0\n else:\n assert isinstance(augmentable, ia.SegmentationMapsOnImage), (\n \"Expected HeatmapsOnImage or SegmentationMapsOnImage, got %s.\" % (\n type(augmentable)))\n arr_attr_name = \"arr\"\n pad_cval = pad_cval if pad_cval is not None else 0\n\n arr = getattr(augmentable, arr_attr_name)\n arr_shape_orig = arr.shape\n augm_shape = augmentable.shape\n\n croppings_proj = 
_project_size_changes(croppings_img, augm_shape, arr.shape)\n paddings_proj = _project_size_changes(paddings_img, augm_shape, arr.shape)\n\n croppings_proj = _crop_prevent_zero_size(arr.shape[0], arr.shape[1],\n *croppings_proj)\n\n arr_cr = _crop_arr_(arr,\n croppings_proj[0], croppings_proj[1],\n croppings_proj[2], croppings_proj[3])\n arr_cr_pa = pad(\n arr_cr,\n top=paddings_proj[0], right=paddings_proj[1],\n bottom=paddings_proj[2], left=paddings_proj[3],\n mode=pad_mode,\n cval=pad_cval)\n\n setattr(augmentable, arr_attr_name, arr_cr_pa)\n\n if keep_size:\n augmentable = augmentable.resize(arr_shape_orig[0:2])\n else:\n augmentable.shape = _compute_shape_after_crop_and_pad(\n augmentable.shape, croppings_img, paddings_img)\n return augmentable\n\n\ndef _crop_and_pad_kpsoi_(kpsoi, croppings_img, paddings_img, keep_size):\n # using the trbl function instead of croppings_img has the advantage\n # of incorporating prevent_zero_size, dealing with zero-sized input image\n # axis and dealing the negative crop amounts\n x1, y1, _x2, _y2 = _crop_trbl_to_xyxy(kpsoi.shape, *croppings_img)\n crop_left = x1\n crop_top = y1\n\n shape_orig = kpsoi.shape\n shifted = kpsoi.shift_(\n x=-crop_left+paddings_img[3],\n y=-crop_top+paddings_img[0])\n shifted.shape = _compute_shape_after_crop_and_pad(\n shape_orig, croppings_img, paddings_img)\n if keep_size:\n shifted = shifted.on_(shape_orig)\n return shifted\n\n\ndef _compute_shape_after_crop_and_pad(old_shape, croppings, paddings):\n x1, y1, x2, y2 = _crop_trbl_to_xyxy(old_shape, *croppings)\n new_shape = list(old_shape)\n new_shape[0] = y2 - y1 + paddings[0] + paddings[2]\n new_shape[1] = x2 - x1 + paddings[1] + paddings[3]\n return tuple(new_shape)\n\n\ndef _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom,\n crop_left):\n remaining_height = height - (crop_top + crop_bottom)\n remaining_width = width - (crop_left + crop_right)\n if remaining_height < 1:\n regain = abs(remaining_height) + 1\n regain_top = regain // 2\n regain_bottom = regain // 2\n if regain_top + regain_bottom < regain:\n regain_top += 1\n\n if regain_top > crop_top:\n diff = regain_top - crop_top\n regain_top = crop_top\n regain_bottom += diff\n elif regain_bottom > crop_bottom:\n diff = regain_bottom - crop_bottom\n regain_bottom = crop_bottom\n regain_top += diff\n\n crop_top = crop_top - regain_top\n crop_bottom = crop_bottom - regain_bottom\n\n if remaining_width < 1:\n regain = abs(remaining_width) + 1\n regain_right = regain // 2\n regain_left = regain // 2\n if regain_right + regain_left < regain:\n regain_right += 1\n\n if regain_right > crop_right:\n diff = regain_right - crop_right\n regain_right = crop_right\n regain_left += diff\n elif regain_left > crop_left:\n diff = regain_left - crop_left\n regain_left = crop_left\n regain_right += diff\n\n crop_right = crop_right - regain_right\n crop_left = crop_left - regain_left\n\n return (\n max(crop_top, 0), max(crop_right, 0), max(crop_bottom, 0),\n max(crop_left, 0))\n\n\ndef _project_size_changes(trbl, from_shape, to_shape):\n if from_shape[0:2] == to_shape[0:2]:\n return trbl\n\n height_to = to_shape[0]\n width_to = to_shape[1]\n height_from = from_shape[0]\n width_from = from_shape[1]\n\n top = trbl[0]\n right = trbl[1]\n bottom = trbl[2]\n left = trbl[3]\n\n # Adding/subtracting 1e-4 here helps for the case where a heatmap/segmap\n # is exactly half the size of an image and the size change on an axis is\n # an odd value. 
Then the projected value would end up being <something>.5\n # and the rounding would always round up to the next integer. If both\n # sides then have the same change, they are both rounded up, resulting\n # in more change than expected.\n # E.g. image height is 8, map height is 4, change is 3 at the top and 3 at\n # the bottom. The changes are projected to 4*(3/8) = 1.5 and both rounded\n # up to 2.0. Hence, the maps are changed by 4 (100% of the map height,\n # vs. 6 for images, which is 75% of the image height).\n top = _int_r(height_to * (top/height_from) - 1e-4)\n right = _int_r(width_to * (right/width_from) + 1e-4)\n bottom = _int_r(height_to * (bottom/height_from) + 1e-4)\n left = _int_r(width_to * (left/width_from) - 1e-4)\n\n return top, right, bottom, left\n\n\ndef _int_r(value):\n return int(np.round(value))\n\n\n# TODO somehow integrate this with pad()\ndef _handle_pad_mode_param(pad_mode):\n pad_modes_available = {\n \"constant\", \"edge\", \"linear_ramp\", \"maximum\", \"mean\", \"median\",\n \"minimum\", \"reflect\", \"symmetric\", \"wrap\"}\n if pad_mode == ia.ALL:\n return iap.Choice(list(pad_modes_available))\n if ia.is_string(pad_mode):\n assert pad_mode in pad_modes_available, (\n \"Value '%s' is not a valid pad mode. Valid pad modes are: %s.\" % (\n pad_mode, \", \".join(pad_modes_available)))\n return iap.Deterministic(pad_mode)\n if isinstance(pad_mode, list):\n assert all([v in pad_modes_available for v in pad_mode]), (\n \"At least one in list %s is not a valid pad mode. Valid pad \"\n \"modes are: %s.\" % (str(pad_mode), \", \".join(pad_modes_available)))\n return iap.Choice(pad_mode)\n if isinstance(pad_mode, iap.StochasticParameter):\n return pad_mode\n raise Exception(\n \"Expected pad_mode to be ia.ALL or string or list of strings or \"\n \"StochasticParameter, got %s.\" % (type(pad_mode),))\n\n\ndef _handle_position_parameter(position):\n if position == \"uniform\":\n return iap.Uniform(0.0, 1.0), iap.Uniform(0.0, 1.0)\n if position == \"normal\":\n return (\n iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),\n minval=0.0, maxval=1.0),\n iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),\n minval=0.0, maxval=1.0)\n )\n if position == \"center\":\n return iap.Deterministic(0.5), iap.Deterministic(0.5)\n if (ia.is_string(position)\n and re.match(r\"^(left|center|right)-(top|center|bottom)$\",\n position)):\n mapping = {\"top\": 0.0, \"center\": 0.5, \"bottom\": 1.0, \"left\": 0.0,\n \"right\": 1.0}\n return (\n iap.Deterministic(mapping[position.split(\"-\")[0]]),\n iap.Deterministic(mapping[position.split(\"-\")[1]])\n )\n if isinstance(position, iap.StochasticParameter):\n return position\n if isinstance(position, tuple):\n assert len(position) == 2, (\n \"Expected tuple with two entries as position parameter. \"\n \"Got %d entries with types %s..\" % (\n len(position), str([type(item) for item in position])))\n for item in position:\n if ia.is_single_number(item) and (item < 0 or item > 1.0):\n raise Exception(\n \"Both position values must be within the value range \"\n \"[0.0, 1.0]. Got type %s with value %.8f.\" % (\n type(item), item,))\n position = [iap.Deterministic(item)\n if ia.is_single_number(item)\n else item for item in position]\n\n only_sparams = all([isinstance(item, iap.StochasticParameter)\n for item in position])\n assert only_sparams, (\n \"Expected tuple with two entries that are both either \"\n \"StochasticParameter or float/int. 
Got types %s.\" % (\n str([type(item) for item in position])\n ))\n return tuple(position)\n raise Exception(\n \"Expected one of the following as position parameter: string \"\n \"'uniform', string 'normal', string 'center', a string matching \"\n \"regex ^(left|center|right)-(top|center|bottom)$, a single \"\n \"StochasticParameter or a tuple of two entries, both being either \"\n \"StochasticParameter or floats or int. Got instead type %s with \"\n \"content '%s'.\" % (\n type(position),\n (str(position)\n if len(str(position)) < 20\n else str(position)[0:20] + \"...\")\n )\n )\n\n\n# TODO this is the same as in imgaug.py, make DRY\ndef _assert_two_or_three_dims(shape):\n if hasattr(shape, \"shape\"):\n shape = shape.shape\n assert len(shape) in [2, 3], (\n \"Expected image with two or three dimensions, but got %d dimensions \"\n \"and shape %s.\" % (len(shape), shape))\n\n\ndef pad(arr, top=0, right=0, bottom=0, left=0, mode=\"constant\", cval=0):\n \"\"\"Pad an image-like array on its top/right/bottom/left side.\n\n This function is a wrapper around :func:`numpy.pad`.\n\n Supported dtypes\n ----------------\n\n * ``uint8``: yes; fully tested (1)\n * ``uint16``: yes; fully tested (1)\n * ``uint32``: yes; fully tested (2) (3)\n * ``uint64``: yes; fully tested (2) (3)\n * ``int8``: yes; fully tested (1)\n * ``int16``: yes; fully tested (1)\n * ``int32``: yes; fully tested (1)\n * ``int64``: yes; fully tested (2) (3)\n * ``float16``: yes; fully tested (2) (3)\n * ``float32``: yes; fully tested (1)\n * ``float64``: yes; fully tested (1)\n * ``float128``: yes; fully tested (2) (3)\n * ``bool``: yes; tested (2) (3)\n\n - (1) Uses ``cv2`` if `mode` is one of: ``\"constant\"``, ``\"edge\"``,\n ``\"reflect\"``, ``\"symmetric\"``. Otherwise uses ``numpy``.\n - (2) Uses ``numpy``.\n - (3) Rejected by ``cv2``.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pad.\n\n top : int, optional\n Amount of pixels to add to the top side of the image.\n Must be ``0`` or greater.\n\n right : int, optional\n Amount of pixels to add to the right side of the image.\n Must be ``0`` or greater.\n\n bottom : int, optional\n Amount of pixels to add to the bottom side of the image.\n Must be ``0`` or greater.\n\n left : int, optional\n Amount of pixels to add to the left side of the image.\n Must be ``0`` or greater.\n\n mode : str, optional\n Padding mode to use. See :func:`numpy.pad` for details.\n In case of mode ``constant``, the parameter `cval` will be used as\n the ``constant_values`` parameter to :func:`numpy.pad`.\n In case of mode ``linear_ramp``, the parameter `cval` will be used as\n the ``end_values`` parameter to :func:`numpy.pad`.\n\n cval : number or iterable of number, optional\n Value to use for padding if `mode` is ``constant``.\n See :func:`numpy.pad` for details. The cval is expected to match the\n input array's dtype and value range. If an iterable is used, it is\n expected to contain one value per channel. 
The number of values\n and number of channels are expected to match.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C) ndarray\n Padded array with height ``H'=H+top+bottom`` and width\n ``W'=W+left+right``.\n\n \"\"\"\n import imgaug.dtypes as iadt\n\n _assert_two_or_three_dims(arr)\n assert all([v >= 0 for v in [top, right, bottom, left]]), (\n \"Expected padding amounts that are >=0, but got %d, %d, %d, %d \"\n \"(top, right, bottom, left)\" % (top, right, bottom, left))\n\n is_multi_cval = ia.is_iterable(cval)\n\n if top > 0 or right > 0 or bottom > 0 or left > 0:\n min_value, _, max_value = iadt.get_value_range_of_dtype(arr.dtype)\n\n # without the if here there are crashes for float128, e.g. if\n # cval is an int (just using float(cval) seems to not be accurate\n # enough)\n if arr.dtype.name == \"float128\":\n cval = np.float128(cval) # pylint: disable=no-member\n\n if is_multi_cval:\n cval = np.clip(cval, min_value, max_value)\n else:\n cval = max(min(cval, max_value), min_value)\n\n # Note that copyMakeBorder() hangs/runs endlessly if arr has an\n # axis of size 0 and mode is \"reflect\".\n # Numpy also complains in these cases if mode is not \"constant\".\n has_zero_sized_axis = any([axis == 0 for axis in arr.shape])\n if has_zero_sized_axis:\n mode = \"constant\"\n\n mapping_mode_np_to_cv2 = {\n \"constant\": cv2.BORDER_CONSTANT,\n \"edge\": cv2.BORDER_REPLICATE,\n \"linear_ramp\": None,\n \"maximum\": None,\n \"mean\": None,\n \"median\": None,\n \"minimum\": None,\n \"reflect\": cv2.BORDER_REFLECT_101,\n \"symmetric\": cv2.BORDER_REFLECT,\n \"wrap\": None,\n cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,\n cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,\n cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,\n cv2.BORDER_REFLECT: cv2.BORDER_REFLECT\n }\n bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None\n\n # these datatypes all simply generate a \"TypeError: src data type = X\n # is not supported\" error\n bad_datatype_cv2 = (\n arr.dtype.name\n in [\"uint32\", \"uint64\", \"int64\", \"float16\", \"float128\", \"bool\"]\n )\n\n # OpenCV turns the channel axis for arrays with 0 channels to 512\n # TODO add direct test for this. 
indirectly tested via Pad\n bad_shape_cv2 = (arr.ndim == 3 and arr.shape[-1] == 0)\n\n if not bad_datatype_cv2 and not bad_mode_cv2 and not bad_shape_cv2:\n # convert cval to expected type, as otherwise we get TypeError\n # for np inputs\n kind = arr.dtype.kind\n if is_multi_cval:\n cval = [float(cval_c) if kind == \"f\" else int(cval_c)\n for cval_c in cval]\n else:\n cval = float(cval) if kind == \"f\" else int(cval)\n\n if arr.ndim == 2 or arr.shape[2] <= 4:\n # without this, only the first channel is padded with the cval,\n # all following channels with 0\n if arr.ndim == 3 and not is_multi_cval:\n cval = tuple([cval] * arr.shape[2])\n\n arr_pad = cv2.copyMakeBorder(\n _normalize_cv2_input_arr_(arr),\n top=top, bottom=bottom, left=left, right=right,\n borderType=mapping_mode_np_to_cv2[mode], value=cval)\n if arr.ndim == 3 and arr_pad.ndim == 2:\n arr_pad = arr_pad[..., np.newaxis]\n else:\n result = []\n channel_start_idx = 0\n cval = cval if is_multi_cval else tuple([cval] * arr.shape[2])\n while channel_start_idx < arr.shape[2]:\n arr_c = arr[..., channel_start_idx:channel_start_idx+4]\n cval_c = cval[channel_start_idx:channel_start_idx+4]\n arr_pad_c = cv2.copyMakeBorder(\n _normalize_cv2_input_arr_(arr_c),\n top=top, bottom=bottom, left=left, right=right,\n borderType=mapping_mode_np_to_cv2[mode], value=cval_c)\n arr_pad_c = np.atleast_3d(arr_pad_c)\n result.append(arr_pad_c)\n channel_start_idx += 4\n arr_pad = np.concatenate(result, axis=2)\n else:\n # paddings for 2d case\n paddings_np = [(top, bottom), (left, right)]\n\n # add paddings for 3d case\n if arr.ndim == 3:\n paddings_np.append((0, 0))\n\n if mode == \"constant\":\n if arr.ndim > 2 and is_multi_cval:\n arr_pad_chans = [\n np.pad(arr[..., c], paddings_np[0:2], mode=mode,\n constant_values=cval[c])\n for c in np.arange(arr.shape[2])]\n arr_pad = np.stack(arr_pad_chans, axis=-1)\n else:\n arr_pad = np.pad(arr, paddings_np, mode=mode,\n constant_values=cval)\n elif mode == \"linear_ramp\":\n if arr.ndim > 2 and is_multi_cval:\n arr_pad_chans = [\n np.pad(arr[..., c], paddings_np[0:2], mode=mode,\n end_values=cval[c])\n for c in np.arange(arr.shape[2])]\n arr_pad = np.stack(arr_pad_chans, axis=-1)\n else:\n arr_pad = np.pad(arr, paddings_np, mode=mode,\n end_values=cval)\n else:\n arr_pad = np.pad(arr, paddings_np, mode=mode)\n\n return arr_pad\n return np.copy(arr)\n\n\ndef pad_to_aspect_ratio(arr, aspect_ratio, mode=\"constant\", cval=0,\n return_pad_amounts=False):\n \"\"\"Pad an image array on its sides so that it matches a target aspect ratio.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required padding amounts are distributed per\n image axis.\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.augmenters.size.pad`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pad.\n\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the\n image having twice as much width as height.\n\n mode : str, optional\n Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.\n\n cval : number, optional\n Value to use for padding if `mode` is ``constant``.\n See :func:`numpy.pad` for details.\n\n return_pad_amounts : bool, optional\n If ``False``, then only the padded image will be returned. If\n ``True``, a ``tuple`` with two entries will be returned, where the\n first entry is the padded image and the second entry are the amounts\n by which each image side was padded. 
These amounts are again a\n ``tuple`` of the form ``(top, right, bottom, left)``, with each value\n being an ``int``.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C) ndarray\n Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray, fulfilling the\n given `aspect_ratio`.\n\n tuple of int\n Amounts by which the image was padded on each side, given as a\n ``tuple`` ``(top, right, bottom, left)``.\n This ``tuple`` is only returned if `return_pad_amounts` was set to\n ``True``.\n\n \"\"\"\n pad_top, pad_right, pad_bottom, pad_left = \\\n compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio)\n arr_padded = pad(\n arr,\n top=pad_top,\n right=pad_right,\n bottom=pad_bottom,\n left=pad_left,\n mode=mode,\n cval=cval\n )\n\n if return_pad_amounts:\n return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)\n return arr_padded\n\n\ndef pad_to_multiples_of(arr, height_multiple, width_multiple, mode=\"constant\",\n cval=0, return_pad_amounts=False):\n \"\"\"Pad an image array until its side lengths are multiples of given values.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required padding amounts are distributed per\n image axis.\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.augmenters.size.pad`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pad.\n\n height_multiple : None or int\n The desired multiple of the height. The computed padding amount will\n reflect a padding that increases the y axis size until it is a multiple\n of this value.\n\n width_multiple : None or int\n The desired multiple of the width. The computed padding amount will\n reflect a padding that increases the x axis size until it is a multiple\n of this value.\n\n mode : str, optional\n Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.\n\n cval : number, optional\n Value to use for padding if `mode` is ``constant``.\n See :func:`numpy.pad` for details.\n\n return_pad_amounts : bool, optional\n If ``False``, then only the padded image will be returned. If\n ``True``, a ``tuple`` with two entries will be returned, where the\n first entry is the padded image and the second entry are the amounts\n by which each image side was padded. 
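# --- Illustrative sketch, not part of the original module ---
# ``pad_to_aspect_ratio()`` as implemented above, with ``return_pad_amounts``.
# Assumes the helper is importable from ``imgaug.augmenters.size``; the image
# size is arbitrary.
import numpy as np
from imgaug.augmenters.size import pad_to_aspect_ratio

image = np.zeros((100, 50, 3), dtype=np.uint8)  # H=100, W=50 -> ratio 0.5

# Request a 2.0 aspect ratio (width twice the height): only the x-axis is
# padded, growing the width from 50 to 200 columns (75 left, 75 right).
padded, amounts = pad_to_aspect_ratio(image, 2.0, return_pad_amounts=True)
assert padded.shape == (100, 200, 3)
assert amounts == (0, 75, 0, 75)  # (top, right, bottom, left)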
These amounts are again a\n ``tuple`` of the form ``(top, right, bottom, left)``, with each value\n being an integer.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C) ndarray\n Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray.\n\n tuple of int\n Amounts by which the image was padded on each side, given as a\n ``tuple`` ``(top, right, bottom, left)``.\n This ``tuple`` is only returned if `return_pad_amounts` was set to\n ``True``.\n\n \"\"\"\n pad_top, pad_right, pad_bottom, pad_left = \\\n compute_paddings_to_reach_multiples_of(\n arr, height_multiple, width_multiple)\n arr_padded = pad(\n arr,\n top=pad_top,\n right=pad_right,\n bottom=pad_bottom,\n left=pad_left,\n mode=mode,\n cval=cval\n )\n\n if return_pad_amounts:\n return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)\n return arr_padded\n\n\ndef compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio):\n \"\"\"Compute pad amounts required to fulfill an aspect ratio.\n\n \"Pad amounts\" here denotes the number of pixels that have to be added to\n each side to fulfill the desired constraint.\n\n The aspect ratio is given as ``ratio = width / height``.\n Depending on which dimension is smaller (height or width), only the\n corresponding sides (top/bottom or left/right) will be padded.\n\n The axis-wise padding amounts are always distributed equally over the\n sides of the respective axis (i.e. left and right, top and bottom). For\n odd pixel amounts, one pixel will be left over after the equal\n distribution and could be added to either side of the axis. This function\n will always add such a left over pixel to the bottom (y-axis) or\n right (x-axis) side.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute pad amounts.\n\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. 
``2.0`` denotes the\n image having twice as much width as height.\n\n Returns\n -------\n tuple of int\n Required padding amounts to reach the target aspect ratio, given as a\n ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n _assert_two_or_three_dims(arr)\n assert aspect_ratio > 0, (\n \"Expected to get an aspect ratio >0, got %.4f.\" % (aspect_ratio,))\n\n pad_top = 0\n pad_right = 0\n pad_bottom = 0\n pad_left = 0\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n if height == 0:\n height = 1\n pad_bottom += 1\n if width == 0:\n width = 1\n pad_right += 1\n\n aspect_ratio_current = width / height\n\n if aspect_ratio_current < aspect_ratio:\n # image is more vertical than desired, width needs to be increased\n diff = (aspect_ratio * height) - width\n pad_right += int(np.ceil(diff / 2))\n pad_left += int(np.floor(diff / 2))\n elif aspect_ratio_current > aspect_ratio:\n # image is more horizontal than desired, height needs to be increased\n diff = ((1/aspect_ratio) * width) - height\n pad_top += int(np.floor(diff / 2))\n pad_bottom += int(np.ceil(diff / 2))\n\n return pad_top, pad_right, pad_bottom, pad_left\n\n\ndef compute_croppings_to_reach_aspect_ratio(arr, aspect_ratio):\n \"\"\"Compute crop amounts required to fulfill an aspect ratio.\n\n \"Crop amounts\" here denotes the number of pixels that have to be removed\n from each side to fulfill the desired constraint.\n\n The aspect ratio is given as ``ratio = width / height``.\n Depending on which dimension is smaller (height or width), only the\n corresponding sides (top/bottom or left/right) will be cropped.\n\n The axis-wise padding amounts are always distributed equally over the\n sides of the respective axis (i.e. left and right, top and bottom). For\n odd pixel amounts, one pixel will be left over after the equal\n distribution and could be added to either side of the axis. This function\n will always add such a left over pixel to the bottom (y-axis) or\n right (x-axis) side.\n\n If an aspect ratio cannot be reached exactly, this function will return\n rather one pixel too few than one pixel too many.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute crop amounts.\n\n aspect_ratio : float\n Target aspect ratio, given as width/height. E.g. 
``2.0`` denotes the\n image having twice as much width as height.\n\n Returns\n -------\n tuple of int\n Required cropping amounts to reach the target aspect ratio, given as a\n ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n _assert_two_or_three_dims(arr)\n assert aspect_ratio > 0, (\n \"Expected to get an aspect ratio >0, got %.4f.\" % (aspect_ratio,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n assert shape[0] > 0, (\n \"Expected to get an array with height >0, got shape %s.\" % (shape,))\n\n height, width = shape[0:2]\n aspect_ratio_current = width / height\n\n top = 0\n right = 0\n bottom = 0\n left = 0\n\n if aspect_ratio_current < aspect_ratio:\n # image is more vertical than desired, height needs to be reduced\n # c = H - W/r\n crop_amount = height - (width / aspect_ratio)\n crop_amount = min(crop_amount, height - 1)\n top = int(np.floor(crop_amount / 2))\n bottom = int(np.ceil(crop_amount / 2))\n elif aspect_ratio_current > aspect_ratio:\n # image is more horizontal than desired, width needs to be reduced\n # c = W - Hr\n crop_amount = width - height * aspect_ratio\n crop_amount = min(crop_amount, width - 1)\n left = int(np.floor(crop_amount / 2))\n right = int(np.ceil(crop_amount / 2))\n\n return top, right, bottom, left\n\n\ndef compute_paddings_to_reach_multiples_of(arr, height_multiple,\n width_multiple):\n \"\"\"Compute pad amounts until img height/width are multiples of given values.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required padding amounts are distributed per\n image axis.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute pad amounts.\n\n height_multiple : None or int\n The desired multiple of the height. The computed padding amount will\n reflect a padding that increases the y axis size until it is a multiple\n of this value.\n\n width_multiple : None or int\n The desired multiple of the width. 
The computed padding amount will\n reflect a padding that increases the x axis size until it is a multiple\n of this value.\n\n Returns\n -------\n tuple of int\n Required padding amounts to reach multiples of the provided values,\n given as a ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n def _compute_axis_value(axis_size, multiple):\n if multiple is None:\n return 0, 0\n if axis_size == 0:\n to_pad = multiple\n elif axis_size % multiple == 0:\n to_pad = 0\n else:\n to_pad = multiple - (axis_size % multiple)\n return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2))\n\n _assert_two_or_three_dims(arr)\n\n if height_multiple is not None:\n assert height_multiple > 0, (\n \"Can only pad to multiples of 1 or larger, got %d.\" % (\n height_multiple,))\n if width_multiple is not None:\n assert width_multiple > 0, (\n \"Can only pad to multiples of 1 or larger, got %d.\" % (\n width_multiple,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n top, bottom = _compute_axis_value(height, height_multiple)\n left, right = _compute_axis_value(width, width_multiple)\n\n return top, right, bottom, left\n\n\ndef compute_croppings_to_reach_multiples_of(arr, height_multiple,\n width_multiple):\n \"\"\"Compute croppings to reach multiples of given heights/widths.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required cropping amounts are distributed per\n image axis.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute crop amounts.\n\n height_multiple : None or int\n The desired multiple of the height. The computed croppings will\n reflect a crop operation that decreases the y axis size until it is\n a multiple of this value.\n\n width_multiple : None or int\n The desired multiple of the width. The computed croppings amount will\n reflect a crop operation that decreases the x axis size until it is\n a multiple of this value.\n\n Returns\n -------\n tuple of int\n Required cropping amounts to reach multiples of the provided values,\n given as a ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n def _compute_axis_value(axis_size, multiple):\n if multiple is None:\n return 0, 0\n if axis_size == 0:\n to_crop = 0\n elif axis_size % multiple == 0:\n to_crop = 0\n else:\n to_crop = axis_size % multiple\n return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2))\n\n _assert_two_or_three_dims(arr)\n\n if height_multiple is not None:\n assert height_multiple > 0, (\n \"Can only crop to multiples of 1 or larger, got %d.\" % (\n height_multiple,))\n if width_multiple is not None:\n assert width_multiple > 0, (\n \"Can only crop to multiples of 1 or larger, got %d.\" % (\n width_multiple,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n top, bottom = _compute_axis_value(height, height_multiple)\n left, right = _compute_axis_value(width, width_multiple)\n\n return top, right, bottom, left\n\n\ndef compute_paddings_to_reach_powers_of(arr, height_base, width_base,\n allow_zero_exponent=False):\n \"\"\"Compute paddings to reach powers of given base values.\n\n For given axis size ``S``, padded size ``S'`` (``S' >= S``) and base ``B``\n this function computes paddings that fulfill ``S' = B^E``, where ``E``\n is any exponent from the discrete interval ``[0 .. 
inf)``.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required padding amounts are distributed per\n image axis.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute pad amounts.\n\n height_base : None or int\n The desired base of the height.\n\n width_base : None or int\n The desired base of the width.\n\n allow_zero_exponent : bool, optional\n Whether ``E=0`` in ``S'=B^E`` is a valid value. If ``True``, axes\n with size ``0`` or ``1`` will be padded up to size ``B^0=1`` and\n axes with size ``1 < S <= B`` will be padded up to ``B^1=B``.\n If ``False``, the minimum output axis size is always at least ``B``.\n\n Returns\n -------\n tuple of int\n Required padding amounts to fulfill ``S' = B^E`` given as a\n ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n def _compute_axis_value(axis_size, base):\n if base is None:\n return 0, 0\n if axis_size == 0:\n to_pad = 1 if allow_zero_exponent else base\n elif axis_size <= base:\n to_pad = base - axis_size\n else:\n # log_{base}(axis_size) in numpy\n exponent = np.log(axis_size) / np.log(base)\n\n to_pad = (base ** int(np.ceil(exponent))) - axis_size\n\n return int(np.floor(to_pad/2)), int(np.ceil(to_pad/2))\n\n _assert_two_or_three_dims(arr)\n\n if height_base is not None:\n assert height_base > 1, (\n \"Can only pad to base larger than 1, got %d.\" % (height_base,))\n if width_base is not None:\n assert width_base > 1, (\n \"Can only pad to base larger than 1, got %d.\" % (width_base,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n top, bottom = _compute_axis_value(height, height_base)\n left, right = _compute_axis_value(width, width_base)\n\n return top, right, bottom, left\n\n\ndef compute_croppings_to_reach_powers_of(arr, height_base, width_base,\n allow_zero_exponent=False):\n \"\"\"Compute croppings to reach powers of given base values.\n\n For given axis size ``S``, cropped size ``S'`` (``S' <= S``) and base ``B``\n this function computes croppings that fulfill ``S' = B^E``, where ``E``\n is any exponent from the discrete interval ``[0 .. inf)``.\n\n See :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for an\n explanation of how the required cropping amounts are distributed per\n image axis.\n\n .. note::\n\n For axes where ``S == 0``, this function alwayws returns zeros as\n croppings.\n\n For axes where ``1 <= S < B`` see parameter `allow_zero_exponent`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int\n Image-like array or shape tuple for which to compute crop amounts.\n\n height_base : None or int\n The desired base of the height.\n\n width_base : None or int\n The desired base of the width.\n\n allow_zero_exponent : bool\n Whether ``E=0`` in ``S'=B^E`` is a valid value. 
If ``True``, axes\n with size ``1 <= S < B`` will be cropped to size ``B^0=1``.\n If ``False``, axes with sizes ``S < B`` will not be changed.\n\n Returns\n -------\n tuple of int\n Required cropping amounts to fulfill ``S' = B^E`` given as a\n ``tuple`` of the form ``(top, right, bottom, left)``.\n\n \"\"\"\n def _compute_axis_value(axis_size, base):\n if base is None:\n return 0, 0\n if axis_size == 0:\n to_crop = 0\n elif axis_size < base:\n # crop down to B^0 = 1\n to_crop = axis_size - 1 if allow_zero_exponent else 0\n else:\n # log_{base}(axis_size) in numpy\n exponent = np.log(axis_size) / np.log(base)\n\n to_crop = axis_size - (base ** int(exponent))\n\n return int(np.floor(to_crop/2)), int(np.ceil(to_crop/2))\n\n _assert_two_or_three_dims(arr)\n\n if height_base is not None:\n assert height_base > 1, (\n \"Can only crop to base larger than 1, got %d.\" % (height_base,))\n if width_base is not None:\n assert width_base > 1, (\n \"Can only crop to base larger than 1, got %d.\" % (width_base,))\n\n shape = arr.shape if hasattr(arr, \"shape\") else arr\n height, width = shape[0:2]\n\n top, bottom = _compute_axis_value(height, height_base)\n left, right = _compute_axis_value(width, width_base)\n\n return top, right, bottom, left\n\n\n@ia.deprecated(alt_func=\"Resize\",\n comment=\"Resize has the exactly same interface as Scale.\")\ndef Scale(*args, **kwargs):\n \"\"\"Augmenter that resizes images to specified heights and widths.\"\"\"\n # pylint: disable=invalid-name\n return Resize(*args, **kwargs)\n\n\nclass Resize(meta.Augmenter):\n \"\"\"Augmenter that resizes images to specified heights and widths.\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n Parameters\n ----------\n size : 'keep' or int or float or tuple of int or tuple of float or list of int or list of float or imgaug.parameters.StochasticParameter or dict\n The new size of the images.\n\n * If this has the string value ``keep``, the original height and\n width values will be kept (image is not resized).\n * If this is an ``int``, this value will always be used as the new\n height and width of the images.\n * If this is a ``float`` ``v``, then per image the image's height\n ``H`` and width ``W`` will be changed to ``H*v`` and ``W*v``.\n * If this is a ``tuple``, it is expected to have two entries\n ``(a, b)``. If at least one of these are ``float`` s, a value\n will be sampled from range ``[a, b]`` and used as the ``float``\n value to resize the image (see above). If both are ``int`` s, a\n value will be sampled from the discrete range ``[a..b]`` and\n used as the integer value to resize the image (see above).\n * If this is a ``list``, a random value from the ``list`` will be\n picked to resize the image. All values in the ``list`` must be\n ``int`` s or ``float`` s (no mixture is possible).\n * If this is a ``StochasticParameter``, then this parameter will\n first be queried once per image. The resulting value will be used\n for both height and width.\n * If this is a ``dict``, it may contain the keys ``height`` and\n ``width`` or the keys ``shorter-side`` and ``longer-side``. Each\n key may have the same datatypes as above and describes the\n scaling on x and y-axis or the shorter and longer axis,\n respectively. Both axis are sampled independently. Additionally,\n one of the keys may have the value ``keep-aspect-ratio``, which\n means that the respective side of the image will be resized so\n that the original aspect ratio is kept. 
This is useful when only\n resizing one image size by a pixel value (e.g. resize images to\n a height of ``64`` pixels and resize the width so that the\n overall aspect ratio is maintained).\n\n interpolation : imgaug.ALL or int or str or list of int or list of str or imgaug.parameters.StochasticParameter, optional\n Interpolation to use.\n\n * If ``imgaug.ALL``, then a random interpolation from ``nearest``,\n ``linear``, ``area`` or ``cubic`` will be picked (per image).\n * If ``int``, then this interpolation will always be used.\n Expected to be any of the following:\n ``cv2.INTER_NEAREST``, ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``,\n ``cv2.INTER_CUBIC``\n * If string, then this interpolation will always be used.\n Expected to be any of the following:\n ``nearest``, ``linear``, ``area``, ``cubic``\n * If ``list`` of ``int`` / ``str``, then a random one of the values\n will be picked per image as the interpolation.\n * If a ``StochasticParameter``, then this parameter will be\n queried per image and is expected to return an ``int`` or\n ``str``.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Resize(32)\n\n Resize all images to ``32x32`` pixels.\n\n >>> aug = iaa.Resize(0.5)\n\n Resize all images to ``50`` percent of their original size.\n\n >>> aug = iaa.Resize((16, 22))\n\n Resize all images to a random height and width within the discrete\n interval ``[16..22]`` (uniformly sampled per image).\n\n >>> aug = iaa.Resize((0.5, 0.75))\n\n Resize all any input image so that its height (``H``) and width (``W``)\n become ``H*v`` and ``W*v``, where ``v`` is uniformly sampled from the\n interval ``[0.5, 0.75]``.\n\n >>> aug = iaa.Resize([16, 32, 64])\n\n Resize all images either to ``16x16``, ``32x32`` or ``64x64`` pixels.\n\n >>> aug = iaa.Resize({\"height\": 32})\n\n Resize all images to a height of ``32`` pixels and keeps the original\n width.\n\n >>> aug = iaa.Resize({\"height\": 32, \"width\": 48})\n\n Resize all images to a height of ``32`` pixels and a width of ``48``.\n\n >>> aug = iaa.Resize({\"height\": 32, \"width\": \"keep-aspect-ratio\"})\n\n Resize all images to a height of ``32`` pixels and resizes the\n x-axis (width) so that the aspect ratio is maintained.\n\n >>> aug = iaa.Resize(\n >>> {\"shorter-side\": 224, \"longer-side\": \"keep-aspect-ratio\"})\n\n Resize all images to a height/width of ``224`` pixels, depending on which\n axis is shorter and resize the other axis so that the aspect ratio is\n maintained.\n\n >>> aug = iaa.Resize({\"height\": (0.5, 0.75), \"width\": [16, 32, 64]})\n\n Resize all images to a height of ``H*v``, where ``H`` is the original\n height and ``v`` is a random value sampled from the interval\n ``[0.5, 0.75]``. The width/x-axis of each image is resized to either\n ``16`` or ``32`` or ``64`` pixels.\n\n >>> aug = iaa.Resize(32, interpolation=[\"linear\", \"cubic\"])\n\n Resize all images to ``32x32`` pixels. 
Randomly use either ``linear``\n or ``cubic`` interpolation.\n\n \"\"\"\n\n def __init__(self, size, interpolation=\"cubic\",\n seed=None, name=None, **old_kwargs):\n super(Resize, self).__init__(\n seed=seed, name=name, **old_kwargs)\n\n self.size, self.size_order = self._handle_size_arg(size, False)\n self.interpolation = self._handle_interpolation_arg(interpolation)\n\n @classmethod\n def _handle_size_arg(cls, size, subcall):\n def _dict_to_size_tuple(val1, val2):\n kaa = \"keep-aspect-ratio\"\n not_both_kaa = (val1 != kaa or val2 != kaa)\n assert not_both_kaa, (\n \"Expected at least one value to not be \\\"keep-aspect-ratio\\\", \"\n \"but got it two times.\")\n\n size_tuple = []\n for k in [val1, val2]:\n if k in [\"keep-aspect-ratio\", \"keep\"]:\n entry = iap.Deterministic(k)\n else:\n entry = cls._handle_size_arg(k, True)\n size_tuple.append(entry)\n return tuple(size_tuple)\n\n def _contains_any_key(dict_, keys):\n return any([key in dict_ for key in keys])\n\n # HW = height, width\n # SL = shorter, longer\n size_order = \"HW\"\n\n if size == \"keep\":\n result = iap.Deterministic(\"keep\")\n elif ia.is_single_number(size):\n assert size > 0, \"Expected only values > 0, got %s\" % (size,)\n result = iap.Deterministic(size)\n elif not subcall and isinstance(size, dict):\n if len(size.keys()) == 0:\n result = iap.Deterministic(\"keep\")\n elif _contains_any_key(size, [\"height\", \"width\"]):\n height = size.get(\"height\", \"keep\")\n width = size.get(\"width\", \"keep\")\n result = _dict_to_size_tuple(height, width)\n elif _contains_any_key(size, [\"shorter-side\", \"longer-side\"]):\n shorter = size.get(\"shorter-side\", \"keep\")\n longer = size.get(\"longer-side\", \"keep\")\n result = _dict_to_size_tuple(shorter, longer)\n size_order = \"SL\"\n else:\n raise ValueError(\n \"Expected dictionary containing no keys, \"\n \"the keys \\\"height\\\" and/or \\\"width\\\", \"\n \"or the keys \\\"shorter-side\\\" and/or \\\"longer-side\\\". 
\"\n \"Got keys: %s.\" % (str(size.keys()),))\n elif isinstance(size, tuple):\n assert len(size) == 2, (\n \"Expected size tuple to contain exactly 2 values, \"\n \"got %d.\" % (len(size),))\n assert size[0] > 0 and size[1] > 0, (\n \"Expected size tuple to only contain values >0, \"\n \"got %d and %d.\" % (size[0], size[1]))\n if ia.is_single_float(size[0]) or ia.is_single_float(size[1]):\n result = iap.Uniform(size[0], size[1])\n else:\n result = iap.DiscreteUniform(size[0], size[1])\n elif isinstance(size, list):\n if len(size) == 0:\n result = iap.Deterministic(\"keep\")\n else:\n all_int = all([ia.is_single_integer(v) for v in size])\n all_float = all([ia.is_single_float(v) for v in size])\n assert all_int or all_float, (\n \"Expected to get only integers or floats.\")\n assert all([v > 0 for v in size]), (\n \"Expected all values to be >0.\")\n result = iap.Choice(size)\n elif isinstance(size, iap.StochasticParameter):\n result = size\n else:\n raise ValueError(\n \"Expected number, tuple of two numbers, list of numbers, \"\n \"dictionary of form \"\n \"{'height': number/tuple/list/'keep-aspect-ratio'/'keep', \"\n \"'width': <analogous>}, dictionary of form \"\n \"{'shorter-side': number/tuple/list/'keep-aspect-ratio'/\"\n \"'keep', 'longer-side': <analogous>} \"\n \"or StochasticParameter, got %s.\" % (type(size),)\n )\n\n if subcall:\n return result\n return result, size_order\n\n @classmethod\n def _handle_interpolation_arg(cls, interpolation):\n if interpolation == ia.ALL:\n interpolation = iap.Choice(\n [\"nearest\", \"linear\", \"area\", \"cubic\"])\n elif ia.is_single_integer(interpolation):\n interpolation = iap.Deterministic(interpolation)\n elif ia.is_string(interpolation):\n interpolation = iap.Deterministic(interpolation)\n elif ia.is_iterable(interpolation):\n interpolation = iap.Choice(interpolation)\n elif isinstance(interpolation, iap.StochasticParameter):\n pass\n else:\n raise Exception(\n \"Expected int or string or iterable or StochasticParameter, \"\n \"got %s.\" % (type(interpolation),))\n return interpolation\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n nb_rows = batch.nb_rows\n samples = self._draw_samples(nb_rows, random_state)\n\n if batch.images is not None:\n batch.images = self._augment_images_by_samples(batch.images,\n samples)\n\n if batch.heatmaps is not None:\n # TODO this uses the same interpolation as for images for heatmaps\n # while other augmenters resort to cubic\n batch.heatmaps = self._augment_maps_by_samples(\n batch.heatmaps, \"arr_0to1\", samples)\n\n if batch.segmentation_maps is not None:\n batch.segmentation_maps = self._augment_maps_by_samples(\n batch.segmentation_maps, \"arr\",\n (samples[0], samples[1], [None] * nb_rows))\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._augment_keypoints_by_samples,\n samples=samples)\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)\n setattr(batch, augm_name, cbaois)\n\n return batch\n\n def _augment_images_by_samples(self, images, samples):\n input_was_array = False\n input_dtype = None\n if ia.is_np_array(images):\n input_was_array = True\n input_dtype = images.dtype\n\n samples_a, samples_b, samples_ip = samples\n result = []\n for i, image in enumerate(images):\n h, w = self._compute_height_width(image.shape, samples_a[i],\n samples_b[i], self.size_order)\n image_rs = ia.imresize_single_image(image, (h, w),\n 
interpolation=samples_ip[i])\n result.append(image_rs)\n\n if input_was_array:\n all_same_size = (len({image.shape for image in result}) == 1)\n if all_same_size:\n result = np.array(result, dtype=input_dtype)\n\n return result\n\n def _augment_maps_by_samples(self, augmentables, arr_attr_name, samples):\n result = []\n samples_h, samples_w, samples_ip = samples\n\n for i, augmentable in enumerate(augmentables):\n arr = getattr(augmentable, arr_attr_name)\n arr_shape = arr.shape\n img_shape = augmentable.shape\n h_img, w_img = self._compute_height_width(\n img_shape, samples_h[i], samples_w[i], self.size_order)\n h = int(np.round(h_img * (arr_shape[0] / img_shape[0])))\n w = int(np.round(w_img * (arr_shape[1] / img_shape[1])))\n h = max(h, 1)\n w = max(w, 1)\n if samples_ip[0] is not None:\n # TODO change this for heatmaps to always have cubic or\n # automatic interpolation?\n augmentable_resize = augmentable.resize(\n (h, w), interpolation=samples_ip[i])\n else:\n augmentable_resize = augmentable.resize((h, w))\n augmentable_resize.shape = (h_img, w_img) + img_shape[2:]\n result.append(augmentable_resize)\n\n return result\n\n def _augment_keypoints_by_samples(self, kpsois, samples):\n result = []\n samples_a, samples_b, _samples_ip = samples\n for i, kpsoi in enumerate(kpsois):\n h, w = self._compute_height_width(\n kpsoi.shape, samples_a[i], samples_b[i], self.size_order)\n new_shape = (h, w) + kpsoi.shape[2:]\n keypoints_on_image_rs = kpsoi.on_(new_shape)\n\n result.append(keypoints_on_image_rs)\n\n return result\n\n def _draw_samples(self, nb_images, random_state):\n rngs = random_state.duplicate(3)\n if isinstance(self.size, tuple):\n samples_h = self.size[0].draw_samples(nb_images,\n random_state=rngs[0])\n samples_w = self.size[1].draw_samples(nb_images,\n random_state=rngs[1])\n else:\n samples_h = self.size.draw_samples(nb_images, random_state=rngs[0])\n samples_w = samples_h\n\n samples_ip = self.interpolation.draw_samples(nb_images,\n random_state=rngs[2])\n return samples_h, samples_w, samples_ip\n\n @classmethod\n def _compute_height_width(cls, image_shape, sample_a, sample_b, size_order):\n imh, imw = image_shape[0:2]\n\n if size_order == 'SL':\n # size order: short, long\n if imh < imw:\n h, w = sample_a, sample_b\n else:\n w, h = sample_a, sample_b\n else:\n # size order: height, width\n h, w = sample_a, sample_b\n\n if ia.is_single_float(h):\n assert h > 0, \"Expected 'h' to be >0, got %.4f\" % (h,)\n h = int(np.round(imh * h))\n h = h if h > 0 else 1\n elif h == \"keep\":\n h = imh\n if ia.is_single_float(w):\n assert w > 0, \"Expected 'w' to be >0, got %.4f\" % (w,)\n w = int(np.round(imw * w))\n w = w if w > 0 else 1\n elif w == \"keep\":\n w = imw\n\n # at least the checks for keep-aspect-ratio must come after\n # the float checks, as they are dependent on the results\n # this is also why these are not written as elifs\n if h == \"keep-aspect-ratio\":\n h_per_w_orig = imh / imw\n h = int(np.round(w * h_per_w_orig))\n if w == \"keep-aspect-ratio\":\n w_per_h_orig = imw / imh\n w = int(np.round(h * w_per_h_orig))\n\n return h, w\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.size, self.interpolation, self.size_order]\n\n\nclass _CropAndPadSamplingResult(object):\n def __init__(self, crop_top, crop_right, crop_bottom, crop_left,\n pad_top, pad_right, pad_bottom, pad_left, pad_mode, pad_cval):\n self.crop_top = crop_top\n self.crop_right = crop_right\n self.crop_bottom = crop_bottom\n self.crop_left 
= crop_left\n self.pad_top = pad_top\n self.pad_right = pad_right\n self.pad_bottom = pad_bottom\n self.pad_left = pad_left\n self.pad_mode = pad_mode\n self.pad_cval = pad_cval\n\n @property\n def croppings(self):\n \"\"\"Get absolute pixel amounts of croppings as a TRBL tuple.\"\"\"\n return self.crop_top, self.crop_right, self.crop_bottom, self.crop_left\n\n @property\n def paddings(self):\n \"\"\"Get absolute pixel amounts of paddings as a TRBL tuple.\"\"\"\n return self.pad_top, self.pad_right, self.pad_bottom, self.pad_left\n\n\nclass CropAndPad(meta.Augmenter):\n \"\"\"Crop/pad images by pixel amounts or fractions of image sizes.\n\n Cropping removes pixels at the sides (i.e. extracts a subimage from\n a given full image). Padding adds pixels to the sides (e.g. black pixels).\n\n This augmenter will never crop images below a height or width of ``1``.\n\n .. note::\n\n This augmenter automatically resizes images back to their original size\n after it has augmented them. To deactivate this, add the\n parameter ``keep_size=False``.\n\n Supported dtypes\n ----------------\n\n if (keep_size=False):\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: yes; tested\n * ``uint64``: yes; tested\n * ``int8``: yes; tested\n * ``int16``: yes; tested\n * ``int32``: yes; tested\n * ``int64``: yes; tested\n * ``float16``: yes; tested\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: yes; tested\n * ``bool``: yes; tested\n\n if (keep_size=True):\n\n minimum of (\n ``imgaug.augmenters.size.CropAndPad(keep_size=False)``,\n :func:`~imgaug.imgaug.imresize_many_images`\n )\n\n Parameters\n ----------\n px : None or int or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to crop (negative values) or pad (positive values)\n on each side of the image. Either this or the parameter `percent` may\n be set, not both at the same time.\n\n * If ``None``, then pixel-based cropping/padding will not be used.\n * If ``int``, then that exact number of pixels will always be\n cropped/padded.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left), unless `sample_independently` is set to ``False``,\n as then only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,\n then each side will be cropped/padded by a random amount sampled\n uniformly per image and side from the inteval ``[a, b]``. If\n however `sample_independently` is set to ``False``, only one\n value will be sampled per image and used for all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. Each entry may be a single ``int`` (always\n crop/pad by exactly that value), a ``tuple`` of two ``int`` s\n ``a`` and ``b`` (crop/pad by an amount within ``[a, b]``), a\n ``list`` of ``int`` s (crop/pad by a random value that is\n contained in the ``list``) or a ``StochasticParameter`` (sample\n the amount to crop/pad from that parameter).\n\n percent : None or number or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to crop (negative values) or pad (positive values)\n on each side of the image given as a *fraction* of the image\n height/width. E.g. 
if this is set to ``-0.1``, the augmenter will\n always crop away ``10%`` of the image's height at both the top and the\n bottom (both ``10%`` each), as well as ``10%`` of the width at the\n right and left.\n Expected value range is ``(-1.0, inf)``.\n Either this or the parameter `px` may be set, not both\n at the same time.\n\n * If ``None``, then fraction-based cropping/padding will not be\n used.\n * If ``number``, then that fraction will always be cropped/padded.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left). If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,\n then each side will be cropped/padded by a random fraction\n sampled uniformly per image and side from the interval\n ``[a, b]``. If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. Each entry may be a single ``float``\n (always crop/pad by exactly that percent value), a ``tuple`` of\n two ``float`` s ``a`` and ``b`` (crop/pad by a fraction from\n ``[a, b]``), a ``list`` of ``float`` s (crop/pad by a random\n value that is contained in the list) or a ``StochasticParameter``\n (sample the percentage to crop/pad from that parameter).\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n Padding mode to use. The available modes match the numpy padding modes,\n i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,\n ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes\n ``constant`` and ``linear_ramp`` use extra values, which are provided\n by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for\n more details.\n\n * If ``imgaug.ALL``, then a random mode from all available modes\n will be sampled per image.\n * If a ``str``, it will be used as the pad mode for all images.\n * If a ``list`` of ``str``, a random one of these will be sampled\n per image and used as the mode.\n * If ``StochasticParameter``, a random mode will be sampled from\n this parameter per image.\n\n pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional\n The constant value to use if the pad mode is ``constant`` or the end\n value to use if the mode is ``linear_ramp``.\n See :func:`~imgaug.imgaug.pad` for more details.\n\n * If ``number``, then that value will be used.\n * If a ``tuple`` of two ``number`` s and at least one of them is\n a ``float``, then a random number will be uniformly sampled per\n image from the continuous interval ``[a, b]`` and used as the\n value. If both ``number`` s are ``int`` s, the interval is\n discrete.\n * If a ``list`` of ``number``, then a random value will be chosen\n from the elements of the ``list`` and used as the value.\n * If ``StochasticParameter``, a random value will be sampled from\n that parameter per image.\n\n keep_size : bool, optional\n After cropping and padding, the result image will usually have a\n different height/width compared to the original input image. If this\n parameter is set to ``True``, then the cropped/padded image will be\n resized to the input image's size, i.e. 
the augmenter's output shape\n is always identical to the input shape.\n\n sample_independently : bool, optional\n If ``False`` *and* the values for `px`/`percent` result in exactly\n *one* probability distribution for all image sides, only one single\n value will be sampled from that probability distribution and used for\n all sides. I.e. the crop/pad amount then is the same for all sides.\n If ``True``, four values will be sampled independently, one per side.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropAndPad(px=(-10, 0))\n\n Crop each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[-10..0]``.\n\n >>> aug = iaa.CropAndPad(px=(0, 10))\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. The padding happens by\n zero-padding, i.e. it adds black pixels (default setting).\n\n >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=\"edge\")\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. The padding uses the\n ``edge`` mode from numpy's pad function, i.e. the pixel colors around\n the image sides are repeated.\n\n >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=[\"constant\", \"edge\"])\n\n Similar to the previous example, but uses zero-padding (``constant``) for\n half of the images and ``edge`` padding for the other half.\n\n >>> aug = iaa.CropAndPad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))\n\n Similar to the previous example, but uses any available padding mode.\n In case the padding mode ends up being ``constant`` or ``linear_ramp``,\n and random intensity is uniformly sampled (once per image) from the\n discrete interval ``[0..255]`` and used as the intensity of the new\n pixels.\n\n >>> aug = iaa.CropAndPad(px=(0, 10), sample_independently=False)\n\n Pad each side by a random pixel value sampled uniformly once per image\n from the discrete interval ``[0..10]``. Each sampled value is used\n for *all* sides of the corresponding image.\n\n >>> aug = iaa.CropAndPad(px=(0, 10), keep_size=False)\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. Afterwards, do **not**\n resize the padded image back to the input image's size. This will increase\n the image's height and width by a maximum of ``20`` pixels.\n\n >>> aug = iaa.CropAndPad(px=((0, 10), (0, 5), (0, 10), (0, 5)))\n\n Pad the top and bottom by a random pixel value sampled uniformly from the\n discrete interval ``[0..10]``. Pad the left and right analogously by\n a random value sampled from ``[0..5]``. Each value is always sampled\n independently.\n\n >>> aug = iaa.CropAndPad(percent=(0, 0.1))\n\n Pad each side by a random fraction sampled uniformly from the continuous\n interval ``[0.0, 0.10]``. The fraction is sampled once per image and\n side. E.g. 
a sampled fraction of ``0.1`` for the top side would pad by\n ``0.1*H``, where ``H`` is the height of the input image.\n\n >>> aug = iaa.CropAndPad(\n >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))\n\n Pads each side by either ``5%`` or ``10%``. The values are sampled\n once per side and image.\n\n >>> aug = iaa.CropAndPad(px=(-10, 10))\n\n Sample uniformly per image and side a value ``v`` from the discrete range\n ``[-10..10]``. Then either crop (negative sample) or pad (positive sample)\n the side by ``v`` pixels.\n\n \"\"\"\n\n def __init__(self, px=None, percent=None, pad_mode=\"constant\", pad_cval=0,\n keep_size=True, sample_independently=True,\n seed=None, name=None, **old_kwargs):\n # pylint: disable=invalid-name\n super(CropAndPad, self).__init__(\n seed=seed, name=name, **old_kwargs)\n\n self.mode, self.all_sides, self.top, self.right, self.bottom, \\\n self.left = self._handle_px_and_percent_args(px, percent)\n\n self.pad_mode = _handle_pad_mode_param(pad_mode)\n # TODO enable ALL here, like in e.g. Affine\n self.pad_cval = iap.handle_discrete_param(\n pad_cval, \"pad_cval\", value_range=None, tuple_to_uniform=True,\n list_to_choice=True, allow_floats=True)\n\n self.keep_size = keep_size\n self.sample_independently = sample_independently\n\n # set these to None to use the same values as sampled for the\n # images (not tested)\n self._pad_mode_heatmaps = \"constant\"\n self._pad_mode_segmentation_maps = \"constant\"\n self._pad_cval_heatmaps = 0.0\n self._pad_cval_segmentation_maps = 0\n\n @classmethod\n def _handle_px_and_percent_args(cls, px, percent):\n # pylint: disable=invalid-name\n all_sides = None\n top, right, bottom, left = None, None, None, None\n\n if px is None and percent is None:\n mode = \"noop\"\n elif px is not None and percent is not None:\n raise Exception(\"Can only pad by pixels or percent, not both.\")\n elif px is not None:\n mode = \"px\"\n all_sides, top, right, bottom, left = cls._handle_px_arg(px)\n else: # = elif percent is not None:\n mode = \"percent\"\n all_sides, top, right, bottom, left = cls._handle_percent_arg(\n percent)\n return mode, all_sides, top, right, bottom, left\n\n @classmethod\n def _handle_px_arg(cls, px):\n # pylint: disable=invalid-name\n all_sides = None\n top, right, bottom, left = None, None, None, None\n\n if ia.is_single_integer(px):\n all_sides = iap.Deterministic(px)\n elif isinstance(px, tuple):\n assert len(px) in [2, 4], (\n \"Expected 'px' given as a tuple to contain 2 or 4 \"\n \"entries, got %d.\" % (len(px),))\n\n def handle_param(p):\n if ia.is_single_integer(p):\n return iap.Deterministic(p)\n if isinstance(p, tuple):\n assert len(p) == 2, (\n \"Expected tuple of 2 values, got %d.\" % (len(p)))\n only_ints = (\n ia.is_single_integer(p[0])\n and ia.is_single_integer(p[1]))\n assert only_ints, (\n \"Expected tuple of integers, got %s and %s.\" % (\n type(p[0]), type(p[1])))\n return iap.DiscreteUniform(p[0], p[1])\n if isinstance(p, list):\n assert len(p) > 0, (\n \"Expected non-empty list, but got empty one.\")\n assert all([ia.is_single_integer(val) for val in p]), (\n \"Expected list of ints, got types %s.\" % (\n \", \".join([str(type(v)) for v in p])))\n return iap.Choice(p)\n if isinstance(p, iap.StochasticParameter):\n return p\n raise Exception(\n \"Expected int, tuple of two ints, list of ints or \"\n \"StochasticParameter, got type %s.\" % (type(p),))\n\n if len(px) == 2:\n all_sides = handle_param(px)\n else: # len == 4\n top = handle_param(px[0])\n right = handle_param(px[1])\n bottom 
= handle_param(px[2])\n left = handle_param(px[3])\n elif isinstance(px, iap.StochasticParameter):\n top = right = bottom = left = px\n else:\n raise Exception(\n \"Expected int, tuple of 4 \"\n \"ints/tuples/lists/StochasticParameters or \"\n \"StochasticParameter, got type %s.\" % (type(px),))\n return all_sides, top, right, bottom, left\n\n @classmethod\n def _handle_percent_arg(cls, percent):\n all_sides = None\n top, right, bottom, left = None, None, None, None\n\n if ia.is_single_number(percent):\n assert percent > -1.0, (\n \"Expected 'percent' to be >-1.0, got %.4f.\" % (percent,))\n all_sides = iap.Deterministic(percent)\n elif isinstance(percent, tuple):\n assert len(percent) in [2, 4], (\n \"Expected 'percent' given as a tuple to contain 2 or 4 \"\n \"entries, got %d.\" % (len(percent),))\n\n def handle_param(p):\n if ia.is_single_number(p):\n return iap.Deterministic(p)\n if isinstance(p, tuple):\n assert len(p) == 2, (\n \"Expected tuple of 2 values, got %d.\" % (len(p),))\n only_numbers = (\n ia.is_single_number(p[0])\n and ia.is_single_number(p[1]))\n assert only_numbers, (\n \"Expected tuple of numbers, got %s and %s.\" % (\n type(p[0]), type(p[1])))\n assert p[0] > -1.0 and p[1] > -1.0, (\n \"Expected tuple of values >-1.0, got %.4f and \"\n \"%.4f.\" % (p[0], p[1]))\n return iap.Uniform(p[0], p[1])\n if isinstance(p, list):\n assert len(p) > 0, (\n \"Expected non-empty list, but got empty one.\")\n assert all([ia.is_single_number(val) for val in p]), (\n \"Expected list of numbers, got types %s.\" % (\n \", \".join([str(type(v)) for v in p])))\n assert all([val > -1.0 for val in p]), (\n \"Expected list of values >-1.0, got values %s.\" % (\n \", \".join([\"%.4f\" % (v,) for v in p])))\n return iap.Choice(p)\n if isinstance(p, iap.StochasticParameter):\n return p\n raise Exception(\n \"Expected int, tuple of two ints, list of ints or \"\n \"StochasticParameter, got type %s.\" % (type(p),))\n\n if len(percent) == 2:\n all_sides = handle_param(percent)\n else: # len == 4\n top = handle_param(percent[0])\n right = handle_param(percent[1])\n bottom = handle_param(percent[2])\n left = handle_param(percent[3])\n elif isinstance(percent, iap.StochasticParameter):\n top = right = bottom = left = percent\n else:\n raise Exception(\n \"Expected number, tuple of 4 \"\n \"numbers/tuples/lists/StochasticParameters or \"\n \"StochasticParameter, got type %s.\" % (type(percent),))\n return all_sides, top, right, bottom, left\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n shapes = batch.get_rowwise_shapes()\n samples = self._draw_samples(random_state, shapes)\n\n if batch.images is not None:\n batch.images = self._augment_images_by_samples(batch.images,\n samples)\n\n if batch.heatmaps is not None:\n batch.heatmaps = self._augment_maps_by_samples(\n batch.heatmaps,\n self._pad_mode_heatmaps, self._pad_cval_heatmaps,\n samples)\n\n if batch.segmentation_maps is not None:\n batch.segmentation_maps = self._augment_maps_by_samples(\n batch.segmentation_maps,\n self._pad_mode_segmentation_maps,\n self._pad_cval_segmentation_maps, samples)\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._augment_keypoints_by_samples,\n samples=samples)\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)\n setattr(batch, augm_name, cbaois)\n\n return batch\n\n def _augment_images_by_samples(self, images, samples):\n result = []\n 
for i, image in enumerate(images):\n samples_i = samples[i]\n\n image_cr_pa = _crop_and_pad_arr(\n image, samples_i.croppings, samples_i.paddings,\n samples_i.pad_mode, samples_i.pad_cval, self.keep_size)\n\n result.append(image_cr_pa)\n\n if ia.is_np_array(images):\n if self.keep_size:\n result = np.array(result, dtype=images.dtype)\n else:\n nb_shapes = len({image.shape for image in result})\n if nb_shapes == 1:\n result = np.array(result, dtype=images.dtype)\n\n return result\n\n def _augment_maps_by_samples(self, augmentables, pad_mode, pad_cval,\n samples):\n result = []\n for i, augmentable in enumerate(augmentables):\n samples_img = samples[i]\n\n augmentable = _crop_and_pad_hms_or_segmaps_(\n augmentable,\n croppings_img=samples_img.croppings,\n paddings_img=samples_img.paddings,\n pad_mode=(pad_mode\n if pad_mode is not None\n else samples_img.pad_mode),\n pad_cval=(pad_cval\n if pad_cval is not None\n else samples_img.pad_cval),\n keep_size=self.keep_size\n )\n\n result.append(augmentable)\n\n return result\n\n def _augment_keypoints_by_samples(self, keypoints_on_images, samples):\n result = []\n for i, keypoints_on_image in enumerate(keypoints_on_images):\n samples_i = samples[i]\n\n kpsoi_aug = _crop_and_pad_kpsoi_(\n keypoints_on_image, croppings_img=samples_i.croppings,\n paddings_img=samples_i.paddings, keep_size=self.keep_size)\n result.append(kpsoi_aug)\n\n return result\n\n def _draw_samples(self, random_state, shapes):\n nb_rows = len(shapes)\n\n if self.mode == \"noop\":\n top = right = bottom = left = np.full((nb_rows,), 0,\n dtype=np.int32)\n else:\n if self.all_sides is not None:\n if self.sample_independently:\n samples = self.all_sides.draw_samples(\n (nb_rows, 4), random_state=random_state)\n top = samples[:, 0]\n right = samples[:, 1]\n bottom = samples[:, 2]\n left = samples[:, 3]\n else:\n sample = self.all_sides.draw_samples(\n (nb_rows,), random_state=random_state)\n top = right = bottom = left = sample\n else:\n top = self.top.draw_samples(\n (nb_rows,), random_state=random_state)\n right = self.right.draw_samples(\n (nb_rows,), random_state=random_state)\n bottom = self.bottom.draw_samples(\n (nb_rows,), random_state=random_state)\n left = self.left.draw_samples(\n (nb_rows,), random_state=random_state)\n\n if self.mode == \"px\":\n # no change necessary for pixel values\n pass\n elif self.mode == \"percent\":\n # percentage values have to be transformed to pixel values\n shapes_arr = np.array([shape[0:2] for shape in shapes],\n dtype=np.float32)\n heights = shapes_arr[:, 0]\n widths = shapes_arr[:, 1]\n top = np.round(heights * top).astype(np.int32)\n right = np.round(widths * right).astype(np.int32)\n bottom = np.round(heights * bottom).astype(np.int32)\n left = np.round(widths * left).astype(np.int32)\n else:\n raise Exception(\"Invalid mode\")\n\n def _only_above_zero(arr):\n arr = np.copy(arr)\n mask = (arr < 0)\n arr[mask] = 0\n return arr\n\n crop_top = _only_above_zero((-1) * top)\n crop_right = _only_above_zero((-1) * right)\n crop_bottom = _only_above_zero((-1) * bottom)\n crop_left = _only_above_zero((-1) * left)\n\n pad_top = _only_above_zero(top)\n pad_right = _only_above_zero(right)\n pad_bottom = _only_above_zero(bottom)\n pad_left = _only_above_zero(left)\n\n pad_mode = self.pad_mode.draw_samples((nb_rows,),\n random_state=random_state)\n pad_cval = self.pad_cval.draw_samples((nb_rows,),\n random_state=random_state)\n\n # TODO vectorize this part -- especially return only one instance\n result = []\n for i, shape in enumerate(shapes):\n 
height, width = shape[0:2]\n crop_top_i, crop_right_i, crop_bottom_i, crop_left_i = \\\n _crop_prevent_zero_size(\n height, width,\n crop_top[i], crop_right[i], crop_bottom[i], crop_left[i])\n\n # add here any_crop_y to not warn in case of zero height/width\n # images\n any_crop_y = (crop_top_i > 0 or crop_bottom_i > 0)\n if any_crop_y and crop_top_i + crop_bottom_i >= height:\n ia.warn(\n \"Expected generated crop amounts in CropAndPad for top and \"\n \"bottom image side to be less than the image's height, but \"\n \"got %d (top) and %d (bottom) vs. image height %d. This \"\n \"will result in an image with output height=1 (if input \"\n \"height was >=1) or output height=0 (if input height \"\n \"was 0).\" % (crop_top_i, crop_bottom_i, height))\n\n # add here any_crop_x to not warn in case of zero height/width\n # images\n any_crop_x = (crop_left_i > 0 or crop_right_i > 0)\n if any_crop_x and crop_left_i + crop_right_i >= width:\n ia.warn(\n \"Expected generated crop amounts in CropAndPad for left \"\n \"and right image side to be less than the image's width, \"\n \"but got %d (left) and %d (right) vs. image width %d. \"\n \"This will result in an image with output width=1 (if \"\n \"input width was >=1) or output width=0 (if input width \"\n \"was 0).\" % (crop_left_i, crop_right_i, width))\n\n result.append(\n _CropAndPadSamplingResult(\n crop_top=crop_top_i,\n crop_right=crop_right_i,\n crop_bottom=crop_bottom_i,\n crop_left=crop_left_i,\n pad_top=pad_top[i],\n pad_right=pad_right[i],\n pad_bottom=pad_bottom[i],\n pad_left=pad_left[i],\n pad_mode=pad_mode[i],\n pad_cval=pad_cval[i]))\n return result\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.all_sides, self.top, self.right, self.bottom, self.left,\n self.pad_mode, self.pad_cval]\n\n\nclass Pad(CropAndPad):\n \"\"\"Pad images, i.e. adds columns/rows of pixels to them.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropAndPad`.\n\n Parameters\n ----------\n px : None or int or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to pad on each side of the image.\n Expected value range is ``[0, inf)``.\n Either this or the parameter `percent` may be set, not both at the same\n time.\n\n * If ``None``, then pixel-based padding will not be used.\n * If ``int``, then that exact number of pixels will always be\n padded.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left), unless `sample_independently` is set to ``False``,\n as then only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,\n then each side will be padded by a random amount sampled\n uniformly per image and side from the inteval ``[a, b]``. If\n however `sample_independently` is set to ``False``, only one\n value will be sampled per image and used for all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. 
Each entry may be a single ``int`` (always\n pad by exactly that value), a ``tuple`` of two ``int`` s\n ``a`` and ``b`` (pad by an amount within ``[a, b]``), a\n ``list`` of ``int`` s (pad by a random value that is\n contained in the ``list``) or a ``StochasticParameter`` (sample\n the amount to pad from that parameter).\n\n percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to pad\n on each side of the image given as a *fraction* of the image\n height/width. E.g. if this is set to ``0.1``, the augmenter will\n always pad ``10%`` of the image's height at both the top and the\n bottom (both ``10%`` each), as well as ``10%`` of the width at the\n right and left.\n Expected value range is ``[0.0, inf)``.\n Either this or the parameter `px` may be set, not both\n at the same time.\n\n * If ``None``, then fraction-based padding will not be\n used.\n * If ``number``, then that fraction will always be padded.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left). If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,\n then each side will be padded by a random fraction\n sampled uniformly per image and side from the interval\n ``[a, b]``. If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. Each entry may be a single ``float``\n (always pad by exactly that fraction), a ``tuple`` of\n two ``float`` s ``a`` and ``b`` (pad by a fraction from\n ``[a, b]``), a ``list`` of ``float`` s (pad by a random\n value that is contained in the list) or a ``StochasticParameter``\n (sample the percentage to pad from that parameter).\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n Padding mode to use. The available modes match the numpy padding modes,\n i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,\n ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes\n ``constant`` and ``linear_ramp`` use extra values, which are provided\n by ``pad_cval`` when necessary. See :func:`~imgaug.imgaug.pad` for\n more details.\n\n * If ``imgaug.ALL``, then a random mode from all available modes\n will be sampled per image.\n * If a ``str``, it will be used as the pad mode for all images.\n * If a ``list`` of ``str``, a random one of these will be sampled\n per image and used as the mode.\n * If ``StochasticParameter``, a random mode will be sampled from\n this parameter per image.\n\n pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional\n The constant value to use if the pad mode is ``constant`` or the end\n value to use if the mode is ``linear_ramp``.\n See :func:`~imgaug.imgaug.pad` for more details.\n\n * If ``number``, then that value will be used.\n * If a ``tuple`` of two ``number`` s and at least one of them is\n a ``float``, then a random number will be uniformly sampled per\n image from the continuous interval ``[a, b]`` and used as the\n value. 
If both ``number`` s are ``int`` s, the interval is\n discrete.\n * If a ``list`` of ``number``, then a random value will be chosen\n from the elements of the ``list`` and used as the value.\n * If ``StochasticParameter``, a random value will be sampled from\n that parameter per image.\n\n keep_size : bool, optional\n After padding, the result image will usually have a\n different height/width compared to the original input image. If this\n parameter is set to ``True``, then the padded image will be\n resized to the input image's size, i.e. the augmenter's output shape\n is always identical to the input shape.\n\n sample_independently : bool, optional\n If ``False`` *and* the values for `px`/`percent` result in exactly\n *one* probability distribution for all image sides, only one single\n value will be sampled from that probability distribution and used for\n all sides. I.e. the pad amount then is the same for all sides.\n If ``True``, four values will be sampled independently, one per side.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Pad(px=(0, 10))\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. The padding happens by\n zero-padding, i.e. it adds black pixels (default setting).\n\n >>> aug = iaa.Pad(px=(0, 10), pad_mode=\"edge\")\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. The padding uses the\n ``edge`` mode from numpy's pad function, i.e. the pixel colors around\n the image sides are repeated.\n\n >>> aug = iaa.Pad(px=(0, 10), pad_mode=[\"constant\", \"edge\"])\n\n Similar to the previous example, but uses zero-padding (``constant``) for\n half of the images and ``edge`` padding for the other half.\n\n >>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))\n\n Similar to the previous example, but uses any available padding mode.\n In case the padding mode ends up being ``constant`` or ``linear_ramp``,\n and random intensity is uniformly sampled (once per image) from the\n discrete interval ``[0..255]`` and used as the intensity of the new\n pixels.\n\n >>> aug = iaa.Pad(px=(0, 10), sample_independently=False)\n\n Pad each side by a random pixel value sampled uniformly once per image\n from the discrete interval ``[0..10]``. Each sampled value is used\n for *all* sides of the corresponding image.\n\n >>> aug = iaa.Pad(px=(0, 10), keep_size=False)\n\n Pad each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. Afterwards, do **not**\n resize the padded image back to the input image's size. This will increase\n the image's height and width by a maximum of ``20`` pixels.\n\n >>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5)))\n\n Pad the top and bottom by a random pixel value sampled uniformly from the\n discrete interval ``[0..10]``. Pad the left and right analogously by\n a random value sampled from ``[0..5]``. 
Each value is always sampled\n independently.\n\n >>> aug = iaa.Pad(percent=(0, 0.1))\n\n Pad each side by a random fraction sampled uniformly from the continuous\n interval ``[0.0, 0.10]``. The fraction is sampled once per image and\n side. E.g. a sampled fraction of ``0.1`` for the top side would pad by\n ``0.1*H``, where ``H`` is the height of the input image.\n\n >>> aug = iaa.Pad(\n >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))\n\n Pads each side by either ``5%`` or ``10%``. The values are sampled\n once per side and image.\n\n \"\"\"\n\n def __init__(self, px=None, percent=None, pad_mode=\"constant\", pad_cval=0,\n keep_size=True, sample_independently=True,\n seed=None, name=None, **old_kwargs):\n def recursive_validate(value):\n if value is None:\n return value\n if ia.is_single_number(value):\n assert value >= 0, \"Expected value >0, got %.4f\" % (value,)\n return value\n if isinstance(value, iap.StochasticParameter):\n return value\n if isinstance(value, tuple):\n return tuple([recursive_validate(v_) for v_ in value])\n if isinstance(value, list):\n return [recursive_validate(v_) for v_ in value]\n raise Exception(\n \"Expected None or int or float or StochasticParameter or \"\n \"list or tuple, got %s.\" % (type(value),))\n\n px = recursive_validate(px)\n percent = recursive_validate(percent)\n\n super(Pad, self).__init__(\n px=px,\n percent=percent,\n pad_mode=pad_mode,\n pad_cval=pad_cval,\n keep_size=keep_size,\n sample_independently=sample_independently,\n seed=seed, name=name, **old_kwargs)\n\n\nclass Crop(CropAndPad):\n \"\"\"Crop images, i.e. remove columns/rows of pixels at the sides of images.\n\n This augmenter allows to extract smaller-sized subimages from given\n full-sized input images. The number of pixels to cut off may be defined\n in absolute values or as fractions of the image sizes.\n\n This augmenter will never crop images below a height or width of ``1``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropAndPad`.\n\n Parameters\n ----------\n px : None or int or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to crop on each side of the image.\n Expected value range is ``[0, inf)``.\n Either this or the parameter `percent` may be set, not both at the same\n time.\n\n * If ``None``, then pixel-based cropping will not be used.\n * If ``int``, then that exact number of pixels will always be\n cropped.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left), unless `sample_independently` is set to ``False``,\n as then only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``int`` s with values ``a`` and ``b``,\n then each side will be cropped by a random amount sampled\n uniformly per image and side from the inteval ``[a, b]``. If\n however `sample_independently` is set to ``False``, only one\n value will be sampled per image and used for all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. 
Each entry may be a single ``int`` (always\n crop by exactly that value), a ``tuple`` of two ``int`` s\n ``a`` and ``b`` (crop by an amount within ``[a, b]``), a\n ``list`` of ``int`` s (crop by a random value that is\n contained in the ``list``) or a ``StochasticParameter`` (sample\n the amount to crop from that parameter).\n\n percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional\n The number of pixels to crop\n on each side of the image given as a *fraction* of the image\n height/width. E.g. if this is set to ``0.1``, the augmenter will\n always crop ``10%`` of the image's height at both the top and the\n bottom (both ``10%`` each), as well as ``10%`` of the width at the\n right and left.\n Expected value range is ``[0.0, 1.0)``.\n Either this or the parameter `px` may be set, not both\n at the same time.\n\n * If ``None``, then fraction-based cropping will not be\n used.\n * If ``number``, then that fraction will always be cropped.\n * If ``StochasticParameter``, then that parameter will be used for\n each image. Four samples will be drawn per image (top, right,\n bottom, left). If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of two ``float`` s with values ``a`` and ``b``,\n then each side will be cropped by a random fraction\n sampled uniformly per image and side from the interval\n ``[a, b]``. If however `sample_independently` is set to\n ``False``, only one value will be sampled per image and used for\n all sides.\n * If a ``tuple`` of four entries, then the entries represent top,\n right, bottom, left. Each entry may be a single ``float``\n (always crop by exactly that fraction), a ``tuple`` of\n two ``float`` s ``a`` and ``b`` (crop by a fraction from\n ``[a, b]``), a ``list`` of ``float`` s (crop by a random\n value that is contained in the list) or a ``StochasticParameter``\n (sample the percentage to crop from that parameter).\n\n keep_size : bool, optional\n After cropping, the result image will usually have a\n different height/width compared to the original input image. If this\n parameter is set to ``True``, then the cropped image will be\n resized to the input image's size, i.e. the augmenter's output shape\n is always identical to the input shape.\n\n sample_independently : bool, optional\n If ``False`` *and* the values for `px`/`percent` result in exactly\n *one* probability distribution for all image sides, only one single\n value will be sampled from that probability distribution and used for\n all sides. I.e. the crop amount then is the same for all sides.\n If ``True``, four values will be sampled independently, one per side.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.Crop(px=(0, 10))\n\n Crop each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``.\n\n >>> aug = iaa.Crop(px=(0, 10), sample_independently=False)\n\n Crop each side by a random pixel value sampled uniformly once per image\n from the discrete interval ``[0..10]``. 
Each sampled value is used\n for *all* sides of the corresponding image.\n\n >>> aug = iaa.Crop(px=(0, 10), keep_size=False)\n\n Crop each side by a random pixel value sampled uniformly per image and\n side from the discrete interval ``[0..10]``. Afterwards, do **not**\n resize the cropped image back to the input image's size. This will decrease\n the image's height and width by a maximum of ``20`` pixels.\n\n >>> aug = iaa.Crop(px=((0, 10), (0, 5), (0, 10), (0, 5)))\n\n Crop the top and bottom by a random pixel value sampled uniformly from the\n discrete interval ``[0..10]``. Crop the left and right analogously by\n a random value sampled from ``[0..5]``. Each value is always sampled\n independently.\n\n >>> aug = iaa.Crop(percent=(0, 0.1))\n\n Crop each side by a random fraction sampled uniformly from the continuous\n interval ``[0.0, 0.10]``. The fraction is sampled once per image and\n side. E.g. a sampled fraction of ``0.1`` for the top side would crop by\n ``0.1*H``, where ``H`` is the height of the input image.\n\n >>> aug = iaa.Crop(\n >>> percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))\n\n Crops each side by either ``5%`` or ``10%``. The values are sampled\n once per side and image.\n\n \"\"\"\n\n def __init__(self, px=None, percent=None, keep_size=True,\n sample_independently=True,\n seed=None, name=None, **old_kwargs):\n def recursive_negate(value):\n if value is None:\n return value\n if ia.is_single_number(value):\n assert value >= 0, \"Expected value >0, got %.4f.\" % (value,)\n return -value\n if isinstance(value, iap.StochasticParameter):\n return iap.Multiply(value, -1)\n if isinstance(value, tuple):\n return tuple([recursive_negate(v_) for v_ in value])\n if isinstance(value, list):\n return [recursive_negate(v_) for v_ in value]\n raise Exception(\n \"Expected None or int or float or StochasticParameter or \"\n \"list or tuple, got %s.\" % (type(value),))\n\n px = recursive_negate(px)\n percent = recursive_negate(percent)\n\n super(Crop, self).__init__(\n px=px,\n percent=percent,\n keep_size=keep_size,\n sample_independently=sample_independently,\n seed=seed, name=name, **old_kwargs)\n\n\n# TODO maybe rename this to PadToMinimumSize?\n# TODO this is very similar to CropAndPad, maybe add a way to generate crop\n# values imagewise via a callback in in CropAndPad?\n# TODO why is padding mode and cval here called pad_mode, pad_cval but in other\n# cases mode/cval?\nclass PadToFixedSize(meta.Augmenter):\n \"\"\"Pad images to a predefined minimum width and/or height.\n\n If images are already at the minimum width/height or are larger, they will\n not be padded. Note that this also means that images will not be cropped if\n they exceed the required width/height.\n\n The augmenter randomly decides per image how to distribute the required\n padding amounts over the image axis. E.g. if 2px have to be padded on the\n left or right to reach the required width, the augmenter will sometimes\n add 2px to the left and 0px to the right, sometimes add 2px to the right\n and 0px to the left and sometimes add 1px to both sides. 
Set `position`\n to ``center`` to prevent that.\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.augmenters.size.pad`.\n\n Parameters\n ----------\n width : int or None\n Pad images up to this minimum width.\n If ``None``, image widths will not be altered.\n\n height : int or None\n Pad images up to this minimum height.\n If ``None``, image heights will not be altered.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.CropAndPad.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.CropAndPad.__init__`.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n Sets the center point of the padding, which determines how the\n required padding amounts are distributed to each side. For a ``tuple``\n ``(a, b)``, both ``a`` and ``b`` are expected to be in range\n ``[0.0, 1.0]`` and describe the fraction of padding applied to the\n left/right (low/high values for ``a``) and the fraction of padding\n applied to the top/bottom (low/high values for ``b``). A padding\n position at ``(0.5, 0.5)`` would be the center of the image and\n distribute the padding equally to all sides. A padding position at\n ``(0.0, 1.0)`` would be the left-bottom and would apply 100% of the\n required padding to the bottom and left sides of the image so that\n the bottom left corner becomes more and more the new image\n center (depending on how much is padded).\n\n * If string ``uniform`` then the share of padding is randomly and\n uniformly distributed over each side.\n Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``.\n * If string ``normal`` then the share of padding is distributed\n based on a normal distribution, leading to a focus on the\n center of the images.\n Equivalent to\n ``(Clip(Normal(0.5, 0.45/2), 0, 1),\n Clip(Normal(0.5, 0.45/2), 0, 1))``.\n * If string ``center`` then center point of the padding is\n identical to the image center.\n Equivalent to ``(0.5, 0.5)``.\n * If a string matching regex\n ``^(left|center|right)-(top|center|bottom)$``, e.g. ``left-top``\n or ``center-bottom`` then sets the center point of the padding\n to the X-Y position matching that description.\n * If a tuple of float, then expected to have exactly two entries\n between ``0.0`` and ``1.0``, which will always be used as the\n padding position in ``(x, y)`` form.\n * If a ``StochasticParameter``, then that parameter will be queried\n once per call to ``augment_*()`` to get ``Nx2`` center positions\n in ``(x, y)`` form (with ``N`` the number of images).\n * If a ``tuple`` of ``StochasticParameter``, then expected to have\n exactly two entries that will both be queried per call to\n ``augment_*()``, each for ``(N,)`` values, to get the center\n positions. First parameter is used for ``x`` coordinates,\n second for ``y`` coordinates.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. 
Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToFixedSize(width=100, height=100)\n\n For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do\n nothing for the other edges. The padding is randomly (uniformly)\n distributed over the sides, so that e.g. sometimes most of the required\n padding is applied to the left, sometimes to the right (analogous\n top/bottom).\n\n >>> aug = iaa.PadToFixedSize(width=100, height=100, position=\"center\")\n\n For image sides smaller than ``100`` pixels, pad to ``100`` pixels. Do\n nothing for the other image sides. The padding is always equally\n distributed over the left/right and top/bottom sides.\n\n >>> aug = iaa.PadToFixedSize(width=100, height=100, pad_mode=ia.ALL)\n\n For image sides smaller than ``100`` pixels, pad to ``100`` pixels and\n use any possible padding mode for that. Do nothing for the other image\n sides. The padding is always equally distributed over the left/right and\n top/bottom sides.\n\n >>> aug = iaa.Sequential([\n >>> iaa.PadToFixedSize(width=100, height=100),\n >>> iaa.CropToFixedSize(width=100, height=100)\n >>> ])\n\n Pad images smaller than ``100x100`` until they reach ``100x100``.\n Analogously, crop images larger than ``100x100`` until they reach\n ``100x100``. The output images therefore have a fixed size of ``100x100``.\n\n \"\"\"\n\n def __init__(self, width, height, pad_mode=\"constant\", pad_cval=0,\n position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToFixedSize, self).__init__(\n seed=seed, name=name, **old_kwargs)\n self.size = (width, height)\n\n # Position of where to pad. The further to the top left this is, the\n # larger the share of pixels that will be added to the top and left\n # sides. I.e. set to (Deterministic(0.0), Deterministic(0.0)) to only\n # add at the top and left, (Deterministic(1.0), Deterministic(1.0))\n # to only add at the bottom right. Analogously (0.5, 0.5) pads equally\n # on both axis, (0.0, 1.0) pads left and bottom, (1.0, 0.0) pads right\n # and top.\n self.position = _handle_position_parameter(position)\n\n self.pad_mode = _handle_pad_mode_param(pad_mode)\n # TODO enable ALL here like in eg Affine\n self.pad_cval = iap.handle_discrete_param(\n pad_cval, \"pad_cval\", value_range=None, tuple_to_uniform=True,\n list_to_choice=True, allow_floats=True)\n\n # set these to None to use the same values as sampled for the\n # images (not tested)\n self._pad_mode_heatmaps = \"constant\"\n self._pad_mode_segmentation_maps = \"constant\"\n self._pad_cval_heatmaps = 0.0\n self._pad_cval_segmentation_maps = 0\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n # Providing the whole batch to _draw_samples() would not be necessary\n # for this augmenter. The number of rows would be sufficient. 
This\n # formulation however enables derived augmenters to use rowwise shapes\n # without having to compute them here for this augmenter.\n samples = self._draw_samples(batch, random_state)\n\n if batch.images is not None:\n batch.images = self._augment_images_by_samples(batch.images,\n samples)\n\n if batch.heatmaps is not None:\n batch.heatmaps = self._augment_maps_by_samples(\n batch.heatmaps, samples, self._pad_mode_heatmaps,\n self._pad_cval_heatmaps)\n\n if batch.segmentation_maps is not None:\n batch.segmentation_maps = self._augment_maps_by_samples(\n batch.segmentation_maps, samples, self._pad_mode_heatmaps,\n self._pad_cval_heatmaps)\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._augment_keypoints_by_samples,\n samples=samples)\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)\n setattr(batch, augm_name, cbaois)\n\n return batch\n\n def _augment_images_by_samples(self, images, samples):\n result = []\n sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples\n for i, (image, size) in enumerate(zip(images, sizes)):\n width_min, height_min = size\n height_image, width_image = image.shape[:2]\n paddings = self._calculate_paddings(height_image, width_image,\n height_min, width_min,\n pad_xs[i], pad_ys[i])\n\n image = _crop_and_pad_arr(\n image, (0, 0, 0, 0), paddings, pad_modes[i], pad_cvals[i],\n keep_size=False)\n\n result.append(image)\n\n # TODO result is always a list. Should this be converted to an array\n # if possible (not guaranteed that all images have same size,\n # some might have been larger than desired height/width)\n return result\n\n def _augment_keypoints_by_samples(self, keypoints_on_images, samples):\n result = []\n sizes, pad_xs, pad_ys, _, _ = samples\n for i, (kpsoi, size) in enumerate(zip(keypoints_on_images, sizes)):\n width_min, height_min = size\n height_image, width_image = kpsoi.shape[:2]\n paddings_img = self._calculate_paddings(height_image, width_image,\n height_min, width_min,\n pad_xs[i], pad_ys[i])\n\n keypoints_padded = _crop_and_pad_kpsoi_(\n kpsoi, (0, 0, 0, 0), paddings_img,\n keep_size=False)\n\n result.append(keypoints_padded)\n\n return result\n\n def _augment_maps_by_samples(self, augmentables, samples, pad_mode,\n pad_cval):\n sizes, pad_xs, pad_ys, pad_modes, pad_cvals = samples\n\n for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):\n width_min, height_min = size\n height_img, width_img = augmentable.shape[:2]\n paddings_img = self._calculate_paddings(\n height_img, width_img, height_min, width_min,\n pad_xs[i], pad_ys[i])\n\n # TODO for the previous method (and likely the new/current one\n # too):\n # for 30x30 padded to 32x32 with 15x15 heatmaps this results\n # in paddings of 1 on each side (assuming\n # position=(0.5, 0.5)) giving 17x17 heatmaps when they should\n # be 16x16. Error is due to each side getting projected 0.5\n # padding which is rounded to 1. 
This doesn't seem right.\n augmentables[i] = _crop_and_pad_hms_or_segmaps_(\n augmentables[i],\n (0, 0, 0, 0),\n paddings_img,\n pad_mode=pad_mode if pad_mode is not None else pad_modes[i],\n pad_cval=pad_cval if pad_cval is not None else pad_cvals[i],\n keep_size=False)\n\n return augmentables\n\n def _draw_samples(self, batch, random_state):\n nb_images = batch.nb_rows\n rngs = random_state.duplicate(4)\n\n if isinstance(self.position, tuple):\n pad_xs = self.position[0].draw_samples(nb_images,\n random_state=rngs[0])\n pad_ys = self.position[1].draw_samples(nb_images,\n random_state=rngs[1])\n else:\n pads = self.position.draw_samples((nb_images, 2),\n random_state=rngs[0])\n pad_xs = pads[:, 0]\n pad_ys = pads[:, 1]\n\n pad_modes = self.pad_mode.draw_samples(nb_images,\n random_state=rngs[2])\n pad_cvals = self.pad_cval.draw_samples(nb_images,\n random_state=rngs[3])\n\n # We return here the sizes even though they are static as it allows\n # derived augmenters to define image-specific heights/widths.\n return [self.size] * nb_images, pad_xs, pad_ys, pad_modes, pad_cvals\n\n @classmethod\n def _calculate_paddings(cls, height_image, width_image,\n height_min, width_min, pad_xs_i, pad_ys_i):\n pad_top = 0\n pad_right = 0\n pad_bottom = 0\n pad_left = 0\n\n if width_min is not None and width_image < width_min:\n pad_total_x = width_min - width_image\n pad_left = int((1-pad_xs_i) * pad_total_x)\n pad_right = pad_total_x - pad_left\n\n if height_min is not None and height_image < height_min:\n pad_total_y = height_min - height_image\n pad_top = int((1-pad_ys_i) * pad_total_y)\n pad_bottom = pad_total_y - pad_top\n\n return pad_top, pad_right, pad_bottom, pad_left\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.size[0], self.size[1], self.pad_mode, self.pad_cval,\n self.position]\n\n\nclass CenterPadToFixedSize(PadToFixedSize):\n \"\"\"Pad images equally on all sides up to given minimum heights/widths.\n\n This is an alias for :class:`~imgaug.augmenters.size.PadToFixedSize`\n with ``position=\"center\"``. It spreads the pad amounts equally over\n all image sides, while :class:`~imgaug.augmenters.size.PadToFixedSize`\n by defaults spreads them randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width : int or None\n See :func:`PadToFixedSize.__init__`.\n\n height : int or None\n See :func:`PadToFixedSize.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. 
Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToFixedSize(height=20, width=30)\n\n Create an augmenter that pads images up to ``20x30``, with the padded\n rows added *equally* on the top and bottom (analogous for the padded\n columns).\n\n \"\"\"\n\n def __init__(self, width, height, pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToFixedSize, self).__init__(\n width=width, height=height, pad_mode=pad_mode, pad_cval=pad_cval,\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\n# TODO maybe rename this to CropToMaximumSize ?\n# TODO this is very similar to CropAndPad, maybe add a way to generate crop\n# values imagewise via a callback in in CropAndPad?\n# TODO add crop() function in imgaug, similar to pad\nclass CropToFixedSize(meta.Augmenter):\n \"\"\"Crop images down to a predefined maximum width and/or height.\n\n If images are already at the maximum width/height or are smaller, they\n will not be cropped. Note that this also means that images will not be\n padded if they are below the required width/height.\n\n The augmenter randomly decides per image how to distribute the required\n cropping amounts over the image axis. E.g. if 2px have to be cropped on\n the left or right to reach the required width, the augmenter will\n sometimes remove 2px from the left and 0px from the right, sometimes\n remove 2px from the right and 0px from the left and sometimes remove 1px\n from both sides. Set `position` to ``center`` to prevent that.\n\n Supported dtypes\n ----------------\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: yes; tested\n * ``uint64``: yes; tested\n * ``int8``: yes; tested\n * ``int16``: yes; tested\n * ``int32``: yes; tested\n * ``int64``: yes; tested\n * ``float16``: yes; tested\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: yes; tested\n * ``bool``: yes; tested\n\n Parameters\n ----------\n width : int or None\n Crop images down to this maximum width.\n If ``None``, image widths will not be altered.\n\n height : int or None\n Crop images down to this maximum height.\n If ``None``, image heights will not be altered.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n Sets the center point of the cropping, which determines how the\n required cropping amounts are distributed to each side. For a\n ``tuple`` ``(a, b)``, both ``a`` and ``b`` are expected to be in\n range ``[0.0, 1.0]`` and describe the fraction of cropping applied\n to the left/right (low/high values for ``a``) and the fraction\n of cropping applied to the top/bottom (low/high values for ``b``).\n A cropping position at ``(0.5, 0.5)`` would be the center of the\n image and distribute the cropping equally over all sides. 
A cropping\n position at ``(1.0, 0.0)`` would be the right-top and would apply\n 100% of the required cropping to the right and top sides of the image.\n\n * If string ``uniform`` then the share of cropping is randomly\n and uniformly distributed over each side.\n Equivalent to ``(Uniform(0.0, 1.0), Uniform(0.0, 1.0))``.\n * If string ``normal`` then the share of cropping is distributed\n based on a normal distribution, leading to a focus on the center\n of the images.\n Equivalent to\n ``(Clip(Normal(0.5, 0.45/2), 0, 1),\n Clip(Normal(0.5, 0.45/2), 0, 1))``.\n * If string ``center`` then center point of the cropping is\n identical to the image center.\n Equivalent to ``(0.5, 0.5)``.\n * If a string matching regex\n ``^(left|center|right)-(top|center|bottom)$``, e.g.\n ``left-top`` or ``center-bottom`` then sets the center point of\n the cropping to the X-Y position matching that description.\n * If a tuple of float, then expected to have exactly two entries\n between ``0.0`` and ``1.0``, which will always be used as the\n combination the position matching (x, y) form.\n * If a ``StochasticParameter``, then that parameter will be queried\n once per call to ``augment_*()`` to get ``Nx2`` center positions\n in ``(x, y)`` form (with ``N`` the number of images).\n * If a ``tuple`` of ``StochasticParameter``, then expected to have\n exactly two entries that will both be queried per call to\n ``augment_*()``, each for ``(N,)`` values, to get the center\n positions. First parameter is used for ``x`` coordinates,\n second for ``y`` coordinates.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToFixedSize(width=100, height=100)\n\n For image sides larger than ``100`` pixels, crop to ``100`` pixels. Do\n nothing for the other sides. The cropping amounts are randomly (and\n uniformly) distributed over the sides of the image.\n\n >>> aug = iaa.CropToFixedSize(width=100, height=100, position=\"center\")\n\n For sides larger than ``100`` pixels, crop to ``100`` pixels. Do nothing\n for the other sides. The cropping amounts are always equally distributed\n over the left/right sides of the image (and analogously for top/bottom).\n\n >>> aug = iaa.Sequential([\n >>> iaa.PadToFixedSize(width=100, height=100),\n >>> iaa.CropToFixedSize(width=100, height=100)\n >>> ])\n\n Pad images smaller than ``100x100`` until they reach ``100x100``.\n Analogously, crop images larger than ``100x100`` until they reach\n ``100x100``. The output images therefore have a fixed size of ``100x100``.\n\n \"\"\"\n\n def __init__(self, width, height, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToFixedSize, self).__init__(\n seed=seed, name=name, **old_kwargs)\n self.size = (width, height)\n\n # Position of where to crop. The further to the top left this is,\n # the larger the share of pixels that will be cropped from the top\n # and left sides. I.e. set to (Deterministic(0.0), Deterministic(0.0))\n # to only crop at the top and left,\n # (Deterministic(1.0), Deterministic(1.0)) to only crop at the bottom\n # right. 
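(Note: the values sampled from this parameter are inverted, i.e.\n # ``1.0 - value``, in _draw_samples() below before they are used to\n # compute the actual crop amounts.) 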
Analogously (0.5, 0.5) crops equally on both axis,\n # (0.0, 1.0) crops left and bottom, (1.0, 0.0) crops right and top.\n self.position = _handle_position_parameter(position)\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n # Providing the whole batch to _draw_samples() would not be necessary\n # for this augmenter. The number of rows would be sufficient. This\n # formulation however enables derived augmenters to use rowwise shapes\n # without having to compute them here for this augmenter.\n samples = self._draw_samples(batch, random_state)\n\n if batch.images is not None:\n batch.images = self._augment_images_by_samples(batch.images,\n samples)\n\n if batch.heatmaps is not None:\n batch.heatmaps = self._augment_maps_by_samples(\n batch.heatmaps, samples)\n\n if batch.segmentation_maps is not None:\n batch.segmentation_maps = self._augment_maps_by_samples(\n batch.segmentation_maps, samples)\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._augment_keypoints_by_samples,\n samples=samples)\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)\n setattr(batch, augm_name, cbaois)\n\n return batch\n\n def _augment_images_by_samples(self, images, samples):\n result = []\n sizes, offset_xs, offset_ys = samples\n for i, (image, size) in enumerate(zip(images, sizes)):\n w, h = size\n height_image, width_image = image.shape[0:2]\n\n croppings = self._calculate_crop_amounts(\n height_image, width_image, h, w, offset_ys[i], offset_xs[i])\n\n image_cropped = _crop_and_pad_arr(image, croppings, (0, 0, 0, 0),\n keep_size=False)\n\n result.append(image_cropped)\n\n return result\n\n def _augment_keypoints_by_samples(self, kpsois, samples):\n result = []\n sizes, offset_xs, offset_ys = samples\n for i, (kpsoi, size) in enumerate(zip(kpsois, sizes)):\n w, h = size\n height_image, width_image = kpsoi.shape[0:2]\n\n croppings_img = self._calculate_crop_amounts(\n height_image, width_image, h, w, offset_ys[i], offset_xs[i])\n\n kpsoi_cropped = _crop_and_pad_kpsoi_(\n kpsoi, croppings_img, (0, 0, 0, 0), keep_size=False)\n\n result.append(kpsoi_cropped)\n\n return result\n\n def _augment_maps_by_samples(self, augmentables, samples):\n sizes, offset_xs, offset_ys = samples\n for i, (augmentable, size) in enumerate(zip(augmentables, sizes)):\n w, h = size\n height_image, width_image = augmentable.shape[0:2]\n\n croppings_img = self._calculate_crop_amounts(\n height_image, width_image, h, w, offset_ys[i], offset_xs[i])\n\n augmentables[i] = _crop_and_pad_hms_or_segmaps_(\n augmentable, croppings_img, (0, 0, 0, 0), keep_size=False)\n\n return augmentables\n\n @classmethod\n def _calculate_crop_amounts(cls, height_image, width_image,\n height_max, width_max,\n offset_y, offset_x):\n crop_top = 0\n crop_right = 0\n crop_bottom = 0\n crop_left = 0\n\n if height_max is not None and height_image > height_max:\n crop_top = int(offset_y * (height_image - height_max))\n crop_bottom = height_image - height_max - crop_top\n\n if width_max is not None and width_image > width_max:\n crop_left = int(offset_x * (width_image - width_max))\n crop_right = width_image - width_max - crop_left\n\n return crop_top, crop_right, crop_bottom, crop_left\n\n def _draw_samples(self, batch, random_state):\n nb_images = batch.nb_rows\n rngs = random_state.duplicate(2)\n\n if isinstance(self.position, tuple):\n offset_xs = self.position[0].draw_samples(nb_images,\n 
random_state=rngs[0])\n offset_ys = self.position[1].draw_samples(nb_images,\n random_state=rngs[1])\n else:\n offsets = self.position.draw_samples((nb_images, 2),\n random_state=rngs[0])\n offset_xs = offsets[:, 0]\n offset_ys = offsets[:, 1]\n\n offset_xs = 1.0 - offset_xs\n offset_ys = 1.0 - offset_ys\n\n # We return here the sizes even though they are static as it allows\n # derived augmenters to define image-specific heights/widths.\n return [self.size] * nb_images, offset_xs, offset_ys\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.size[0], self.size[1], self.position]\n\n\nclass CenterCropToFixedSize(CropToFixedSize):\n \"\"\"Take a crop from the center of each image.\n\n This is an alias for :class:`~imgaug.augmenters.size.CropToFixedSize` with\n ``position=\"center\"``.\n\n .. note::\n\n If images already have a width and/or height below the provided\n width and/or height then this augmenter will do nothing for the\n respective axis. Hence, resulting images can be smaller than the\n provided axis sizes.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width : int or None\n See :func:`CropToFixedSize.__init__`.\n\n height : int or None\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> crop = iaa.CenterCropToFixedSize(height=20, width=10)\n\n Create an augmenter that takes ``20x10`` sized crops from the center of\n images.\n\n \"\"\"\n\n def __init__(self, width, height,\n seed=None, name=None, **old_kwargs):\n super(CenterCropToFixedSize, self).__init__(\n width=width, height=height, position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass CropToMultiplesOf(CropToFixedSize):\n \"\"\"Crop images down until their height/width is a multiple of a value.\n\n .. note::\n\n For a given axis size ``A`` and multiple ``M``, if ``A`` is in the\n interval ``[0 .. M]``, the axis will not be changed.\n As a result, this augmenter can still produce axis sizes that are\n not multiples of the given values.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width_multiple : int or None\n Multiple for the width. Images will be cropped down until their\n width is a multiple of this value.\n If ``None``, image widths will not be altered.\n\n height_multiple : int or None\n Multiple for the height. 
Images will be cropped down until their\n height is a multiple of this value.\n If ``None``, image heights will not be altered.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToMultiplesOf(height_multiple=10, width_multiple=6)\n\n Create an augmenter that crops images to multiples of ``10`` along\n the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the\n x-axis (i.e. 6, 12, 18, ...).\n The rows to be cropped will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_multiple, height_multiple, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToMultiplesOf, self).__init__(\n width=None, height=None, position=position,\n seed=seed, name=name, **old_kwargs)\n self.width_multiple = width_multiple\n self.height_multiple = height_multiple\n\n def _draw_samples(self, batch, random_state):\n _sizes, offset_xs, offset_ys = super(\n CropToMultiplesOf, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n croppings = compute_croppings_to_reach_multiples_of(\n shape,\n height_multiple=self.height_multiple,\n width_multiple=self.width_multiple)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in CropToFixedSize\n new_size = (\n width - croppings[1] - croppings[3],\n height - croppings[0] - croppings[2]\n )\n sizes.append(new_size)\n\n return sizes, offset_xs, offset_ys\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.width_multiple, self.height_multiple, self.position]\n\n\nclass CenterCropToMultiplesOf(CropToMultiplesOf):\n \"\"\"Crop images equally on all sides until H/W are multiples of given values.\n\n This is the same as :class:`~imgaug.augmenters.size.CropToMultiplesOf`,\n but uses ``position=\"center\"`` by default, which spreads the crop amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.CropToMultiplesOf` by default spreads\n them randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width_multiple : int or None\n See :func:`CropToMultiplesOf.__init__`.\n\n height_multiple : int or None\n See :func:`CropToMultiplesOf.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. 
Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterCropToMultiplesOf(height_multiple=10, width_multiple=6)\n\n Create an augmenter that crops images to multiples of ``10`` along\n the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the\n x-axis (i.e. 6, 12, 18, ...).\n The rows to be cropped will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_multiple, height_multiple,\n seed=None, name=None, **old_kwargs):\n super(CenterCropToMultiplesOf, self).__init__(\n width_multiple=width_multiple,\n height_multiple=height_multiple,\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass PadToMultiplesOf(PadToFixedSize):\n \"\"\"Pad images until their height/width is a multiple of a value.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width_multiple : int or None\n Multiple for the width. Images will be padded until their\n width is a multiple of this value.\n If ``None``, image widths will not be altered.\n\n height_multiple : int or None\n Multiple for the height. Images will be padded until their\n height is a multiple of this value.\n If ``None``, image heights will not be altered.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToMultiplesOf(height_multiple=10, width_multiple=6)\n\n Create an augmenter that pads images to multiples of ``10`` along\n the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the\n x-axis (i.e. 
6, 12, 18, ...).\n The rows to be padded will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_multiple, height_multiple,\n pad_mode=\"constant\", pad_cval=0,\n position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToMultiplesOf, self).__init__(\n width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,\n position=position,\n seed=seed, name=name, **old_kwargs)\n self.width_multiple = width_multiple\n self.height_multiple = height_multiple\n\n def _draw_samples(self, batch, random_state):\n _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(\n PadToMultiplesOf, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n paddings = compute_paddings_to_reach_multiples_of(\n shape,\n height_multiple=self.height_multiple,\n width_multiple=self.width_multiple)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in PadToFixedSize\n new_size = (\n width + paddings[1] + paddings[3],\n height + paddings[0] + paddings[2]\n )\n sizes.append(new_size)\n\n return sizes, pad_xs, pad_ys, pad_modes, pad_cvals\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.width_multiple, self.height_multiple,\n self.pad_mode, self.pad_cval,\n self.position]\n\n\nclass CenterPadToMultiplesOf(PadToMultiplesOf):\n \"\"\"Pad images equally on all sides until H/W are multiples of given values.\n\n This is the same as :class:`~imgaug.augmenters.size.PadToMultiplesOf`, but\n uses ``position=\"center\"`` by default, which spreads the pad amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.PadToMultiplesOf` by default spreads them\n randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width_multiple : int or None\n See :func:`PadToMultiplesOf.__init__`.\n\n height_multiple : int or None\n See :func:`PadToMultiplesOf.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToMultiplesOf.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToMultiplesOf(height_multiple=10, width_multiple=6)\n\n Create an augmenter that pads images to multiples of ``10`` along\n the y-axis (i.e. 10, 20, 30, ...) and to multiples of ``6`` along the\n x-axis (i.e. 
6, 12, 18, ...).\n The rows to be padded will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_multiple, height_multiple,\n pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToMultiplesOf, self).__init__(\n width_multiple=width_multiple,\n height_multiple=height_multiple,\n pad_mode=pad_mode,\n pad_cval=pad_cval,\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass CropToPowersOf(CropToFixedSize):\n \"\"\"Crop images until their height/width is a power of a base.\n\n This augmenter removes pixels from an axis with size ``S`` leading to the\n new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a\n provided base (e.g. ``2``) and ``E`` is an exponent from the discrete\n interval ``[1 .. inf)``.\n\n .. note::\n\n This augmenter does nothing for axes with size less than ``B^1 = B``.\n If you have images with ``S < B^1``, it is recommended\n to combine this augmenter with a padding augmenter that pads each\n axis up to ``B``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width_base : int or None\n Base for the width. Images will be cropped down until their\n width fulfills ``width' = width_base ^ E`` with ``E`` being any\n natural number.\n If ``None``, image widths will not be altered.\n\n height_base : int or None\n Base for the height. Images will be cropped down until their\n height fulfills ``height' = height_base ^ E`` with ``E`` being any\n natural number.\n If ``None``, image heights will not be altered.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToPowersOf(height_base=3, width_base=2)\n\n Create an augmenter that crops each image down to powers of ``3`` along\n the y-axis (i.e. 3, 9, 27, ...) 
and powers of ``2`` along the x-axis (i.e.\n 2, 4, 8, 16, ...).\n The rows to be cropped will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_base, height_base, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToPowersOf, self).__init__(\n width=None, height=None, position=position,\n seed=seed, name=name, **old_kwargs)\n self.width_base = width_base\n self.height_base = height_base\n\n def _draw_samples(self, batch, random_state):\n _sizes, offset_xs, offset_ys = super(\n CropToPowersOf, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n croppings = compute_croppings_to_reach_powers_of(\n shape,\n height_base=self.height_base,\n width_base=self.width_base)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in CropToFixedSize\n new_size = (\n width - croppings[1] - croppings[3],\n height - croppings[0] - croppings[2]\n )\n sizes.append(new_size)\n\n return sizes, offset_xs, offset_ys\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.width_base, self.height_base, self.position]\n\n\nclass CenterCropToPowersOf(CropToPowersOf):\n \"\"\"Crop images equally on all sides until H/W is a power of a base.\n\n This is the same as :class:`~imgaug.augmenters.size.CropToPowersOf`, but\n uses ``position=\"center\"`` by default, which spreads the crop amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.CropToPowersOf` by default spreads them\n randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n width_base : int or None\n See :func:`CropToPowersOf.__init__`.\n\n height_base : int or None\n See :func:`CropToPowersOf.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterCropToPowersOf(height_base=3, width_base=2)\n\n Create an augmenter that crops each image down to powers of ``3`` along\n the y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e.\n 2, 4, 8, 16, ...).\n The rows to be cropped will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_base, height_base,\n seed=None, name=None, **old_kwargs):\n super(CenterCropToPowersOf, self).__init__(\n width_base=width_base, height_base=height_base, position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass PadToPowersOf(PadToFixedSize):\n \"\"\"Pad images until their height/width is a power of a base.\n\n This augmenter adds pixels to an axis with size ``S`` leading to the\n new size ``S'`` until ``S' = B^E`` is fulfilled, where ``B`` is a\n provided base (e.g. ``2``) and ``E`` is an exponent from the discrete\n interval ``[1 .. inf)``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width_base : int or None\n Base for the width. 
Images will be padded down until their\n width fulfills ``width' = width_base ^ E`` with ``E`` being any\n natural number.\n If ``None``, image widths will not be altered.\n\n height_base : int or None\n Base for the height. Images will be padded until their\n height fulfills ``height' = height_base ^ E`` with ``E`` being any\n natural number.\n If ``None``, image heights will not be altered.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToPowersOf(height_base=3, width_base=2)\n\n Create an augmenter that pads each image to powers of ``3`` along the\n y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2,\n 4, 8, 16, ...).\n The rows to be padded will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_base, height_base,\n pad_mode=\"constant\", pad_cval=0,\n position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToPowersOf, self).__init__(\n width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,\n position=position,\n seed=seed, name=name, **old_kwargs)\n self.width_base = width_base\n self.height_base = height_base\n\n def _draw_samples(self, batch, random_state):\n _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(\n PadToPowersOf, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n paddings = compute_paddings_to_reach_powers_of(\n shape,\n height_base=self.height_base,\n width_base=self.width_base)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in PadToFixedSize\n new_size = (\n width + paddings[1] + paddings[3],\n height + paddings[0] + paddings[2]\n )\n sizes.append(new_size)\n\n return sizes, pad_xs, pad_ys, pad_modes, pad_cvals\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.width_base, self.height_base,\n self.pad_mode, self.pad_cval,\n self.position]\n\n\nclass CenterPadToPowersOf(PadToPowersOf):\n \"\"\"Pad images equally on all sides until H/W is a power of a base.\n\n This is the same as :class:`~imgaug.augmenters.size.PadToPowersOf`, but uses\n ``position=\"center\"`` by default, which spreads the pad amounts equally\n over all image sides, while :class:`~imgaug.augmenters.size.PadToPowersOf`\n by default spreads them randomly.\n\n Supported dtypes\n ----------------\n\n See 
:class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n width_base : int or None\n See :func:`PadToPowersOf.__init__`.\n\n height_base : int or None\n See :func:`PadToPowersOf.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToPowersOf.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToPowersOf(height_base=5, width_base=2)\n\n Create an augmenter that pads each image to powers of ``3`` along the\n y-axis (i.e. 3, 9, 27, ...) and powers of ``2`` along the x-axis (i.e. 2,\n 4, 8, 16, ...).\n The rows to be padded will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, width_base, height_base,\n pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToPowersOf, self).__init__(\n width_base=width_base, height_base=height_base,\n pad_mode=pad_mode, pad_cval=pad_cval,\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass CropToAspectRatio(CropToFixedSize):\n \"\"\"Crop images until their width/height matches an aspect ratio.\n\n This augmenter removes either rows or columns until the image reaches\n the desired aspect ratio given in ``width / height``. The cropping\n operation is stopped once the desired aspect ratio is reached or the image\n side to crop reaches a size of ``1``. If any side of the image starts\n with a size of ``0``, the image will not be changed.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n aspect_ratio : number\n The desired aspect ratio, given as ``width/height``. E.g. a ratio\n of ``2.0`` denotes an image that is twice as wide as it is high.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToAspectRatio(2.0)\n\n Create an augmenter that crops each image until its aspect ratio is as\n close as possible to ``2.0`` (i.e. 
two times as many pixels along the\n x-axis than the y-axis).\n The rows to be cropped will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, aspect_ratio, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToAspectRatio, self).__init__(\n width=None, height=None, position=position,\n seed=seed, name=name, **old_kwargs)\n self.aspect_ratio = aspect_ratio\n\n def _draw_samples(self, batch, random_state):\n _sizes, offset_xs, offset_ys = super(\n CropToAspectRatio, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n\n if height == 0 or width == 0:\n croppings = (0, 0, 0, 0)\n else:\n croppings = compute_croppings_to_reach_aspect_ratio(\n shape,\n aspect_ratio=self.aspect_ratio)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in CropToFixedSize\n new_size = (\n width - croppings[1] - croppings[3],\n height - croppings[0] - croppings[2]\n )\n sizes.append(new_size)\n\n return sizes, offset_xs, offset_ys\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.aspect_ratio, self.position]\n\n\nclass CenterCropToAspectRatio(CropToAspectRatio):\n \"\"\"Crop images equally on all sides until they reach an aspect ratio.\n\n This is the same as :class:`~imgaug.augmenters.size.CropToAspectRatio`, but\n uses ``position=\"center\"`` by default, which spreads the crop amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads\n them randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n aspect_ratio : number\n See :func:`CropToAspectRatio.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterCropToAspectRatio(2.0)\n\n Create an augmenter that crops each image until its aspect ratio is as\n close as possible to ``2.0`` (i.e. two times as many pixels along the\n x-axis than the y-axis).\n The rows to be cropped will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, aspect_ratio,\n seed=None, name=None, **old_kwargs):\n super(CenterCropToAspectRatio, self).__init__(\n aspect_ratio=aspect_ratio, position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass PadToAspectRatio(PadToFixedSize):\n \"\"\"Pad images until their width/height matches an aspect ratio.\n\n This augmenter adds either rows or columns until the image reaches\n the desired aspect ratio given in ``width / height``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n aspect_ratio : number\n The desired aspect ratio, given as ``width/height``. E.g. 
a ratio\n of ``2.0`` denotes an image that is twice as wide as it is high.\n\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToAspectRatio(2.0)\n\n Create an augmenter that pads each image until its aspect ratio is as\n close as possible to ``2.0`` (i.e. two times as many pixels along the\n x-axis than the y-axis).\n The rows to be padded will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, aspect_ratio, pad_mode=\"constant\", pad_cval=0,\n position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToAspectRatio, self).__init__(\n width=None, height=None, pad_mode=pad_mode, pad_cval=pad_cval,\n position=position,\n seed=seed, name=name, **old_kwargs)\n self.aspect_ratio = aspect_ratio\n\n def _draw_samples(self, batch, random_state):\n _sizes, pad_xs, pad_ys, pad_modes, pad_cvals = super(\n PadToAspectRatio, self\n )._draw_samples(batch, random_state)\n\n shapes = batch.get_rowwise_shapes()\n sizes = []\n for shape in shapes:\n height, width = shape[0:2]\n\n paddings = compute_paddings_to_reach_aspect_ratio(\n shape,\n aspect_ratio=self.aspect_ratio)\n\n # TODO change that\n # note that these are not in the same order as shape tuples\n # in PadToFixedSize\n new_size = (\n width + paddings[1] + paddings[3],\n height + paddings[0] + paddings[2]\n )\n sizes.append(new_size)\n\n return sizes, pad_xs, pad_ys, pad_modes, pad_cvals\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.aspect_ratio, self.pad_mode, self.pad_cval,\n self.position]\n\n\nclass CenterPadToAspectRatio(PadToAspectRatio):\n \"\"\"Pad images equally on all sides until H/W matches an aspect ratio.\n\n This is the same as :class:`~imgaug.augmenters.size.PadToAspectRatio`, but\n uses ``position=\"center\"`` by default, which spreads the pad amounts\n equally over all image sides, while\n :class:`~imgaug.augmenters.size.PadToAspectRatio` by default spreads them\n randomly.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n aspect_ratio : number\n See :func:`PadToAspectRatio.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.\n\n pad_cval : number or tuple of number 
or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.\n\n deterministic : bool, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToAspectRatio(2.0)\n\n Create an augmenter that pads each image until its aspect ratio is as\n close as possible to ``2.0`` (i.e. two times as many pixels along the\n x-axis than the y-axis).\n The rows to be padded will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, aspect_ratio, pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToAspectRatio, self).__init__(\n aspect_ratio=aspect_ratio, position=\"center\",\n pad_mode=pad_mode, pad_cval=pad_cval,\n seed=seed, name=name, **old_kwargs)\n\n\nclass CropToSquare(CropToAspectRatio):\n \"\"\"Crop images until their width and height are identical.\n\n This is identical to :class:`~imgaug.augmenters.size.CropToAspectRatio`\n with ``aspect_ratio=1.0``.\n\n Images with axis sizes of ``0`` will not be altered.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`CropToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CropToSquare()\n\n Create an augmenter that crops each image until it is square, i.e. 
height\n and width match.\n The rows to be cropped will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(CropToSquare, self).__init__(\n aspect_ratio=1.0, position=position,\n seed=seed, name=name, **old_kwargs)\n\n\nclass CenterCropToSquare(CropToSquare):\n \"\"\"Crop images equally on all sides until their height/width are identical.\n\n In contrast to :class:`~imgaug.augmenters.size.CropToSquare`, this\n augmenter always tries to spread the columns/rows to remove equally over\n both sides of the respective axis to be cropped.\n :class:`~imgaug.augmenters.size.CropToAspectRatio` by default spreads the\n croppings randomly.\n\n This augmenter is identical to :class:`~imgaug.augmenters.size.CropToSquare`\n with ``position=\"center\"``, and thereby the same as\n :class:`~imgaug.augmenters.size.CropToAspectRatio` with\n ``aspect_ratio=1.0, position=\"center\"``.\n\n Images with axis sizes of ``0`` will not be altered.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.CropToFixedSize`.\n\n Parameters\n ----------\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterCropToSquare()\n\n Create an augmenter that crops each image until its square, i.e. height\n and width match.\n The rows to be cropped will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, seed=None, name=None, **old_kwargs):\n super(CenterCropToSquare, self).__init__(\n position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass PadToSquare(PadToAspectRatio):\n \"\"\"Pad images until their height and width are identical.\n\n This augmenter is identical to\n :class:`~imgaug.augmenters.size.PadToAspectRatio` with ``aspect_ratio=1.0``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n position : {'uniform', 'normal', 'center', 'left-top', 'left-center', 'left-bottom', 'center-top', 'center-center', 'center-bottom', 'right-top', 'right-center', 'right-bottom'} or tuple of float or StochasticParameter or tuple of StochasticParameter, optional\n See :func:`PadToFixedSize.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToFixedSize.__init__`.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. 
Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.PadToSquare()\n\n Create an augmenter that pads each image until its square, i.e. height\n and width match.\n The rows to be padded will be spread *randomly* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, pad_mode=\"constant\", pad_cval=0, position=\"uniform\",\n seed=None, name=None, **old_kwargs):\n super(PadToSquare, self).__init__(\n aspect_ratio=1.0, pad_mode=pad_mode, pad_cval=pad_cval,\n position=position,\n seed=seed, name=name, **old_kwargs)\n\n\nclass CenterPadToSquare(PadToSquare):\n \"\"\"Pad images equally on all sides until their height & width are identical.\n\n This is the same as :class:`~imgaug.augmenters.size.PadToSquare`, but uses\n ``position=\"center\"`` by default, which spreads the pad amounts equally\n over all image sides, while :class:`~imgaug.augmenters.size.PadToSquare`\n by default spreads them randomly. This augmenter is thus also identical to\n :class:`~imgaug.augmenters.size.PadToAspectRatio` with\n ``aspect_ratio=1.0, position=\"center\"``.\n\n Supported dtypes\n ----------------\n\n See :class:`~imgaug.augmenters.size.PadToFixedSize`.\n\n Parameters\n ----------\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.\n\n pad_cval : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional\n See :func:`~imgaug.augmenters.size.PadToAspectRatio.__init__`.\n\n deterministic : bool, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.CenterPadToSquare()\n\n Create an augmenter that pads each image until its square, i.e. height\n and width match.\n The rows to be padded will be spread *equally* over the top and bottom\n sides (analogous for the left/right sides).\n\n \"\"\"\n\n def __init__(self, pad_mode=\"constant\", pad_cval=0,\n seed=None, name=None, **old_kwargs):\n super(CenterPadToSquare, self).__init__(\n pad_mode=pad_mode, pad_cval=pad_cval, position=\"center\",\n seed=seed, name=name, **old_kwargs)\n\n\nclass KeepSizeByResize(meta.Augmenter):\n \"\"\"Resize images back to their input sizes after applying child augmenters.\n\n Combining this with e.g. a cropping augmenter as the child will lead to\n images being resized back to the input size after the crop operation was\n applied. Some augmenters have a ``keep_size`` argument that achieves the\n same goal (if set to ``True``), though this augmenter offers control over\n the interpolation mode and which augmentables to resize (images, heatmaps,\n segmentation maps).\n\n Supported dtypes\n ----------------\n\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n Parameters\n ----------\n children : Augmenter or list of imgaug.augmenters.meta.Augmenter or None, optional\n One or more augmenters to apply to images. 
These augmenters may change\n the image size.\n\n interpolation : KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional\n The interpolation mode to use when resizing images.\n Can take any value that :func:`~imgaug.imgaug.imresize_single_image`\n accepts, e.g. ``cubic``.\n\n * If this is ``KeepSizeByResize.NO_RESIZE`` then images will not\n be resized.\n * If this is a single ``str``, it is expected to have one of the\n following values: ``nearest``, ``linear``, ``area``, ``cubic``.\n * If this is a single integer, it is expected to have a value\n identical to one of: ``cv2.INTER_NEAREST``,\n ``cv2.INTER_LINEAR``, ``cv2.INTER_AREA``, ``cv2.INTER_CUBIC``.\n * If this is a ``list`` of ``str`` or ``int``, it is expected that\n each ``str``/``int`` is one of the above mentioned valid ones.\n A random one of these values will be sampled per image.\n * If this is a ``StochasticParameter``, it will be queried once per\n call to ``_augment_images()`` and must return ``N`` ``str`` s or\n ``int`` s (matching the above mentioned ones) for ``N`` images.\n\n interpolation_heatmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional\n The interpolation mode to use when resizing heatmaps.\n Meaning and valid values are similar to `interpolation`. This\n parameter may also take the value ``KeepSizeByResize.SAME_AS_IMAGES``,\n which will lead to copying the interpolation modes used for the\n corresponding images. The value may also be returned on a per-image\n basis if `interpolation_heatmaps` is provided as a\n ``StochasticParameter`` or may be one possible value if it is\n provided as a ``list`` of ``str``.\n\n interpolation_segmaps : KeepSizeByResize.SAME_AS_IMAGES or KeepSizeByResize.NO_RESIZE or {'nearest', 'linear', 'area', 'cubic'} or {cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC} or list of str or list of int or StochasticParameter, optional\n The interpolation mode to use when resizing segmentation maps.\n Similar to `interpolation_heatmaps`.\n **Note**: For segmentation maps, only ``NO_RESIZE`` or nearest\n neighbour interpolation (i.e. ``nearest``) make sense in the vast\n majority of all cases.\n\n seed : None or int or imgaug.random.RNG or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.SeedSequence or numpy.random.RandomState, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n name : None or str, optional\n See :func:`~imgaug.augmenters.meta.Augmenter.__init__`.\n\n **old_kwargs\n Outdated parameters. Avoid using these.\n\n Examples\n --------\n >>> import imgaug.augmenters as iaa\n >>> aug = iaa.KeepSizeByResize(\n >>> iaa.Crop((20, 40), keep_size=False)\n >>> )\n\n Apply random cropping to input images, then resize them back to their\n original input sizes. 
The resizing is done using this augmenter instead\n of the corresponding internal resizing operation in ``Crop``.\n\n >>> aug = iaa.KeepSizeByResize(\n >>> iaa.Crop((20, 40), keep_size=False),\n >>> interpolation=\"nearest\"\n >>> )\n\n Same as in the previous example, but images are now always resized using\n nearest neighbour interpolation.\n\n >>> aug = iaa.KeepSizeByResize(\n >>> iaa.Crop((20, 40), keep_size=False),\n >>> interpolation=[\"nearest\", \"cubic\"],\n >>> interpolation_heatmaps=iaa.KeepSizeByResize.SAME_AS_IMAGES,\n >>> interpolation_segmaps=iaa.KeepSizeByResize.NO_RESIZE\n >>> )\n\n Similar to the previous example, but images are now sometimes resized\n using nearest neighbour interpolation and sometimes using cubic\n interpolation. Heatmaps are resized using the same interpolation as was\n used for the corresponding image. Segmentation maps are not resized and\n will therefore remain at their size after cropping.\n\n \"\"\"\n\n NO_RESIZE = \"NO_RESIZE\"\n SAME_AS_IMAGES = \"SAME_AS_IMAGES\"\n\n def __init__(self, children,\n interpolation=\"cubic\",\n interpolation_heatmaps=SAME_AS_IMAGES,\n interpolation_segmaps=\"nearest\",\n seed=None, name=None, **old_kwargs):\n super(KeepSizeByResize, self).__init__(\n seed=seed, name=name, **old_kwargs)\n self.children = children\n\n def _validate_param(val, allow_same_as_images):\n valid_ips_and_resize = ia.IMRESIZE_VALID_INTERPOLATIONS \\\n + [KeepSizeByResize.NO_RESIZE]\n if allow_same_as_images and val == self.SAME_AS_IMAGES:\n return self.SAME_AS_IMAGES\n if val in valid_ips_and_resize:\n return iap.Deterministic(val)\n if isinstance(val, list):\n assert len(val) > 0, (\n \"Expected a list of at least one interpolation method. \"\n \"Got an empty list.\")\n valid_ips_here = valid_ips_and_resize\n if allow_same_as_images:\n valid_ips_here = valid_ips_here \\\n + [KeepSizeByResize.SAME_AS_IMAGES]\n only_valid_ips = all([ip in valid_ips_here for ip in val])\n assert only_valid_ips, (\n \"Expected each interpolation to be one of '%s', got \"\n \"'%s'.\" % (str(valid_ips_here), str(val)))\n return iap.Choice(val)\n if isinstance(val, iap.StochasticParameter):\n return val\n raise Exception(\n \"Expected interpolation to be one of '%s' or a list of \"\n \"these values or a StochasticParameter. 
Got type %s.\" % (\n str(ia.IMRESIZE_VALID_INTERPOLATIONS), type(val)))\n\n self.children = meta.handle_children_list(children, self.name, \"then\")\n self.interpolation = _validate_param(interpolation, False)\n self.interpolation_heatmaps = _validate_param(interpolation_heatmaps,\n True)\n self.interpolation_segmaps = _validate_param(interpolation_segmaps,\n True)\n\n def _augment_batch_(self, batch, random_state, parents, hooks):\n with batch.propagation_hooks_ctx(self, hooks, parents):\n images_were_array = None\n if batch.images is not None:\n images_were_array = ia.is_np_array(batch.images)\n shapes_orig = self._get_shapes(batch)\n\n samples = self._draw_samples(batch.nb_rows, random_state)\n\n batch = self.children.augment_batch_(\n batch, parents=parents + [self], hooks=hooks)\n\n if batch.images is not None:\n batch.images = self._keep_size_images(\n batch.images, shapes_orig[\"images\"], images_were_array,\n samples)\n\n if batch.heatmaps is not None:\n # dont use shapes_orig[\"images\"] because they might be None\n batch.heatmaps = self._keep_size_maps(\n batch.heatmaps, shapes_orig[\"heatmaps\"],\n shapes_orig[\"heatmaps_arr\"], samples[1])\n\n if batch.segmentation_maps is not None:\n # dont use shapes_orig[\"images\"] because they might be None\n batch.segmentation_maps = self._keep_size_maps(\n batch.segmentation_maps, shapes_orig[\"segmentation_maps\"],\n shapes_orig[\"segmentation_maps_arr\"], samples[2])\n\n for augm_name in [\"keypoints\", \"bounding_boxes\", \"polygons\",\n \"line_strings\"]:\n augm_value = getattr(batch, augm_name)\n if augm_value is not None:\n func = functools.partial(\n self._keep_size_keypoints,\n shapes_orig=shapes_orig[augm_name],\n interpolations=samples[0])\n cbaois = self._apply_to_cbaois_as_keypoints(augm_value,\n func)\n setattr(batch, augm_name, cbaois)\n return batch\n\n @classmethod\n def _keep_size_images(cls, images, shapes_orig, images_were_array,\n samples):\n interpolations, _, _ = samples\n\n gen = zip(images, interpolations, shapes_orig)\n result = []\n for image, interpolation, input_shape in gen:\n if interpolation == KeepSizeByResize.NO_RESIZE:\n result.append(image)\n else:\n result.append(\n ia.imresize_single_image(image, input_shape[0:2],\n interpolation))\n\n if images_were_array:\n # note here that NO_RESIZE can have led to different shapes\n nb_shapes = len({image.shape for image in result})\n if nb_shapes == 1:\n result = np.array(result, dtype=images.dtype)\n\n return result\n\n @classmethod\n def _keep_size_maps(cls, augmentables, shapes_orig_images,\n shapes_orig_arrs, interpolations):\n result = []\n gen = zip(augmentables, interpolations,\n shapes_orig_arrs, shapes_orig_images)\n for augmentable, interpolation, arr_shape_orig, img_shape_orig in gen:\n if interpolation == \"NO_RESIZE\":\n result.append(augmentable)\n else:\n augmentable = augmentable.resize(\n arr_shape_orig[0:2], interpolation=interpolation)\n augmentable.shape = img_shape_orig\n result.append(augmentable)\n\n return result\n\n @classmethod\n def _keep_size_keypoints(cls, kpsois_aug, shapes_orig, interpolations):\n result = []\n gen = zip(kpsois_aug, interpolations, shapes_orig)\n for kpsoi_aug, interpolation, input_shape in gen:\n if interpolation == KeepSizeByResize.NO_RESIZE:\n result.append(kpsoi_aug)\n else:\n result.append(kpsoi_aug.on_(input_shape))\n\n return result\n\n @classmethod\n def _get_shapes(cls, batch):\n result = dict()\n for column in batch.columns:\n result[column.name] = [cell.shape for cell in column.value]\n\n if 
batch.heatmaps is not None:\n result[\"heatmaps_arr\"] = [\n cell.arr_0to1.shape for cell in batch.heatmaps]\n\n if batch.segmentation_maps is not None:\n result[\"segmentation_maps_arr\"] = [\n cell.arr.shape for cell in batch.segmentation_maps]\n\n return result\n\n def _draw_samples(self, nb_images, random_state):\n rngs = random_state.duplicate(3)\n interpolations = self.interpolation.draw_samples((nb_images,),\n random_state=rngs[0])\n\n if self.interpolation_heatmaps == KeepSizeByResize.SAME_AS_IMAGES:\n interpolations_heatmaps = np.copy(interpolations)\n else:\n interpolations_heatmaps = self.interpolation_heatmaps.draw_samples(\n (nb_images,), random_state=rngs[1]\n )\n\n # Note that `interpolations_heatmaps == self.SAME_AS_IMAGES`\n # works here only if the datatype of the array is such that it\n # may contain strings. It does not work properly for e.g.\n # integer arrays and will produce a single bool output, even\n # for arrays with more than one entry.\n same_as_imgs_idx = [ip == self.SAME_AS_IMAGES\n for ip in interpolations_heatmaps]\n\n interpolations_heatmaps[same_as_imgs_idx] = \\\n interpolations[same_as_imgs_idx]\n\n if self.interpolation_segmaps == KeepSizeByResize.SAME_AS_IMAGES:\n interpolations_segmaps = np.copy(interpolations)\n else:\n # TODO This used previously the same seed as the heatmaps part\n # leading to the same sampled values. Was that intentional?\n # Doesn't look like it should be that way.\n interpolations_segmaps = self.interpolation_segmaps.draw_samples(\n (nb_images,), random_state=rngs[2]\n )\n\n # Note that `interpolations_heatmaps == self.SAME_AS_IMAGES`\n # works here only if the datatype of the array is such that it\n # may contain strings. It does not work properly for e.g.\n # integer arrays and will produce a single bool output, even\n # for arrays with more than one entry.\n same_as_imgs_idx = [ip == self.SAME_AS_IMAGES\n for ip in interpolations_segmaps]\n\n interpolations_segmaps[same_as_imgs_idx] = \\\n interpolations[same_as_imgs_idx]\n\n return interpolations, interpolations_heatmaps, interpolations_segmaps\n\n def _to_deterministic(self):\n aug = self.copy()\n aug.children = aug.children.to_deterministic()\n aug.deterministic = True\n aug.random_state = self.random_state.derive_rng_()\n return aug\n\n def get_parameters(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_parameters`.\"\"\"\n return [self.interpolation, self.interpolation_heatmaps]\n\n def get_children_lists(self):\n \"\"\"See :func:`~imgaug.augmenters.meta.Augmenter.get_children_lists`.\"\"\"\n return [self.children]\n\n def __str__(self):\n pattern = (\n \"%s(\"\n \"interpolation=%s, \"\n \"interpolation_heatmaps=%s, \"\n \"name=%s, \"\n \"children=%s, \"\n \"deterministic=%s\"\n \")\")\n return pattern % (\n self.__class__.__name__, self.interpolation,\n self.interpolation_heatmaps, self.name, self.children,\n self.deterministic)\n", "\"\"\"Collection of basic functions used throughout imgaug.\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport math\nimport numbers\nimport sys\nimport os\nimport json\nimport types\nimport functools\n# collections.abc exists since 3.3 and is expected to be used for 3.8+\ntry:\n from collections.abc import Iterable\nexcept ImportError:\n from collections import Iterable\n\nimport numpy as np\nimport cv2\nimport imageio\nimport six\nimport six.moves as sm\nimport skimage.draw\nimport skimage.measure\n\n\nALL = \"ALL\"\n\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# filepath to the 
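# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original imgaug
# sources. It only exercises the public augmenters defined above
# (CenterPadToPowersOf, CropToSquare, KeepSizeByResize, Crop); the image
# size and all parameter values below are arbitrary example choices, and
# the shapes given in the comments are what the docstrings above imply.
import numpy as np
import imgaug.augmenters as iaa

image = np.zeros((113, 200, 3), dtype=np.uint8)  # dummy non-square image

# Pad height/width up to the next power of 2 per axis, spreading the added
# rows/columns equally over both sides of each axis.
pad_pow2 = iaa.CenterPadToPowersOf(height_base=2, width_base=2)
print(pad_pow2.augment_image(image).shape)  # expected (128, 256, 3)

# Remove columns until the image is square; crop offsets are sampled randomly.
crop_square = iaa.CropToSquare()
print(crop_square.augment_image(image).shape)  # expected (113, 113, 3)

# Crop randomly, then resize back to the input size with nearest-neighbour
# interpolation instead of Crop's internal keep_size resizing.
keep_size = iaa.KeepSizeByResize(
    iaa.Crop(px=(0, 20), keep_size=False),
    interpolation="nearest",
)
print(keep_size.augment_image(image).shape)  # expected (113, 200, 3)
# ---------------------------------------------------------------------------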
quokka image, its annotations and depth map\nQUOKKA_FP = os.path.join(FILE_DIR, \"quokka.jpg\")\nQUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, \"quokka_annotations.json\")\nQUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(\n FILE_DIR, \"quokka_depth_map_halfres.png\")\n\nDEFAULT_FONT_FP = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"DejaVuSans.ttf\"\n)\n\n\n# to check if a dtype instance is among these dtypes, use e.g.\n# `dtype.type in NP_FLOAT_TYPES` do not just use `dtype in NP_FLOAT_TYPES` as\n# that would fail\nNP_FLOAT_TYPES = set(np.sctypes[\"float\"])\nNP_INT_TYPES = set(np.sctypes[\"int\"])\nNP_UINT_TYPES = set(np.sctypes[\"uint\"])\n\nIMSHOW_BACKEND_DEFAULT = \"matplotlib\"\n\nIMRESIZE_VALID_INTERPOLATIONS = [\n \"nearest\", \"linear\", \"area\", \"cubic\",\n cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]\n\n\n###############################################################################\n# Helpers for deprecation\n###############################################################################\n\nclass DeprecationWarning(Warning): # pylint: disable=redefined-builtin\n \"\"\"Warning for deprecated calls.\n\n Since python 2.7 DeprecatedWarning is silent by default. So we define\n our own DeprecatedWarning here so that it is not silent by default.\n\n \"\"\"\n\n\ndef warn(msg, category=UserWarning, stacklevel=2):\n \"\"\"Generate a a warning with stacktrace.\n\n Parameters\n ----------\n msg : str\n The message of the warning.\n\n category : class\n The class of the warning to produce.\n\n stacklevel : int, optional\n How many steps above this function to \"jump\" in the stacktrace when\n displaying file and line number of the error message.\n Usually ``2``.\n\n \"\"\"\n import warnings\n warnings.warn(msg, category=category, stacklevel=stacklevel)\n\n\ndef warn_deprecated(msg, stacklevel=2):\n \"\"\"Generate a non-silent deprecation warning with stacktrace.\n\n The used warning is ``imgaug.imgaug.DeprecationWarning``.\n\n Parameters\n ----------\n msg : str\n The message of the warning.\n\n stacklevel : int, optional\n How many steps above this function to \"jump\" in the stacktrace when\n displaying file and line number of the error message.\n Usually ``2``\n\n \"\"\"\n warn(msg, category=DeprecationWarning, stacklevel=stacklevel)\n\n\nclass deprecated(object): # pylint: disable=invalid-name\n \"\"\"Decorator to mark deprecated functions with warning.\n\n Adapted from\n <https://github.com/scikit-image/scikit-image/blob/master/skimage/_shared/utils.py>.\n\n Parameters\n ----------\n alt_func : None or str, optional\n If given, tell user what function to use instead.\n\n behavior : {'warn', 'raise'}, optional\n Behavior during call to deprecated function: ``warn`` means that the\n user is warned that the function is deprecated; ``raise`` means that\n an error is raised.\n\n removed_version : None or str, optional\n The package version in which the deprecated function will be removed.\n\n comment : None or str, optional\n An optional comment that will be appended to the warning message.\n\n \"\"\"\n\n def __init__(self, alt_func=None, behavior=\"warn\", removed_version=None,\n comment=None):\n self.alt_func = alt_func\n self.behavior = behavior\n self.removed_version = removed_version\n self.comment = comment\n\n def __call__(self, func):\n alt_msg = None\n if self.alt_func is not None:\n alt_msg = \"Use ``%s`` instead.\" % (self.alt_func,)\n\n rmv_msg = None\n if self.removed_version is not None:\n rmv_msg = \"It will be removed in version 
%s.\" % (\n self.removed_version,)\n\n comment_msg = None\n if self.comment is not None and len(self.comment) > 0:\n comment_msg = \"%s.\" % (self.comment.rstrip(\". \"),)\n\n addendum = \" \".join([submsg\n for submsg\n in [alt_msg, rmv_msg, comment_msg]\n if submsg is not None])\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n # getargpec() is deprecated\n # pylint: disable=deprecated-method\n\n # TODO add class name if class method\n import inspect\n # arg_names = func.__code__.co_varnames\n\n # getargspec() was deprecated in py3, but doesn't exist in py2\n if hasattr(inspect, \"getfullargspec\"):\n arg_names = inspect.getfullargspec(func)[0]\n else:\n arg_names = inspect.getargspec(func)[0]\n\n if \"self\" in arg_names or \"cls\" in arg_names:\n main_msg = \"Method ``%s.%s()`` is deprecated.\" % (\n args[0].__class__.__name__, func.__name__)\n else:\n main_msg = \"Function ``%s()`` is deprecated.\" % (\n func.__name__,)\n\n msg = (main_msg + \" \" + addendum).rstrip(\" \").replace(\"``\", \"`\")\n\n if self.behavior == \"warn\":\n warn_deprecated(msg, stacklevel=3)\n elif self.behavior == \"raise\":\n raise DeprecationWarning(msg)\n return func(*args, **kwargs)\n\n # modify doc string to display deprecation warning\n doc = \"**Deprecated**. \" + addendum\n if wrapped.__doc__ is None:\n wrapped.__doc__ = doc\n else:\n wrapped.__doc__ = doc + \"\\n\\n \" + wrapped.__doc__\n\n return wrapped\n\n###############################################################################\n\n\ndef is_np_array(val):\n \"\"\"Check whether a variable is a numpy array.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a numpy array. Otherwise ``False``.\n\n \"\"\"\n # using np.generic here via isinstance(val, (np.ndarray, np.generic))\n # seems to also fire for scalar numpy values even though those are not\n # arrays\n return isinstance(val, np.ndarray)\n\n\ndef is_np_scalar(val):\n \"\"\"Check whether a variable is a numpy scalar.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a numpy scalar. Otherwise ``False``.\n\n \"\"\"\n # Note that isscalar() alone also fires for thinks like python strings\n # or booleans.\n # The isscalar() was added to make this function not fire for non-scalar\n # numpy types. Not sure if it is necessary.\n return isinstance(val, np.generic) and np.isscalar(val)\n\n\ndef is_single_integer(val):\n \"\"\"Check whether a variable is an ``int``.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is an ``int``. Otherwise ``False``.\n\n \"\"\"\n return isinstance(val, numbers.Integral) and not isinstance(val, bool)\n\n\ndef is_single_float(val):\n \"\"\"Check whether a variable is a ``float``.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a ``float``. Otherwise ``False``.\n\n \"\"\"\n return (\n isinstance(val, numbers.Real)\n and not is_single_integer(val)\n and not isinstance(val, bool)\n )\n\n\ndef is_single_number(val):\n \"\"\"Check whether a variable is a ``number``, i.e. an ``int`` or ``float``.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a ``number``. 
Otherwise ``False``.\n\n \"\"\"\n return is_single_integer(val) or is_single_float(val)\n\n\ndef is_iterable(val):\n \"\"\"\n Checks whether a variable is iterable.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is an iterable. Otherwise ``False``.\n\n \"\"\"\n return isinstance(val, Iterable)\n\n\n# TODO convert to is_single_string() or rename is_single_integer/float/number()\ndef is_string(val):\n \"\"\"Check whether a variable is a string.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a string. Otherwise ``False``.\n\n \"\"\"\n return isinstance(val, six.string_types)\n\n\ndef is_single_bool(val):\n \"\"\"Check whether a variable is a ``bool``.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a ``bool``. Otherwise ``False``.\n\n \"\"\"\n # pylint: disable=unidiomatic-typecheck\n return type(val) == type(True)\n\n\ndef is_integer_array(val):\n \"\"\"Check whether a variable is a numpy integer array.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a numpy integer array. Otherwise ``False``.\n\n \"\"\"\n return is_np_array(val) and issubclass(val.dtype.type, np.integer)\n\n\ndef is_float_array(val):\n \"\"\"Check whether a variable is a numpy float array.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a numpy float array. Otherwise ``False``.\n\n \"\"\"\n return is_np_array(val) and issubclass(val.dtype.type, np.floating)\n\n\ndef is_callable(val):\n \"\"\"Check whether a variable is a callable, e.g. a function.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` if the variable is a callable. Otherwise ``False``.\n\n \"\"\"\n # python 3.x with x <= 2 does not support callable(), apparently\n if sys.version_info[0] == 3 and sys.version_info[1] <= 2:\n return hasattr(val, '__call__')\n return callable(val)\n\n\ndef is_generator(val):\n \"\"\"Check whether a variable is a generator.\n\n Parameters\n ----------\n val\n The variable to check.\n\n Returns\n -------\n bool\n ``True`` is the variable is a generator. Otherwise ``False``.\n\n \"\"\"\n return isinstance(val, types.GeneratorType)\n\n\ndef flatten(nested_iterable):\n \"\"\"Flatten arbitrarily nested lists/tuples.\n\n Code partially taken from https://stackoverflow.com/a/10824420.\n\n Parameters\n ----------\n nested_iterable\n A ``list`` or ``tuple`` of arbitrarily nested values.\n\n Yields\n ------\n any\n All values in `nested_iterable`, flattened.\n\n \"\"\"\n # don't just check if something is iterable here, because then strings\n # and arrays will be split into their characters and components\n if not isinstance(nested_iterable, (list, tuple)):\n yield nested_iterable\n else:\n for i in nested_iterable:\n if isinstance(i, (list, tuple)):\n for j in flatten(i):\n yield j\n else:\n yield i\n\n\n# TODO no longer used anywhere. deprecate?\ndef caller_name():\n \"\"\"Return the name of the caller, e.g. 
a function.\n\n Returns\n -------\n str\n The name of the caller as a string\n\n \"\"\"\n # pylint: disable=protected-access\n return sys._getframe(1).f_code.co_name\n\n\ndef seed(entropy=None, seedval=None):\n \"\"\"Set the seed of imgaug's global RNG.\n\n The global RNG controls most of the \"randomness\" in imgaug.\n\n The global RNG is the default one used by all augmenters. Under special\n circumstances (e.g. when an augmenter is switched to deterministic mode),\n the global RNG is replaced with a local one. The state of that replacement\n may be dependent on the global RNG's state at the time of creating the\n child RNG.\n\n .. note::\n\n This function is not yet marked as deprecated, but might be in the\n future. The preferred way to seed `imgaug` is via\n :func:`~imgaug.random.seed`.\n\n Parameters\n ----------\n entropy : int\n The seed value to use.\n\n seedval : None or int, optional\n Deprecated.\n\n \"\"\"\n assert entropy is not None or seedval is not None, (\n \"Expected argument 'entropy' or 'seedval' to be not-None, but both\"\n \"were None.\")\n\n if seedval is not None:\n assert entropy is None, (\n \"Argument 'seedval' is the outdated name for 'entropy'. Hence, \"\n \"if it is provided, 'entropy' must be None. Got 'entropy' value \"\n \"of type %s.\" % (type(entropy),))\n\n warn_deprecated(\"Parameter 'seedval' is deprecated. Use \"\n \"'entropy' instead.\")\n entropy = seedval\n\n import imgaug.random\n imgaug.random.seed(entropy)\n\n\n@deprecated(\"imgaug.random.normalize_generator\")\ndef normalize_random_state(random_state):\n \"\"\"Normalize various inputs to a numpy random generator.\n\n Parameters\n ----------\n random_state : None or int or numpy.random.Generator or numpy.random.BitGenerator or numpy.random.bit_generator.SeedSequence or numpy.random.RandomState\n See :func:`~imgaug.random.normalize_generator`.\n\n Returns\n -------\n numpy.random.Generator or numpy.random.RandomState\n In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator`` (even if\n the input was a ``RandomState``).\n\n \"\"\"\n import imgaug.random\n return imgaug.random.normalize_generator_(random_state)\n\n\n@deprecated(\"imgaug.random.get_global_rng\")\ndef current_random_state():\n \"\"\"Get or create the current global RNG of imgaug.\n\n Note that the first call to this function will create a global RNG.\n\n Returns\n -------\n imgaug.random.RNG\n The global RNG to use.\n\n \"\"\"\n import imgaug.random\n return imgaug.random.get_global_rng()\n\n\n@deprecated(\"imgaug.random.convert_seed_to_rng\")\ndef new_random_state(seed=None, fully_random=False):\n \"\"\"Create a new numpy random number generator.\n\n Parameters\n ----------\n seed : None or int, optional\n The seed value to use. If ``None`` and `fully_random` is ``False``,\n the seed will be derived from the global RNG. 
If `fully_random` is\n ``True``, the seed will be provided by the OS.\n\n fully_random : bool, optional\n Whether the seed will be provided by the OS.\n\n Returns\n -------\n numpy.random.Generator or numpy.random.RandomState\n In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator``.\n Both are initialized with the provided seed.\n\n \"\"\"\n # pylint: disable=redefined-outer-name\n import imgaug.random\n if seed is None:\n if fully_random:\n return imgaug.random.RNG.create_fully_random()\n return imgaug.random.RNG.create_pseudo_random_()\n return imgaug.random.RNG(seed)\n\n\n# TODO seems to not be used anywhere anymore\n@deprecated(\"imgaug.random.convert_seed_to_rng\")\ndef dummy_random_state():\n \"\"\"Create a dummy random state using a seed of ``1``.\n\n Returns\n -------\n imgaug.random.RNG\n The new random state.\n\n \"\"\"\n import imgaug.random\n return imgaug.random.RNG(1)\n\n\n@deprecated(\"imgaug.random.copy_generator_unless_global_rng\")\ndef copy_random_state(random_state, force_copy=False):\n \"\"\"Copy an existing numpy (random number) generator.\n\n Parameters\n ----------\n random_state : numpy.random.Generator or numpy.random.RandomState\n The generator to copy.\n\n force_copy : bool, optional\n If ``True``, this function will always create a copy of every random\n state. If ``False``, it will not copy numpy's default random state,\n but all other random states.\n\n Returns\n -------\n rs_copy : numpy.random.RandomState\n The copied random state.\n\n \"\"\"\n import imgaug.random\n if force_copy:\n return imgaug.random.copy_generator(random_state)\n return imgaug.random.copy_generator_unless_global_generator(random_state)\n\n\n@deprecated(\"imgaug.random.derive_generator_\")\ndef derive_random_state(random_state):\n \"\"\"Derive a child numpy random generator from another one.\n\n Parameters\n ----------\n random_state : numpy.random.Generator or numpy.random.RandomState\n The generator from which to derive a new child generator.\n\n Returns\n -------\n numpy.random.Generator or numpy.random.RandomState\n In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator``.\n In both cases a derived child generator.\n\n \"\"\"\n import imgaug.random\n return imgaug.random.derive_generator_(random_state)\n\n\n@deprecated(\"imgaug.random.derive_generators_\")\ndef derive_random_states(random_state, n=1):\n \"\"\"Derive child numpy random generators from another one.\n\n Parameters\n ----------\n random_state : numpy.random.Generator or numpy.random.RandomState\n The generator from which to derive new child generators.\n\n n : int, optional\n Number of child generators to derive.\n\n Returns\n -------\n list of numpy.random.Generator or list of numpy.random.RandomState\n In numpy <=1.16 a ``list`` of ``RandomState`` s,\n in 1.17+ a ``list`` of ``Generator`` s.\n In both cases lists of derived child generators.\n\n \"\"\"\n import imgaug.random\n return imgaug.random.derive_generators_(random_state, n=n)\n\n\n@deprecated(\"imgaug.random.advance_generator_\")\ndef forward_random_state(random_state):\n \"\"\"Advance a numpy random generator's internal state.\n\n Parameters\n ----------\n random_state : numpy.random.Generator or numpy.random.RandomState\n Generator of which to advance the internal state.\n\n \"\"\"\n import imgaug.random\n imgaug.random.advance_generator_(random_state)\n\n\ndef _quokka_normalize_extract(extract):\n \"\"\"Generate a normalized rectangle for the standard quokka image.\n\n Parameters\n ----------\n extract : 'square' or tuple of number or 
imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Unnormalized representation of the image subarea to be extracted.\n\n * If ``str`` ``square``, then a squared area\n ``(x: 0 to max 643, y: 0 to max 643)`` will be extracted from\n the image.\n * If a ``tuple``, then expected to contain four ``number`` s\n denoting ``(x1, y1, x2, y2)``.\n * If a :class:`~imgaug.augmentables.bbs.BoundingBox`, then that\n bounding box's area will be extracted from the image.\n * If a :class:`~imgaug.augmentables.bbs.BoundingBoxesOnImage`,\n then expected to contain exactly one bounding box and a shape\n matching the full image dimensions (i.e. ``(643, 960, *)``).\n Then the one bounding box will be used similar to\n ``BoundingBox`` above.\n\n Returns\n -------\n imgaug.augmentables.bbs.BoundingBox\n Normalized representation of the area to extract from the standard\n quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n\n if extract == \"square\":\n bb = BoundingBox(x1=0, y1=0, x2=643, y2=643)\n elif isinstance(extract, tuple) and len(extract) == 4:\n bb = BoundingBox(x1=extract[0], y1=extract[1],\n x2=extract[2], y2=extract[3])\n elif isinstance(extract, BoundingBox):\n bb = extract\n elif isinstance(extract, BoundingBoxesOnImage):\n assert len(extract.bounding_boxes) == 1, (\n \"Provided BoundingBoxesOnImage instance may currently only \"\n \"contain a single bounding box.\")\n assert extract.shape[0:2] == (643, 960), (\n \"Expected BoundingBoxesOnImage instance on an image of shape \"\n \"(643, 960, ?). Got shape %s.\" % (extract.shape,))\n bb = extract.bounding_boxes[0]\n else:\n raise Exception(\n \"Expected 'square' or tuple of four entries or BoundingBox or \"\n \"BoundingBoxesOnImage for parameter 'extract', \"\n \"got %s.\" % (type(extract),)\n )\n return bb\n\n\n# TODO is this the same as the project functions in augmentables?\ndef _compute_resized_shape(from_shape, to_shape):\n \"\"\"Compute the intended new shape of an image-like array after resizing.\n\n Parameters\n ----------\n from_shape : tuple or ndarray\n Old shape of the array. 
Usually expected to be a ``tuple`` of form\n ``(H, W)`` or ``(H, W, C)`` or alternatively an array with two or\n three dimensions.\n\n to_shape : None or tuple of ints or tuple of floats or int or float or ndarray\n New shape of the array.\n\n * If ``None``, then `from_shape` will be used as the new shape.\n * If an ``int`` ``V``, then the new shape will be ``(V, V, [C])``,\n where ``C`` will be added if it is part of `from_shape`.\n * If a ``float`` ``V``, then the new shape will be\n ``(H*V, W*V, [C])``, where ``H`` and ``W`` are the old\n height/width.\n * If a ``tuple`` ``(H', W', [C'])`` of ints, then ``H'`` and ``W'``\n will be used as the new height and width.\n * If a ``tuple`` ``(H', W', [C'])`` of floats (except ``C``), then\n ``H'`` and ``W'`` will be used as the new height and width.\n * If a numpy array, then the array's shape will be used.\n\n Returns\n -------\n tuple of int\n New shape.\n\n \"\"\"\n if is_np_array(from_shape):\n from_shape = from_shape.shape\n if is_np_array(to_shape):\n to_shape = to_shape.shape\n\n to_shape_computed = list(from_shape)\n\n if to_shape is None:\n pass\n elif isinstance(to_shape, tuple):\n assert len(from_shape) in [2, 3]\n assert len(to_shape) in [2, 3]\n\n if len(from_shape) == 3 and len(to_shape) == 3:\n assert from_shape[2] == to_shape[2]\n elif len(to_shape) == 3:\n to_shape_computed.append(to_shape[2])\n\n is_to_s_valid_values = all(\n [v is None or is_single_number(v) for v in to_shape[0:2]])\n assert is_to_s_valid_values, (\n \"Expected the first two entries in to_shape to be None or \"\n \"numbers, got types %s.\" % (\n str([type(v) for v in to_shape[0:2]]),))\n\n for i, from_shape_i in enumerate(from_shape[0:2]):\n if to_shape[i] is None:\n to_shape_computed[i] = from_shape_i\n elif is_single_integer(to_shape[i]):\n to_shape_computed[i] = to_shape[i]\n else: # float\n to_shape_computed[i] = int(np.round(from_shape_i * to_shape[i]))\n elif is_single_integer(to_shape) or is_single_float(to_shape):\n to_shape_computed = _compute_resized_shape(\n from_shape, (to_shape, to_shape))\n else:\n raise Exception(\n \"Expected to_shape to be None or ndarray or tuple of floats or \"\n \"tuple of ints or single int or single float, \"\n \"got %s.\" % (type(to_shape),))\n\n return tuple(to_shape_computed)\n\n\ndef quokka(size=None, extract=None):\n \"\"\"Return an image of a quokka as a numpy array.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n Size of the output image. Input into\n :func:`~imgaug.imgaug.imresize_single_image`. Usually expected to be a\n ``tuple`` ``(H, W)``, where ``H`` is the desired height and ``W`` is\n the width. If ``None``, then the image will not be resized.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Subarea of the quokka image to extract:\n\n * If ``None``, then the whole image will be used.\n * If ``str`` ``square``, then a squared area\n ``(x: 0 to max 643, y: 0 to max 643)`` will be extracted from\n the image.\n * If a ``tuple``, then expected to contain four ``number`` s\n denoting ``(x1, y1, x2, y2)``.\n * If a :class:`~imgaug.augmentables.bbs.BoundingBox`, then that\n bounding box's area will be extracted from the image.\n * If a :class:`~imgaug.augmentables.bbs.BoundingBoxesOnImage`,\n then expected to contain exactly one bounding box and a shape\n matching the full image dimensions (i.e. 
``(643, 960, *)``).\n Then the one bounding box will be used similar to\n ``BoundingBox`` above.\n\n Returns\n -------\n (H,W,3) ndarray\n The image array of dtype ``uint8``.\n\n \"\"\"\n img = imageio.imread(QUOKKA_FP, pilmode=\"RGB\")\n if extract is not None:\n bb = _quokka_normalize_extract(extract)\n img = bb.extract_from_image(img)\n if size is not None:\n shape_resized = _compute_resized_shape(img.shape, size)\n img = imresize_single_image(img, shape_resized[0:2])\n return img\n\n\ndef quokka_square(size=None):\n \"\"\"Return an (square) image of a quokka as a numpy array.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n Size of the output image. Input into\n :func:`~imgaug.imgaug.imresize_single_image`. Usually expected to be a\n ``tuple`` ``(H, W)``, where ``H`` is the desired height and ``W`` is\n the width. If ``None``, then the image will not be resized.\n\n Returns\n -------\n (H,W,3) ndarray\n The image array of dtype ``uint8``.\n\n \"\"\"\n return quokka(size=size, extract=\"square\")\n\n\ndef quokka_heatmap(size=None, extract=None):\n \"\"\"Return a heatmap (here: depth map) for the standard example quokka image.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n See :func:`~imgaug.imgaug.quokka`.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.heatmaps.HeatmapsOnImage\n Depth map as an heatmap object. Values close to ``0.0`` denote objects\n that are close to the camera. Values close to ``1.0`` denote objects\n that are furthest away (among all shown objects).\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.heatmaps import HeatmapsOnImage\n\n img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode=\"RGB\")\n img = imresize_single_image(img, (643, 960), interpolation=\"cubic\")\n\n if extract is not None:\n bb = _quokka_normalize_extract(extract)\n img = bb.extract_from_image(img)\n if size is None:\n size = img.shape[0:2]\n\n shape_resized = _compute_resized_shape(img.shape, size)\n img = imresize_single_image(img, shape_resized[0:2])\n img_0to1 = img[..., 0] # depth map was saved as 3-channel RGB\n img_0to1 = img_0to1.astype(np.float32) / 255.0\n img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away\n\n return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,))\n\n\ndef quokka_segmentation_map(size=None, extract=None):\n \"\"\"Return a segmentation map for the standard example quokka image.\n\n Parameters\n ----------\n size : None or float or tuple of int, optional\n See :func:`~imgaug.imgaug.quokka`.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.segmaps.SegmentationMapsOnImage\n Segmentation map object.\n\n \"\"\"\n # pylint: disable=invalid-name\n # TODO get rid of this deferred import\n from imgaug.augmentables.segmaps import SegmentationMapsOnImage\n\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n json_dict = json.load(f)\n\n xx = []\n yy = []\n for kp_dict in json_dict[\"polygons\"][0][\"keypoints\"]:\n x = kp_dict[\"x\"]\n y = kp_dict[\"y\"]\n xx.append(x)\n yy.append(y)\n\n img_seg = np.zeros((643, 960, 1), dtype=np.int32)\n rr, cc = skimage.draw.polygon(\n np.array(yy), np.array(xx), shape=img_seg.shape)\n 
img_seg[rr, cc, 0] = 1\n\n if extract is not None:\n bb = _quokka_normalize_extract(extract)\n img_seg = bb.extract_from_image(img_seg)\n\n segmap = SegmentationMapsOnImage(img_seg, shape=img_seg.shape[0:2] + (3,))\n\n if size is not None:\n shape_resized = _compute_resized_shape(img_seg.shape, size)\n segmap = segmap.resize(shape_resized[0:2])\n segmap.shape = tuple(shape_resized[0:2]) + (3,)\n\n return segmap\n\n\ndef quokka_keypoints(size=None, extract=None):\n \"\"\"Return example keypoints on the standard example quokke image.\n\n The keypoints cover the eyes, ears, nose and paws.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the keypoints are placed. If\n ``None``, then the keypoints are not projected to any new size\n (positions on the original image are used). ``float`` s lead to\n relative size changes, ``int`` s to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Subarea to extract from the image. See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.kps.KeypointsOnImage\n Example keypoints on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.kps import Keypoint, KeypointsOnImage\n\n left, top = 0, 0\n if extract is not None:\n bb_extract = _quokka_normalize_extract(extract)\n left = bb_extract.x1\n top = bb_extract.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n json_dict = json.load(f)\n keypoints = []\n for kp_dict in json_dict[\"keypoints\"]:\n keypoints.append(Keypoint(x=kp_dict[\"x\"] - left, y=kp_dict[\"y\"] - top))\n if extract is not None:\n shape = (bb_extract.height, bb_extract.width, 3)\n else:\n shape = (643, 960, 3)\n kpsoi = KeypointsOnImage(keypoints, shape=shape)\n if size is not None:\n shape_resized = _compute_resized_shape(shape, size)\n kpsoi = kpsoi.on(shape_resized)\n return kpsoi\n\n\ndef quokka_bounding_boxes(size=None, extract=None):\n \"\"\"Return example bounding boxes on the standard example quokke image.\n\n Currently only a single bounding box is returned that covers the quokka.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the BBs are placed. If ``None``, then\n the BBs are not projected to any new size (positions on the original\n image are used). ``float`` s lead to relative size changes, ``int`` s\n to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Subarea to extract from the image. 
See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.bbs.BoundingBoxesOnImage\n Example BBs on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n\n left, top = 0, 0\n if extract is not None:\n bb_extract = _quokka_normalize_extract(extract)\n left = bb_extract.x1\n top = bb_extract.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n json_dict = json.load(f)\n bbs = []\n for bb_dict in json_dict[\"bounding_boxes\"]:\n bbs.append(\n BoundingBox(\n x1=bb_dict[\"x1\"] - left,\n y1=bb_dict[\"y1\"] - top,\n x2=bb_dict[\"x2\"] - left,\n y2=bb_dict[\"y2\"] - top\n )\n )\n if extract is not None:\n shape = (bb_extract.height, bb_extract.width, 3)\n else:\n shape = (643, 960, 3)\n bbsoi = BoundingBoxesOnImage(bbs, shape=shape)\n if size is not None:\n shape_resized = _compute_resized_shape(shape, size)\n bbsoi = bbsoi.on(shape_resized)\n return bbsoi\n\n\ndef quokka_polygons(size=None, extract=None):\n \"\"\"\n Returns example polygons on the standard example quokke image.\n\n The result contains one polygon, covering the quokka's outline.\n\n Parameters\n ----------\n size : None or float or tuple of int or tuple of float, optional\n Size of the output image on which the polygons are placed. If ``None``,\n then the polygons are not projected to any new size (positions on the\n original image are used). ``float`` s lead to relative size changes,\n ``int`` s to absolute sizes in pixels.\n\n extract : None or 'square' or tuple of number or imgaug.augmentables.bbs.BoundingBox or imgaug.augmentables.bbs.BoundingBoxesOnImage\n Subarea to extract from the image. See :func:`~imgaug.imgaug.quokka`.\n\n Returns\n -------\n imgaug.augmentables.polys.PolygonsOnImage\n Example polygons on the quokka image.\n\n \"\"\"\n # TODO get rid of this deferred import\n from imgaug.augmentables.polys import Polygon, PolygonsOnImage\n\n left, top = 0, 0\n if extract is not None:\n bb_extract = _quokka_normalize_extract(extract)\n left = bb_extract.x1\n top = bb_extract.y1\n with open(QUOKKA_ANNOTATIONS_FP, \"r\") as f:\n json_dict = json.load(f)\n polygons = []\n for poly_json in json_dict[\"polygons\"]:\n polygons.append(\n Polygon([(point[\"x\"] - left, point[\"y\"] - top)\n for point in poly_json[\"keypoints\"]])\n )\n if extract is not None:\n shape = (bb_extract.height, bb_extract.width, 3)\n else:\n shape = (643, 960, 3)\n psoi = PolygonsOnImage(polygons, shape=shape)\n if size is not None:\n shape_resized = _compute_resized_shape(shape, size)\n psoi = psoi.on(shape_resized)\n return psoi\n\n\n# TODO change this to some atan2 stuff?\ndef angle_between_vectors(v1, v2):\n \"\"\"Calculcate the angle in radians between vectors `v1` and `v2`.\n\n From\n http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python\n\n Parameters\n ----------\n v1 : (N,) ndarray\n First vector.\n\n v2 : (N,) ndarray\n Second vector.\n\n Returns\n -------\n float\n Angle in radians.\n\n Examples\n --------\n >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([0, 1, 0]))\n 1.570796...\n\n >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([1, 0, 0]))\n 0.0\n\n >>> angle_between_vectors(np.float32([1, 0, 0]), np.float32([-1, 0, 0]))\n 3.141592...\n\n \"\"\"\n # pylint: disable=invalid-name\n length1 = np.linalg.norm(v1)\n length2 = np.linalg.norm(v2)\n v1_unit = (v1 / length1) if length1 > 0 else np.float32(v1) * 0\n v2_unit = (v2 / length2) if length2 > 0 else np.float32(v2) 
* 0\n return np.arccos(np.clip(np.dot(v1_unit, v2_unit), -1.0, 1.0))\n\n\n# TODO is this used anywhere?\n# TODO this might also be covered by augmentables.utils or\n# augmentables.polys/lines\ndef compute_line_intersection_point(x1, y1, x2, y2, x3, y3, x4, y4):\n \"\"\"Compute the intersection point of two lines.\n\n Taken from https://stackoverflow.com/a/20679579 .\n\n Parameters\n ----------\n x1 : number\n x coordinate of the first point on line 1.\n (The lines extends beyond this point.)\n\n y1 : number\n y coordinate of the first point on line 1.\n (The lines extends beyond this point.)\n\n x2 : number\n x coordinate of the second point on line 1.\n (The lines extends beyond this point.)\n\n y2 : number\n y coordinate of the second point on line 1.\n (The lines extends beyond this point.)\n\n x3 : number\n x coordinate of the first point on line 2.\n (The lines extends beyond this point.)\n\n y3 : number\n y coordinate of the first point on line 2.\n (The lines extends beyond this point.)\n\n x4 : number\n x coordinate of the second point on line 2.\n (The lines extends beyond this point.)\n\n y4 : number\n y coordinate of the second point on line 2.\n (The lines extends beyond this point.)\n\n Returns\n -------\n tuple of number or bool\n The coordinate of the intersection point as a ``tuple`` ``(x, y)``.\n If the lines are parallel (no intersection point or an infinite number\n of them), the result is ``False``.\n\n \"\"\"\n # pylint: disable=invalid-name\n def _make_line(point1, point2):\n line_y = (point1[1] - point2[1])\n line_x = (point2[0] - point1[0])\n slope = (point1[0] * point2[1] - point2[0] * point1[1])\n return line_y, line_x, -slope\n\n line1 = _make_line((x1, y1), (x2, y2))\n line2 = _make_line((x3, y3), (x4, y4))\n\n D = line1[0] * line2[1] - line1[1] * line2[0]\n Dx = line1[2] * line2[1] - line1[1] * line2[2]\n Dy = line1[0] * line2[2] - line1[2] * line2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return x, y\n return False\n\n\n# TODO replace by cv2.putText()?\ndef draw_text(img, y, x, text, color=(0, 255, 0), size=25):\n \"\"\"Draw text on an image.\n\n This uses by default DejaVuSans as its font, which is included in this\n library.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: no\n * ``uint32``: no\n * ``uint64``: no\n * ``int8``: no\n * ``int16``: no\n * ``int32``: no\n * ``int64``: no\n * ``float16``: no\n * ``float32``: yes; not tested\n * ``float64``: no\n * ``float128``: no\n * ``bool``: no\n\n TODO check if other dtypes could be enabled\n\n Parameters\n ----------\n img : (H,W,3) ndarray\n The image array to draw text on.\n Expected to be of dtype ``uint8`` or ``float32`` (expected value\n range is ``[0.0, 255.0]``).\n\n y : int\n x-coordinate of the top left corner of the text.\n\n x : int\n y- coordinate of the top left corner of the text.\n\n text : str\n The text to draw.\n\n color : iterable of int, optional\n Color of the text to draw. For RGB-images this is expected to be an\n RGB color.\n\n size : int, optional\n Font size of the text to draw.\n\n Returns\n -------\n (H,W,3) ndarray\n Input image with text drawn on it.\n\n \"\"\"\n from PIL import (\n Image as PIL_Image,\n ImageDraw as PIL_ImageDraw,\n ImageFont as PIL_ImageFont\n )\n\n assert img.dtype.name in [\"uint8\", \"float32\"], (\n \"Can currently draw text only on images of dtype 'uint8' or \"\n \"'float32'. 
Got dtype %s.\" % (img.dtype.name,))\n\n input_dtype = img.dtype\n if img.dtype == np.float32:\n img = img.astype(np.uint8)\n\n img = PIL_Image.fromarray(img)\n font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)\n context = PIL_ImageDraw.Draw(img)\n context.text((x, y), text, fill=tuple(color), font=font)\n img_np = np.asarray(img)\n\n # PIL/asarray returns read only array\n if not img_np.flags[\"WRITEABLE\"]:\n try:\n # this seems to no longer work with np 1.16 (or was pillow\n # updated?)\n img_np.setflags(write=True)\n except ValueError as ex:\n if \"cannot set WRITEABLE flag to True of this array\" in str(ex):\n img_np = np.copy(img_np)\n\n if img_np.dtype != input_dtype:\n img_np = img_np.astype(input_dtype)\n\n return img_np\n\n\n# TODO rename sizes to size?\ndef imresize_many_images(images, sizes=None, interpolation=None):\n \"\"\"Resize each image in a list or array to a specified size.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: no (1)\n * ``uint64``: no (2)\n * ``int8``: yes; tested (3)\n * ``int16``: yes; tested\n * ``int32``: limited; tested (4)\n * ``int64``: no (2)\n * ``float16``: yes; tested (5)\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: no (1)\n * ``bool``: yes; tested (6)\n\n - (1) rejected by ``cv2.imresize``\n - (2) results too inaccurate\n - (3) mapped internally to ``int16`` when interpolation!=\"nearest\"\n - (4) only supported for interpolation=\"nearest\", other interpolations\n lead to cv2 error\n - (5) mapped internally to ``float32``\n - (6) mapped internally to ``uint8``\n\n Parameters\n ----------\n images : (N,H,W,[C]) ndarray or list of (H,W,[C]) ndarray\n Array of the images to resize.\n Usually recommended to be of dtype ``uint8``.\n\n sizes : float or iterable of int or iterable of float\n The new size of the images, given either as a fraction (a single\n float) or as a ``(height, width)`` ``tuple`` of two integers or as a\n ``(height fraction, width fraction)`` ``tuple`` of two floats.\n\n interpolation : None or str or int, optional\n The interpolation to use during resize.\n If ``int``, then expected to be one of:\n\n * ``cv2.INTER_NEAREST`` (nearest neighbour interpolation)\n * ``cv2.INTER_LINEAR`` (linear interpolation)\n * ``cv2.INTER_AREA`` (area interpolation)\n * ``cv2.INTER_CUBIC`` (cubic interpolation)\n\n If ``str``, then expected to be one of:\n\n * ``nearest`` (identical to ``cv2.INTER_NEAREST``)\n * ``linear`` (identical to ``cv2.INTER_LINEAR``)\n * ``area`` (identical to ``cv2.INTER_AREA``)\n * ``cubic`` (identical to ``cv2.INTER_CUBIC``)\n\n If ``None``, the interpolation will be chosen automatically. 
For size\n increases, ``area`` interpolation will be picked and for size\n decreases, ``linear`` interpolation will be picked.\n\n Returns\n -------\n (N,H',W',[C]) ndarray\n Array of the resized images.\n\n Examples\n --------\n >>> import imgaug as ia\n >>> images = np.zeros((2, 8, 16, 3), dtype=np.uint8)\n >>> images_resized = ia.imresize_many_images(images, 2.0)\n >>> images_resized.shape\n (2, 16, 32, 3)\n\n Convert two RGB images of height ``8`` and width ``16`` to images of\n height ``2*8=16`` and width ``2*16=32``.\n\n >>> images_resized = ia.imresize_many_images(images, (2.0, 4.0))\n >>> images_resized.shape\n (2, 16, 64, 3)\n\n Convert two RGB images of height ``8`` and width ``16`` to images of\n height ``2*8=16`` and width ``4*16=64``.\n\n >>> images_resized = ia.imresize_many_images(images, (16, 32))\n >>> images_resized.shape\n (2, 16, 32, 3)\n\n Converts two RGB images of height ``8`` and width ``16`` to images of\n height ``16`` and width ``32``.\n\n \"\"\"\n # pylint: disable=too-many-statements\n\n # we just do nothing if the input contains zero images\n # one could also argue that an exception would be appropriate here\n if len(images) == 0:\n return images\n\n # verify that sizes contains only values >0\n if is_single_number(sizes) and sizes <= 0:\n raise ValueError(\n \"If 'sizes' is given as a single number, it is expected to \"\n \"be >= 0, got %.8f.\" % (sizes,))\n\n # change after the validation to make the above error messages match the\n # original input\n if is_single_number(sizes):\n sizes = (sizes, sizes)\n else:\n assert len(sizes) == 2, (\n \"If 'sizes' is given as a tuple, it is expected be a tuple of two \"\n \"entries, got %d entries.\" % (len(sizes),))\n assert all([is_single_number(val) and val >= 0 for val in sizes]), (\n \"If 'sizes' is given as a tuple, it is expected be a tuple of two \"\n \"ints or two floats, each >= 0, got types %s with values %s.\" % (\n str([type(val) for val in sizes]), str(sizes)))\n\n # if input is a list, call this function N times for N images\n # but check beforehand if all images have the same shape, then just\n # convert to a single array and de-convert afterwards\n if isinstance(images, list):\n nb_shapes = len({image.shape for image in images})\n if nb_shapes == 1:\n return list(imresize_many_images(\n np.array(images), sizes=sizes, interpolation=interpolation))\n\n return [\n imresize_many_images(\n image[np.newaxis, ...],\n sizes=sizes,\n interpolation=interpolation)[0, ...]\n for image in images]\n\n shape = images.shape\n assert images.ndim in [3, 4], \"Expected array of shape (N, H, W, [C]), \" \\\n \"got shape %s\" % (str(shape),)\n nb_images = shape[0]\n height_image, width_image = shape[1], shape[2]\n nb_channels = shape[3] if images.ndim > 3 else None\n\n height_target, width_target = sizes[0], sizes[1]\n height_target = (int(np.round(height_image * height_target))\n if is_single_float(height_target)\n else height_target)\n width_target = (int(np.round(width_image * width_target))\n if is_single_float(width_target)\n else width_target)\n\n if height_target == height_image and width_target == width_image:\n return np.copy(images)\n\n # return empty array if input array contains zero-sized axes\n # note that None==0 is not True (for case nb_channels=None)\n if 0 in [height_target, width_target, nb_channels]:\n shape_out = tuple([shape[0], height_target, width_target]\n + list(shape[3:]))\n return np.zeros(shape_out, dtype=images.dtype)\n\n # place this after the (h==h' and w==w') check so that images with\n # 
zero-sized don't result in errors if the aren't actually resized\n # verify that all input images have height/width > 0\n has_zero_size_axes = any([axis == 0 for axis in images.shape[1:]])\n assert not has_zero_size_axes, (\n \"Cannot resize images, because at least one image has a height and/or \"\n \"width and/or number of channels of zero. \"\n \"Observed shapes were: %s.\" % (\n str([image.shape for image in images]),))\n\n inter = interpolation\n assert inter is None or inter in IMRESIZE_VALID_INTERPOLATIONS, (\n \"Expected 'interpolation' to be None or one of %s. Got %s.\" % (\n \", \".join(\n [str(valid_ip) for valid_ip in IMRESIZE_VALID_INTERPOLATIONS]\n ),\n str(inter)\n )\n )\n if inter is None:\n if height_target > height_image or width_target > width_image:\n inter = cv2.INTER_AREA\n else:\n inter = cv2.INTER_LINEAR\n elif inter in [\"nearest\", cv2.INTER_NEAREST]:\n inter = cv2.INTER_NEAREST\n elif inter in [\"linear\", cv2.INTER_LINEAR]:\n inter = cv2.INTER_LINEAR\n elif inter in [\"area\", cv2.INTER_AREA]:\n inter = cv2.INTER_AREA\n else: # if ip in [\"cubic\", cv2.INTER_CUBIC]:\n inter = cv2.INTER_CUBIC\n\n # TODO find more beautiful way to avoid circular imports\n from . import dtypes as iadt\n if inter == cv2.INTER_NEAREST:\n iadt.gate_dtypes(\n images,\n allowed=[\"bool\",\n \"uint8\", \"uint16\",\n \"int8\", \"int16\", \"int32\",\n \"float16\", \"float32\", \"float64\"],\n disallowed=[\"uint32\", \"uint64\", \"uint128\", \"uint256\",\n \"int64\", \"int128\", \"int256\",\n \"float96\", \"float128\", \"float256\"],\n augmenter=None)\n else:\n iadt.gate_dtypes(\n images,\n allowed=[\"bool\",\n \"uint8\", \"uint16\",\n \"int8\", \"int16\",\n \"float16\", \"float32\", \"float64\"],\n disallowed=[\"uint32\", \"uint64\", \"uint128\", \"uint256\",\n \"int32\", \"int64\", \"int128\", \"int256\",\n \"float96\", \"float128\", \"float256\"],\n augmenter=None)\n\n result_shape = (nb_images, height_target, width_target)\n if nb_channels is not None:\n result_shape = result_shape + (nb_channels,)\n result = np.zeros(result_shape, dtype=images.dtype)\n for i, image in enumerate(images):\n input_dtype = image.dtype\n input_dtype_name = input_dtype.name\n\n if input_dtype_name == \"bool\":\n image = image.astype(np.uint8) * 255\n elif input_dtype_name == \"int8\" and inter != cv2.INTER_NEAREST:\n image = image.astype(np.int16)\n elif input_dtype_name == \"float16\":\n image = image.astype(np.float32)\n\n if nb_channels is not None and nb_channels > 512:\n channels = [\n cv2.resize(image[..., c], (width_target, height_target),\n interpolation=inter) for c in sm.xrange(nb_channels)]\n result_img = np.stack(channels, axis=-1)\n else:\n result_img = cv2.resize(\n image, (width_target, height_target), interpolation=inter)\n\n assert result_img.dtype.name == image.dtype.name, (\n \"Expected cv2.resize() to keep the input dtype '%s', but got \"\n \"'%s'. This is an internal error. Please report.\" % (\n image.dtype.name, result_img.dtype.name\n )\n )\n\n # cv2 removes the channel axis if input was (H, W, 1)\n # we re-add it (but only if input was not (H, W))\n if (len(result_img.shape) == 2 and nb_channels is not None\n and nb_channels == 1):\n result_img = result_img[:, :, np.newaxis]\n\n if input_dtype_name == \"bool\":\n result_img = result_img > 127\n elif input_dtype_name == \"int8\" and inter != cv2.INTER_NEAREST:\n # TODO somehow better avoid circular imports here\n from . 
import dtypes as iadt\n result_img = iadt.restore_dtypes_(result_img, np.int8)\n elif input_dtype_name == \"float16\":\n # TODO see above\n from . import dtypes as iadt\n result_img = iadt.restore_dtypes_(result_img, np.float16)\n result[i] = result_img\n return result\n\n\ndef _assert_two_or_three_dims(shape):\n if hasattr(shape, \"shape\"):\n shape = shape.shape\n assert len(shape) in [2, 3], (\n \"Expected image with two or three dimensions, but got %d dimensions \"\n \"and shape %s.\" % (len(shape), shape))\n\n\ndef imresize_single_image(image, sizes, interpolation=None):\n \"\"\"Resize a single image.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n Parameters\n ----------\n image : (H,W,C) ndarray or (H,W) ndarray\n Array of the image to resize.\n Usually recommended to be of dtype ``uint8``.\n\n sizes : float or iterable of int or iterable of float\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n interpolation : None or str or int, optional\n See :func:`~imgaug.imgaug.imresize_many_images`.\n\n Returns\n -------\n (H',W',C) ndarray or (H',W') ndarray\n The resized image.\n\n \"\"\"\n _assert_two_or_three_dims(image)\n\n grayscale = False\n if image.ndim == 2:\n grayscale = True\n image = image[:, :, np.newaxis]\n\n rs = imresize_many_images(\n image[np.newaxis, :, :, :], sizes, interpolation=interpolation)\n if grayscale:\n return rs[0, :, :, 0]\n return rs[0, ...]\n\n\ndef pool(arr, block_size, func, pad_mode=\"constant\", pad_cval=0,\n preserve_dtype=True, cval=None):\n \"\"\"Resize an array by pooling values within blocks.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; tested\n * ``uint32``: yes; tested (2)\n * ``uint64``: no (1)\n * ``int8``: yes; tested\n * ``int16``: yes; tested\n * ``int32``: yes; tested (2)\n * ``int64``: no (1)\n * ``float16``: yes; tested\n * ``float32``: yes; tested\n * ``float64``: yes; tested\n * ``float128``: yes; tested (2)\n * ``bool``: yes; tested\n\n - (1) results too inaccurate (at least when using np.average as func)\n - (2) Note that scikit-image documentation says that the wrapped\n pooling function converts inputs to ``float64``. Actual tests\n showed no indication of that happening (at least when using\n preserve_dtype=True).\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool. Ideally of datatype ``float64``.\n\n block_size : int or tuple of int\n Spatial size of each group of values to pool, aka kernel size.\n\n * If a single ``int``, then a symmetric block of that size along\n height and width will be used.\n * If a ``tuple`` of two values, it is assumed to be the block size\n along height and width of the image-like, with pooling happening\n per channel.\n * If a ``tuple`` of three values, it is assumed to be the block size\n along height, width and channels.\n\n func : callable\n Function to apply to a given block in order to convert it to a single\n number, e.g. :func:`numpy.average`, :func:`numpy.min`,\n :func:`numpy.max`.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder. See :func:`~imgaug.imgaug.pad` for details.\n\n pad_cval : number, optional\n Value to use for padding if `mode` is ``constant``.\n See :func:`numpy.pad` for details.\n\n preserve_dtype : bool, optional\n Whether to convert the array back to the input datatype if it is\n changed away from that in the pooling process.\n\n cval : None or number, optional\n Deprecated. 
Old name for `pad_cval`.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after pooling.\n\n \"\"\"\n # TODO find better way to avoid circular import\n from . import dtypes as iadt\n from .augmenters import size as iasize\n\n if arr.size == 0:\n return np.copy(arr)\n\n iadt.gate_dtypes(arr,\n allowed=[\"bool\",\n \"uint8\", \"uint16\", \"uint32\",\n \"int8\", \"int16\", \"int32\",\n \"float16\", \"float32\", \"float64\", \"float128\"],\n disallowed=[\"uint64\", \"uint128\", \"uint256\",\n \"int64\", \"int128\", \"int256\",\n \"float256\"],\n augmenter=None)\n\n if cval is not None:\n warn_deprecated(\"`cval` is a deprecated argument in pool(). \"\n \"Use `pad_cval` instead.\")\n pad_cval = cval\n\n _assert_two_or_three_dims(arr)\n\n is_valid_int = is_single_integer(block_size) and block_size >= 1\n is_valid_tuple = is_iterable(block_size) and len(block_size) in [2, 3] \\\n and [is_single_integer(val) and val >= 1 for val in block_size]\n assert is_valid_int or is_valid_tuple, (\n \"Expected argument 'block_size' to be a single integer >0 or \"\n \"a tuple of 2 or 3 values with each one being >0. Got %s.\" % (\n str(block_size)))\n\n if is_single_integer(block_size):\n block_size = [block_size, block_size]\n if len(block_size) < arr.ndim:\n block_size = list(block_size) + [1]\n\n # We use custom padding here instead of the one from block_reduce(),\n # because (1) it is expected to be faster and (2) it allows us more\n # flexibility wrt to padding modes.\n arr = iasize.pad_to_multiples_of(\n arr,\n height_multiple=block_size[0],\n width_multiple=block_size[1],\n mode=pad_mode,\n cval=pad_cval\n )\n\n input_dtype = arr.dtype\n\n arr_reduced = skimage.measure.block_reduce(arr, tuple(block_size), func,\n cval=cval)\n if preserve_dtype and arr_reduced.dtype.name != input_dtype.name:\n arr_reduced = arr_reduced.astype(input_dtype)\n return arr_reduced\n\n\n# TODO does OpenCV have a faster avg pooling method?\ndef avg_pool(arr, block_size, pad_mode=\"reflect\", pad_cval=128,\n preserve_dtype=True, cval=None):\n \"\"\"Resize an array using average pooling.\n\n Defaults to ``pad_mode=\"reflect\"`` to ensure that padded values do not\n affect the average.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder.\n See :func:`~imgaug.imgaug.pad` for details.\n\n pad_cval : number, optional\n Padding value.\n See :func:`~imgaug.imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype.\n See :func:`~imgaug.imgaug.pool` for details.\n\n cval : None or number, optional\n Deprecated. 
Old name for `pad_cval`.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after average pooling.\n\n \"\"\"\n return pool(arr, block_size, np.average, pad_mode=pad_mode,\n pad_cval=pad_cval, preserve_dtype=preserve_dtype, cval=cval)\n\n\ndef max_pool(arr, block_size, pad_mode=\"edge\", pad_cval=0,\n preserve_dtype=True, cval=None):\n \"\"\"Resize an array using max-pooling.\n\n Defaults to ``pad_mode=\"edge\"`` to ensure that padded values do not affect\n the maximum, even if the dtype was something else than ``uint8``.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder.\n See :func:`~imgaug.imgaug.pad` for details.\n\n pad_cval : number, optional\n Padding value.\n See :func:`~imgaug.imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype.\n See :func:`~imgaug.imgaug.pool` for details.\n\n cval : None or number, optional\n Deprecated. Old name for `pad_cval`.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after max-pooling.\n\n \"\"\"\n return pool(arr, block_size, np.max, pad_mode=pad_mode,\n pad_cval=pad_cval, preserve_dtype=preserve_dtype, cval=cval)\n\n\ndef min_pool(arr, block_size, pad_mode=\"edge\", pad_cval=255,\n preserve_dtype=True):\n \"\"\"Resize an array using min-pooling.\n\n Defaults to ``pad_mode=\"edge\"`` to ensure that padded values do not affect\n the minimum, even if the dtype was something else than ``uint8``.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder.\n See :func:`~imgaug.imgaug.pad` for details.\n\n pad_cval : number, optional\n Padding value.\n See :func:`~imgaug.imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype.\n See :func:`~imgaug.imgaug.pool` for details.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after min-pooling.\n\n \"\"\"\n return pool(arr, block_size, np.min, pad_mode=pad_mode, pad_cval=pad_cval,\n preserve_dtype=preserve_dtype)\n\n\ndef median_pool(arr, block_size, pad_mode=\"reflect\", pad_cval=128,\n preserve_dtype=True):\n \"\"\"Resize an array using median-pooling.\n\n Defaults to ``pad_mode=\"reflect\"`` to ensure that padded values do not\n affect the average.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.pool`.\n\n Parameters\n ----------\n arr : (H,W) ndarray or (H,W,C) ndarray\n Image-like array to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n block_size : int or tuple of int or tuple of int\n Size of each block of values to pool.\n See :func:`~imgaug.imgaug.pool` for details.\n\n pad_mode : str, optional\n Padding mode to use if the array cannot be divided by `block_size`\n without remainder.\n See :func:`~imgaug.imgaug.pad` for details.\n\n 
pad_cval : number, optional\n Padding value.\n See :func:`~imgaug.imgaug.pool` for details.\n\n preserve_dtype : bool, optional\n Whether to preserve the input array dtype.\n See :func:`~imgaug.imgaug.pool` for details.\n\n Returns\n -------\n (H',W') ndarray or (H',W',C') ndarray\n Array after min-pooling.\n\n \"\"\"\n return pool(arr, block_size, np.median, pad_mode=pad_mode,\n pad_cval=pad_cval, preserve_dtype=preserve_dtype)\n\n\ndef draw_grid(images, rows=None, cols=None):\n \"\"\"Combine multiple images into a single grid-like image.\n\n Calling this function with four images of the same shape and ``rows=2``,\n ``cols=2`` will combine the four images to a single image array of shape\n ``(2*H, 2*W, C)``, where ``H`` is the height of any of the images\n (analogous ``W``) and ``C`` is the number of channels of any image.\n\n Calling this function with four images of the same shape and ``rows=4``,\n ``cols=1`` is analogous to calling :func:`numpy.vstack` on the images.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: yes; fully tested\n * ``uint32``: yes; fully tested\n * ``uint64``: yes; fully tested\n * ``int8``: yes; fully tested\n * ``int16``: yes; fully tested\n * ``int32``: yes; fully tested\n * ``int64``: yes; fully tested\n * ``float16``: yes; fully tested\n * ``float32``: yes; fully tested\n * ``float64``: yes; fully tested\n * ``float128``: yes; fully tested\n * ``bool``: yes; fully tested\n\n Parameters\n ----------\n images : (N,H,W,3) ndarray or iterable of (H,W,3) array\n The input images to convert to a grid.\n\n rows : None or int, optional\n The number of rows to show in the grid.\n If ``None``, it will be automatically derived.\n\n cols : None or int, optional\n The number of cols to show in the grid.\n If ``None``, it will be automatically derived.\n\n Returns\n -------\n (H',W',3) ndarray\n Image of the generated grid.\n\n \"\"\"\n nb_images = len(images)\n assert nb_images > 0, \"Expected to get at least one image, got none.\"\n\n if is_np_array(images):\n assert images.ndim == 4, (\n \"Expected to get an array of four dimensions denoting \"\n \"(N, H, W, C), got %d dimensions and shape %s.\" % (\n images.ndim, images.shape))\n else:\n assert is_iterable(images), (\n \"Expected to get an iterable of ndarrays, \"\n \"got %s.\" % (type(images),))\n assert all([is_np_array(image) for image in images]), (\n \"Expected to get an iterable of ndarrays, \"\n \"got types %s.\" % (\n \", \".join([str(type(image)) for image in images],)))\n assert all([image.ndim == 3 for image in images]), (\n \"Expected to get images with three dimensions. 
Got shapes %s.\" % (\n \", \".join([str(image.shape) for image in images])))\n assert len({image.dtype.name for image in images}) == 1, (\n \"Expected to get images with the same dtypes, got dtypes %s.\" % (\n \", \".join([image.dtype.name for image in images])))\n assert len({image.shape[-1] for image in images}) == 1, (\n \"Expected to get images with the same number of channels, \"\n \"got shapes %s.\" % (\n \", \".join([str(image.shape) for image in images])))\n\n cell_height = max([image.shape[0] for image in images])\n cell_width = max([image.shape[1] for image in images])\n nb_channels = images[0].shape[2]\n\n if rows is None and cols is None:\n rows = cols = int(math.ceil(math.sqrt(nb_images)))\n elif rows is not None:\n cols = int(math.ceil(nb_images / rows))\n elif cols is not None:\n rows = int(math.ceil(nb_images / cols))\n assert rows * cols >= nb_images, (\n \"Expected rows*cols to lead to at least as many cells as there were \"\n \"images provided, but got %d rows, %d cols (=%d cells) for %d \"\n \"images. \" % (rows, cols, rows*cols, nb_images))\n\n width = cell_width * cols\n height = cell_height * rows\n dtype = images.dtype if is_np_array(images) else images[0].dtype\n grid = np.zeros((height, width, nb_channels), dtype=dtype)\n cell_idx = 0\n for row_idx in sm.xrange(rows):\n for col_idx in sm.xrange(cols):\n if cell_idx < nb_images:\n image = images[cell_idx]\n cell_y1 = cell_height * row_idx\n cell_y2 = cell_y1 + image.shape[0]\n cell_x1 = cell_width * col_idx\n cell_x2 = cell_x1 + image.shape[1]\n grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image\n cell_idx += 1\n\n return grid\n\n\ndef show_grid(images, rows=None, cols=None):\n \"\"\"Combine multiple images into a single image and plot the result.\n\n This will show a window of the results of :func:`~imgaug.imgaug.draw_grid`.\n\n dtype support::\n\n minimum of (\n :func:`~imgaug.imgaug.draw_grid`,\n :func:`~imgaug.imgaug.imshow`\n )\n\n Parameters\n ----------\n images : (N,H,W,3) ndarray or iterable of (H,W,3) array\n See :func:`~imgaug.imgaug.draw_grid`.\n\n rows : None or int, optional\n See :func:`~imgaug.imgaug.draw_grid`.\n\n cols : None or int, optional\n See :func:`~imgaug.imgaug.draw_grid`.\n\n \"\"\"\n grid = draw_grid(images, rows=rows, cols=cols)\n imshow(grid)\n\n\ndef imshow(image, backend=IMSHOW_BACKEND_DEFAULT):\n \"\"\"Show an image in a window.\n\n dtype support::\n\n * ``uint8``: yes; not tested\n * ``uint16``: ?\n * ``uint32``: ?\n * ``uint64``: ?\n * ``int8``: ?\n * ``int16``: ?\n * ``int32``: ?\n * ``int64``: ?\n * ``float16``: ?\n * ``float32``: ?\n * ``float64``: ?\n * ``float128``: ?\n * ``bool``: ?\n\n Parameters\n ----------\n image : (H,W,3) ndarray\n Image to show.\n\n backend : {'matplotlib', 'cv2'}, optional\n Library to use to show the image. May be either matplotlib or\n OpenCV ('cv2'). 
OpenCV tends to be faster, but apparently causes more\n technical issues.\n\n \"\"\"\n assert backend in [\"matplotlib\", \"cv2\"], (\n \"Expected backend 'matplotlib' or 'cv2', got %s.\" % (backend,))\n\n if backend == \"cv2\":\n image_bgr = image\n if image.ndim == 3 and image.shape[2] in [3, 4]:\n image_bgr = image[..., 0:3][..., ::-1]\n\n win_name = \"imgaug-default-window\"\n cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)\n cv2.imshow(win_name, image_bgr)\n cv2.waitKey(0)\n cv2.destroyWindow(win_name)\n else:\n # import only when necessary (faster startup; optional dependency;\n # less fragile -- see issue #225)\n import matplotlib.pyplot as plt\n\n dpi = 96\n h, w = image.shape[0] / dpi, image.shape[1] / dpi\n # if the figure is too narrow, the footer may appear and make the fig\n # suddenly wider (ugly)\n w = max(w, 6)\n\n fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)\n fig.canvas.set_window_title(\"imgaug.imshow(%s)\" % (image.shape,))\n # cmap=gray is automatically only activate for grayscale images\n ax.imshow(image, cmap=\"gray\")\n plt.show()\n\n\ndef do_assert(condition, message=\"Assertion failed.\"):\n \"\"\"Assert that a ``condition`` holds or raise an ``Exception`` otherwise.\n\n This was added because `assert` statements are removed in optimized code.\n It replaced `assert` statements throughout the library, but that was\n reverted again for readability and performance reasons.\n\n Parameters\n ----------\n condition : bool\n If ``False``, an exception is raised.\n\n message : str, optional\n Error message.\n\n \"\"\"\n if not condition:\n raise AssertionError(str(message))\n\n\ndef _normalize_cv2_input_arr_(arr):\n flags = arr.flags\n if not flags[\"OWNDATA\"]:\n arr = np.copy(arr)\n flags = arr.flags\n if not flags[\"C_CONTIGUOUS\"]:\n arr = np.ascontiguousarray(arr)\n return arr\n\n\ndef apply_lut(image, table):\n \"\"\"Map an input image to a new one using a lookup table.\n\n dtype support::\n\n See :func:`~imgaug.imgaug.apply_lut_`.\n\n Parameters\n ----------\n image : ndarray\n See :func:`~imgaug.imgaug.apply_lut_`.\n\n table : ndarray or list of ndarray\n See :func:`~imgaug.imgaug.apply_lut_`.\n\n Returns\n -------\n ndarray\n Image after mapping via lookup table.\n\n \"\"\"\n return apply_lut_(np.copy(image), table)\n\n\n# TODO make this function compatible with short max sized images, probably\n# isn't right now\ndef apply_lut_(image, table):\n \"\"\"Map an input image in-place to a new one using a lookup table.\n\n dtype support::\n\n * ``uint8``: yes; fully tested\n * ``uint16``: no\n * ``uint32``: no\n * ``uint64``: no\n * ``int8``: no\n * ``int16``: no\n * ``int32``: no\n * ``int64``: no\n * ``float16``: no\n * ``float32``: no\n * ``float64``: no\n * ``float128``: no\n * ``bool``: no\n\n Parameters\n ----------\n image : ndarray\n Image of dtype ``uint8`` and shape ``(H,W)`` or ``(H,W,C)``.\n\n table : ndarray or list of ndarray\n Table of dtype ``uint8`` containing the mapping from old to new\n values. 
Either a ``list`` of ``C`` ``(256,)`` arrays or a single\n array of shape ``(256,)`` or ``(256, C)`` or ``(1, 256, C)``.\n In case of ``(256,)`` the same table is used for all channels,\n otherwise a channelwise table is used and ``C`` is expected to match\n the number of channels.\n\n Returns\n -------\n ndarray\n Image after mapping via lookup table.\n This *might* be the same array instance as provided via `image`.\n\n \"\"\"\n\n image_shape_orig = image.shape\n nb_channels = 1 if len(image_shape_orig) == 2 else image_shape_orig[-1]\n\n if 0 in image_shape_orig:\n return image\n\n image = _normalize_cv2_input_arr_(image)\n\n # [(256,), (256,), ...] => (256, C)\n if isinstance(table, list):\n assert len(table) == nb_channels, (\n \"Expected to get %d tables (one per channel), got %d instead.\" % (\n nb_channels, len(table)))\n table = np.stack(table, axis=-1)\n\n # (256, C) => (1, 256, C)\n if table.shape == (256, nb_channels):\n table = table[np.newaxis, :, :]\n\n assert table.shape == (256,) or table.shape == (1, 256, nb_channels), (\n \"Expected 'table' to be any of the following: \"\n \"A list of C (256,) arrays, an array of shape (256,), an array of \"\n \"shape (256, C), an array of shape (1, 256, C). Transformed 'table' \"\n \"up to shape %s for image with shape %s (C=%d).\" % (\n table.shape, image_shape_orig, nb_channels))\n\n if nb_channels > 512:\n if table.shape == (256,):\n table = np.tile(table[np.newaxis, :, np.newaxis],\n (1, 1, nb_channels))\n\n subluts = []\n for group_idx in np.arange(int(np.ceil(nb_channels / 512))):\n c_start = group_idx * 512\n c_end = c_start + 512\n subluts.append(apply_lut_(image[:, :, c_start:c_end],\n table[:, :, c_start:c_end]))\n\n return np.concatenate(subluts, axis=2)\n\n assert image.dtype.name == \"uint8\", (\n \"Expected uint8 image, got dtype %s.\" % (image.dtype.name,))\n assert table.dtype.name == \"uint8\", (\n \"Expected uint8 table, got dtype %s.\" % (table.dtype.name,))\n\n image = cv2.LUT(image, table, dst=image)\n return image\n\n\nclass HooksImages(object):\n \"\"\"Class to intervene with image augmentation runs.\n\n This is e.g. useful to dynamically deactivate some augmenters.\n\n Parameters\n ----------\n activator : None or callable, optional\n A function that gives permission to execute an augmenter.\n The expected interface is::\n\n ``f(images, augmenter, parents, default)``\n\n where ``images`` are the input images to augment, ``augmenter`` is the\n instance of the augmenter to execute, ``parents`` are previously\n executed augmenters and ``default`` is an expected default value to be\n returned if the activator function does not plan to make a decision\n for the given inputs.\n\n propagator : None or callable, optional\n A function that gives permission to propagate the augmentation further\n to the children of an augmenter. This happens after the activator.\n In theory, an augmenter may augment images itself (if allowed by the\n activator) and then execute child augmenters afterwards (if allowed by\n the propagator). 
If the activator returned ``False``, the propagation\n step will never be executed.\n The expected interface is::\n\n ``f(images, augmenter, parents, default)``\n\n with all arguments having identical meaning to the activator.\n\n preprocessor : None or callable, optional\n A function to call before an augmenter performed any augmentations.\n The interface is:\n\n ``f(images, augmenter, parents)``\n\n with all arguments having identical meaning to the activator.\n It is expected to return the input images, optionally modified.\n\n postprocessor : None or callable, optional\n A function to call after an augmenter performed augmentations.\n The interface is the same as for the `preprocessor`.\n\n Examples\n --------\n >>> import numpy as np\n >>> import imgaug as ia\n >>> import imgaug.augmenters as iaa\n >>> seq = iaa.Sequential([\n >>> iaa.GaussianBlur(3.0, name=\"blur\"),\n >>> iaa.Dropout(0.05, name=\"dropout\"),\n >>> iaa.Affine(translate_px=-5, name=\"affine\")\n >>> ])\n >>> images = [np.zeros((10, 10), dtype=np.uint8)]\n >>>\n >>> def activator(images, augmenter, parents, default):\n >>> return False if augmenter.name in [\"blur\", \"dropout\"] else default\n >>>\n >>> seq_det = seq.to_deterministic()\n >>> images_aug = seq_det.augment_images(images)\n >>> heatmaps = [np.random.rand(*(3, 10, 10))]\n >>> heatmaps_aug = seq_det.augment_images(\n >>> heatmaps,\n >>> hooks=ia.HooksImages(activator=activator)\n >>> )\n\n This augments images and their respective heatmaps in the same way.\n The heatmaps however are only modified by ``Affine``, not by\n ``GaussianBlur`` or ``Dropout``.\n\n \"\"\"\n\n def __init__(self, activator=None, propagator=None, preprocessor=None,\n postprocessor=None):\n self.activator = activator\n self.propagator = propagator\n self.preprocessor = preprocessor\n self.postprocessor = postprocessor\n\n def is_activated(self, images, augmenter, parents, default):\n \"\"\"Estimate whether an augmenter may be executed.\n\n This also affects propagation of data to child augmenters.\n\n Returns\n -------\n bool\n If ``True``, the augmenter may be executed.\n Otherwise ``False``.\n\n \"\"\"\n if self.activator is None:\n return default\n return self.activator(images, augmenter, parents, default)\n\n def is_propagating(self, images, augmenter, parents, default):\n \"\"\"Estimate whether an augmenter may call its children.\n\n This function decides whether an augmenter with children is allowed\n to call these in order to further augment the inputs.\n Note that if the augmenter itself performs augmentations (before/after\n calling its children), these may still be executed, even if this\n method returns ``False``.\n\n Returns\n -------\n bool\n If ``True``, the augmenter may propagate data to its children.\n Otherwise ``False``.\n\n \"\"\"\n if self.propagator is None:\n return default\n return self.propagator(images, augmenter, parents, default)\n\n def preprocess(self, images, augmenter, parents):\n \"\"\"Preprocess input data per augmenter before augmentation.\n\n Returns\n -------\n (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray\n The input images, optionally modified.\n\n \"\"\"\n if self.preprocessor is None:\n return images\n return self.preprocessor(images, augmenter, parents)\n\n def postprocess(self, images, augmenter, parents):\n \"\"\"Postprocess input data per augmenter after augmentation.\n\n Returns\n -------\n (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray\n The input images, 
optionally modified.\n\n \"\"\"\n if self.postprocessor is None:\n return images\n return self.postprocessor(images, augmenter, parents)\n\n\nclass HooksHeatmaps(HooksImages):\n \"\"\"Class to intervene with heatmap augmentation runs.\n\n This is e.g. useful to dynamically deactivate some augmenters.\n\n This class is currently the same as the one for images. This may or may\n not change in the future.\n\n \"\"\"\n\n\nclass HooksKeypoints(HooksImages):\n \"\"\"Class to intervene with keypoint augmentation runs.\n\n This is e.g. useful to dynamically deactivate some augmenters.\n\n This class is currently the same as the one for images. This may or may\n not change in the future.\n\n \"\"\"\n\n\n#####################################################################\n# Create classes/functions that were moved to other files and create\n# DeprecatedWarnings when they are called.\n#####################################################################\n\ndef _mark_moved_class_or_function(class_name_old, module_name_new,\n class_name_new):\n # pylint: disable=redefined-outer-name\n class_name_new = (class_name_new\n if class_name_new is not None\n else class_name_old)\n\n def _func(*args, **kwargs):\n import importlib\n warn_deprecated(\n \"Using imgaug.imgaug.%s is deprecated. Use %s.%s instead.\" % (\n class_name_old, module_name_new, class_name_new\n ))\n module = importlib.import_module(module_name_new)\n return getattr(module, class_name_new)(*args, **kwargs)\n\n return _func\n\n\nMOVED = [\n (\"Keypoint\", \"imgaug.augmentables.kps\", None),\n (\"KeypointsOnImage\", \"imgaug.augmentables.kps\", None),\n (\"BoundingBox\", \"imgaug.augmentables.bbs\", None),\n (\"BoundingBoxesOnImage\", \"imgaug.augmentables.bbs\", None),\n (\"Polygon\", \"imgaug.augmentables.polys\", None),\n (\"PolygonsOnImage\", \"imgaug.augmentables.polys\", None),\n (\"MultiPolygon\", \"imgaug.augmentables.polys\", None),\n (\"_ConcavePolygonRecoverer\", \"imgaug.augmentables.polys\", None),\n (\"HeatmapsOnImage\", \"imgaug.augmentables.heatmaps\", None),\n (\"SegmentationMapsOnImage\", \"imgaug.augmentables.segmaps\", None),\n (\"Batch\", \"imgaug.augmentables.batches\", None),\n (\"BatchLoader\", \"imgaug.multicore\", None),\n (\"BackgroundAugmenter\", \"imgaug.multicore\", None),\n (\"compute_geometric_median\", \"imgaug.augmentables.kps\", None),\n (\"_convert_points_to_shapely_line_string\", \"imgaug.augmentables.polys\",\n None),\n (\"_interpolate_point_pair\", \"imgaug.augmentables.polys\", None),\n (\"_interpolate_points\", \"imgaug.augmentables.polys\", None),\n (\"_interpolate_points_by_max_distance\", \"imgaug.augmentables.polys\", None),\n (\"pad\", \"imgaug.augmenters.size\", None),\n (\"pad_to_aspect_ratio\", \"imgaug.augmenters.size\", None),\n (\"pad_to_multiples_of\", \"imgaug.augmenters.size\", None),\n (\"compute_paddings_for_aspect_ratio\", \"imgaug.augmenters.size\",\n \"compute_paddings_to_reach_aspect_ratio\"),\n (\"compute_paddings_to_reach_multiples_of\", \"imgaug.augmenters.size\", None),\n (\"compute_paddings_to_reach_exponents_of\", \"imgaug.augmenters.size\", None)\n]\n\nfor class_name_old, module_name_new, class_name_new in MOVED:\n locals()[class_name_old] = _mark_moved_class_or_function(\n class_name_old, module_name_new, class_name_new)\n" ]
[ [ "numpy.ceil", "numpy.stack", "numpy.floor", "numpy.pad", "numpy.copy", "numpy.arange", "numpy.clip", "numpy.log", "numpy.float128", "numpy.round", "numpy.concatenate", "numpy.full", "numpy.array", "numpy.atleast_3d" ], [ "numpy.tile", "numpy.ceil", "numpy.zeros", "numpy.dot", "numpy.round", "numpy.stack", "numpy.concatenate", "numpy.float32", "numpy.asarray", "numpy.copy", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.array", "numpy.ascontiguousarray", "numpy.isscalar", "numpy.linalg.norm" ] ]
cuicaihao/Data_Science_Python
[ "ca4cb64bf9afc1011c192586362d0dd036e9441e" ]
[ "10.Algorithms_Data_Structure/Searching_n_Sorting/QuickSort.py" ]
[ "import numpy as np\n\n\ndef partition(arr, low, high):\n i = (low-1) # index of smaller element\n pivot = arr[high] # pivot\n\n for j in range(low, high):\n\n # If current element is smaller than the pivot\n if arr[j] < pivot:\n\n # increment index of smaller element\n i = i+1\n arr[i], arr[j] = arr[j], arr[i]\n\n arr[i+1], arr[high] = arr[high], arr[i+1]\n return (i + 1)\n\n\ndef quickSort(arr, low, high):\n if low < high:\n\n # pi is partitioning index, arr[p] is now\n # at right place\n pi = partition(arr, low, high)\n\n # Separately sort elements before\n # partition and after partition\n quickSort(arr, low, pi-1)\n quickSort(arr, pi + 1, high)\n\n # Driver code to test above\n# arr = [10, 7, 8, 9, 1, 5]\narr = np.random.randint(0, 1000000, 200000)\nn = len(arr)\nquickSort(arr, 0, n-1)\n# print(f\"Sorted array is: {arr}\")\n" ]
[ [ "numpy.random.randint" ] ]
mvdoc/himalaya
[ "7e3866287b835e2cc0a5c9848331e19c14896309" ]
[ "himalaya/kernel_ridge/tests/test_random_search_kernel.py" ]
[ "import pytest\n\nimport numpy as np\nimport sklearn.linear_model\nimport sklearn.model_selection\nimport scipy.linalg\n\nfrom himalaya.backend import set_backend\nfrom himalaya.backend import ALL_BACKENDS\nfrom himalaya.utils import assert_array_almost_equal\nfrom himalaya.scoring import r2_score\n\nfrom himalaya.kernel_ridge import solve_multiple_kernel_ridge_random_search\n\n\ndef _create_dataset(backend, n_targets=4):\n n_featuress = (100, 200)\n n_samples = 80\n n_gammas = 3\n\n Xs = [\n backend.asarray(backend.randn(n_samples, n_features), backend.float64)\n for n_features in n_featuress\n ]\n Ks = backend.stack([X @ X.T for X in Xs])\n\n ws = [\n backend.asarray(backend.randn(n_features, n_targets), backend.float64)\n for n_features in n_featuress\n ]\n Ys = backend.stack([X @ w for X, w in zip(Xs, ws)])\n Y = Ys.sum(0)\n\n gammas = backend.asarray(backend.rand(n_gammas, Ks.shape[0]),\n backend.float64)\n gammas /= gammas.sum(1)[:, None]\n\n return Ks, Y, gammas, Xs\n\n\n@pytest.mark.parametrize('local_alpha', [True, False])\n@pytest.mark.parametrize('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_local_alphah(\n backend, local_alpha):\n _test_solve_multiple_kernel_ridge_random_search(backend=backend,\n local_alpha=local_alpha)\n\n\n@pytest.mark.parametrize('n_targets_batch', [None, 3])\n@pytest.mark.parametrize('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_n_targets_batch(\n backend, n_targets_batch):\n _test_solve_multiple_kernel_ridge_random_search(\n backend=backend, n_targets_batch=n_targets_batch)\n\n\n@pytest.mark.parametrize('n_alphas_batch', [None, 2])\n@pytest.mark.parametrize('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_n_alphas_batch(\n backend, n_alphas_batch):\n _test_solve_multiple_kernel_ridge_random_search(\n backend=backend, n_alphas_batch=n_alphas_batch)\n\n\n@pytest.mark.parametrize('return_weights', ['primal', 'dual'])\n@pytest.mark.parametrize('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_return_weights(\n backend, return_weights):\n _test_solve_multiple_kernel_ridge_random_search(\n backend=backend, return_weights=return_weights)\n\n\n@pytest.mark.parametrize('diagonalize_method', ['eigh', 'svd'])\n@pytest.mark.parametrize('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_diagonalize_method(\n backend, diagonalize_method):\n _test_solve_multiple_kernel_ridge_random_search(\n backend=backend, diagonalize_method=diagonalize_method)\n\n\ndef _test_solve_multiple_kernel_ridge_random_search(\n backend, n_targets_batch=None, n_alphas_batch=None,\n return_weights=\"dual\", diagonalize_method=\"eigh\", local_alpha=True):\n backend = set_backend(backend)\n\n Ks, Y, gammas, Xs = _create_dataset(backend)\n alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)\n n_targets = Y.shape[1]\n cv = sklearn.model_selection.check_cv(10)\n\n ############\n # run solver\n results = solve_multiple_kernel_ridge_random_search(\n Ks, Y, n_iter=gammas, alphas=alphas, score_func=r2_score, cv=cv,\n n_targets_batch=n_targets_batch, Xs=Xs, progress_bar=False,\n return_weights=return_weights, n_alphas_batch=n_alphas_batch,\n diagonalize_method=diagonalize_method, local_alpha=local_alpha)\n best_deltas, refit_weights, cv_scores = results\n\n #########################################\n # compare with sklearn.linear_model.Ridge\n if local_alpha: # only compare when each target optimizes alpha\n test_scores = []\n for gamma in 
backend.sqrt(gammas):\n X = backend.concatenate([x * g for x, g in zip(Xs, gamma)], 1)\n for train, test in cv.split(X):\n for alpha in alphas:\n model = sklearn.linear_model.Ridge(\n alpha=backend.to_numpy(alpha), fit_intercept=False)\n model = model.fit(backend.to_numpy(X[train]),\n backend.to_numpy(Y[train]))\n predictions = backend.asarray_like(\n model.predict(backend.to_numpy(X[test])), Y)\n test_scores.append(r2_score(Y[test], predictions))\n\n test_scores = backend.stack(test_scores)\n test_scores = test_scores.reshape(len(gammas), cv.get_n_splits(),\n len(alphas), n_targets)\n test_scores_mean = backend.max(test_scores.mean(1), 1)\n assert_array_almost_equal(cv_scores, test_scores_mean, decimal=5)\n\n ######################\n # test refited_weights\n for tt in range(n_targets):\n gamma = backend.exp(best_deltas[:, tt])\n alpha = 1.0\n\n if return_weights == 'primal':\n # compare primal weights with sklearn.linear_model.Ridge\n X = backend.concatenate(\n [X * backend.sqrt(g) for X, g in zip(Xs, gamma)], 1)\n model = sklearn.linear_model.Ridge(fit_intercept=False,\n alpha=backend.to_numpy(alpha))\n w1 = model.fit(backend.to_numpy(X),\n backend.to_numpy(Y[:, tt])).coef_\n w1 = np.split(w1, np.cumsum([X.shape[1] for X in Xs][:-1]), axis=0)\n w1 = [backend.asarray(w) for w in w1]\n w1_scaled = backend.concatenate(\n [w * backend.sqrt(g) for w, g, in zip(w1, gamma)])\n assert_array_almost_equal(w1_scaled, refit_weights[:, tt],\n decimal=5)\n\n elif return_weights == 'dual':\n # compare dual weights with scipy.linalg.solve\n Ks_64 = backend.asarray(Ks, dtype=backend.float64)\n gamma_64 = backend.asarray(gamma, dtype=backend.float64)\n K = backend.matmul(Ks_64.T, gamma_64).T\n reg = backend.asarray_like(np.eye(K.shape[0]), K) * alpha\n Y_64 = backend.asarray(Y, dtype=backend.float64)\n c1 = scipy.linalg.solve(backend.to_numpy(K + reg),\n backend.to_numpy(Y_64[:, tt]))\n c1 = backend.asarray_like(c1, K)\n assert_array_almost_equal(c1, refit_weights[:, tt], decimal=5)\n\n\n@pytest.mark.parametrize('backend', ALL_BACKENDS)\ndef test_solve_multiple_kernel_ridge_random_search_single_alpha_numpy(backend):\n backend = set_backend(backend)\n # just a smoke test, so make it minimal\n Ks, Y, gammas, Xs = _create_dataset(backend)\n alphas = 1.0\n # make Y a numpy array\n Y = backend.to_numpy(Y)\n results = solve_multiple_kernel_ridge_random_search(\n Ks, Y, n_iter=gammas, alphas=alphas\n )\n\n\n@pytest.mark.parametrize('backend', ALL_BACKENDS)\n@pytest.mark.parametrize('n_kernels', [1, 2])\ndef test_solve_multiple_kernel_ridge_random_search_global_alpha(backend, n_kernels):\n backend = set_backend(backend)\n # add more targets to make sure we get some variability\n Ks, Y, gammas, Xs = _create_dataset(backend, n_targets=20)\n alphas = backend.asarray_like(backend.logspace(-3, 5, 9), Ks)\n cv = sklearn.model_selection.check_cv(5)\n\n deltas, *_, best_alphas = solve_multiple_kernel_ridge_random_search(\n Ks[:n_kernels],\n Y,\n n_iter=50,\n progress_bar=False,\n alphas=alphas,\n cv=cv,\n local_alpha=False,\n return_alphas=True\n )\n # test that we return a single combination of deltas\n deltas = backend.to_numpy(deltas)\n if deltas.ndim == 1:\n assert np.allclose(deltas[0], deltas)\n else:\n for dd in deltas:\n assert np.allclose(dd[0], dd)\n\n # test that we return a single alpha\n best_alphas = backend.to_numpy(best_alphas)\n assert np.allclose(best_alphas[0], best_alphas)" ]
[ [ "numpy.allclose", "numpy.cumsum", "numpy.eye" ] ]
andriyor/moviepy
[ "8eaf3f02c5cf812e89f03e925cb2fa5e05b8d29a" ]
[ "moviepy/video/tools/drawing.py" ]
[ "\"\"\"Deals with making images (np arrays). It provides drawing\nmethods that are difficult to do with the existing Python libraries.\n\"\"\"\n\nimport numpy as np\n\n\ndef blit(im1, im2, pos=None, mask=None):\n \"\"\"Blit an image over another.\n\n Blits ``im1`` on ``im2`` as position ``pos=(x,y)``, using the\n ``mask`` if provided.\n \"\"\"\n if pos is None:\n pos = (0, 0) # pragma: no cover\n else:\n # Cast to tuple in case pos is not subscriptable.\n pos = tuple(pos)\n im2.paste(im1, pos, mask)\n return im2\n\n\ndef color_gradient(\n size,\n p1,\n p2=None,\n vector=None,\n radius=None,\n color_1=0.0,\n color_2=1.0,\n shape=\"linear\",\n offset=0,\n):\n \"\"\"Draw a linear, bilinear, or radial gradient.\n\n The result is a picture of size ``size``, whose color varies\n gradually from color `color_1` in position ``p1`` to color ``color_2``\n in position ``p2``.\n\n If it is a RGB picture the result must be transformed into\n a 'uint8' array to be displayed normally:\n\n Parameters\n ----------\n\n size : tuple or list\n Size (width, height) in pixels of the final image array.\n\n p1 : tuple or list\n Position for the first coordinate of the gradient in pixels (x, y).\n The color 'before' ``p1`` is ``color_1`` and it gradually changes in\n the direction of ``p2`` until it is ``color_2`` when it reaches ``p2``.\n\n p2 : tuple or list, optional\n Position for the second coordinate of the gradient in pixels (x, y).\n Coordinates (x, y) of the limit point for ``color_1``\n and ``color_2``.\n\n vector : tuple or list, optional\n A vector (x, y) in pixels that can be provided instead of ``p2``.\n ``p2`` is then defined as (p1 + vector).\n\n color_1 : tuple or list, optional\n Starting color for the gradient. As default, black. Either floats\n between 0 and 1 (for gradients used in masks) or [R, G, B] arrays\n (for colored gradients).\n\n color_2 : tuple or list, optional\n Color for the second point in the gradient. As default, white. Either\n floats between 0 and 1 (for gradients used in masks) or [R, G, B]\n arrays (for colored gradients).\n\n shape : str, optional\n Shape of the gradient. Can be either ``\"linear\"``, ``\"bilinear\"`` or\n ``\"circular\"``. In a linear gradient the color varies in one direction,\n from point ``p1`` to point ``p2``. In a bilinear gradient it also\n varies symmetrically from ``p1`` in the other direction. In a circular\n gradient it goes from ``color_1`` to ``color_2`` in all directions.\n\n radius : float, optional\n If ``shape=\"radial\"``, the radius of the gradient is defined with the\n parameter ``radius``, in pixels.\n\n offset : float, optional\n Real number between 0 and 1 indicating the fraction of the vector\n at which the gradient actually starts. For instance if ``offset``\n is 0.9 in a gradient going from p1 to p2, then the gradient will\n only occur near p2 (before that everything is of color ``color_1``)\n If the offset is 0.9 in a radial gradient, the gradient will\n occur in the region located between 90% and 100% of the radius,\n this creates a blurry disc of radius ``d(p1, p2)``.\n\n Returns\n -------\n\n image\n An Numpy array of dimensions (width, height, n_colors) of type float\n representing the image of the gradient.\n\n Examples\n --------\n\n >>> color_gradient((10, 1), (0, 0), p2=(10, 0)) # from white to black\n [[1. 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1]]\n >>>\n >>> color_gradient( # from red to green\n ... (10, 1), # size\n ... (0, 0), # p1\n ... p2=(10, 0),\n ... color_1=(255, 0, 0), # red\n ... 
color_2=(0, 255, 0), # green\n ... )\n [[[ 0. 255. 0. ]\n [ 25.5 229.5 0. ]\n [ 51. 204. 0. ]\n [ 76.5 178.5 0. ]\n [102. 153. 0. ]\n [127.5 127.5 0. ]\n [153. 102. 0. ]\n [178.5 76.5 0. ]\n [204. 51. 0. ]\n [229.5 25.5 0. ]]]\n \"\"\"\n # np-arrayize and change x,y coordinates to y,x\n w, h = size\n\n color_1 = np.array(color_1).astype(float)\n color_2 = np.array(color_2).astype(float)\n\n if shape == \"bilinear\":\n if vector is None:\n if p2 is None:\n raise ValueError(\"You must provide either 'p2' or 'vector'\")\n vector = np.array(p2) - np.array(p1)\n\n m1, m2 = [\n color_gradient(\n size,\n p1,\n vector=v,\n color_1=1.0,\n color_2=0.0,\n shape=\"linear\",\n offset=offset,\n )\n for v in [vector, [-v for v in vector]]\n ]\n\n arr = np.maximum(m1, m2)\n if color_1.size > 1:\n arr = np.dstack(3 * [arr])\n return arr * color_1 + (1 - arr) * color_2\n\n p1 = np.array(p1[::-1]).astype(float)\n\n M = np.dstack(np.meshgrid(range(w), range(h))[::-1]).astype(float)\n\n if shape == \"linear\":\n if vector is None:\n if p2 is not None:\n vector = np.array(p2[::-1]) - p1\n else:\n raise ValueError(\"You must provide either 'p2' or 'vector'\")\n else:\n vector = np.array(vector[::-1])\n\n norm = np.linalg.norm(vector)\n n_vec = vector / norm ** 2 # norm 1/norm(vector)\n\n p1 = p1 + offset * vector\n arr = (M - p1).dot(n_vec) / (1 - offset)\n arr = np.minimum(1, np.maximum(0, arr))\n if color_1.size > 1:\n arr = np.dstack(3 * [arr])\n return arr * color_1 + (1 - arr) * color_2\n\n elif shape == \"radial\":\n if (radius or 0) == 0:\n arr = np.ones((h, w))\n else:\n arr = (np.sqrt(((M - p1) ** 2).sum(axis=2))) - offset * radius\n arr = arr / ((1 - offset) * radius)\n arr = np.minimum(1.0, np.maximum(0, arr))\n\n if color_1.size > 1:\n arr = np.dstack(3 * [arr])\n return (1 - arr) * color_1 + arr * color_2\n raise ValueError(\"Invalid shape, should be either 'radial', 'linear' or 'bilinear'\")\n\n\ndef color_split(\n size,\n x=None,\n y=None,\n p1=None,\n p2=None,\n vector=None,\n color_1=0,\n color_2=1.0,\n gradient_width=0,\n):\n \"\"\"Make an image split in 2 colored regions.\n\n Returns an array of size ``size`` divided in two regions called 1 and\n 2 in what follows, and which will have colors color_1 and color_2\n respectively.\n\n Parameters\n ----------\n\n x : int, optional\n If provided, the image is split horizontally in x, the left\n region being region 1.\n\n y : int, optional\n If provided, the image is split vertically in y, the top region\n being region 1.\n\n p1, p2: tuple or list, optional\n Positions (x1, y1), (x2, y2) in pixels, where the numbers can be\n floats. Region 1 is defined as the whole region on the left when\n going from ``p1`` to ``p2``.\n\n p1, vector: tuple or list, optional\n ``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be\n floats. Region 1 is then the region on the left when starting\n in position ``p1`` and going in the direction given by ``vector``.\n\n gradient_width : float, optional\n If not zero, the split is not sharp, but gradual over a region of\n width ``gradient_width`` (in pixels). 
This is preferable in many\n situations (for instance for antialiasing).\n\n Examples\n --------\n\n >>> size = [200, 200]\n >>>\n >>> # an image with all pixels with x<50 =0, the others =1\n >>> color_split(size, x=50, color_1=0, color_2=1)\n >>>\n >>> # an image with all pixels with y<50 red, the others green\n >>> color_split(size, x=50, color_1=[255, 0, 0], color_2=[0, 255, 0])\n >>>\n >>> # An image split along an arbitrary line (see below)\n >>> color_split(size, p1=[20, 50], p2=[25, 70] color_1=0, color_2=1)\n \"\"\"\n if gradient_width or ((x is None) and (y is None)):\n if p2 is not None:\n vector = np.array(p2) - np.array(p1)\n elif x is not None:\n vector = np.array([0, -1.0])\n p1 = np.array([x, 0])\n elif y is not None:\n vector = np.array([1.0, 0.0])\n p1 = np.array([0, y])\n\n x, y = vector\n vector = np.array([y, -x]).astype(\"float\")\n norm = np.linalg.norm(vector)\n vector = max(0.1, gradient_width) * vector / norm\n return color_gradient(\n size, p1, vector=vector, color_1=color_1, color_2=color_2, shape=\"linear\"\n )\n else:\n w, h = size\n shape = (h, w) if np.isscalar(color_1) else (h, w, len(color_1))\n arr = np.zeros(shape)\n if x:\n arr[:, :x] = color_1\n arr[:, x:] = color_2\n elif y:\n arr[:y] = color_1\n arr[y:] = color_2\n return arr\n\n\ndef circle(screensize, center, radius, color=1.0, bg_color=0, blur=1):\n \"\"\"Draw an image with a circle.\n\n Draws a circle of color ``color``, on a background of color ``bg_color``,\n on a screen of size ``screensize`` at the position ``center=(x, y)``,\n with a radius ``radius`` but slightly blurred on the border by ``blur``\n pixels.\n\n Parameters\n ----------\n\n screensize : tuple or list\n Size of the canvas.\n\n center : tuple or list\n Center of the circle.\n\n radius : float\n Radius of the circle, in pixels.\n\n bg_color : tuple or float, optional\n Color for the background of the canvas. As default, black.\n\n blur : float, optional\n Blur for the border of the circle.\n\n Examples\n --------\n\n >>> from moviepy.video.tools.drawing import circle\n >>>\n >>> circle(\n ... (5, 5), # size\n ... (2, 2), # center\n ... 2, # radius\n ... )\n array([[0. , 0. , 0. , 0. , 0. ],\n [0. , 0.58578644, 1. , 0.58578644, 0. ],\n [0. , 1. , 1. , 1. , 0. ],\n [0. , 0.58578644, 1. , 0.58578644, 0. ],\n [0. , 0. , 0. , 0. , 0. ]])\n \"\"\"\n offset = 1.0 * (radius - blur) / radius if radius else 0\n return color_gradient(\n screensize,\n p1=center,\n radius=radius,\n color_1=color,\n color_2=bg_color,\n shape=\"radial\",\n offset=offset,\n )\n" ]
[ [ "numpy.ones", "numpy.array", "numpy.zeros", "numpy.dstack", "numpy.maximum", "numpy.isscalar", "numpy.linalg.norm" ] ]
hsfzxjy/svdnet-pytorch
[ "8f485d0b162c23b20449f7ee80c955e0b20950ae" ]
[ "train_svdnet_xent.py" ]
[ "from __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport sys\nimport time\nimport datetime\nimport os.path as osp\nimport numpy as np\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\n\nfrom args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs\nfrom torchreid.data_manager import ImageDataManager\nfrom torchreid import models\nfrom torchreid.losses import CrossEntropyLoss, DeepSupervision\nfrom torchreid.utils.iotools import check_isfile\nfrom torchreid.utils.avgmeter import AverageMeter\nfrom torchreid.utils.loggers import Logger, RankLogger\nfrom torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \\\n load_pretrained_weights, save_checkpoint, resume_from_checkpoint\nfrom torchreid.utils.reidtools import visualize_ranked_results\nfrom torchreid.utils.generaltools import set_random_seed\nfrom torchreid.eval_metrics import evaluate\nfrom torchreid.optimizers import init_optimizer\nfrom torchreid.lr_schedulers import init_lr_scheduler\n\n\nos.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))\n\ntestloader_dict = trainloader = criterion = None\nuse_gpu = False\n\n# global variables\nparser = argument_parser()\nargs = parser.parse_args()\n\n\ndef corr_metric(W: 'K x N'):\n\n G = W.permute(1, 0) @ W\n return torch.trace(G) / abs(G).sum()\n\n\ndef replace_weight(layer):\n\n with torch.no_grad():\n # NECESSARY! The weight of Linear layer has been transposed!\n A = layer.weight.t()\n M, N = A.size()\n M: 2048\n N: 1024\n U, S, V = torch.svd(A, some=False)\n W = A @ V\n W: '2048 x 1024 = M x N'\n\n NW = torch.zeros_like(A)\n\n for i in range(N):\n\n curr_N = W.size(1)\n\n W_norm = torch.norm(W, p=2, dim=0)\n W_norm: 'curr_N'\n\n index = i\n vec_i = A[:, i]\n vec_i_norm = torch.norm(vec_i)\n\n co = (A[:, i].view(M, 1).t() @ W).view(curr_N)\n co: 'curr_N'\n co = co / vec_i_norm\n absco = abs(co / W_norm)\n maxco_index = torch.max(absco, 0)[1].item()\n\n NW[:, index] = W[:, maxco_index] * torch.sign(co[maxco_index])\n\n # Remove selected column vector from W\n W = W[:, sorted({x for x in range(curr_N) if x != maxco_index})]\n\n layer.weight.copy_(NW.t())\n print(layer.weight)\n\n return layer\n\n\ndef main():\n global args, criterion, testloader_dict, trainloader, use_gpu\n\n set_random_seed(args.seed)\n if not args.use_avai_gpus:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices\n use_gpu = torch.cuda.is_available()\n if args.use_cpu:\n use_gpu = False\n log_name = 'test.log' if args.evaluate else 'train.log'\n sys.stdout = Logger(osp.join(args.save_dir, log_name))\n print('==========\\nArgs:{}\\n=========='.format(args))\n\n if use_gpu:\n print('Currently using GPU {}'.format(args.gpu_devices))\n cudnn.benchmark = True\n else:\n warnings.warn('Currently using CPU, however, GPU is highly recommended')\n\n print('Initializing image data manager')\n dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))\n trainloader, testloader_dict = dm.return_dataloaders()\n\n print('Initializing model: {}'.format(args.arch))\n model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, pretrained=not args.no_pretrained, use_gpu=use_gpu)\n print('Model size: {:.3f} M'.format(count_num_param(model)))\n\n if args.load_weights and check_isfile(args.load_weights):\n load_pretrained_weights(model, args.load_weights)\n\n model = nn.DataParallel(model).cuda() if use_gpu 
else model\n\n criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)\n\n if args.resume and check_isfile(args.resume):\n args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=None)\n resumed = True\n else:\n resumed = False\n\n if args.evaluate:\n print('Evaluate only')\n\n for name in args.target_names:\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)\n\n if args.visualize_ranks:\n visualize_ranked_results(\n distmat, dm.return_testdataset_by_name(name),\n save_dir=osp.join(args.save_dir, 'ranked_results', name),\n topk=20\n )\n return\n\n time_start = time.time()\n # ranklogger = RankLogger(args.source_names, args.target_names)\n print('=> Start training')\n\n if not resumed:\n train_base(model)\n train_RRI(model, 7)\n\n elapsed = round(time.time() - time_start)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n print('Elapsed {}'.format(elapsed))\n # ranklogger.show_summary()\n\n\ndef train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=False):\n losses = AverageMeter()\n accs = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n model.train()\n\n # if fixbase or args.always_fixbase:\n # open_specified_layers(model, args.open_layers)\n # else:\n # open_all_layers(model)\n\n end = time.time()\n for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):\n data_time.update(time.time() - end)\n\n if use_gpu:\n imgs, pids = imgs.cuda(), pids.cuda()\n\n outputs = model(imgs)\n loss = sum(criterion(x, pids) for x in outputs) / len(outputs)\n # if isinstance(outputs, (tuple, list)):\n # loss = DeepSupervision(criterion, outputs, pids)\n # else:\n # loss = criterion(outputs, pids)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n batch_time.update(time.time() - end)\n\n losses.update(loss.item(), pids.size(0))\n accs.update(accuracy(outputs, pids)[0])\n\n if (batch_idx + 1) % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc {acc.val:.2f} ({acc.avg:.2f})\\t'.format(\n epoch + 1, batch_idx + 1, len(trainloader),\n batch_time=batch_time,\n data_time=data_time,\n loss=losses,\n acc=accs\n ))\n\n end = time.time()\n\n\ndef test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):\n batch_time = AverageMeter()\n\n model.eval()\n\n with torch.no_grad():\n qf, q_pids, q_camids = [], [], []\n for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):\n if use_gpu:\n imgs = imgs.cuda()\n\n end = time.time()\n features = model(imgs)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n qf.append(features)\n q_pids.extend(pids)\n q_camids.extend(camids)\n qf = torch.cat(qf, 0)\n q_pids = np.asarray(q_pids)\n q_camids = np.asarray(q_camids)\n\n print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))\n\n gf, g_pids, g_camids = [], [], []\n end = time.time()\n for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):\n if use_gpu:\n imgs = imgs.cuda()\n\n end = time.time()\n features = model(imgs)\n batch_time.update(time.time() - end)\n\n features = features.data.cpu()\n gf.append(features)\n g_pids.extend(pids)\n 
g_camids.extend(camids)\n gf = torch.cat(gf, 0)\n g_pids = np.asarray(g_pids)\n g_camids = np.asarray(g_camids)\n\n print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))\n\n print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size))\n\n m, n = qf.size(0), gf.size(0)\n distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n distmat.addmm_(1, -2, qf, gf.t())\n distmat = distmat.numpy()\n\n print('Computing CMC and mAP')\n cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)\n\n print('Results ----------')\n print('mAP: {:.1%}'.format(mAP))\n print('CMC curve')\n for r in ranks:\n print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))\n print('------------------')\n\n if return_distmat:\n return distmat\n return cmc[0]\n\n\ndef get_base_optimizer(model):\n\n kwargs = {\n 'weight_decay': 5e-4,\n 'lr': 0.0003,\n 'betas': (0.9, 0.999),\n }\n param_groups = model.parameters()\n\n optimizer = torch.optim.Adam(param_groups, **kwargs)\n scheduler = init_lr_scheduler(optimizer, stepsize=[20, 40], gamma=0.1)\n\n return optimizer, scheduler\n\n\ndef get_base_sgd_optimizer(model):\n\n kwargs = {\n 'weight_decay': 5e-4,\n 'lr': 0.001,\n 'momentum': 0.9,\n }\n\n param_groups = model.parameters()\n\n optimizer = torch.optim.SGD(param_groups, **kwargs)\n scheduler = init_lr_scheduler(optimizer, stepsize=[25, 50], gamma=0.1)\n\n return optimizer, scheduler\n\n\ndef get_RRI_optimizer(\n model,\n lr\n):\n\n kwargs = {\n 'weight_decay': 5e-4,\n 'lr': lr,\n 'momentum': 0.9,\n }\n param_groups = model.parameters()\n\n optimizer = torch.optim.SGD(param_groups, **kwargs)\n scheduler = init_lr_scheduler(optimizer, stepsize=[12], gamma=0.1)\n\n return optimizer, scheduler\n\n\ndef train_R(model, lr, T, fix_eigen_layer: bool=False):\n\n eigen_layers = model.module.get_fcs()\n\n if fix_eigen_layer:\n for eigen_layer in eigen_layers:\n eigen_layer.eval()\n for p in eigen_layer.parameters():\n p.requires_grad = False\n\n stage_name = 'restraint'\n else:\n model.train()\n for p in model.parameters():\n p.requires_grad = True\n\n stage_name = 'relaxation'\n\n prefix = '{}_{}_'.format(T, stage_name)\n\n optimizer, scheduler = get_RRI_optimizer(model, lr)\n\n for epoch in range(20):\n train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)\n\n scheduler.step()\n\n print('=> Test')\n\n if (epoch + 1) % args.eval_freq == 0:\n for name in args.target_names:\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n rank1 = test(model, queryloader, galleryloader, use_gpu)\n\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'rank1': rank1,\n 'epoch': 0,\n 'arch': args.arch,\n 'optimizer': (),\n }, args.save_dir, prefix=prefix)\n\n\ndef train_base(model):\n\n use_sgd = os.environ.get('sgd') is not None\n\n optimizer_getter = get_base_sgd_optimizer if use_sgd else get_base_optimizer\n\n optimizer, scheduler = get_base_optimizer(model)\n\n model.train()\n print('=== train base ===')\n\n if True:\n open_layers = ['fc', 'classifier1', 'classifier2_1', 'classifier2_2', 'fc2_1', 'fc2_2', 'reduction', 'classifier']\n\n print('Train {} for {} epochs while keeping other layers frozen'.format(open_layers, 10))\n\n for epoch in range(10):\n\n open_specified_layers(model, open_layers)\n train(epoch, model, criterion, optimizer, 
trainloader, use_gpu, fixbase=True)\n\n print('Done. All layers are open to train for {} epochs'.format(60))\n open_all_layers(model)\n\n optimizer, scheduler = optimizer_getter(model)\n\n for epoch in range(60):\n train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)\n scheduler.step()\n\n print('=> Test')\n\n if (epoch + 1) % args.eval_freq == 0:\n\n for name in args.target_names:\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n rank1 = test(model, queryloader, galleryloader, use_gpu)\n\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'rank1': rank1,\n 'epoch': 0,\n 'arch': args.arch,\n 'optimizer': optimizer.state_dict(),\n }, args.save_dir, prefix='base_')\n\n\ndef train_RRI(model, Ts: int=7):\n\n base_lrs = [0.001] * 3 + [0.0001] * 10\n\n for T in range(Ts):\n print('=== T = {} ==='.format(T))\n print('Replacing eigen layer weight...')\n for eigen_layer in model.module.get_fcs():\n replace_weight(eigen_layer)\n print('Replaced.')\n print('--- Restraint ({}) ---'.format(T))\n train_R(model, base_lrs[T], T, fix_eigen_layer=True)\n print('--- Relaxation ({}) ---'.format(T))\n train_R(model, base_lrs[T], T, fix_eigen_layer=False)\n\n for name in args.target_names:\n print('Evaluating {} ...'.format(name))\n queryloader = testloader_dict[name]['query']\n galleryloader = testloader_dict[name]['gallery']\n rank1 = test(model, queryloader, galleryloader, use_gpu)\n\n save_checkpoint({\n 'state_dict': model.state_dict(),\n 'rank1': rank1,\n 'epoch': 0,\n 'arch': args.arch,\n 'optimizer': (),\n }, args.save_dir, prefix='final_')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.optim.SGD", "torch.svd", "torch.zeros_like", "torch.trace", "torch.no_grad", "numpy.asarray", "torch.norm", "torch.optim.Adam", "torch.sign", "torch.cuda.is_available", "torch.max", "torch.nn.DataParallel", "torch.cat", "torch.pow" ] ]
kerel-fs/poliastro
[ "1ad2074aebb7cf18f507ac44931d1e18fec53dad" ]
[ "src/poliastro/core/perturbations.py" ]
[ "import numpy as np\nfrom numpy.linalg import norm\n\nfrom ._jit import jit\n\n\n@jit\ndef J2_perturbation(t0, state, k, J2, R):\n r\"\"\"Calculates J2_perturbation acceleration (km/s2)\n\n .. math::\n\n \\vec{p} = \\frac{3}{2}\\frac{J_{2}\\mu R^{2}}{r^{4}}\\left [\\frac{x}{r}\\left ( 5\\frac{z^{2}}{r^{2}}-1 \\right )\\vec{i} + \\frac{y}{r}\\left ( 5\\frac{z^{2}}{r^{2}}-1 \\right )\\vec{j} + \\frac{z}{r}\\left ( 5\\frac{z^{2}}{r^{2}}-3 \\right )\\vec{k}\\right]\n\n .. versionadded:: 0.9.0\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n J2: float\n oblateness factor\n R: float\n attractor radius\n\n Note\n ----\n The J2 accounts for the oblateness of the attractor. The formula is given in\n Howard Curtis, (12.30)\n\n \"\"\"\n r_vec = state[:3]\n r = norm(r_vec)\n\n factor = (3.0 / 2.0) * k * J2 * (R ** 2) / (r ** 5)\n\n a_x = 5.0 * r_vec[2] ** 2 / r ** 2 - 1\n a_y = 5.0 * r_vec[2] ** 2 / r ** 2 - 1\n a_z = 5.0 * r_vec[2] ** 2 / r ** 2 - 3\n return np.array([a_x, a_y, a_z]) * r_vec * factor\n\n\n@jit\ndef J3_perturbation(t0, state, k, J3, R):\n r\"\"\"Calculates J3_perturbation acceleration (km/s2)\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n J3: float\n oblateness factor\n R: float\n attractor radius\n\n Note\n ----\n The J3 accounts for the oblateness of the attractor. The formula is given in\n Howard Curtis, problem 12.8\n This perturbation has not been fully validated, see https://github.com/poliastro/poliastro/pull/398\n\n \"\"\"\n r_vec = state[:3]\n r = norm(r_vec)\n\n factor = (1.0 / 2.0) * k * J3 * (R ** 3) / (r ** 5)\n cos_phi = r_vec[2] / r\n\n a_x = 5.0 * r_vec[0] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)\n a_y = 5.0 * r_vec[1] / r * (7.0 * cos_phi ** 3 - 3.0 * cos_phi)\n a_z = 3.0 * (35.0 / 3.0 * cos_phi ** 4 - 10.0 * cos_phi ** 2 + 1)\n return np.array([a_x, a_y, a_z]) * factor\n\n\n@jit\ndef atmospheric_drag(t0, state, k, R, C_D, A, m, H0, rho0):\n r\"\"\"Calculates atmospheric drag acceleration (km/s2)\n\n .. math::\n\n \\vec{p} = -\\frac{1}{2}\\rho v_{rel}\\left ( \\frac{C_{d}A}{m} \\right )\\vec{v_{rel}}\n\n\n .. versionadded:: 0.9.0\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n R : float\n radius of the attractor (km)\n C_D: float\n dimensionless drag coefficient ()\n A: float\n frontal area of the spacecraft (km^2)\n m: float\n mass of the spacecraft (kg)\n H0 : float\n atmospheric scale height, (km)\n rho0: float\n the exponent density pre-factor, (kg / m^3)\n\n Note\n ----\n This function provides the acceleration due to atmospheric drag. 
We follow\n Howard Curtis, section 12.4\n the atmospheric density model is rho(H) = rho0 x exp(-H / H0)\n\n \"\"\"\n H = norm(state[:3])\n\n v_vec = state[3:]\n v = norm(v_vec)\n B = C_D * A / m\n rho = rho0 * np.exp(-(H - R) / H0)\n\n return -(1.0 / 2.0) * rho * B * v * v_vec\n\n\n@jit\ndef shadow_function(r_sat, r_sun, R):\n r\"\"\"Determines whether the satellite is in attractor's shadow, uses algorithm 12.3 from Howard Curtis\n\n Parameters\n ----------\n r_sat : numpy.ndarray\n position of the satellite in the frame of attractor (km)\n r_sun : numpy.ndarray\n position of star in the frame of attractor (km)\n R : float\n radius of body (attractor) that creates shadow (km)\n\n \"\"\"\n\n r_sat_norm = np.sqrt(np.sum(r_sat ** 2))\n r_sun_norm = np.sqrt(np.sum(r_sun ** 2))\n\n theta = np.arccos(np.dot(r_sat, r_sun) / r_sat_norm / r_sun_norm)\n theta_1 = np.arccos(R / r_sat_norm)\n theta_2 = np.arccos(R / r_sun_norm)\n\n return theta < theta_1 + theta_2\n\n\ndef third_body(t0, state, k, k_third, third_body):\n r\"\"\"Calculates 3rd body acceleration (km/s2)\n\n .. math::\n\n \\vec{p} = \\mu_{m}\\left ( \\frac{\\vec{r_{m/s}}}{r_{m/s}^3} - \\frac{\\vec{r_{m}}}{r_{m}^3} \\right )\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n third_body: a callable object returning the position of 3rd body\n third body that causes the perturbation\n\n Note\n ----\n This formula is taken from Howard Curtis, section 12.10. As an example, a third body could be\n the gravity from the Moon acting on a small satellite.\n\n \"\"\"\n\n body_r = third_body(t0)\n delta_r = body_r - state[:3]\n return k_third * delta_r / norm(delta_r) ** 3 - k_third * body_r / norm(body_r) ** 3\n\n\ndef radiation_pressure(t0, state, k, R, C_R, A, m, Wdivc_s, star):\n r\"\"\"Calculates radiation pressure acceleration (km/s2)\n\n .. math::\n\n \\vec{p} = -\\nu \\frac{S}{c} \\left ( \\frac{C_{r}A}{m} \\right )\\frac{\\vec{r}}{r}\n\n Parameters\n ----------\n t0 : float\n Current time (s)\n state : numpy.ndarray\n Six component state vector [x, y, z, vx, vy, vz] (km, km/s).\n k : float\n gravitational constant, (km^3/s^2)\n R : float\n radius of the attractor\n C_R: float\n dimensionless radiation pressure coefficient, 1 < C_R < 2 ()\n A: float\n effective spacecraft area (km^2)\n m: float\n mass of the spacecraft (kg)\n Wdivc_s : float\n total star emitted power divided by the speed of light (W * s / km)\n star: a callable object returning the position of star in attractor frame\n star position\n\n Note\n ----\n This function provides the acceleration due to star light pressure. We follow\n Howard Curtis, section 12.9\n\n \"\"\"\n\n r_star = star(t0)\n r_sat = state[:3]\n P_s = Wdivc_s / (norm(r_star) ** 2)\n\n nu = float(shadow_function(r_sat, r_star, R))\n return -nu * P_s * (C_R * A / m) * r_star / norm(r_star)\n" ]
[ [ "numpy.sum", "numpy.arccos", "numpy.exp", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
tgquintela/pySpatialTools
[ "e028008f9750521bf7d311f7cd3323c88d621ea4" ]
[ "pySpatialTools/utils/artificial_data/artificial_measure.py" ]
[ "\n\"\"\"\nartificial measure\n------------------\nCreation of artificial measure\n\"\"\"\n\nimport numpy as np\n\n\n############################### Create measure ################################\n###############################################################################\ndef create_artificial_measure_array(n_k, n_vals_i, n_feats):\n \"\"\"Create artificial random measure in the array form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: np.ndarray\n the transformed measure computed by the whole spatial descriptor model.\n\n \"\"\"\n measure = np.random.random((n_vals_i, n_feats, n_k))\n return measure\n\n\ndef create_artificial_measure_append(n_k, n_vals_i, n_feats):\n \"\"\"Create artificial random measure in the list form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: list\n the transformed measure computed by the whole spatial descriptor model.\n\n \"\"\"\n rounds = np.random.randint(1, 40)\n measure = create_empty_append(n_k, n_vals_i, n_feats)\n for i in range(rounds):\n n_iss = np.random.randint(1, 10)\n vals_i = create_vals_i(n_iss, n_vals_i, n_k)\n x_i = create_features_i_dict(n_feats, n_iss, n_k)\n for k in range(len(vals_i)):\n for i in range(len(vals_i[k])):\n measure[k][vals_i[k][i]].append(x_i[k][i])\n return measure\n\n\ndef create_artificial_measure_replacelist(n_k, n_vals_i, n_feats,\n unique_=False):\n \"\"\"Create artificial random measure in the replacelist form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n unique_: boolean (default=False)\n if there are no collapse.\n\n Returns\n -------\n measure: list\n the transformed measure computed by the whole spatial descriptor model.\n\n \"\"\"\n last = 0\n rounds = np.random.randint(1, 40)\n measure = create_empty_replacelist(n_k, n_vals_i, n_feats)\n for i in range(rounds):\n n_iss = np.random.randint(1, 10)\n if unique_:\n vals_i = np.array([last+np.arange(n_iss)]*n_k)\n last += n_iss\n else:\n vals_i = create_vals_i(n_iss, n_vals_i, n_k)\n x_i = create_features_i_dict(n_feats, n_iss, n_k)\n for k in range(len(vals_i)):\n measure[k][0].append(x_i[k])\n measure[k][1].append(vals_i[k])\n return measure\n\n\n############################### Empty measure #################################\n###############################################################################\ndef create_empty_array(n_k, n_vals_i, n_feats):\n \"\"\"Create null measure in the array form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: np.ndarray\n the null measure to be fill by the computation of the spatial\n descriptor model.\n\n \"\"\"\n return np.zeros((n_vals_i, n_feats, n_k))\n\n\ndef create_empty_append(n_k, n_iss, n_feats):\n \"\"\"Create null measure in the list form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: list\n the null measure to be fill by the computation of the spatial\n 
descriptor model.\n\n \"\"\"\n return [[[]]*n_iss]*n_k\n\n\ndef create_empty_replacelist(n_k, n_iss, n_feats):\n \"\"\"Create null measure in the replacelist form.\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n measure: list\n the null measure to be fill by the computation of the spatial\n descriptor model.\n\n \"\"\"\n return [[[], []]]*n_k\n\n\n############################### Vals_i creation ###############################\n###############################################################################\ndef create_vals_i(n_iss, nvals, n_k):\n \"\"\"\n\n Parameters\n ----------\n n_k: int\n the number of perturbations\n n_vals_i: int\n the number of indices of the output measure.\n n_feats: int\n the number of features.\n\n Returns\n -------\n vals_i: np.ndarray\n the associated stored indices for the element indices.\n\n \"\"\"\n return np.random.randint(1, nvals, n_iss*n_k).reshape((n_k, n_iss))\n\n\n############################### Empty features ################################\n###############################################################################\ndef create_empty_features_array(n_feats, n_iss, n_k):\n \"\"\"Create null features for different iss in an array-form.\n\n Parameters\n ----------\n n_feats: int\n the number of features.\n n_iss: int\n the number of the elements to create their features.\n n_k: int\n the number of perturbations.\n\n Returns\n -------\n features: np.ndarray\n the null features we want to compute.\n\n \"\"\"\n return np.zeros((n_k, n_iss, n_feats))\n\n\ndef create_empty_features_dict(n_feats, n_iss, n_k):\n \"\"\"Create null features for different iss in an listdict-form.\n\n Parameters\n ----------\n n_feats: int\n the number of features.\n n_iss: int\n the number of the elements to create their features.\n n_k: int\n the number of perturbations.\n\n Returns\n -------\n features: list\n the null features we want to compute.\n\n \"\"\"\n return [[{}]*n_iss]*n_k\n\n\n################################ X_i features #################################\n###############################################################################\ndef create_features_i_array(n_feats, n_iss, n_k):\n \"\"\"Create null features for different iss in an array-form.\n\n Parameters\n ----------\n n_feats: int\n the number of features.\n n_iss: int\n the number of the elements to create their features.\n n_k: int\n the number of perturbations.\n\n Returns\n -------\n features: np.ndarray\n the null features we want to compute.\n\n \"\"\"\n x_i = np.random.random((n_k, n_iss, n_feats))\n return x_i\n\n\ndef create_features_i_dict(n_feats, n_iss, n_k):\n \"\"\"Create null features for different iss in an listdict-form.\n\n Parameters\n ----------\n n_feats: int\n the number of features.\n n_iss: int\n the number of the elements to create their features.\n n_k: int\n the number of perturbations.\n\n Returns\n -------\n features: list\n the null features we want to compute.\n\n \"\"\"\n x_i = []\n for k in range(n_k):\n x_i_k = []\n for i in range(n_iss):\n keys = np.unique(np.random.randint(1, n_feats, n_feats))\n keys = [str(e) for e in keys]\n values = np.random.random(len(keys))\n x_i_k.append(dict(zip(keys, values)))\n x_i.append(x_i_k)\n return x_i\n" ]
[ [ "numpy.random.random", "numpy.random.randint", "numpy.arange", "numpy.zeros" ] ]
reddigari/Eelbrain
[ "6c02b99955d4b5dc7e3054042c182e1a4629b13c" ]
[ "eelbrain/_stats/tests/test_spm.py" ]
[ "# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>\nimport pickle\nfrom nose.tools import eq_\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom eelbrain import datasets\nfrom eelbrain._stats.spm import LM, LMGroup\n\n\ndef test_lm():\n ds = datasets.get_uts()\n model = ds.eval(\"A*B*Y\")\n coeffs = ds['uts'].ols(model)\n\n lm = LM('uts', 'A*B*Y', ds, 'effect')\n eq_(repr(lm), \"<LM: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y>\")\n for i, effect in enumerate(model.effects):\n assert_array_equal(lm.coefficient(effect.name).x, coeffs.x[i])\n\n\ndef test_random_lm():\n # dummy coding\n ds = datasets.get_uts()\n lms = []\n for i in range(5):\n ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)\n lms.append(LM('uts', 'A*B*Y', ds))\n rlm = LMGroup(lms)\n eq_(repr(rlm), '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>')\n\n # coefficients\n ds = rlm.coefficients_dataset(('A', 'A x B'))\n eq_(ds['term'].cells, ('A', 'A x B'))\n\n # tests\n res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)\n eq_(res.clusters.n_cases, 1)\n\n # effect coding\n ds = datasets.get_uts()\n lms = []\n for i in range(5):\n ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)\n lms.append(LM('uts', 'A*B*Y', ds, 'effect'))\n rlm = LMGroup(lms)\n res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)\n eq_(res.clusters.n_cases, 6)\n\n # persistence\n rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL))\n eq_(rlm_p.dims, rlm.dims)\n" ]
[ [ "numpy.random.normal" ] ]
manas96/RelationPrediction
[ "06be62a55554971d1b523dc555f4c8616c21c664" ]
[ "code-tf2/encoders/message_gcns/gcn_basis.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom common.shared_functions import dot_or_lookup, glorot_variance, make_tf_variable, make_tf_bias\n\nfrom encoders.message_gcns.message_gcn import MessageGcn\n\n\nclass BasisGcn(MessageGcn):\n\n def parse_settings(self):\n self.dropout_keep_probability = float(self.settings['DropoutKeepProbability'])\n\n self.n_coefficients = int(self.settings['NumberOfBasisFunctions'])\n\n def local_initialize_train(self):\n vertex_feature_dimension = self.entity_count if self.onehot_input else self.shape[0]\n type_matrix_shape = (self.relation_count, self.n_coefficients)\n vertex_matrix_shape = (vertex_feature_dimension, self.n_coefficients, self.shape[1])\n self_matrix_shape = (vertex_feature_dimension, self.shape[1])\n\n glorot_var_combined = glorot_variance([vertex_matrix_shape[0], vertex_matrix_shape[2]])\n self.W_forward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape)\n self.W_backward = make_tf_variable(0, glorot_var_combined, vertex_matrix_shape)\n self.W_self = make_tf_variable(0, glorot_var_combined, self_matrix_shape)\n\n type_init_var = 1\n self.C_forward = make_tf_variable(0, type_init_var, type_matrix_shape)\n self.C_backward = make_tf_variable(0, type_init_var, type_matrix_shape)\n\n self.b = make_tf_bias(self.shape[1])\n\n\n def local_get_weights(self):\n return [self.W_forward, self.W_backward,\n self.C_forward, self.C_backward,\n self.W_self,\n self.b]\n\n def compute_messages(self, sender_features, receiver_features):\n backward_type_scaling, forward_type_scaling = self.compute_coefficients()\n receiver_terms, sender_terms = self.compute_basis_functions(receiver_features, sender_features)\n\n forward_messages = tf.reduce_sum(input_tensor=sender_terms * tf.expand_dims(forward_type_scaling,-1), axis=1)\n backward_messages = tf.reduce_sum(input_tensor=receiver_terms * tf.expand_dims(backward_type_scaling, -1), axis=1)\n\n return forward_messages, backward_messages\n\n def compute_coefficients(self):\n message_types = self.get_graph().get_type_indices()\n forward_type_scaling = tf.nn.embedding_lookup(params=self.C_forward, ids=message_types)\n backward_type_scaling = tf.nn.embedding_lookup(params=self.C_backward, ids=message_types)\n return backward_type_scaling, forward_type_scaling\n\n def compute_basis_functions(self, receiver_features, sender_features):\n sender_terms = self.dot_or_tensor_mul(sender_features, self.W_forward)\n receiver_terms = self.dot_or_tensor_mul(receiver_features, self.W_backward)\n\n return receiver_terms, sender_terms\n\n def dot_or_tensor_mul(self, features, tensor):\n tensor_shape = tf.shape(input=tensor)\n flat_shape = [tensor_shape[0], tensor_shape[1] * tensor_shape[2]]\n\n flattened_tensor = tf.reshape(tensor, flat_shape)\n result_tensor = dot_or_lookup(features, flattened_tensor, onehot_input=self.onehot_input)\n result_tensor = tf.reshape(result_tensor, [-1, tensor_shape[1], tensor_shape[2]])\n\n return result_tensor\n\n def compute_self_loop_messages(self, vertex_features):\n return dot_or_lookup(vertex_features, self.W_self, onehot_input=self.onehot_input)\n\n\n def combine_messages(self, forward_messages, backward_messages, self_loop_messages, previous_code, mode='train'):\n mtr_f = self.get_graph().forward_incidence_matrix(normalization=('global', 'recalculated'))\n mtr_b = self.get_graph().backward_incidence_matrix(normalization=('global', 'recalculated'))\n\n collected_messages_f = tf.sparse.sparse_dense_matmul(mtr_f, forward_messages)\n collected_messages_b = 
tf.sparse.sparse_dense_matmul(mtr_b, backward_messages)\n\n updated_vertex_embeddings = collected_messages_f + collected_messages_b\n\n if self.use_nonlinearity:\n activated = tf.nn.relu(updated_vertex_embeddings + self_loop_messages)\n else:\n activated = updated_vertex_embeddings + self_loop_messages\n\n return activated\n\n def local_get_regularization(self):\n regularization = tf.reduce_mean(input_tensor=tf.square(self.W_forward))\n regularization += tf.reduce_mean(input_tensor=tf.square(self.W_backward))\n regularization += tf.reduce_mean(input_tensor=tf.square(self.W_self))\n\n return 0.0 * regularization" ]
[ [ "tensorflow.sparse.sparse_dense_matmul", "tensorflow.shape", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.square", "tensorflow.nn.relu", "tensorflow.nn.embedding_lookup" ] ]
Leo-xxx/Deep-Flow-Guided-Video-Inpainting
[ "6310007009d2bfe150f1e4b29c7588f720c4bba2" ]
[ "utils/flow.py" ]
[ "import numpy as np\nimport cv2\n\n\ndef make_colorwheel():\n '''\n Generates a color wheel for optical flow visualization as presented in:\n Baker et al. \"A Database and Evaluation Methodology for Optical Flow\" (ICCV, 2007)\n URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf\n\n According to the C++ source code of Daniel Scharstein\n According to the Matlab source code of Deqing Sun\n '''\n\n RY = 15\n YG = 6\n GC = 4\n CB = 11\n BM = 13\n MR = 6\n\n ncols = RY + YG + GC + CB + BM + MR\n colorwheel = np.zeros((ncols, 3))\n col = 0\n\n # RY\n colorwheel[0:RY, 0] = 255\n colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY)\n col = col + RY\n # YG\n colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG)\n colorwheel[col:col + YG, 1] = 255\n col = col + YG\n # GC\n colorwheel[col:col + GC, 1] = 255\n colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC)\n col = col + GC\n # CB\n colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB)\n colorwheel[col:col + CB, 2] = 255\n col = col + CB\n # BM\n colorwheel[col:col + BM, 2] = 255\n colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM)\n col = col + BM\n # MR\n colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR)\n colorwheel[col:col + MR, 0] = 255\n return colorwheel\n\n\ndef flow_compute_color(u, v, convert_to_bgr=False):\n '''\n Applies the flow color wheel to (possibly clipped) flow components u and v.\n\n According to the C++ source code of Daniel Scharstein\n According to the Matlab source code of Deqing Sun\n\n :param u: np.ndarray, input horizontal flow\n :param v: np.ndarray, input vertical flow\n :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB\n :return:\n '''\n\n flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)\n\n colorwheel = make_colorwheel() # shape [55x3]\n ncols = colorwheel.shape[0]\n\n rad = np.sqrt(np.square(u) + np.square(v))\n a = np.arctan2(-v, -u) / np.pi\n\n fk = (a + 1) / 2 * (ncols - 1) + 1\n k0 = np.floor(fk).astype(np.int32)\n k0[k0 > 53] = 53\n k1 = k0 + 1\n k1[k1 == ncols] = 1\n f = fk - k0\n\n for i in range(colorwheel.shape[1]):\n\n tmp = colorwheel[:, i]\n col0 = tmp[k0] / 255.0\n col1 = tmp[k1] / 255.0\n col = (1 - f) * col0 + f * col1\n\n idx = (rad <= 1)\n col[idx] = 1 - rad[idx] * (1 - col[idx])\n col[~idx] = col[~idx] * 0.75 # out of range?\n\n # Note the 2-i => BGR instead of RGB\n ch_idx = 2 - i if convert_to_bgr else i\n flow_image[:, :, ch_idx] = np.floor(255 * col)\n\n return flow_image\n\n\ndef flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False):\n '''\n Expects a two dimensional flow image of shape [H,W,2]\n\n According to the C++ source code of Daniel Scharstein\n According to the Matlab source code of Deqing Sun\n\n :param flow_uv: np.ndarray of shape [H,W,2]\n :param clip_flow: float, maximum clipping value for flow\n :return:\n '''\n\n assert flow_uv.ndim == 3, 'input flow must have three dimensions'\n assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'\n\n if clip_flow is not None:\n flow_uv = np.clip(flow_uv, 0, clip_flow)\n\n u = flow_uv[:, :, 0]\n v = flow_uv[:, :, 1]\n\n rad = np.sqrt(np.square(u) + np.square(v))\n rad_max = np.max(rad)\n\n epsilon = 1e-5\n u = u / (rad_max + epsilon)\n v = v / (rad_max + epsilon)\n\n return flow_compute_color(u, v, convert_to_bgr)\n\n\ndef readFlow(name):\n f = open(name, 'rb')\n\n header = f.read(4)\n if header.decode(\"utf-8\") != 'PIEH':\n raise Exception('Flow file header does not 
contain PIEH')\n\n width = np.fromfile(f, np.int32, 1).squeeze()\n height = np.fromfile(f, np.int32, 1).squeeze()\n\n flow = np.fromfile(f, np.float32, width * height * 2).reshape((height,\n width, 2))\n f.close()\n return flow.astype(np.float32)\n\n\ndef get_warp_label(flow1, flow2, label1, th=50, value=0):\n label2 = np.ones_like(label1, dtype=label1.dtype) * value\n height = flow1.shape[0]\n width = flow1.shape[1]\n flow_t = np.zeros_like(flow1, dtype=flow1.dtype)\n\n grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2)\n dx = grid[:, :, 0] + flow2[:, :, 1]\n dy = grid[:, :, 1] + flow2[:, :, 0]\n sx = np.floor(dx).astype(int)\n sy = np.floor(dy).astype(int)\n valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1)\n\n sx_mat = np.dstack((sx, sx + 1, sx, sx + 1)).clip(0, height - 1)\n sy_mat = np.dstack((sy, sy, sy + 1, sy + 1)).clip(0, width - 1)\n sxsy_mat = np.abs((1 - np.abs(sx_mat - dx[:, :, np.newaxis])) *\n (1 - np.abs(sy_mat - dy[:, :, np.newaxis])))\n\n for i in range(4):\n flow_t = flow_t + sxsy_mat[:, :, i][:, :, np.\n newaxis] * flow1[sx_mat[:, :, i],\n sy_mat[:, :, i], :]\n\n valid = valid & (np.linalg.norm(\n flow_t[:, :, [1, 0]] + np.dstack((dx, dy)) - grid, axis=2) < th)\n\n flow_t = (flow2 - flow_t) / 2.0\n dx = grid[:, :, 0] + flow_t[:, :, 1]\n dy = grid[:, :, 1] + flow_t[:, :, 0]\n\n valid = valid & (dx >= 0) & (dx < height - 1) & (dy >= 0) & (dy < width - 1)\n label2[valid, :] = label1[dx[valid].round().astype(int), dy[valid].round()\n .astype(int), :]\n return label2\n\n\ndef flow_tf(flow, size):\n flow_shape = flow.shape\n flow_resized = cv2.resize(flow, (size[1], size[0]))\n flow_resized[:, :, 0] *= (float(size[1]) / float(flow_shape[1]))\n flow_resized[:, :, 1] *= (float(size[0]) / float(flow_shape[0]))\n\n return flow_resized" ]
[ [ "numpy.zeros_like", "numpy.arctan2", "numpy.fromfile", "numpy.zeros", "numpy.floor", "numpy.ones_like", "numpy.abs", "numpy.arange", "numpy.dstack", "numpy.max", "numpy.clip", "numpy.indices", "numpy.square" ] ]
project-pantheon/pantheon_glob_planner
[ "c0d50a53b36c4678192ec75ad7a4cd68c570daef" ]
[ "env/lib/python3.5/site-packages/cartopy/tests/crs/test_utm.py" ]
[ "# (C) British Crown Copyright 2018, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"\nTests for the UTM coordinate system.\n\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport pytest\n\nimport cartopy.crs as ccrs\n\n\ndef check_proj4_params(crs, other_args):\n expected = other_args | {'proj=utm', 'no_defs', 'units=m'}\n pro4_params = set(crs.proj4_init.lstrip('+').split(' +'))\n assert expected == pro4_params\n\n\n@pytest.mark.parametrize('south', [False, True])\ndef test_default(south):\n zone = 1 # Limits are fixed, so don't bother checking other zones.\n utm = ccrs.UTM(zone, southern_hemisphere=south)\n other_args = {'ellps=WGS84', 'zone={}'.format(zone)}\n if south:\n other_args |= {'south'}\n check_proj4_params(utm, other_args)\n\n assert_almost_equal(np.array(utm.x_limits),\n [-250000, 1250000])\n assert_almost_equal(np.array(utm.y_limits),\n [-10000000, 25000000])\n\n\ndef test_ellipsoid_transform():\n # USGS Professional Paper 1395, pp 269 - 271\n globe = ccrs.Globe(ellipse='clrk66')\n utm = ccrs.UTM(zone=18, globe=globe)\n geodetic = utm.as_geodetic()\n\n other_args = {'ellps=clrk66', 'zone=18'}\n check_proj4_params(utm, other_args)\n\n assert_almost_equal(np.array(utm.x_limits),\n [-250000, 1250000])\n assert_almost_equal(np.array(utm.y_limits),\n [-10000000, 25000000])\n\n result = utm.transform_point(-73.5, 40.5, geodetic)\n assert_almost_equal(result, np.array([127106.5 + 500000, 4484124.4]),\n decimal=1)\n\n inverse_result = geodetic.transform_point(result[0], result[1], utm)\n assert_almost_equal(inverse_result, [-73.5, 40.5])\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.array" ] ]
IcyCC/vnpy
[ "04f6ec013daddde2df36590625e0533e260b4bc1" ]
[ "vnpy/app/cta_backtester/ui/widget.py" ]
[ "import numpy as np\nimport pyqtgraph as pg\nfrom datetime import datetime, timedelta\n\nfrom vnpy.trader.constant import Interval, Direction, Offset\nfrom vnpy.trader.engine import MainEngine\nfrom vnpy.trader.ui import QtCore, QtWidgets, QtGui\nfrom vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell\nfrom vnpy.trader.ui.editor import CodeEditor\nfrom vnpy.event import Event, EventEngine\nfrom vnpy.chart import ChartWidget, CandleItem, VolumeItem\nfrom vnpy.trader.utility import load_json, save_json\n\nfrom ..engine import (\n APP_NAME,\n EVENT_BACKTESTER_LOG,\n EVENT_BACKTESTER_BACKTESTING_FINISHED,\n EVENT_BACKTESTER_OPTIMIZATION_FINISHED,\n OptimizationSetting\n)\n\n\nclass BacktesterManager(QtWidgets.QWidget):\n \"\"\"\"\"\"\n\n setting_filename = \"cta_backtester_setting.json\"\n\n signal_log = QtCore.pyqtSignal(Event)\n signal_backtesting_finished = QtCore.pyqtSignal(Event)\n signal_optimization_finished = QtCore.pyqtSignal(Event)\n\n def __init__(self, main_engine: MainEngine, event_engine: EventEngine):\n \"\"\"\"\"\"\n super().__init__()\n\n self.main_engine = main_engine\n self.event_engine = event_engine\n\n self.backtester_engine = main_engine.get_engine(APP_NAME)\n self.class_names = []\n self.settings = {}\n\n self.target_display = \"\"\n\n self.init_ui()\n self.register_event()\n self.backtester_engine.init_engine()\n self.init_strategy_settings()\n\n def init_strategy_settings(self):\n \"\"\"\"\"\"\n self.class_names = self.backtester_engine.get_strategy_class_names()\n\n for class_name in self.class_names:\n setting = self.backtester_engine.get_default_setting(class_name)\n self.settings[class_name] = setting\n\n self.class_combo.addItems(self.class_names)\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(\"CTA回测\")\n\n # Setting Part\n self.class_combo = QtWidgets.QComboBox()\n\n self.symbol_line = QtWidgets.QLineEdit(\"IF88.CFFEX\")\n\n self.interval_combo = QtWidgets.QComboBox()\n for inteval in Interval:\n self.interval_combo.addItem(inteval.value)\n\n end_dt = datetime.now()\n start_dt = end_dt - timedelta(days=3 * 365)\n\n self.start_date_edit = QtWidgets.QDateEdit(\n QtCore.QDate(\n start_dt.year,\n start_dt.month,\n start_dt.day\n )\n )\n self.end_date_edit = QtWidgets.QDateEdit(\n QtCore.QDate.currentDate()\n )\n\n self.rate_line = QtWidgets.QLineEdit(\"0.000025\")\n self.slippage_line = QtWidgets.QLineEdit(\"0.2\")\n self.size_line = QtWidgets.QLineEdit(\"300\")\n self.pricetick_line = QtWidgets.QLineEdit(\"0.2\")\n self.capital_line = QtWidgets.QLineEdit(\"1000000\")\n\n self.inverse_combo = QtWidgets.QComboBox()\n self.inverse_combo.addItems([\"正向\", \"反向\"])\n\n backtesting_button = QtWidgets.QPushButton(\"开始回测\")\n backtesting_button.clicked.connect(self.start_backtesting)\n\n optimization_button = QtWidgets.QPushButton(\"参数优化\")\n optimization_button.clicked.connect(self.start_optimization)\n\n self.result_button = QtWidgets.QPushButton(\"优化结果\")\n self.result_button.clicked.connect(self.show_optimization_result)\n self.result_button.setEnabled(False)\n\n downloading_button = QtWidgets.QPushButton(\"下载数据\")\n downloading_button.clicked.connect(self.start_downloading)\n\n self.order_button = QtWidgets.QPushButton(\"委托记录\")\n self.order_button.clicked.connect(self.show_backtesting_orders)\n self.order_button.setEnabled(False)\n\n self.trade_button = QtWidgets.QPushButton(\"成交记录\")\n self.trade_button.clicked.connect(self.show_backtesting_trades)\n self.trade_button.setEnabled(False)\n\n self.daily_button = 
QtWidgets.QPushButton(\"每日盈亏\")\n self.daily_button.clicked.connect(self.show_daily_results)\n self.daily_button.setEnabled(False)\n\n self.candle_button = QtWidgets.QPushButton(\"K线图表\")\n self.candle_button.clicked.connect(self.show_candle_chart)\n self.candle_button.setEnabled(False)\n\n edit_button = QtWidgets.QPushButton(\"代码编辑\")\n edit_button.clicked.connect(self.edit_strategy_code)\n\n reload_button = QtWidgets.QPushButton(\"策略重载\")\n reload_button.clicked.connect(self.reload_strategy_class)\n\n for button in [\n backtesting_button,\n optimization_button,\n downloading_button,\n self.result_button,\n self.order_button,\n self.trade_button,\n self.daily_button,\n self.candle_button,\n edit_button,\n reload_button\n ]:\n button.setFixedHeight(button.sizeHint().height() * 2)\n\n form = QtWidgets.QFormLayout()\n form.addRow(\"交易策略\", self.class_combo)\n form.addRow(\"本地代码\", self.symbol_line)\n form.addRow(\"K线周期\", self.interval_combo)\n form.addRow(\"开始日期\", self.start_date_edit)\n form.addRow(\"结束日期\", self.end_date_edit)\n form.addRow(\"手续费率\", self.rate_line)\n form.addRow(\"交易滑点\", self.slippage_line)\n form.addRow(\"合约乘数\", self.size_line)\n form.addRow(\"价格跳动\", self.pricetick_line)\n form.addRow(\"回测资金\", self.capital_line)\n form.addRow(\"合约模式\", self.inverse_combo)\n\n result_grid = QtWidgets.QGridLayout()\n result_grid.addWidget(self.trade_button, 0, 0)\n result_grid.addWidget(self.order_button, 0, 1)\n result_grid.addWidget(self.daily_button, 1, 0)\n result_grid.addWidget(self.candle_button, 1, 1)\n\n left_vbox = QtWidgets.QVBoxLayout()\n left_vbox.addLayout(form)\n left_vbox.addWidget(backtesting_button)\n left_vbox.addWidget(downloading_button)\n left_vbox.addStretch()\n left_vbox.addLayout(result_grid)\n left_vbox.addStretch()\n left_vbox.addWidget(optimization_button)\n left_vbox.addWidget(self.result_button)\n left_vbox.addStretch()\n left_vbox.addWidget(edit_button)\n left_vbox.addWidget(reload_button)\n\n # Result part\n self.statistics_monitor = StatisticsMonitor()\n\n self.log_monitor = QtWidgets.QTextEdit()\n self.log_monitor.setMaximumHeight(400)\n\n self.chart = BacktesterChart()\n self.chart.setMinimumWidth(1000)\n\n self.trade_dialog = BacktestingResultDialog(\n self.main_engine,\n self.event_engine,\n \"回测成交记录\",\n BacktestingTradeMonitor\n )\n self.order_dialog = BacktestingResultDialog(\n self.main_engine,\n self.event_engine,\n \"回测委托记录\",\n BacktestingOrderMonitor\n )\n self.daily_dialog = BacktestingResultDialog(\n self.main_engine,\n self.event_engine,\n \"回测每日盈亏\",\n DailyResultMonitor\n )\n\n # Candle Chart\n self.candle_dialog = CandleChartDialog()\n\n # Layout\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.statistics_monitor)\n vbox.addWidget(self.log_monitor)\n\n hbox = QtWidgets.QHBoxLayout()\n hbox.addLayout(left_vbox)\n hbox.addLayout(vbox)\n hbox.addWidget(self.chart)\n self.setLayout(hbox)\n\n # Code Editor\n self.editor = CodeEditor(self.main_engine, self.event_engine)\n\n # Load setting\n setting = load_json(self.setting_filename)\n if not setting:\n return\n\n self.class_combo.setCurrentIndex(\n self.class_combo.findText(setting[\"class_name\"])\n )\n\n self.symbol_line.setText(setting[\"vt_symbol\"])\n\n self.interval_combo.setCurrentIndex(\n self.interval_combo.findText(setting[\"interval\"])\n )\n\n self.rate_line.setText(str(setting[\"rate\"]))\n self.slippage_line.setText(str(setting[\"slippage\"]))\n self.size_line.setText(str(setting[\"size\"]))\n self.pricetick_line.setText(str(setting[\"pricetick\"]))\n 
self.capital_line.setText(str(setting[\"capital\"]))\n\n if not setting[\"inverse\"]:\n self.inverse_combo.setCurrentIndex(0)\n else:\n self.inverse_combo.setCurrentIndex(1)\n\n def register_event(self):\n \"\"\"\"\"\"\n self.signal_log.connect(self.process_log_event)\n self.signal_backtesting_finished.connect(\n self.process_backtesting_finished_event)\n self.signal_optimization_finished.connect(\n self.process_optimization_finished_event)\n\n self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit)\n self.event_engine.register(\n EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit)\n self.event_engine.register(\n EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit)\n\n def process_log_event(self, event: Event):\n \"\"\"\"\"\"\n msg = event.data\n self.write_log(msg)\n\n def write_log(self, msg):\n \"\"\"\"\"\"\n timestamp = datetime.now().strftime(\"%H:%M:%S\")\n msg = f\"{timestamp}\\t{msg}\"\n self.log_monitor.append(msg)\n\n def process_backtesting_finished_event(self, event: Event):\n \"\"\"\"\"\"\n statistics = self.backtester_engine.get_result_statistics()\n self.statistics_monitor.set_data(statistics)\n\n df = self.backtester_engine.get_result_df()\n self.chart.set_data(df)\n\n self.trade_button.setEnabled(True)\n self.order_button.setEnabled(True)\n self.daily_button.setEnabled(True)\n self.candle_button.setEnabled(True)\n\n def process_optimization_finished_event(self, event: Event):\n \"\"\"\"\"\"\n self.write_log(\"请点击[优化结果]按钮查看\")\n self.result_button.setEnabled(True)\n\n def start_backtesting(self):\n \"\"\"\"\"\"\n class_name = self.class_combo.currentText()\n vt_symbol = self.symbol_line.text()\n interval = self.interval_combo.currentText()\n start = self.start_date_edit.date().toPyDate()\n end = self.end_date_edit.date().toPyDate()\n rate = float(self.rate_line.text())\n slippage = float(self.slippage_line.text())\n size = float(self.size_line.text())\n pricetick = float(self.pricetick_line.text())\n capital = float(self.capital_line.text())\n\n if self.inverse_combo.currentText() == \"正向\":\n inverse = False\n else:\n inverse = True\n\n # Save backtesting parameters\n backtesting_setting = {\n \"class_name\": class_name,\n \"vt_symbol\": vt_symbol,\n \"interval\": interval,\n \"rate\": rate,\n \"slippage\": slippage,\n \"size\": size,\n \"pricetick\": pricetick,\n \"capital\": capital,\n \"inverse\": inverse,\n }\n save_json(self.setting_filename, backtesting_setting)\n\n # Get strategy setting\n old_setting = self.settings[class_name]\n dialog = BacktestingSettingEditor(class_name, old_setting)\n i = dialog.exec()\n if i != dialog.Accepted:\n return\n\n new_setting = dialog.get_setting()\n self.settings[class_name] = new_setting\n\n result = self.backtester_engine.start_backtesting(\n class_name,\n vt_symbol,\n interval,\n start,\n end,\n rate,\n slippage,\n size,\n pricetick,\n capital,\n inverse,\n new_setting\n )\n\n if result:\n self.statistics_monitor.clear_data()\n self.chart.clear_data()\n\n self.trade_button.setEnabled(False)\n self.order_button.setEnabled(False)\n self.daily_button.setEnabled(False)\n self.candle_button.setEnabled(False)\n\n self.trade_dialog.clear_data()\n self.order_dialog.clear_data()\n self.daily_dialog.clear_data()\n self.candle_dialog.clear_data()\n\n def start_optimization(self):\n \"\"\"\"\"\"\n class_name = self.class_combo.currentText()\n vt_symbol = self.symbol_line.text()\n interval = self.interval_combo.currentText()\n start = self.start_date_edit.date().toPyDate()\n 
end = self.end_date_edit.date().toPyDate()\n rate = float(self.rate_line.text())\n slippage = float(self.slippage_line.text())\n size = float(self.size_line.text())\n pricetick = float(self.pricetick_line.text())\n capital = float(self.capital_line.text())\n\n if self.inverse_combo.currentText() == \"正向\":\n inverse = False\n else:\n inverse = True\n\n parameters = self.settings[class_name]\n dialog = OptimizationSettingEditor(class_name, parameters)\n i = dialog.exec()\n if i != dialog.Accepted:\n return\n\n optimization_setting, use_ga = dialog.get_setting()\n self.target_display = dialog.target_display\n\n self.backtester_engine.start_optimization(\n class_name,\n vt_symbol,\n interval,\n start,\n end,\n rate,\n slippage,\n size,\n pricetick,\n capital,\n inverse,\n optimization_setting,\n use_ga\n )\n\n self.result_button.setEnabled(False)\n\n def start_downloading(self):\n \"\"\"\"\"\"\n vt_symbol = self.symbol_line.text()\n interval = self.interval_combo.currentText()\n start_date = self.start_date_edit.date()\n end_date = self.end_date_edit.date()\n\n start = datetime(start_date.year(), start_date.month(), start_date.day())\n end = datetime(end_date.year(), end_date.month(), end_date.day(), 23, 59, 59)\n\n self.backtester_engine.start_downloading(\n vt_symbol,\n interval,\n start,\n end\n )\n\n def show_optimization_result(self):\n \"\"\"\"\"\"\n result_values = self.backtester_engine.get_result_values()\n\n dialog = OptimizationResultMonitor(\n result_values,\n self.target_display\n )\n dialog.exec_()\n\n def show_backtesting_trades(self):\n \"\"\"\"\"\"\n if not self.trade_dialog.is_updated():\n trades = self.backtester_engine.get_all_trades()\n self.trade_dialog.update_data(trades)\n\n self.trade_dialog.exec_()\n\n def show_backtesting_orders(self):\n \"\"\"\"\"\"\n if not self.order_dialog.is_updated():\n orders = self.backtester_engine.get_all_orders()\n self.order_dialog.update_data(orders)\n\n self.order_dialog.exec_()\n\n def show_daily_results(self):\n \"\"\"\"\"\"\n if not self.daily_dialog.is_updated():\n results = self.backtester_engine.get_all_daily_results()\n self.daily_dialog.update_data(results)\n\n self.daily_dialog.exec_()\n\n def show_candle_chart(self):\n \"\"\"\"\"\"\n if not self.candle_dialog.is_updated():\n history = self.backtester_engine.get_history_data()\n self.candle_dialog.update_history(history)\n\n trades = self.backtester_engine.get_all_trades()\n self.candle_dialog.update_trades(trades)\n\n self.candle_dialog.exec_()\n\n def edit_strategy_code(self):\n \"\"\"\"\"\"\n class_name = self.class_combo.currentText()\n file_path = self.backtester_engine.get_strategy_class_file(class_name)\n\n self.editor.open_editor(file_path)\n self.editor.show()\n\n def reload_strategy_class(self):\n \"\"\"\"\"\"\n self.backtester_engine.reload_strategy_class()\n\n self.class_combo.clear()\n self.init_strategy_settings()\n\n def show(self):\n \"\"\"\"\"\"\n self.showMaximized()\n\n\nclass StatisticsMonitor(QtWidgets.QTableWidget):\n \"\"\"\"\"\"\n KEY_NAME_MAP = {\n \"start_date\": \"首个交易日\",\n \"end_date\": \"最后交易日\",\n\n \"total_days\": \"总交易日\",\n \"profit_days\": \"盈利交易日\",\n \"loss_days\": \"亏损交易日\",\n\n \"capital\": \"起始资金\",\n \"end_balance\": \"结束资金\",\n\n \"total_return\": \"总收益率\",\n \"annual_return\": \"年化收益\",\n \"max_drawdown\": \"最大回撤\",\n \"max_ddpercent\": \"百分比最大回撤\",\n\n \"total_net_pnl\": \"总盈亏\",\n \"total_commission\": \"总手续费\",\n \"total_slippage\": \"总滑点\",\n \"total_turnover\": \"总成交额\",\n \"total_trade_count\": \"总成交笔数\",\n\n \"daily_net_pnl\": 
\"日均盈亏\",\n \"daily_commission\": \"日均手续费\",\n \"daily_slippage\": \"日均滑点\",\n \"daily_turnover\": \"日均成交额\",\n \"daily_trade_count\": \"日均成交笔数\",\n\n \"daily_return\": \"日均收益率\",\n \"return_std\": \"收益标准差\",\n \"sharpe_ratio\": \"夏普比率\",\n \"return_drawdown_ratio\": \"收益回撤比\"\n }\n\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__()\n\n self.cells = {}\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setRowCount(len(self.KEY_NAME_MAP))\n self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))\n\n self.setColumnCount(1)\n self.horizontalHeader().setVisible(False)\n self.horizontalHeader().setSectionResizeMode(\n QtWidgets.QHeaderView.Stretch\n )\n self.setEditTriggers(self.NoEditTriggers)\n\n for row, key in enumerate(self.KEY_NAME_MAP.keys()):\n cell = QtWidgets.QTableWidgetItem()\n self.setItem(row, 0, cell)\n self.cells[key] = cell\n\n def clear_data(self):\n \"\"\"\"\"\"\n for cell in self.cells.values():\n cell.setText(\"\")\n\n def set_data(self, data: dict):\n \"\"\"\"\"\"\n data[\"capital\"] = f\"{data['capital']:,.2f}\"\n data[\"end_balance\"] = f\"{data['end_balance']:,.2f}\"\n data[\"total_return\"] = f\"{data['total_return']:,.2f}%\"\n data[\"annual_return\"] = f\"{data['annual_return']:,.2f}%\"\n data[\"max_drawdown\"] = f\"{data['max_drawdown']:,.2f}\"\n data[\"max_ddpercent\"] = f\"{data['max_ddpercent']:,.2f}%\"\n data[\"total_net_pnl\"] = f\"{data['total_net_pnl']:,.2f}\"\n data[\"total_commission\"] = f\"{data['total_commission']:,.2f}\"\n data[\"total_slippage\"] = f\"{data['total_slippage']:,.2f}\"\n data[\"total_turnover\"] = f\"{data['total_turnover']:,.2f}\"\n data[\"daily_net_pnl\"] = f\"{data['daily_net_pnl']:,.2f}\"\n data[\"daily_commission\"] = f\"{data['daily_commission']:,.2f}\"\n data[\"daily_slippage\"] = f\"{data['daily_slippage']:,.2f}\"\n data[\"daily_turnover\"] = f\"{data['daily_turnover']:,.2f}\"\n data[\"daily_return\"] = f\"{data['daily_return']:,.2f}%\"\n data[\"return_std\"] = f\"{data['return_std']:,.2f}%\"\n data[\"sharpe_ratio\"] = f\"{data['sharpe_ratio']:,.2f}\"\n data[\"return_drawdown_ratio\"] = f\"{data['return_drawdown_ratio']:,.2f}\"\n\n for key, cell in self.cells.items():\n value = data.get(key, \"\")\n cell.setText(str(value))\n\n\nclass BacktestingSettingEditor(QtWidgets.QDialog):\n \"\"\"\n For creating new strategy and editing strategy parameters.\n \"\"\"\n\n def __init__(\n self, class_name: str, parameters: dict\n ):\n \"\"\"\"\"\"\n super(BacktestingSettingEditor, self).__init__()\n\n self.class_name = class_name\n self.parameters = parameters\n self.edits = {}\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n form = QtWidgets.QFormLayout()\n\n # Add vt_symbol and name edit if add new strategy\n self.setWindowTitle(f\"策略参数配置:{self.class_name}\")\n button_text = \"确定\"\n parameters = self.parameters\n\n for name, value in parameters.items():\n type_ = type(value)\n\n edit = QtWidgets.QLineEdit(str(value))\n if type_ is int:\n validator = QtGui.QIntValidator()\n edit.setValidator(validator)\n elif type_ is float:\n validator = QtGui.QDoubleValidator()\n edit.setValidator(validator)\n\n form.addRow(f\"{name} {type_}\", edit)\n\n self.edits[name] = (edit, type_)\n\n button = QtWidgets.QPushButton(button_text)\n button.clicked.connect(self.accept)\n form.addRow(button)\n\n self.setLayout(form)\n\n def get_setting(self):\n \"\"\"\"\"\"\n setting = {}\n\n for name, tp in self.edits.items():\n edit, type_ = tp\n value_text = edit.text()\n\n if type_ == bool:\n if value_text == \"True\":\n value = 
True\n else:\n value = False\n else:\n value = type_(value_text)\n\n setting[name] = value\n\n return setting\n\n\nclass BacktesterChart(pg.GraphicsWindow):\n \"\"\"\"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__(title=\"Backtester Chart\")\n\n self.dates = {}\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n pg.setConfigOptions(antialias=True)\n\n # Create plot widgets\n self.balance_plot = self.addPlot(\n title=\"账户净值\",\n axisItems={\"bottom\": DateAxis(self.dates, orientation=\"bottom\")}\n )\n self.nextRow()\n\n self.drawdown_plot = self.addPlot(\n title=\"净值回撤\",\n axisItems={\"bottom\": DateAxis(self.dates, orientation=\"bottom\")}\n )\n self.nextRow()\n\n self.pnl_plot = self.addPlot(\n title=\"每日盈亏\",\n axisItems={\"bottom\": DateAxis(self.dates, orientation=\"bottom\")}\n )\n self.nextRow()\n\n self.distribution_plot = self.addPlot(title=\"盈亏分布\")\n\n # Add curves and bars on plot widgets\n self.balance_curve = self.balance_plot.plot(\n pen=pg.mkPen(\"#ffc107\", width=3)\n )\n\n dd_color = \"#303f9f\"\n self.drawdown_curve = self.drawdown_plot.plot(\n fillLevel=-0.3, brush=dd_color, pen=dd_color\n )\n\n profit_color = 'r'\n loss_color = 'g'\n self.profit_pnl_bar = pg.BarGraphItem(\n x=[], height=[], width=0.3, brush=profit_color, pen=profit_color\n )\n self.loss_pnl_bar = pg.BarGraphItem(\n x=[], height=[], width=0.3, brush=loss_color, pen=loss_color\n )\n self.pnl_plot.addItem(self.profit_pnl_bar)\n self.pnl_plot.addItem(self.loss_pnl_bar)\n\n distribution_color = \"#6d4c41\"\n self.distribution_curve = self.distribution_plot.plot(\n fillLevel=-0.3, brush=distribution_color, pen=distribution_color\n )\n\n def clear_data(self):\n \"\"\"\"\"\"\n self.balance_curve.setData([], [])\n self.drawdown_curve.setData([], [])\n self.profit_pnl_bar.setOpts(x=[], height=[])\n self.loss_pnl_bar.setOpts(x=[], height=[])\n self.distribution_curve.setData([], [])\n\n def set_data(self, df):\n \"\"\"\"\"\"\n if df is None:\n return\n\n count = len(df)\n\n self.dates.clear()\n for n, date in enumerate(df.index):\n self.dates[n] = date\n\n # Set data for curve of balance and drawdown\n self.balance_curve.setData(df[\"balance\"])\n self.drawdown_curve.setData(df[\"drawdown\"])\n\n # Set data for daily pnl bar\n profit_pnl_x = []\n profit_pnl_height = []\n loss_pnl_x = []\n loss_pnl_height = []\n\n for count, pnl in enumerate(df[\"net_pnl\"]):\n if pnl >= 0:\n profit_pnl_height.append(pnl)\n profit_pnl_x.append(count)\n else:\n loss_pnl_height.append(pnl)\n loss_pnl_x.append(count)\n\n self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height)\n self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height)\n\n # Set data for pnl distribution\n hist, x = np.histogram(df[\"net_pnl\"], bins=\"auto\")\n x = x[:-1]\n self.distribution_curve.setData(x, hist)\n\n\nclass DateAxis(pg.AxisItem):\n \"\"\"Axis for showing date data\"\"\"\n\n def __init__(self, dates: dict, *args, **kwargs):\n \"\"\"\"\"\"\n super().__init__(*args, **kwargs)\n self.dates = dates\n\n def tickStrings(self, values, scale, spacing):\n \"\"\"\"\"\"\n strings = []\n for v in values:\n dt = self.dates.get(v, \"\")\n strings.append(str(dt))\n return strings\n\n\nclass OptimizationSettingEditor(QtWidgets.QDialog):\n \"\"\"\n For setting up parameters for optimization.\n \"\"\"\n DISPLAY_NAME_MAP = {\n \"总收益率\": \"total_return\",\n \"夏普比率\": \"sharpe_ratio\",\n \"收益回撤比\": \"return_drawdown_ratio\",\n \"日均盈亏\": \"daily_net_pnl\"\n }\n\n def __init__(\n self, class_name: str, parameters: dict\n ):\n 
\"\"\"\"\"\"\n super().__init__()\n\n self.class_name = class_name\n self.parameters = parameters\n self.edits = {}\n\n self.optimization_setting = None\n self.use_ga = False\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n QLabel = QtWidgets.QLabel\n\n self.target_combo = QtWidgets.QComboBox()\n self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))\n\n grid = QtWidgets.QGridLayout()\n grid.addWidget(QLabel(\"目标\"), 0, 0)\n grid.addWidget(self.target_combo, 0, 1, 1, 3)\n grid.addWidget(QLabel(\"参数\"), 1, 0)\n grid.addWidget(QLabel(\"开始\"), 1, 1)\n grid.addWidget(QLabel(\"步进\"), 1, 2)\n grid.addWidget(QLabel(\"结束\"), 1, 3)\n\n # Add vt_symbol and name edit if add new strategy\n self.setWindowTitle(f\"优化参数配置:{self.class_name}\")\n\n validator = QtGui.QDoubleValidator()\n row = 2\n\n for name, value in self.parameters.items():\n type_ = type(value)\n if type_ not in [int, float]:\n continue\n\n start_edit = QtWidgets.QLineEdit(str(value))\n step_edit = QtWidgets.QLineEdit(str(1))\n end_edit = QtWidgets.QLineEdit(str(value))\n\n for edit in [start_edit, step_edit, end_edit]:\n edit.setValidator(validator)\n\n grid.addWidget(QLabel(name), row, 0)\n grid.addWidget(start_edit, row, 1)\n grid.addWidget(step_edit, row, 2)\n grid.addWidget(end_edit, row, 3)\n\n self.edits[name] = {\n \"type\": type_,\n \"start\": start_edit,\n \"step\": step_edit,\n \"end\": end_edit\n }\n\n row += 1\n\n parallel_button = QtWidgets.QPushButton(\"多进程优化\")\n parallel_button.clicked.connect(self.generate_parallel_setting)\n grid.addWidget(parallel_button, row, 0, 1, 4)\n\n row += 1\n ga_button = QtWidgets.QPushButton(\"遗传算法优化\")\n ga_button.clicked.connect(self.generate_ga_setting)\n grid.addWidget(ga_button, row, 0, 1, 4)\n\n self.setLayout(grid)\n\n def generate_ga_setting(self):\n \"\"\"\"\"\"\n self.use_ga = True\n self.generate_setting()\n\n def generate_parallel_setting(self):\n \"\"\"\"\"\"\n self.use_ga = False\n self.generate_setting()\n\n def generate_setting(self):\n \"\"\"\"\"\"\n self.optimization_setting = OptimizationSetting()\n\n self.target_display = self.target_combo.currentText()\n target_name = self.DISPLAY_NAME_MAP[self.target_display]\n self.optimization_setting.set_target(target_name)\n\n for name, d in self.edits.items():\n type_ = d[\"type\"]\n start_value = type_(d[\"start\"].text())\n step_value = type_(d[\"step\"].text())\n end_value = type_(d[\"end\"].text())\n\n if start_value == end_value:\n self.optimization_setting.add_parameter(name, start_value)\n else:\n self.optimization_setting.add_parameter(\n name,\n start_value,\n end_value,\n step_value\n )\n\n self.accept()\n\n def get_setting(self):\n \"\"\"\"\"\"\n return self.optimization_setting, self.use_ga\n\n\nclass OptimizationResultMonitor(QtWidgets.QDialog):\n \"\"\"\n For viewing optimization result.\n \"\"\"\n\n def __init__(\n self, result_values: list, target_display: str\n ):\n \"\"\"\"\"\"\n super().__init__()\n\n self.result_values = result_values\n self.target_display = target_display\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(\"参数优化结果\")\n self.resize(1100, 500)\n\n table = QtWidgets.QTableWidget()\n\n table.setColumnCount(2)\n table.setRowCount(len(self.result_values))\n table.setHorizontalHeaderLabels([\"参数\", self.target_display])\n table.setEditTriggers(table.NoEditTriggers)\n table.verticalHeader().setVisible(False)\n\n table.horizontalHeader().setSectionResizeMode(\n 0, QtWidgets.QHeaderView.ResizeToContents\n )\n table.horizontalHeader().setSectionResizeMode(\n 1, 
QtWidgets.QHeaderView.Stretch\n )\n\n for n, tp in enumerate(self.result_values):\n setting, target_value, _ = tp\n setting_cell = QtWidgets.QTableWidgetItem(str(setting))\n target_cell = QtWidgets.QTableWidgetItem(str(target_value))\n\n setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)\n target_cell.setTextAlignment(QtCore.Qt.AlignCenter)\n\n table.setItem(n, 0, setting_cell)\n table.setItem(n, 1, target_cell)\n\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(table)\n\n self.setLayout(vbox)\n\n\nclass BacktestingTradeMonitor(BaseMonitor):\n \"\"\"\n Monitor for backtesting trade data.\n \"\"\"\n\n headers = {\n \"tradeid\": {\"display\": \"成交号 \", \"cell\": BaseCell, \"update\": False},\n \"orderid\": {\"display\": \"委托号\", \"cell\": BaseCell, \"update\": False},\n \"symbol\": {\"display\": \"代码\", \"cell\": BaseCell, \"update\": False},\n \"exchange\": {\"display\": \"交易所\", \"cell\": EnumCell, \"update\": False},\n \"direction\": {\"display\": \"方向\", \"cell\": DirectionCell, \"update\": False},\n \"offset\": {\"display\": \"开平\", \"cell\": EnumCell, \"update\": False},\n \"price\": {\"display\": \"价格\", \"cell\": BaseCell, \"update\": False},\n \"volume\": {\"display\": \"数量\", \"cell\": BaseCell, \"update\": False},\n \"datetime\": {\"display\": \"时间\", \"cell\": BaseCell, \"update\": False},\n \"gateway_name\": {\"display\": \"接口\", \"cell\": BaseCell, \"update\": False},\n }\n\n\nclass BacktestingOrderMonitor(BaseMonitor):\n \"\"\"\n Monitor for backtesting order data.\n \"\"\"\n\n headers = {\n \"orderid\": {\"display\": \"委托号\", \"cell\": BaseCell, \"update\": False},\n \"symbol\": {\"display\": \"代码\", \"cell\": BaseCell, \"update\": False},\n \"exchange\": {\"display\": \"交易所\", \"cell\": EnumCell, \"update\": False},\n \"type\": {\"display\": \"类型\", \"cell\": EnumCell, \"update\": False},\n \"direction\": {\"display\": \"方向\", \"cell\": DirectionCell, \"update\": False},\n \"offset\": {\"display\": \"开平\", \"cell\": EnumCell, \"update\": False},\n \"price\": {\"display\": \"价格\", \"cell\": BaseCell, \"update\": False},\n \"volume\": {\"display\": \"总数量\", \"cell\": BaseCell, \"update\": False},\n \"traded\": {\"display\": \"已成交\", \"cell\": BaseCell, \"update\": False},\n \"status\": {\"display\": \"状态\", \"cell\": EnumCell, \"update\": False},\n \"datetime\": {\"display\": \"时间\", \"cell\": BaseCell, \"update\": False},\n \"gateway_name\": {\"display\": \"接口\", \"cell\": BaseCell, \"update\": False},\n }\n\n\nclass DailyResultMonitor(BaseMonitor):\n \"\"\"\n Monitor for backtesting daily result.\n \"\"\"\n\n headers = {\n \"date\": {\"display\": \"日期\", \"cell\": BaseCell, \"update\": False},\n \"trade_count\": {\"display\": \"成交笔数\", \"cell\": BaseCell, \"update\": False},\n \"start_pos\": {\"display\": \"开盘持仓\", \"cell\": BaseCell, \"update\": False},\n \"end_pos\": {\"display\": \"收盘持仓\", \"cell\": BaseCell, \"update\": False},\n \"turnover\": {\"display\": \"成交额\", \"cell\": BaseCell, \"update\": False},\n \"commission\": {\"display\": \"手续费\", \"cell\": BaseCell, \"update\": False},\n \"slippage\": {\"display\": \"滑点\", \"cell\": BaseCell, \"update\": False},\n \"trading_pnl\": {\"display\": \"交易盈亏\", \"cell\": BaseCell, \"update\": False},\n \"holding_pnl\": {\"display\": \"持仓盈亏\", \"cell\": BaseCell, \"update\": False},\n \"total_pnl\": {\"display\": \"总盈亏\", \"cell\": BaseCell, \"update\": False},\n \"net_pnl\": {\"display\": \"净盈亏\", \"cell\": BaseCell, \"update\": False},\n }\n\n\nclass BacktestingResultDialog(QtWidgets.QDialog):\n \"\"\"\n \"\"\"\n\n def 
__init__(\n self,\n main_engine: MainEngine,\n event_engine: EventEngine,\n title: str,\n table_class: QtWidgets.QTableWidget\n ):\n \"\"\"\"\"\"\n super().__init__()\n\n self.main_engine = main_engine\n self.event_engine = event_engine\n self.title = title\n self.table_class = table_class\n\n self.updated = False\n\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(self.title)\n self.resize(1100, 600)\n\n self.table = self.table_class(self.main_engine, self.event_engine)\n\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.table)\n\n self.setLayout(vbox)\n\n def clear_data(self):\n \"\"\"\"\"\"\n self.updated = False\n self.table.setRowCount(0)\n\n def update_data(self, data: list):\n \"\"\"\"\"\"\n self.updated = True\n\n data.reverse()\n for obj in data:\n self.table.insert_new_row(obj)\n\n def is_updated(self):\n \"\"\"\"\"\"\n return self.updated\n\n\nclass CandleChartDialog(QtWidgets.QDialog):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n super().__init__()\n\n self.dt_ix_map = {}\n self.updated = False\n self.init_ui()\n\n def init_ui(self):\n \"\"\"\"\"\"\n self.setWindowTitle(\"回测K线图表\")\n self.resize(1400, 800)\n\n # Create chart widget\n self.chart = ChartWidget()\n self.chart.add_plot(\"candle\", hide_x_axis=True)\n self.chart.add_plot(\"volume\", maximum_height=200)\n self.chart.add_item(CandleItem, \"candle\", \"candle\")\n self.chart.add_item(VolumeItem, \"volume\", \"volume\")\n self.chart.add_cursor()\n\n # Add scatter item for showing tradings\n self.trade_scatter = pg.ScatterPlotItem()\n candle_plot = self.chart.get_plot(\"candle\")\n candle_plot.addItem(self.trade_scatter)\n\n # Set layout\n vbox = QtWidgets.QVBoxLayout()\n vbox.addWidget(self.chart)\n self.setLayout(vbox)\n\n def update_history(self, history: list):\n \"\"\"\"\"\"\n self.updated = True\n self.chart.update_history(history)\n\n for ix, bar in enumerate(history):\n self.dt_ix_map[bar.datetime] = ix\n\n def update_trades(self, trades: list):\n \"\"\"\"\"\"\n trade_data = []\n\n for trade in trades:\n ix = self.dt_ix_map[trade.datetime]\n\n scatter = {\n \"pos\": (ix, trade.price),\n \"data\": 1,\n \"size\": 14,\n \"pen\": pg.mkPen((255, 255, 255))\n }\n\n if trade.direction == Direction.LONG:\n scatter_symbol = \"t1\" # Up arrow\n else:\n scatter_symbol = \"t\" # Down arrow\n\n if trade.offset == Offset.OPEN:\n scatter_brush = pg.mkBrush((255, 255, 0)) # Yellow\n else:\n scatter_brush = pg.mkBrush((0, 0, 255)) # Blue\n\n scatter[\"symbol\"] = scatter_symbol\n scatter[\"brush\"] = scatter_brush\n\n trade_data.append(scatter)\n\n self.trade_scatter.setData(trade_data)\n\n def clear_data(self):\n \"\"\"\"\"\"\n self.updated = False\n self.chart.clear_all()\n\n self.dt_ix_map.clear()\n self.trade_scatter.clear()\n\n def is_updated(self):\n \"\"\"\"\"\"\n return self.updated\n" ]
[ [ "numpy.histogram" ] ]
mvaldenegro/paper-subensembles-image-classification
[ "cc3a6567b1de82b9bfb1612ad8d0e73cdd7ae09b" ]
[ "svhn/evaluate_calibration.py" ]
[ "import numpy as np\nimport h5py\nimport pandas as pd\n\nfrom svhn_io import load_svhn\nfrom keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error\n\nEPSILON = 1e-10\n\ndef load_hdf5_data(filename):\n inp = h5py.File(filename, \"r\")\n preds = inp[\"preds\"][...]\n\n inp.close()\n\n return preds\n\nNUM_ENSEMBLES = 15\nNUM_BINS=7\n\n#IOD_FILE_PATTERN = \"cnn_svhn-num_ens-{}-preds.hdf5\"\n#OUTPUT_PATTERN = \"svhn-calibration-sub-deepensembles_1_num-ens-{}_cnn_svhn.csv\"\n\nIOD_FILE_PATTERN = \"deepensembles-cnn_svhn-num_ens-{}-preds.hdf5\"\nOUTPUT_PATTERN = \"svhn-calibration-deepensembles-num-ens-{}_cnn_svhn.csv\"\n\nif __name__ == \"__main__\":\n for num_ens in range(1, NUM_ENSEMBLES + 1):\n (_, __), (___, y_true) = load_svhn()\n y_true = y_true.flatten()\n\n y_probs = load_hdf5_data(IOD_FILE_PATTERN.format(num_ens))\n y_confs = np.max(y_probs, axis=1)\n y_pred = np.argmax(y_probs, axis=1)\n\n curve_conf, curve_acc = classifier_calibration_curve(y_pred, y_true, y_confs, num_bins=NUM_BINS)\n error = classifier_calibration_error(y_pred, y_true, y_confs, num_bins=NUM_BINS)\n\n print(\"Processing calibration curve for {} ensembles. Error: {}\".format(num_ens, error))\n\n output_df = pd.DataFrame(data={\"conf\": curve_conf, \"acc\": curve_acc})\n output_df.to_csv(OUTPUT_PATTERN.format(num_ens), sep=';', index=False)" ]
[ [ "numpy.max", "pandas.DataFrame", "numpy.argmax" ] ]
tolysz/numba
[ "d7953a18dbf5ea231dc16e967ce8e9b754578ea6" ]
[ "numba/targets/npyimpl.py" ]
[ "\"\"\"\nImplementation of functions in the Numpy package.\n\"\"\"\n\n\nimport math\nimport sys\nimport itertools\nfrom collections import namedtuple\n\nfrom llvmlite.llvmpy import core as lc\n\nimport numpy as np\nimport operator\n\nfrom . import builtins, callconv, ufunc_db, arrayobj\nfrom .imputils import Registry, impl_ret_new_ref, force_error_model\nfrom .. import typing, types, cgutils, numpy_support, utils\nfrom ..numpy_support import ufunc_find_matching_loop, select_array_wrapper, from_dtype\nfrom ..typing import npydecl\nfrom ..extending import overload, intrinsic\n\nfrom .. import errors\n\nregistry = Registry()\nlower = registry.lower\n\n\n########################################################################\n\n# In the way we generate code, ufuncs work with scalar as well as\n# with array arguments. The following helper classes help dealing\n# with scalar and array arguments in a regular way.\n#\n# In short, the classes provide a uniform interface. The interface\n# handles the indexing of as many dimensions as the array may have.\n# For scalars, all indexing is ignored and when the value is read,\n# the scalar is returned. For arrays code for actual indexing is\n# generated and reading performs the appropriate indirection.\n\nclass _ScalarIndexingHelper(object):\n def update_indices(self, loop_indices, name):\n pass\n\n def as_values(self):\n pass\n\n\nclass _ScalarHelper(object):\n \"\"\"Helper class to handle scalar arguments (and result).\n Note that store_data is only used when generating code for\n a scalar ufunc and to write the output value.\n\n For loading, the value is directly used without having any\n kind of indexing nor memory backing it up. This is the use\n for input arguments.\n\n For storing, a variable is created in the stack where the\n value will be written.\n\n Note that it is not supported (as it is unneeded for our\n current use-cases) reading back a stored value. This class\n will always \"load\" the original value it got at its creation.\n \"\"\"\n def __init__(self, ctxt, bld, val, ty):\n self.context = ctxt\n self.builder = bld\n self.val = val\n self.base_type = ty\n intpty = ctxt.get_value_type(types.intp)\n self.shape = [lc.Constant.int(intpty, 1)]\n\n lty = ctxt.get_data_type(ty) if ty != types.boolean else lc.Type.int(1)\n self._ptr = cgutils.alloca_once(bld, lty)\n\n def create_iter_indices(self):\n return _ScalarIndexingHelper()\n\n def load_data(self, indices):\n return self.val\n\n def store_data(self, indices, val):\n self.builder.store(val, self._ptr)\n\n @property\n def return_val(self):\n return self.builder.load(self._ptr)\n\n\nclass _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper',\n ('array', 'indices'))):\n def update_indices(self, loop_indices, name):\n bld = self.array.builder\n intpty = self.array.context.get_value_type(types.intp)\n ONE = lc.Constant.int(lc.Type.int(intpty.width), 1)\n\n # we are only interested in as many inner dimensions as dimensions\n # the indexed array has (the outer dimensions are broadcast, so\n # ignoring the outer indices produces the desired result.\n indices = loop_indices[len(loop_indices) - len(self.indices):]\n for src, dst, dim in zip(indices, self.indices, self.array.shape):\n cond = bld.icmp(lc.ICMP_UGT, dim, ONE)\n with bld.if_then(cond):\n bld.store(src, dst)\n\n def as_values(self):\n \"\"\"\n The indexing helper is built using alloca for each value, so it\n actually contains pointers to the actual indices to load. Note\n that update_indices assumes the same. 
This method returns the\n indices as values\n \"\"\"\n bld = self.array.builder\n return [bld.load(index) for index in self.indices]\n\n\nclass _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder',\n 'shape', 'strides', 'data',\n 'layout', 'base_type', 'ndim',\n 'return_val'))):\n \"\"\"Helper class to handle array arguments/result.\n It provides methods to generate code loading/storing specific\n items as well as support code for handling indices.\n \"\"\"\n def create_iter_indices(self):\n intpty = self.context.get_value_type(types.intp)\n ZERO = lc.Constant.int(lc.Type.int(intpty.width), 0)\n\n indices = []\n for i in range(self.ndim):\n x = cgutils.alloca_once(self.builder, lc.Type.int(intpty.width))\n self.builder.store(ZERO, x)\n indices.append(x)\n return _ArrayIndexingHelper(self, indices)\n\n def _load_effective_address(self, indices):\n return cgutils.get_item_pointer2(self.context,\n self.builder,\n data=self.data,\n shape=self.shape,\n strides=self.strides,\n layout=self.layout,\n inds=indices)\n\n def load_data(self, indices):\n model = self.context.data_model_manager[self.base_type]\n ptr = self._load_effective_address(indices)\n return model.load_from_data_pointer(self.builder, ptr)\n\n def store_data(self, indices, value):\n ctx = self.context\n bld = self.builder\n store_value = ctx.get_value_as_data(bld, self.base_type, value)\n assert ctx.get_data_type(self.base_type) == store_value.type\n bld.store(store_value, self._load_effective_address(indices))\n\n\ndef _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'):\n \"\"\"returns an instance of the appropriate Helper (either\n _ScalarHelper or _ArrayHelper) class to handle the argument.\n using the polymorphic interface of the Helper classes, scalar\n and array cases can be handled with the same code\"\"\"\n\n # first un-Optional Optionals\n if isinstance(tyinp, types.Optional):\n oty = tyinp\n tyinp = tyinp.type\n inp = ctxt.cast(bld, inp, oty, tyinp)\n\n # then prepare the arg for a concrete instance\n if isinstance(tyinp, types.ArrayCompatible):\n ary = ctxt.make_array(tyinp)(ctxt, bld, inp)\n shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)\n strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)\n return _ArrayHelper(ctxt, bld, shape, strides, ary.data,\n tyinp.layout, tyinp.dtype, tyinp.ndim, inp)\n elif types.unliteral(tyinp) in types.number_domain | set([types.boolean]):\n return _ScalarHelper(ctxt, bld, inp, tyinp)\n else:\n raise NotImplementedError('unsupported type for {0}: {1}'.format(where, str(tyinp)))\n\n\n_broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp),\n types.intp, types.CPointer(types.intp))\ndef _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape):\n '''Low-level utility function used in calculating a shape for\n an implicit output array. This function assumes that the\n destination shape is an LLVM pointer to a C-style array that was\n already initialized to a size of one along all axes.\n\n Returns an integer value:\n >= 1 : Succeeded. 
Return value should equal the number of dimensions in\n the destination shape.\n 0 : Failed to broadcast because source shape is larger than the\n destination shape (this case should be weeded out at type\n checking).\n < 0 : Failed to broadcast onto destination axis, at axis number ==\n -(return_value + 1).\n '''\n if src_ndim > dest_ndim:\n # This check should have been done during type checking, but\n # let's be defensive anyway...\n return 0\n else:\n src_index = 0\n dest_index = dest_ndim - src_ndim\n while src_index < src_ndim:\n src_dim_size = src_shape[src_index]\n dest_dim_size = dest_shape[dest_index]\n # Check to see if we've already mutated the destination\n # shape along this axis.\n if dest_dim_size != 1:\n # If we have mutated the destination shape already,\n # then the source axis size must either be one,\n # or the destination axis size.\n if src_dim_size != dest_dim_size and src_dim_size != 1:\n return -(dest_index + 1)\n elif src_dim_size != 1:\n # If the destination size is still its initial\n dest_shape[dest_index] = src_dim_size\n src_index += 1\n dest_index += 1\n return dest_index\n\ndef _build_array(context, builder, array_ty, input_types, inputs):\n \"\"\"Utility function to handle allocation of an implicit output array\n given the target context, builder, output array type, and a list of\n _ArrayHelper instances.\n \"\"\"\n intp_ty = context.get_value_type(types.intp)\n def make_intp_const(val):\n return context.get_constant(types.intp, val)\n\n ZERO = make_intp_const(0)\n ONE = make_intp_const(1)\n\n src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,\n \"src_shape\")\n dest_ndim = make_intp_const(array_ty.ndim)\n dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,\n \"dest_shape\")\n dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index)\n for index in range(array_ty.ndim))\n\n # Initialize the destination shape with all ones.\n for dest_shape_addr in dest_shape_addrs:\n builder.store(ONE, dest_shape_addr)\n\n # For each argument, try to broadcast onto the destination shape,\n # mutating along any axis where the argument shape is not one and\n # the destination shape is one.\n for arg_number, arg in enumerate(inputs):\n if not hasattr(arg, \"ndim\"): # Skip scalar arguments\n continue\n arg_ndim = make_intp_const(arg.ndim)\n for index in range(arg.ndim):\n builder.store(arg.shape[index],\n cgutils.gep_inbounds(builder, src_shape, index))\n arg_result = context.compile_internal(\n builder, _broadcast_onto, _broadcast_onto_sig,\n [arg_ndim, src_shape, dest_ndim, dest_shape])\n with cgutils.if_unlikely(builder,\n builder.icmp(lc.ICMP_SLT, arg_result, ONE)):\n msg = \"unable to broadcast argument %d to output array\" % (\n arg_number,)\n\n loc = errors.loc_info.get('loc', None)\n if loc is not None:\n msg += '\\nFile \"%s\", line %d, ' % (loc.filename, loc.line)\n\n context.call_conv.return_user_exc(builder, ValueError, (msg,))\n\n real_array_ty = array_ty.as_array\n\n dest_shape_tup = tuple(builder.load(dest_shape_addr)\n for dest_shape_addr in dest_shape_addrs)\n array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty,\n dest_shape_tup)\n\n # Get the best argument to call __array_wrap__ on\n array_wrapper_index = select_array_wrapper(input_types)\n array_wrapper_ty = input_types[array_wrapper_index]\n try:\n # __array_wrap__(source wrapped array, out array) -> out wrapped array\n array_wrap = context.get_function('__array_wrap__',\n array_ty(array_wrapper_ty, real_array_ty))\n except NotImplementedError:\n 
# If it's the same priority as a regular array, assume we\n # should use the allocated array unchanged.\n if array_wrapper_ty.array_priority != types.Array.array_priority:\n raise\n out_val = array_val._getvalue()\n else:\n wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())\n out_val = array_wrap(builder, wrap_args)\n\n ndim = array_ty.ndim\n shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)\n strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)\n return _ArrayHelper(context, builder, shape, strides, array_val.data,\n array_ty.layout, array_ty.dtype, ndim,\n out_val)\n\n\ndef numpy_ufunc_kernel(context, builder, sig, args, kernel_class,\n explicit_output=True):\n # This is the code generator that builds all the looping needed\n # to execute a numpy functions over several dimensions (including\n # scalar cases).\n #\n # context - the code generation context\n # builder - the code emitter\n # sig - signature of the ufunc\n # args - the args to the ufunc\n # kernel_class - a code generating subclass of _Kernel that provides\n # explicit_output - if the output was explicit in the call\n # (ie: np.add(x,y,r))\n\n arguments = [_prepare_argument(context, builder, arg, tyarg)\n for arg, tyarg in zip(args, sig.args)]\n if not explicit_output:\n ret_ty = sig.return_type\n if isinstance(ret_ty, types.ArrayCompatible):\n output = _build_array(context, builder, ret_ty, sig.args, arguments)\n else:\n output = _prepare_argument(\n context, builder,\n lc.Constant.null(context.get_value_type(ret_ty)), ret_ty)\n arguments.append(output)\n elif context.enable_nrt:\n # Incref the output\n context.nrt.incref(builder, sig.return_type, args[-1])\n\n inputs = arguments[0:-1]\n output = arguments[-1]\n\n outer_sig = [a.base_type for a in arguments]\n #signature expects return type first, while we have it last:\n outer_sig = outer_sig[-1:] + outer_sig[:-1]\n outer_sig = typing.signature(*outer_sig)\n kernel = kernel_class(context, builder, outer_sig)\n intpty = context.get_value_type(types.intp)\n\n indices = [inp.create_iter_indices() for inp in inputs]\n\n loopshape = output.shape\n with cgutils.loop_nest(builder, loopshape, intp=intpty) as loop_indices:\n vals_in = []\n for i, (index, arg) in enumerate(zip(indices, inputs)):\n index.update_indices(loop_indices, i)\n vals_in.append(arg.load_data(index.as_values()))\n\n val_out = kernel.generate(*vals_in)\n output.store_data(loop_indices, val_out)\n out = arguments[-1].return_val\n return impl_ret_new_ref(context, builder, sig.return_type, out)\n\n\n# Kernels are the code to be executed inside the multidimensional loop.\nclass _Kernel(object):\n def __init__(self, context, builder, outer_sig):\n self.context = context\n self.builder = builder\n self.outer_sig = outer_sig\n\n def cast(self, val, fromty, toty):\n \"\"\"Numpy uses cast semantics that are different from standard Python\n (for example, it does allow casting from complex to float).\n\n This method acts as a patch to context.cast so that it allows\n complex to real/int casts.\n\n \"\"\"\n if (isinstance(fromty, types.Complex) and\n not isinstance(toty, types.Complex)):\n # attempt conversion of the real part to the specified type.\n # note that NumPy issues a warning in this kind of conversions\n newty = fromty.underlying_float\n attr = self.context.get_getattr(fromty, 'real')\n val = attr(self.context, self.builder, fromty, val, 'real')\n fromty = newty\n # let the regular cast do the rest...\n\n return self.context.cast(self.builder, val, fromty, 
toty)\n\n\ndef _ufunc_db_function(ufunc):\n \"\"\"Use the ufunc loop type information to select the code generation\n function from the table provided by the dict_of_kernels. The dict\n of kernels maps the loop identifier to a function with the\n following signature: (context, builder, signature, args).\n\n The loop type information has the form 'AB->C'. The letters to the\n left of '->' are the input types (specified as NumPy letter\n types). The letters to the right of '->' are the output\n types. There must be 'ufunc.nin' letters to the left of '->', and\n 'ufunc.nout' letters to the right.\n\n For example, a binary float loop resulting in a float, will have\n the following signature: 'ff->f'.\n\n A given ufunc implements many loops. The list of loops implemented\n for a given ufunc can be accessed using the 'types' attribute in\n the ufunc object. The NumPy machinery selects the first loop that\n fits a given calling signature (in our case, what we call the\n outer_sig). This logic is mimicked by 'ufunc_find_matching_loop'.\n \"\"\"\n\n class _KernelImpl(_Kernel):\n def __init__(self, context, builder, outer_sig):\n super(_KernelImpl, self).__init__(context, builder, outer_sig)\n loop = ufunc_find_matching_loop(\n ufunc, outer_sig.args + (outer_sig.return_type,))\n self.fn = ufunc_db.get_ufunc_info(ufunc).get(loop.ufunc_sig)\n self.inner_sig = typing.signature(\n *(loop.outputs + loop.inputs))\n\n if self.fn is None:\n msg = \"Don't know how to lower ufunc '{0}' for loop '{1}'\"\n raise NotImplementedError(msg.format(ufunc.__name__, loop))\n\n def generate(self, *args):\n isig = self.inner_sig\n osig = self.outer_sig\n\n cast_args = [self.cast(val, inty, outty)\n for val, inty, outty in zip(args, osig.args,\n isig.args)]\n with force_error_model(self.context, 'numpy'):\n res = self.fn(self.context, self.builder, isig, cast_args)\n dmm = self.context.data_model_manager\n res = dmm[isig.return_type].from_return(self.builder, res)\n return self.cast(res, isig.return_type, osig.return_type)\n\n return _KernelImpl\n\n\n################################################################################\n# Helper functions that register the ufuncs\n\n_kernels = {} # Temporary map from ufunc's to their kernel implementation class\n\ndef register_unary_ufunc_kernel(ufunc, kernel):\n def unary_ufunc(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel)\n\n def unary_ufunc_no_explicit_output(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=False)\n\n _any = types.Any\n\n # (array or scalar, out=array)\n lower(ufunc, _any, types.Array)(unary_ufunc)\n # (array or scalar)\n lower(ufunc, _any)(unary_ufunc_no_explicit_output)\n\n _kernels[ufunc] = kernel\n\n\ndef register_binary_ufunc_kernel(ufunc, kernel):\n def binary_ufunc(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel)\n\n def binary_ufunc_no_explicit_output(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=False)\n\n _any = types.Any\n\n # (array or scalar, array o scalar, out=array)\n lower(ufunc, _any, _any, types.Array)(binary_ufunc)\n # (scalar, scalar)\n lower(ufunc, _any, _any)(binary_ufunc_no_explicit_output)\n\n _kernels[ufunc] = kernel\n\n\ndef register_unary_operator_kernel(operator, kernel, inplace=False):\n assert not inplace # are there any inplace unary operators?\n def lower_unary_operator(context, builder, sig, args):\n 
return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=False)\n _arr_kind = types.Array\n lower(operator, _arr_kind)(lower_unary_operator)\n\n\ndef register_binary_operator_kernel(op, kernel, inplace=False):\n def lower_binary_operator(context, builder, sig, args):\n return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=False)\n\n def lower_inplace_operator(context, builder, sig, args):\n # The visible signature is (A, B) -> A\n # The implementation's signature (with explicit output)\n # is (A, B, A) -> A\n args = tuple(args) + (args[0],)\n sig = typing.signature(sig.return_type, *sig.args + (sig.args[0],))\n return numpy_ufunc_kernel(context, builder, sig, args, kernel,\n explicit_output=True)\n\n _any = types.Any\n _arr_kind = types.Array\n formal_sigs = [(_arr_kind, _arr_kind), (_any, _arr_kind), (_arr_kind, _any)]\n for sig in formal_sigs:\n if not inplace:\n lower(op, *sig)(lower_binary_operator)\n else:\n lower(op, *sig)(lower_inplace_operator)\n\n\n\n################################################################################\n# Use the contents of ufunc_db to initialize the supported ufuncs\n\nfor ufunc in ufunc_db.get_ufuncs():\n if ufunc.nin == 1:\n register_unary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc))\n elif ufunc.nin == 2:\n register_binary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc))\n else:\n raise RuntimeError(\"Don't know how to register ufuncs from ufunc_db with arity > 2\")\n\n\n@lower(operator.pos, types.Array)\ndef array_positive_impl(context, builder, sig, args):\n '''Lowering function for +(array) expressions. Defined here\n (numba.targets.npyimpl) since the remaining array-operator\n lowering functions are also registered in this module.\n '''\n class _UnaryPositiveKernel(_Kernel):\n def generate(self, *args):\n [val] = args\n return val\n\n return numpy_ufunc_kernel(context, builder, sig, args,\n _UnaryPositiveKernel, explicit_output=False)\n\n\nfor _op_map in (npydecl.NumpyRulesUnaryArrayOperator._op_map,\n npydecl.NumpyRulesArrayOperator._op_map,\n ):\n for operator, ufunc_name in _op_map.items():\n ufunc = getattr(np, ufunc_name)\n kernel = _kernels[ufunc]\n if ufunc.nin == 1:\n register_unary_operator_kernel(operator, kernel)\n elif ufunc.nin == 2:\n register_binary_operator_kernel(operator, kernel)\n else:\n raise RuntimeError(\"There shouldn't be any non-unary or binary operators\")\n\nfor _op_map in (npydecl.NumpyRulesInplaceArrayOperator._op_map,\n ):\n for operator, ufunc_name in _op_map.items():\n ufunc = getattr(np, ufunc_name)\n kernel = _kernels[ufunc]\n if ufunc.nin == 1:\n register_unary_operator_kernel(operator, kernel, inplace=True)\n elif ufunc.nin == 2:\n register_binary_operator_kernel(operator, kernel, inplace=True)\n else:\n raise RuntimeError(\"There shouldn't be any non-unary or binary operators\")\n\n\n\ndel _kernels\n\n@intrinsic\ndef _make_dtype_object(typingctx, desc):\n \"\"\"Given a string or NumberClass description *desc*, returns the dtype object.\n \"\"\"\n def from_nb_type(nb_type):\n return_type = types.DType(nb_type)\n sig = return_type(desc)\n\n def codegen(context, builder, signature, args):\n # All dtype objects are dummy values in LLVM.\n # They only exist in the type level.\n return context.get_dummy_value()\n\n return sig, codegen\n\n if isinstance(desc, types.Literal):\n # Convert the str description into np.dtype then to numba type.\n nb_type = from_dtype(np.dtype(desc.literal_value))\n return from_nb_type(nb_type)\n elif isinstance(desc, 
types.functions.NumberClass):\n thestr = str(desc.dtype)\n # Convert the str description into np.dtype then to numba type.\n nb_type = from_dtype(np.dtype(thestr))\n return from_nb_type(nb_type)\n\n@overload(np.dtype)\ndef numpy_dtype(desc):\n \"\"\"Provide an implementation so that numpy.dtype function can be lowered.\n \"\"\"\n if isinstance(desc, (types.Literal, types.functions.NumberClass)):\n def imp(desc):\n return _make_dtype_object(desc)\n return imp\n else:\n raise TypeError('unknown dtype descriptor: {}'.format(desc))\n" ]
[ [ "numpy.dtype" ] ]
NicoleEic/projects
[ "028a4bb4b49539fc98b442f0a2f9434e95c94561" ]
[ "neuro_scripts/manual_rigid_body/manual_rigid_body.py" ]
[ "import numpy as np\nimport nibabel as nib\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')\nfrom my_functions.matrix_stuff import *\n\ndef manual_rigid_body(fname = 'example_brain.nii.gz',\n outmat = 'transformation.mat',\n outimg = 'example_brain_transformed.nii.gz',\n theta = np.radians([0,0,0]),\n translation_vec = [0,0,0],\n type = 'rotation',\n flip_coordinates = [True, False, False]):\n\n \"\"\"\n Function to perform a rigid body transformation based on manually determined parameters.\n\n Args:\n - fname (str): filepath to input nifti image (.nii.gz)\n - outmat (str): filepath of output 4x4 transformation matrix (.mat)\n - outimg (str): filepath of transformed output image (.nii.gz)\n - theta (np.array): vector of rotation angles in x,y,z dimension (in radians)\n - translation_vec (np.array): vector for translation in x,y,z (in image coordinates)\n - type (str): can be 'rotation' or 'translation' or 'rotation_translation'\n - flip_coordinates (boolean vector): indicates for which axis the sign of the offset needs to be flipped\n\n Returns:\n - M (np.array): output 4x4 transformation matrix\n - M is written to outmat\n - the output image (outimg) is written out\n\n Note on flip_coordinates:\n Voxel coordinates in the image are expected to increase in the following directions\n (it's similar to determining the reorient-command):\n - first dimension: left -> right\n - second dimension: posterir -> anterior\n - third dimension: inferior -> superior\n\n if they go the other way, change input variable accordingly, e.g.:\n flip_coordinates = [True, False, False]\n \"\"\"\n\n # get sform from image to determine offset of coordinate-system\n img = nib.load(fname)\n aff = img.get_affine()\n offset = aff[0:3,3]\n\n # which type of manipulation is requested\n if type == 'rotation':\n print('do rotation only')\n M = rotation(theta, offset, flip_coordinates)\n elif type == 'translation':\n print('do translation only')\n M = vector_to_translation_matrix(translation_vec)\n elif type == 'rotation_translation':\n print('do combined rotation and translation')\n M = rotation_translation(theta, translation_vec, offset, flip_coordinates)\n\n # save output matrix\n print('output matrix: ', M)\n print('save in: ', outmat)\n save_matrix4x4(M, outmat)\n\n # apply transformation to input image\n applywarp_command = \"applywarp -i \" + fname + \" -r \" + fname + \" --premat=\" + outmat + \" --interp=nn -o \" + outimg\n print('run flirt: ', applywarp_command)\n os.system(applywarp_command)\n\n return M\n" ]
[ [ "numpy.radians" ] ]