Dataset columns (type and value range, as reported by the source viewer):

body                    string  lengths 26 to 98.2k
body_hash               int64   -9,222,864,604,528,158,000 to 9,221,803,474B
docstring               string  lengths 1 to 16.8k
path                    string  lengths 5 to 230
name                    string  lengths 1 to 96
repository_name         string  lengths 7 to 89
lang                    string  1 distinct value
body_without_docstring  string  lengths 20 to 98.2k
@property def response_code(self): '\n Gets the response_code of this UpdateHttpRedirectDetails.\n The response code returned for the redirect to the client. For more information, see `RFC 7231`__.\n\n __ https://tools.ietf.org/html/rfc7231#section-6.4\n\n\n :return: The response_code of this UpdateHttpRedirectDetails.\n :rtype: int\n ' return self._response_code
-2,241,557,004,388,330,200
Gets the response_code of this UpdateHttpRedirectDetails. The response code returned for the redirect to the client. For more information, see `RFC 7231`__. __ https://tools.ietf.org/html/rfc7231#section-6.4 :return: The response_code of this UpdateHttpRedirectDetails. :rtype: int
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
response_code
revnav/sandbox
python
@property def response_code(self): return self._response_code
@response_code.setter def response_code(self, response_code): '\n Sets the response_code of this UpdateHttpRedirectDetails.\n The response code returned for the redirect to the client. For more information, see `RFC 7231`__.\n\n __ https://tools.ietf.org/html/rfc7231#section-6.4\n\n\n :param response_code: The response_code of this UpdateHttpRedirectDetails.\n :type: int\n ' self._response_code = response_code
1,383,919,902,411,671,000
Sets the response_code of this UpdateHttpRedirectDetails. The response code returned for the redirect to the client. For more information, see `RFC 7231`__. __ https://tools.ietf.org/html/rfc7231#section-6.4 :param response_code: The response_code of this UpdateHttpRedirectDetails. :type: int
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
response_code
revnav/sandbox
python
@response_code.setter def response_code(self, response_code): self._response_code = response_code
@property def freeform_tags(self): '\n Gets the freeform_tags of this UpdateHttpRedirectDetails.\n Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.\n For more information, see `Resource Tags`__.\n\n Example: `{"Department": "Finance"}`\n\n __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm\n\n\n :return: The freeform_tags of this UpdateHttpRedirectDetails.\n :rtype: dict(str, str)\n ' return self._freeform_tags
8,730,621,104,488,083,000
Gets the freeform_tags of this UpdateHttpRedirectDetails. Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see `Resource Tags`__. Example: `{"Department": "Finance"}` __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm :return: The freeform_tags of this UpdateHttpRedirectDetails. :rtype: dict(str, str)
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
freeform_tags
revnav/sandbox
python
@property def freeform_tags(self): return self._freeform_tags
@freeform_tags.setter def freeform_tags(self, freeform_tags): '\n Sets the freeform_tags of this UpdateHttpRedirectDetails.\n Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.\n For more information, see `Resource Tags`__.\n\n Example: `{"Department": "Finance"}`\n\n __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm\n\n\n :param freeform_tags: The freeform_tags of this UpdateHttpRedirectDetails.\n :type: dict(str, str)\n ' self._freeform_tags = freeform_tags
-6,774,238,780,399,861,000
Sets the freeform_tags of this UpdateHttpRedirectDetails. Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see `Resource Tags`__. Example: `{"Department": "Finance"}` __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm :param freeform_tags: The freeform_tags of this UpdateHttpRedirectDetails. :type: dict(str, str)
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
freeform_tags
revnav/sandbox
python
@freeform_tags.setter def freeform_tags(self, freeform_tags): self._freeform_tags = freeform_tags
@property def defined_tags(self): '\n Gets the defined_tags of this UpdateHttpRedirectDetails.\n Defined tags for this resource. Each key is predefined and scoped to a namespace.\n For more information, see `Resource Tags`__.\n\n Example: `{"Operations": {"CostCenter": "42"}}`\n\n __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm\n\n\n :return: The defined_tags of this UpdateHttpRedirectDetails.\n :rtype: dict(str, dict(str, object))\n ' return self._defined_tags
-2,485,616,187,404,974,600
Gets the defined_tags of this UpdateHttpRedirectDetails. Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see `Resource Tags`__. Example: `{"Operations": {"CostCenter": "42"}}` __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm :return: The defined_tags of this UpdateHttpRedirectDetails. :rtype: dict(str, dict(str, object))
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
defined_tags
revnav/sandbox
python
@property def defined_tags(self): return self._defined_tags
@defined_tags.setter def defined_tags(self, defined_tags): '\n Sets the defined_tags of this UpdateHttpRedirectDetails.\n Defined tags for this resource. Each key is predefined and scoped to a namespace.\n For more information, see `Resource Tags`__.\n\n Example: `{"Operations": {"CostCenter": "42"}}`\n\n __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm\n\n\n :param defined_tags: The defined_tags of this UpdateHttpRedirectDetails.\n :type: dict(str, dict(str, object))\n ' self._defined_tags = defined_tags
651,850,713,821,746,700
Sets the defined_tags of this UpdateHttpRedirectDetails. Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see `Resource Tags`__. Example: `{"Operations": {"CostCenter": "42"}}` __ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm :param defined_tags: The defined_tags of this UpdateHttpRedirectDetails. :type: dict(str, dict(str, object))
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
defined_tags
revnav/sandbox
python
@defined_tags.setter def defined_tags(self, defined_tags): self._defined_tags = defined_tags
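The six rows above all instantiate the same plain Python property pattern over a private attribute. A minimal standalone sketch of that pattern, assuming nothing beyond what the rows show (this is not the OCI SDK class, just the shape of it):

```python
class UpdateHttpRedirectDetails:
    """Illustrative sketch of the getter/setter pattern in the rows above."""

    def __init__(self):
        self._response_code = None
        self._freeform_tags = None

    @property
    def response_code(self):
        # Backed by a private attribute, as in the rows above.
        return self._response_code

    @response_code.setter
    def response_code(self, response_code):
        self._response_code = response_code

    @property
    def freeform_tags(self):
        return self._freeform_tags

    @freeform_tags.setter
    def freeform_tags(self, freeform_tags):
        self._freeform_tags = freeform_tags


details = UpdateHttpRedirectDetails()
details.response_code = 301                        # a 3xx code per RFC 7231
details.freeform_tags = {"Department": "Finance"}  # format from the docstring above
print(details.response_code, details.freeform_tags)
```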
def __call__(self, image, label): 'Call function of RandomMirrow_pair.\n\n :param image: usually the feature image, for example, the LR image for super resolution dataset,\n the initial image for the segmentation dataset, etc.\n :type image: PIL image\n :param label: usually the label image, for example, the HR image for super resolution dataset,\n the mask image for the segmentation dataset, etc.\n :type label: PIL image\n :return: the image after transform\n :rtype: list, every item is a PIL image, the first one is feature image, the second is label image\n ' flip = ((np.random.choice(2) * 2) - 1) channels_image = image.shape[(- 1)] channels_label = label.shape[(- 1)] if (channels_image == 3): image = image[:, :, ::flip] else: image = image[:, ::flip] if (channels_label == 3): label = label[:, :, ::flip] else: label = label[:, ::flip] return (image, label)
1,972,414,202,479,548,700
Call function of RandomMirrow_pair. :param image: usually the feature image, for example, the LR image for super resolution dataset, the initial image for the segmentation dataset, etc. :type image: PIL image :param label: usually the label image, for example, the HR image for super resolution dataset, the mask image for the segmentation dataset, etc. :type label: PIL image :return: the image after transform :rtype: list, every item is a PIL image, the first one is feature image, the second is label image
vega/datasets/transforms/RandomMirrow_pair.py
__call__
NiuRc/vega
python
def __call__(self, image, label): flip = ((np.random.choice(2) * 2) - 1) channels_image = image.shape[(- 1)] channels_label = label.shape[(- 1)] if (channels_image == 3): image = image[:, :, ::flip] else: image = image[:, ::flip] if (channels_label == 3): label = label[:, :, ::flip] else: label = label[:, ::flip] return (image, label)
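The flip trick in `__call__` draws `np.random.choice(2) * 2 - 1`, which yields either 1 (keep the axis order) or -1 (reverse the sliced axis). A small self-contained demonstration of that slicing step (the array shape is illustrative):

```python
import numpy as np

rng_flip = (np.random.choice(2) * 2) - 1  # either 1 or -1
image = np.arange(12).reshape(3, 4)       # a toy 2-D "image"

# Slicing with step 1 is a no-op; step -1 reverses the axis.
mirrored = image[:, ::rng_flip]
print(rng_flip, mirrored[0])
```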
@classmethod def setup_class(cls): 'Setup the test class.' super().setup_class() cls._patch_logger() doc_path = os.path.join(ROOT_DIR, MD_FILE) cls.code_blocks = extract_code_blocks(filepath=doc_path, filter_='python') test_code_path = os.path.join(CUR_PATH, PY_FILE) cls.python_file = extract_python_code(test_code_path)
-5,111,679,204,263,988,000
Setup the test class.
tests/test_docs/test_standalone_transaction/test_standalone_transaction.py
setup_class
valory-xyz/agents-aea
python
@classmethod def setup_class(cls): super().setup_class() cls._patch_logger() doc_path = os.path.join(ROOT_DIR, MD_FILE) cls.code_blocks = extract_code_blocks(filepath=doc_path, filter_='python') test_code_path = os.path.join(CUR_PATH, PY_FILE) cls.python_file = extract_python_code(test_code_path)
def test_read_md_file(self): 'Test the last code block, that is the full listing of the demo from the Markdown.' assert (self.code_blocks[(- 1)] == self.python_file), 'Files must be exactly the same.'
-1,915,740,591,947,311,900
Test the last code block, that is the full listing of the demo from the Markdown.
tests/test_docs/test_standalone_transaction/test_standalone_transaction.py
test_read_md_file
valory-xyz/agents-aea
python
def test_read_md_file(self): assert (self.code_blocks[(- 1)] == self.python_file), 'Files must be exactly the same.'
@pytest.mark.integration(reruns=MAX_FLAKY_RERUNS_INTEGRATION) def test_run_end_to_end(self): 'Run the transaction from the file.' try: run() self.mocked_logger_info.assert_any_call('Transaction complete.') except RuntimeError: test_logger.info('RuntimeError: Some transactions have failed')
6,095,263,727,091,250,000
Run the transaction from the file.
tests/test_docs/test_standalone_transaction/test_standalone_transaction.py
test_run_end_to_end
valory-xyz/agents-aea
python
@pytest.mark.integration(reruns=MAX_FLAKY_RERUNS_INTEGRATION) def test_run_end_to_end(self): try: run() self.mocked_logger_info.assert_any_call('Transaction complete.') except RuntimeError: test_logger.info('RuntimeError: Some transactions have failed')
def test_code_blocks_exist(self): 'Test that all the code-blocks exist in the python file.' for blocks in self.code_blocks: assert (blocks in self.python_file), "Code-block doesn't exist in the python file."
5,125,484,754,777,977,000
Test that all the code-blocks exist in the python file.
tests/test_docs/test_standalone_transaction/test_standalone_transaction.py
test_code_blocks_exist
valory-xyz/agents-aea
python
def test_code_blocks_exist(self): for blocks in self.code_blocks: assert (blocks in self.python_file), "Code-block doesn't exist in the python file."
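`extract_code_blocks` is a repository helper whose implementation is not shown in this dump; a hedged guess at the general technique it names is a regex over fenced Python blocks in the Markdown source:

```python
import re

FENCE = "`" * 3  # build the triple-backtick marker without writing it literally

def extract_python_blocks(markdown_text):
    # Illustrative only: the repo's extract_code_blocks helper may differ.
    pattern = re.compile(FENCE + r"python\n(.*?)" + FENCE, re.DOTALL)
    return [block.strip() for block in pattern.findall(markdown_text)]

doc = "intro\n" + FENCE + "python\nprint('hi')\n" + FENCE + "\noutro\n"
print(extract_python_blocks(doc))  # ["print('hi')"]
```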
@tf_export('PeriodicResample') def periodic_resample(values, shape, name=None): 'Periodically resample elements of a tensor to conform to `shape`.\n\n This function implements a slightly more generic version of the subpixel\n convolutions found in this [paper](https://arxiv.org/abs/1609.05158).\n\n The formula for computing the elements in the `output` tensor is as follows:\n `T` = `values` tensor of rank `R`\n `S` = desired `shape` of output tensor (vector of length `R`)\n `P` = `output` tensor of rank `R`\n \\((T_1,\\ldots,T_R)\\) = shape(`T`)\n \\([S_1,\\ldots,S_q,\\ldots,S_R]\\) = elements of vector `S`\n\n A single element in `S` is left unspecified (denoted \\(S_q=-1\\)).\n Let \\(f_i\\) denote the (possibly non-integer) factor that relates the original\n dimension to the desired dimensions, \\(S_i=f_i T_i\\), for \\(i\\neq q\\) where\n \\(f_i>0\\).\n Define the following:\n \\(g_i=\\lceil f_i\\rceil\\)\n \\(t=\\prod_i T_i\\)\n \\(s=\\prod_{i\\neq q} S_i\\)\n \\(S_q\\) can then be defined by \\(S_q=\\lfloor t/s\\rfloor\\).\n The elements of the resulting tensor are defined as\n \\(P_{s_1,\\ldots,s_R}=T_{h_1,\\ldots,h_q,\\ldots,h_R}\\).\n The \\(h_i\\) (\\(i\\neq q\\)) are defined by \\(h_i=\\lfloor s_i/g_i\\rfloor\\).\n \\(h_q=S_q\\sum_{j\\neq q}^{q-1}G_j \\mathrm{mod}(s_j,g_j) + s_q\\), where\n \\(G_j=\\prod_{i}^{j-1}g_i\\) (\\(G_0=1\\)).\n\n One drawback of this method is that whenever the output dimensions are slightly\n less than integer multiples of the input dimensions, many of the tensor elements\n are repeated in an inefficient way. This is resolved by specifying that all\n desired dimensions are integer multiples of the input tensor.\n\n For example:\n\n ```prettyprint\n `input` is [[ 0 1 2 3]\n [ 4 5 6 7]\n [ 8 9 10 11]]\n\n tf.periodic_resample(input, [6, None]) ==> [[ 0 1]\n [ 2 3]\n [ 4 5]\n [ 6 7]\n [ 8 9]\n [10 11]]\n ```\n\n Args:\n values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`, `uint32`, `uint64`, `bfloat16`.\n The tensor of rank `R` to periodic_resample\n shape: A `tf.TensorShape` or list of `ints`.\n A 1-D tensor representing the desired shape of the output tensor.\n Exactly one element of this tensor must have the value `None` which represents\n that this dimension of `values` can be adjusted downward in order to\n accommodate increases in other dimensions. The specified sizes of the\n non-adjustable dimensions must be at least as large as in the `values` tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `values`.\n Periodically resampled tensor that has dimensions specified as in\n `shape` except that the dimension specified as `None` will be minimally\n decreased as necessary.\n ' shape = _execute.make_shape(shape, 'shape') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('PeriodicResample', values=values, shape=shape, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ('T', _op.get_attr('T'), 'shape', _op.get_attr('shape')) else: (_attr_T, (values,)) = _execute.args_to_matching_eager([values], _ctx) _inputs_flat = [values] _attrs = ('T', _attr_T, 'shape', shape) _result = _execute.execute(b'PeriodicResample', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient('PeriodicResample', _inputs_flat, _attrs, _result, name) (_result,) = _result return _result
377,029,510,498,402,050
Periodically resample elements of a tensor to conform to `shape`. This function implements a slightly more generic version of the subpixel convolutions found in this [paper](https://arxiv.org/abs/1609.05158). The formula for computing the elements in the `output` tensor is as follows: `T` = `values` tensor of rank `R` `S` = desired `shape` of output tensor (vector of length `R`) `P` = `output` tensor of rank `R` \((T_1,\ldots,T_R)\) = shape(`T`) \([S_1,\ldots,S_q,\ldots,S_R]\) = elements of vector `S` A single element in `S` is left unspecified (denoted \(S_q=-1\)). Let \(f_i\) denote the (possibly non-integer) factor that relates the original dimension to the desired dimensions, \(S_i=f_i T_i\), for \(i\neq q\) where \(f_i>0\). Define the following: \(g_i=\lceil f_i\rceil\) \(t=\prod_i T_i\) \(s=\prod_{i\neq q} S_i\) \(S_q\) can then be defined by \(S_q=\lfloor t/s\rfloor\). The elements of the resulting tensor are defined as \(P_{s_1,\ldots,s_R}=T_{h_1,\ldots,h_q,\ldots,h_R}\). The \(h_i\) (\(i\neq q\)) are defined by \(h_i=\lfloor s_i/g_i\rfloor\). \(h_q=S_q\sum_{j\neq q}^{q-1}G_j \mathrm{mod}(s_j,g_j) + s_q\), where \(G_j=\prod_{i}^{j-1}g_i\) (\(G_0=1\)). One drawback of this method is that whenever the output dimensions are slightly less than integer multiples of the input dimensions, many of the tensor elements are repeated in an inefficient way. This is resolved by specifying that all desired dimensions are integer multiples of the input tensor. For example: ```prettyprint `input` is [[ 0 1 2 3] [ 4 5 6 7] [ 8 9 10 11]] tf.periodic_resample(input, [6, None]) ==> [[ 0 1] [ 2 3] [ 4 5] [ 6 7] [ 8 9] [10 11]] ``` Args: values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`, `uint32`, `uint64`, `bfloat16`. The tensor of rank `R` to periodic_resample shape: A `tf.TensorShape` or list of `ints`. A 1-D tensor representing the desired shape of the output tensor. Exactly one element of this tensor must have the value `None` which represents that this dimension of `values` can be adjusted downward in order to accommodate increases in other dimensions. The specified sizes of the non-adjustable dimensions must be at least as large as in the `values` tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `values`. Periodically resampled tensor that has dimensions specified as in `shape` except that the dimension specified as `None` will be minimally decreased as necessary.
tensorflow/contrib/periodic_resample/python/ops/gen_periodic_resample_op.py
periodic_resample
gian1312/suchen
python
@tf_export('PeriodicResample') def periodic_resample(values, shape, name=None): shape = _execute.make_shape(shape, 'shape') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('PeriodicResample', values=values, shape=shape, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ('T', _op.get_attr('T'), 'shape', _op.get_attr('shape')) else: (_attr_T, (values,)) = _execute.args_to_matching_eager([values], _ctx) _inputs_flat = [values] _attrs = ('T', _attr_T, 'shape', shape) _result = _execute.execute(b'PeriodicResample', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient('PeriodicResample', _inputs_flat, _attrs, _result, name) (_result,) = _result return _result
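The docstring's rule for the unspecified dimension, \(S_q=\lfloor t/s\rfloor\) with \(t=\prod_i T_i\) and \(s=\prod_{i\neq q} S_i\), can be checked directly against the docstring's own example. A small sketch of just that shape arithmetic (the helper name is illustrative; this is not the op itself):

```python
from math import prod

def resolved_output_shape(input_shape, desired_shape):
    """Fill the single None entry with floor(t / s), per the docstring."""
    q = desired_shape.index(None)
    t = prod(input_shape)
    s = prod(dim for i, dim in enumerate(desired_shape) if i != q)
    resolved = list(desired_shape)
    resolved[q] = t // s
    return resolved

# The 3x4 example input has t = 12; with S = [6, None], s = 6 and S_q = 2.
print(resolved_output_shape((3, 4), [6, None]))  # [6, 2]
```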
def encrypt(password): '\n Take a password string, encrypt it with Fernet symmetric encryption,\n and return the result (bytes), with the decryption key (bytes)\n ' encryption_key = Fernet.generate_key() fernet = Fernet(encryption_key) encrypted_password = fernet.encrypt(password.encode('utf-8')) return (encrypted_password, encryption_key)
1,055,429,369,133,855,200
Take a password string, encrypt it with Fernet symmetric encryption, and return the result (bytes), with the decryption key (bytes)
snappass/main.py
encrypt
47Billion/snappass
python
def encrypt(password): encryption_key = Fernet.generate_key() fernet = Fernet(encryption_key) encrypted_password = fernet.encrypt(password.encode('utf-8')) return (encrypted_password, encryption_key)
def decrypt(password, decryption_key): '\n Decrypt a password (bytes) using the provided key (bytes),\n and return the plain-text password (bytes).\n ' fernet = Fernet(decryption_key) return fernet.decrypt(password)
4,633,125,073,615,073,000
Decrypt a password (bytes) using the provided key (bytes), and return the plain-text password (bytes).
snappass/main.py
decrypt
47Billion/snappass
python
def decrypt(password, decryption_key): fernet = Fernet(decryption_key) return fernet.decrypt(password)
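`Fernet` here is `cryptography.fernet.Fernet`. A round trip through the same calls the two helpers above make, as a standalone sketch using the library directly:

```python
from cryptography.fernet import Fernet

key = Fernet.generate_key()   # urlsafe base64-encoded 32-byte key
fernet = Fernet(key)

token = fernet.encrypt('s3cret'.encode('utf-8'))  # bytes in, token bytes out
assert fernet.decrypt(token) == b's3cret'
```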
@check_redis_alive def set_password(password, ttl): '\n Encrypt and store the password for the specified lifetime.\n\n Returns a token comprised of the key where the encrypted password\n is stored, and the decryption key.\n ' storage_key = (REDIS_PREFIX + uuid.uuid4().hex) (encrypted_password, encryption_key) = encrypt(password) redis_client.setex(storage_key, ttl, encrypted_password) encryption_key = encryption_key.decode('utf-8') token = TOKEN_SEPARATOR.join([storage_key, encryption_key]) return token
7,699,990,961,423,685,000
Encrypt and store the password for the specified lifetime. Returns a token comprised of the key where the encrypted password is stored, and the decryption key.
snappass/main.py
set_password
47Billion/snappass
python
@check_redis_alive def set_password(password, ttl): storage_key = (REDIS_PREFIX + uuid.uuid4().hex) (encrypted_password, encryption_key) = encrypt(password) redis_client.setex(storage_key, ttl, encrypted_password) encryption_key = encryption_key.decode('utf-8') token = TOKEN_SEPARATOR.join([storage_key, encryption_key]) return token
@check_redis_alive def get_password(token): '\n From a given token, return the initial password.\n\n If the token is tilde-separated, we decrypt the password fetched from Redis.\n If not, the password is simply returned as is.\n ' (storage_key, decryption_key) = parse_token(token) password = redis_client.get(storage_key) redis_client.delete(storage_key) if (password is not None): if (decryption_key is not None): password = decrypt(password, decryption_key) return password.decode('utf-8')
7,419,285,449,449,767,000
From a given token, return the initial password. If the token is tilde-separated, we decrypt the password fetched from Redis. If not, the password is simply returned as is.
snappass/main.py
get_password
47Billion/snappass
python
@check_redis_alive def get_password(token): (storage_key, decryption_key) = parse_token(token) password = redis_client.get(storage_key) redis_client.delete(storage_key) if (password is not None): if (decryption_key is not None): password = decrypt(password, decryption_key) return password.decode('utf-8')
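set_password stores only the ciphertext in Redis and returns a token of storage key plus decryption key, so the server alone can never recover the password; get_password deletes the entry on read, making it one-time. A Redis-free sketch of the same token round trip (an in-memory dict stands in for Redis, with no TTL handling; the prefix and separator values are stand-ins for REDIS_PREFIX and TOKEN_SEPARATOR):

```python
import uuid
from cryptography.fernet import Fernet

STORE = {}               # stands in for Redis in this sketch
TOKEN_SEPARATOR = '~'    # matches the "tilde-separated" wording above

def set_password(password):
    storage_key = 'snappass' + uuid.uuid4().hex   # stand-in for REDIS_PREFIX
    key = Fernet.generate_key()
    STORE[storage_key] = Fernet(key).encrypt(password.encode('utf-8'))
    return TOKEN_SEPARATOR.join([storage_key, key.decode('utf-8')])

def get_password(token):
    storage_key, key = token.split(TOKEN_SEPARATOR)
    ciphertext = STORE.pop(storage_key)           # one-time read, like the DELETE
    return Fernet(key.encode('utf-8')).decrypt(ciphertext).decode('utf-8')

token = set_password('hunter2')
print(get_password(token))  # hunter2
```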
def clean_input(): "\n Make sure we're not getting bad data from the front end,\n format data to be machine readable\n " if empty(request.form.get('password', '')): abort(400) if empty(request.form.get('ttl', '')): abort(400) time_period = request.form['ttl'].lower() if (time_period not in TIME_CONVERSION): abort(400) return (TIME_CONVERSION[time_period], request.form['password'])
4,107,296,819,267,789,000
Make sure we're not getting bad data from the front end, format data to be machine readable
snappass/main.py
clean_input
47Billion/snappass
python
def clean_input(): if empty(request.form.get('password', '')): abort(400) if empty(request.form.get('ttl', '')): abort(400) time_period = request.form['ttl'].lower() if (time_period not in TIME_CONVERSION): abort(400) return (TIME_CONVERSION[time_period], request.form['password'])
def prepare_to_disconnect(self) -> None: 'Called when we will disconnect with the peer.' pass
4,461,026,119,724,351,500
Called when we will disconnect with the peer.
hathor/p2p/states/base.py
prepare_to_disconnect
HathorNetwork/hathor-core
python
def prepare_to_disconnect(self) -> None: pass
def get_classes(dataset): 'Get class names of a dataset.' alias2name = {} for (name, aliases) in dataset_aliases.items(): for alias in aliases: alias2name[alias] = name if mmcv.is_str(dataset): if (dataset in alias2name): labels = eval((alias2name[dataset] + '_classes()')) else: raise ValueError('Unrecognized dataset: {}'.format(dataset)) else: raise TypeError('dataset must a str, but got {}'.format(type(dataset))) return labels
1,408,958,000,846,626,000
Get class names of a dataset.
my_configs/new/mmdet/core/evaluation/class_names.py
get_classes
UESTC-Liuxin/TianChi
python
def get_classes(dataset): alias2name = {} for (name, aliases) in dataset_aliases.items(): for alias in aliases: alias2name[alias] = name if mmcv.is_str(dataset): if (dataset in alias2name): labels = eval((alias2name[dataset] + '_classes()')) else: raise ValueError('Unrecognized dataset: {}'.format(dataset)) else: raise TypeError('dataset must a str, but got {}'.format(type(dataset))) return labels
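get_classes builds a function name at runtime and invokes it with eval ('<dataset>_classes()'). A dictionary of callables gives the same lookup without eval; a hedged sketch of that alternative (the class list is a truncated placeholder, not the real mmdet table):

```python
def voc_classes():
    # Placeholder: the real list lives in mmdet's class_names module.
    return ['aeroplane', 'bicycle', 'bird']

CLASS_FN_REGISTRY = {'voc': voc_classes}

def get_classes(dataset):
    try:
        return CLASS_FN_REGISTRY[dataset]()
    except KeyError:
        raise ValueError('Unrecognized dataset: {}'.format(dataset))

print(get_classes('voc'))  # ['aeroplane', 'bicycle', 'bird']
```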
def add_entities(self, devices, action): 'Mock add devices.' for device in devices: self.DEVICES.append(device)
436,858,714,708,463,360
Mock add devices.
tests/components/sensor/test_canary.py
add_entities
27tech/home-assistant
python
def add_entities(self, devices, action): for device in devices: self.DEVICES.append(device)
def setUp(self): 'Initialize values for this testcase class.' self.hass = get_test_home_assistant() self.config = copy.deepcopy(VALID_CONFIG)
1,489,049,775,505,830,400
Initialize values for this testcase class.
tests/components/sensor/test_canary.py
setUp
27tech/home-assistant
python
def setUp(self): self.hass = get_test_home_assistant() self.config = copy.deepcopy(VALID_CONFIG)
def tearDown(self): 'Stop everything that was started.' self.hass.stop()
8,876,712,114,682,698,000
Stop everything that was started.
tests/components/sensor/test_canary.py
tearDown
27tech/home-assistant
python
def tearDown(self): self.hass.stop()
def test_setup_sensors(self): 'Test the sensor setup.' online_device_at_home = mock_device(20, 'Dining Room', True, 'Canary') offline_device_at_home = mock_device(21, 'Front Yard', False, 'Canary') online_device_at_work = mock_device(22, 'Office', True, 'Canary') self.hass.data[DATA_CANARY] = Mock() self.hass.data[DATA_CANARY].locations = [mock_location('Home', True, devices=[online_device_at_home, offline_device_at_home]), mock_location('Work', True, devices=[online_device_at_work])] canary.setup_platform(self.hass, self.config, self.add_entities, None) assert (6 == len(self.DEVICES))
5,771,754,367,131,772,000
Test the sensor setup.
tests/components/sensor/test_canary.py
test_setup_sensors
27tech/home-assistant
python
def test_setup_sensors(self): online_device_at_home = mock_device(20, 'Dining Room', True, 'Canary') offline_device_at_home = mock_device(21, 'Front Yard', False, 'Canary') online_device_at_work = mock_device(22, 'Office', True, 'Canary') self.hass.data[DATA_CANARY] = Mock() self.hass.data[DATA_CANARY].locations = [mock_location('Home', True, devices=[online_device_at_home, offline_device_at_home]), mock_location('Work', True, devices=[online_device_at_work])] canary.setup_platform(self.hass, self.config, self.add_entities, None) assert (6 == len(self.DEVICES))
def test_temperature_sensor(self): 'Test temperature sensor with fahrenheit.' device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home', False) data = Mock() data.get_reading.return_value = 21.1234 sensor = CanarySensor(data, SENSOR_TYPES[0], location, device) sensor.update() assert ('Home Family Room Temperature' == sensor.name) assert ('°C' == sensor.unit_of_measurement) assert (21.12 == sensor.state) assert ('mdi:thermometer' == sensor.icon)
2,299,848,690,376,426,500
Test temperature sensor with fahrenheit.
tests/components/sensor/test_canary.py
test_temperature_sensor
27tech/home-assistant
python
def test_temperature_sensor(self): device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home', False) data = Mock() data.get_reading.return_value = 21.1234 sensor = CanarySensor(data, SENSOR_TYPES[0], location, device) sensor.update() assert ('Home Family Room Temperature' == sensor.name) assert ('°C' == sensor.unit_of_measurement) assert (21.12 == sensor.state) assert ('mdi:thermometer' == sensor.icon)
def test_temperature_sensor_with_none_sensor_value(self): 'Test temperature sensor with a None sensor value.' device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home', False) data = Mock() data.get_reading.return_value = None sensor = CanarySensor(data, SENSOR_TYPES[0], location, device) sensor.update() assert (sensor.state is None)
4,995,280,553,888,205,000
Test temperature sensor with a None sensor value.
tests/components/sensor/test_canary.py
test_temperature_sensor_with_none_sensor_value
27tech/home-assistant
python
def test_temperature_sensor_with_none_sensor_value(self): device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home', False) data = Mock() data.get_reading.return_value = None sensor = CanarySensor(data, SENSOR_TYPES[0], location, device) sensor.update() assert (sensor.state is None)
def test_humidity_sensor(self): 'Test humidity sensor.' device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = 50.4567 sensor = CanarySensor(data, SENSOR_TYPES[1], location, device) sensor.update() assert ('Home Family Room Humidity' == sensor.name) assert ('%' == sensor.unit_of_measurement) assert (50.46 == sensor.state) assert ('mdi:water-percent' == sensor.icon)
845,884,128,519,718,300
Test humidity sensor.
tests/components/sensor/test_canary.py
test_humidity_sensor
27tech/home-assistant
python
def test_humidity_sensor(self): device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = 50.4567 sensor = CanarySensor(data, SENSOR_TYPES[1], location, device) sensor.update() assert ('Home Family Room Humidity' == sensor.name) assert ('%' == sensor.unit_of_measurement) assert (50.46 == sensor.state) assert ('mdi:water-percent' == sensor.icon)
def test_air_quality_sensor_with_very_abnormal_reading(self): 'Test air quality sensor.' device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = 0.4 sensor = CanarySensor(data, SENSOR_TYPES[2], location, device) sensor.update() assert ('Home Family Room Air Quality' == sensor.name) assert (sensor.unit_of_measurement is None) assert (0.4 == sensor.state) assert ('mdi:weather-windy' == sensor.icon) air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY] assert (STATE_AIR_QUALITY_VERY_ABNORMAL == air_quality)
1,296,633,011,677,636,000
Test air quality sensor.
tests/components/sensor/test_canary.py
test_air_quality_sensor_with_very_abnormal_reading
27tech/home-assistant
python
def test_air_quality_sensor_with_very_abnormal_reading(self): device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = 0.4 sensor = CanarySensor(data, SENSOR_TYPES[2], location, device) sensor.update() assert ('Home Family Room Air Quality' == sensor.name) assert (sensor.unit_of_measurement is None) assert (0.4 == sensor.state) assert ('mdi:weather-windy' == sensor.icon) air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY] assert (STATE_AIR_QUALITY_VERY_ABNORMAL == air_quality)
def test_air_quality_sensor_with_abnormal_reading(self): 'Test air quality sensor.' device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = 0.59 sensor = CanarySensor(data, SENSOR_TYPES[2], location, device) sensor.update() assert ('Home Family Room Air Quality' == sensor.name) assert (sensor.unit_of_measurement is None) assert (0.59 == sensor.state) assert ('mdi:weather-windy' == sensor.icon) air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY] assert (STATE_AIR_QUALITY_ABNORMAL == air_quality)
-5,240,649,530,815,184,000
Test air quality sensor.
tests/components/sensor/test_canary.py
test_air_quality_sensor_with_abnormal_reading
27tech/home-assistant
python
def test_air_quality_sensor_with_abnormal_reading(self): device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = 0.59 sensor = CanarySensor(data, SENSOR_TYPES[2], location, device) sensor.update() assert ('Home Family Room Air Quality' == sensor.name) assert (sensor.unit_of_measurement is None) assert (0.59 == sensor.state) assert ('mdi:weather-windy' == sensor.icon) air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY] assert (STATE_AIR_QUALITY_ABNORMAL == air_quality)
def test_air_quality_sensor_with_normal_reading(self): 'Test air quality sensor.' device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = 1.0 sensor = CanarySensor(data, SENSOR_TYPES[2], location, device) sensor.update() assert ('Home Family Room Air Quality' == sensor.name) assert (sensor.unit_of_measurement is None) assert (1.0 == sensor.state) assert ('mdi:weather-windy' == sensor.icon) air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY] assert (STATE_AIR_QUALITY_NORMAL == air_quality)
-8,526,312,091,169,359,000
Test air quality sensor.
tests/components/sensor/test_canary.py
test_air_quality_sensor_with_normal_reading
27tech/home-assistant
python
def test_air_quality_sensor_with_normal_reading(self): device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = 1.0 sensor = CanarySensor(data, SENSOR_TYPES[2], location, device) sensor.update() assert ('Home Family Room Air Quality' == sensor.name) assert (sensor.unit_of_measurement is None) assert (1.0 == sensor.state) assert ('mdi:weather-windy' == sensor.icon) air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY] assert (STATE_AIR_QUALITY_NORMAL == air_quality)
def test_air_quality_sensor_with_none_sensor_value(self): 'Test air quality sensor.' device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = None sensor = CanarySensor(data, SENSOR_TYPES[2], location, device) sensor.update() assert (sensor.state is None) assert (sensor.device_state_attributes is None)
2,885,458,907,634,710,000
Test air quality sensor.
tests/components/sensor/test_canary.py
test_air_quality_sensor_with_none_sensor_value
27tech/home-assistant
python
def test_air_quality_sensor_with_none_sensor_value(self): device = mock_device(10, 'Family Room', 'Canary') location = mock_location('Home') data = Mock() data.get_reading.return_value = None sensor = CanarySensor(data, SENSOR_TYPES[2], location, device) sensor.update() assert (sensor.state is None) assert (sensor.device_state_attributes is None)
def test_battery_sensor(self): 'Test battery sensor.' device = mock_device(10, 'Family Room', 'Canary Flex') location = mock_location('Home') data = Mock() data.get_reading.return_value = 70.4567 sensor = CanarySensor(data, SENSOR_TYPES[4], location, device) sensor.update() assert ('Home Family Room Battery' == sensor.name) assert ('%' == sensor.unit_of_measurement) assert (70.46 == sensor.state) assert ('mdi:battery-70' == sensor.icon)
-7,136,044,291,612,272,000
Test battery sensor.
tests/components/sensor/test_canary.py
test_battery_sensor
27tech/home-assistant
python
def test_battery_sensor(self): device = mock_device(10, 'Family Room', 'Canary Flex') location = mock_location('Home') data = Mock() data.get_reading.return_value = 70.4567 sensor = CanarySensor(data, SENSOR_TYPES[4], location, device) sensor.update() assert ('Home Family Room Battery' == sensor.name) assert ('%' == sensor.unit_of_measurement) assert (70.46 == sensor.state) assert ('mdi:battery-70' == sensor.icon)
def test_wifi_sensor(self): 'Test wifi sensor.' device = mock_device(10, 'Family Room', 'Canary Flex') location = mock_location('Home') data = Mock() data.get_reading.return_value = (- 57) sensor = CanarySensor(data, SENSOR_TYPES[3], location, device) sensor.update() assert ('Home Family Room Wifi' == sensor.name) assert ('dBm' == sensor.unit_of_measurement) assert ((- 57) == sensor.state) assert ('mdi:wifi' == sensor.icon)
6,827,206,662,562,437,000
Test wifi sensor.
tests/components/sensor/test_canary.py
test_wifi_sensor
27tech/home-assistant
python
def test_wifi_sensor(self): device = mock_device(10, 'Family Room', 'Canary Flex') location = mock_location('Home') data = Mock() data.get_reading.return_value = (- 57) sensor = CanarySensor(data, SENSOR_TYPES[3], location, device) sensor.update() assert ('Home Family Room Wifi' == sensor.name) assert ('dBm' == sensor.unit_of_measurement) assert ((- 57) == sensor.state) assert ('mdi:wifi' == sensor.icon)
def lock_change_receiver(): '\n A decorator for connecting receivers to signals that a lock has changed.\n\n @receiver(post_save, sender=MyModel)\n def signal_receiver(sender, **kwargs):\n ...\n\n ' def _decorator(func): LockCache.lock_change_receivers.append(func) return func return _decorator
6,489,382,657,107,884,000
A decorator for connecting receivers to signals that a lock has changed. @receiver(post_save, sender=MyModel) def signal_receiver(sender, **kwargs): ...
chroma_core/services/job_scheduler/lock_cache.py
lock_change_receiver
beevans/integrated-manager-for-lustre
python
def lock_change_receiver(): def _decorator(func): LockCache.lock_change_receivers.append(func) return func return _decorator
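The decorator simply appends the decorated function to a class-level list, mirroring a signal registry. A minimal standalone version of the pattern (all names are illustrative, not from the chroma_core source):

```python
class Registry:
    receivers = []

def on_change():
    def _decorator(func):
        Registry.receivers.append(func)  # register, then return func unchanged
        return func
    return _decorator

@on_change()
def log_change(**kwargs):
    print('lock changed:', kwargs)

for receiver in Registry.receivers:  # fire the "signal"
    receiver(lock_id=42)
```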
@asyncio.coroutine def async_setup_platform(hass, config, async_add_entities, discovery_info=None): 'Setup the Kodi platform.' host = config.get(CONF_HOST) port = config.get(CONF_PORT) if (host.startswith('http://') or host.startswith('https://')): host = host.lstrip('http://').lstrip('https://') _LOGGER.warning('Kodi host name should no longer contain http:// See updated definitions here: https://home-assistant.io/components/media_player.kodi/') entity = KodiDevice(hass, name=config.get(CONF_NAME), host=host, port=port, username=config.get(CONF_USERNAME), password=config.get(CONF_PASSWORD), turn_off_action=config.get(CONF_TURN_OFF_ACTION)) (yield from async_add_entities([entity], update_before_add=True))
-8,385,398,847,880,551,000
Setup the Kodi platform.
homeassistant/components/media_player/kodi.py
async_setup_platform
sbidoul/home-assistant
python
@asyncio.coroutine def async_setup_platform(hass, config, async_add_entities, discovery_info=None): host = config.get(CONF_HOST) port = config.get(CONF_PORT) if (host.startswith('http://') or host.startswith('https://')): host = host.lstrip('http://').lstrip('https://') _LOGGER.warning('Kodi host name should no longer contain http:// See updated definitions here: https://home-assistant.io/components/media_player.kodi/') entity = KodiDevice(hass, name=config.get(CONF_NAME), host=host, port=port, username=config.get(CONF_USERNAME), password=config.get(CONF_PASSWORD), turn_off_action=config.get(CONF_TURN_OFF_ACTION)) (yield from async_add_entities([entity], update_before_add=True))
def __init__(self, hass, name, host, port, username=None, password=None, turn_off_action=None): 'Initialize the Kodi device.' import jsonrpc_async self.hass = hass self._name = name kwargs = {'timeout': DEFAULT_TIMEOUT, 'session': async_get_clientsession(hass)} if (username is not None): kwargs['auth'] = aiohttp.BasicAuth(username, password) image_auth_string = '{}:{}@'.format(username, password) else: image_auth_string = '' self._http_url = 'http://{}:{}/jsonrpc'.format(host, port) self._image_url = 'http://{}{}:{}/image'.format(image_auth_string, host, port) self._server = jsonrpc_async.Server(self._http_url, **kwargs) self._turn_off_action = turn_off_action self._players = list() self._properties = None self._item = None self._app_properties = None
52,345,360,531,689,720
Initialize the Kodi device.
homeassistant/components/media_player/kodi.py
__init__
sbidoul/home-assistant
python
def __init__(self, hass, name, host, port, username=None, password=None, turn_off_action=None): import jsonrpc_async self.hass = hass self._name = name kwargs = {'timeout': DEFAULT_TIMEOUT, 'session': async_get_clientsession(hass)} if (username is not None): kwargs['auth'] = aiohttp.BasicAuth(username, password) image_auth_string = '{}:{}@'.format(username, password) else: image_auth_string = '' self._http_url = 'http://{}:{}/jsonrpc'.format(host, port) self._image_url = 'http://{}{}:{}/image'.format(image_auth_string, host, port) self._server = jsonrpc_async.Server(self._http_url, **kwargs) self._turn_off_action = turn_off_action self._players = list() self._properties = None self._item = None self._app_properties = None
@property def name(self): 'Return the name of the device.' return self._name
-4,231,536,673,663,769,600
Return the name of the device.
homeassistant/components/media_player/kodi.py
name
sbidoul/home-assistant
python
@property def name(self): return self._name
@asyncio.coroutine def _get_players(self): 'Return the active player objects or None.' import jsonrpc_async try: return (yield from self._server.Player.GetActivePlayers()) except jsonrpc_async.jsonrpc.TransportError: if (self._players is not None): _LOGGER.info('Unable to fetch kodi data') _LOGGER.debug('Unable to fetch kodi data', exc_info=True) return None
2,476,486,676,554,726,000
Return the active player objects or None.
homeassistant/components/media_player/kodi.py
_get_players
sbidoul/home-assistant
python
@asyncio.coroutine def _get_players(self): import jsonrpc_async try: return (yield from self._server.Player.GetActivePlayers()) except jsonrpc_async.jsonrpc.TransportError: if (self._players is not None): _LOGGER.info('Unable to fetch kodi data') _LOGGER.debug('Unable to fetch kodi data', exc_info=True) return None
@property def state(self): 'Return the state of the device.' if (self._players is None): return STATE_OFF if (len(self._players) == 0): return STATE_IDLE if ((self._properties['speed'] == 0) and (not self._properties['live'])): return STATE_PAUSED else: return STATE_PLAYING
2,635,478,583,098,425,300
Return the state of the device.
homeassistant/components/media_player/kodi.py
state
sbidoul/home-assistant
python
@property def state(self): if (self._players is None): return STATE_OFF if (len(self._players) == 0): return STATE_IDLE if ((self._properties['speed'] == 0) and (not self._properties['live'])): return STATE_PAUSED else: return STATE_PLAYING
@asyncio.coroutine def async_update(self): 'Retrieve latest state.' self._players = (yield from self._get_players()) if ((self._players is not None) and (len(self._players) > 0)): player_id = self._players[0]['playerid'] assert isinstance(player_id, int) self._properties = (yield from self._server.Player.GetProperties(player_id, ['time', 'totaltime', 'speed', 'live'])) self._item = (yield from self._server.Player.GetItem(player_id, ['title', 'file', 'uniqueid', 'thumbnail', 'artist']))['item'] self._app_properties = (yield from self._server.Application.GetProperties(['volume', 'muted'])) else: self._properties = None self._item = None self._app_properties = None
3,957,076,217,456,253,400
Retrieve latest state.
homeassistant/components/media_player/kodi.py
async_update
sbidoul/home-assistant
python
@asyncio.coroutine def async_update(self): self._players = (yield from self._get_players()) if ((self._players is not None) and (len(self._players) > 0)): player_id = self._players[0]['playerid'] assert isinstance(player_id, int) self._properties = (yield from self._server.Player.GetProperties(player_id, ['time', 'totaltime', 'speed', 'live'])) self._item = (yield from self._server.Player.GetItem(player_id, ['title', 'file', 'uniqueid', 'thumbnail', 'artist']))['item'] self._app_properties = (yield from self._server.Application.GetProperties(['volume', 'muted'])) else: self._properties = None self._item = None self._app_properties = None
@property def volume_level(self): 'Volume level of the media player (0..1).' if (self._app_properties is not None): return (self._app_properties['volume'] / 100.0)
-3,302,400,317,399,943,700
Volume level of the media player (0..1).
homeassistant/components/media_player/kodi.py
volume_level
sbidoul/home-assistant
python
@property def volume_level(self): if (self._app_properties is not None): return (self._app_properties['volume'] / 100.0)
@property def is_volume_muted(self): 'Boolean if volume is currently muted.' if (self._app_properties is not None): return self._app_properties['muted']
-8,554,226,172,136,375,000
Boolean if volume is currently muted.
homeassistant/components/media_player/kodi.py
is_volume_muted
sbidoul/home-assistant
python
@property def is_volume_muted(self): if (self._app_properties is not None): return self._app_properties['muted']
@property def media_content_id(self): 'Content ID of current playing media.' if (self._item is not None): return self._item.get('uniqueid', None)
595,466,316,143,477,800
Content ID of current playing media.
homeassistant/components/media_player/kodi.py
media_content_id
sbidoul/home-assistant
python
@property def media_content_id(self): if (self._item is not None): return self._item.get('uniqueid', None)
@property def media_content_type(self): 'Content type of current playing media.' if ((self._players is not None) and (len(self._players) > 0)): return self._players[0]['type']
5,483,545,840,153,864,000
Content type of current playing media.
homeassistant/components/media_player/kodi.py
media_content_type
sbidoul/home-assistant
python
@property def media_content_type(self): if ((self._players is not None) and (len(self._players) > 0)): return self._players[0]['type']
@property def media_duration(self): 'Duration of current playing media in seconds.' if ((self._properties is not None) and (not self._properties['live'])): total_time = self._properties['totaltime'] return (((total_time['hours'] * 3600) + (total_time['minutes'] * 60)) + total_time['seconds'])
5,925,154,130,673,134,000
Duration of current playing media in seconds.
homeassistant/components/media_player/kodi.py
media_duration
sbidoul/home-assistant
python
@property def media_duration(self): if ((self._properties is not None) and (not self._properties['live'])): total_time = self._properties['totaltime'] return (((total_time['hours'] * 3600) + (total_time['minutes'] * 60)) + total_time['seconds'])
@property def media_image_url(self): 'Image url of current playing media.' if (self._item is None): return None url_components = urllib.parse.urlparse(self._item['thumbnail']) if (url_components.scheme == 'image'): return '{}/{}'.format(self._image_url, urllib.parse.quote_plus(self._item['thumbnail']))
-1,714,821,105,424,071,200
Image url of current playing media.
homeassistant/components/media_player/kodi.py
media_image_url
sbidoul/home-assistant
python
@property def media_image_url(self): if (self._item is None): return None url_components = urllib.parse.urlparse(self._item['thumbnail']) if (url_components.scheme == 'image'): return '{}/{}'.format(self._image_url, urllib.parse.quote_plus(self._item['thumbnail']))
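Kodi serves artwork through an `image://` virtual path; the property above percent-encodes that whole URI with quote_plus and appends it to the HTTP image endpoint. A standalone illustration (the endpoint and thumbnail values are made up):

```python
import urllib.parse

image_url = 'http://127.0.0.1:8080/image'           # illustrative endpoint
thumbnail = 'image://video%40example%2fmovie.mkv/'  # illustrative Kodi URI

if urllib.parse.urlparse(thumbnail).scheme == 'image':
    print('{}/{}'.format(image_url, urllib.parse.quote_plus(thumbnail)))
```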
@property def media_title(self): 'Title of current playing media.' if (self._item is not None): return self._item.get('title', self._item.get('label', self._item.get('file', 'unknown')))
-5,467,127,369,238,786,000
Title of current playing media.
homeassistant/components/media_player/kodi.py
media_title
sbidoul/home-assistant
python
@property def media_title(self): if (self._item is not None): return self._item.get('title', self._item.get('label', self._item.get('file', 'unknown')))
@property def supported_media_commands(self): 'Flag of media commands that are supported.' supported_media_commands = SUPPORT_KODI if (self._turn_off_action in TURN_OFF_ACTION): supported_media_commands |= SUPPORT_TURN_OFF return supported_media_commands
-2,403,220,702,515,875,300
Flag of media commands that are supported.
homeassistant/components/media_player/kodi.py
supported_media_commands
sbidoul/home-assistant
python
@property def supported_media_commands(self): supported_media_commands = SUPPORT_KODI if (self._turn_off_action in TURN_OFF_ACTION): supported_media_commands |= SUPPORT_TURN_OFF return supported_media_commands
@asyncio.coroutine def async_turn_off(self): 'Execute turn_off_action to turn off media player.' if (self._turn_off_action == 'quit'): (yield from self._server.Application.Quit()) elif (self._turn_off_action == 'hibernate'): (yield from self._server.System.Hibernate()) elif (self._turn_off_action == 'suspend'): (yield from self._server.System.Suspend()) elif (self._turn_off_action == 'reboot'): (yield from self._server.System.Reboot()) elif (self._turn_off_action == 'shutdown'): (yield from self._server.System.Shutdown()) else: _LOGGER.warning('turn_off requested but turn_off_action is none')
4,009,115,081,220,730,400
Execute turn_off_action to turn off media player.
homeassistant/components/media_player/kodi.py
async_turn_off
sbidoul/home-assistant
python
@asyncio.coroutine def async_turn_off(self): if (self._turn_off_action == 'quit'): (yield from self._server.Application.Quit()) elif (self._turn_off_action == 'hibernate'): (yield from self._server.System.Hibernate()) elif (self._turn_off_action == 'suspend'): (yield from self._server.System.Suspend()) elif (self._turn_off_action == 'reboot'): (yield from self._server.System.Reboot()) elif (self._turn_off_action == 'shutdown'): (yield from self._server.System.Shutdown()) else: _LOGGER.warning('turn_off requested but turn_off_action is none')
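The elif ladder above maps each configured action string onto one JSON-RPC call. The same dispatch can be kept in a table, which is easier to hold in sync with the TURN_OFF_ACTION list; a sketch (the JSON-RPC method names are copied from the calls above, the lookup helper is illustrative):

```python
TURN_OFF_METHODS = {
    'quit': 'Application.Quit',
    'hibernate': 'System.Hibernate',
    'suspend': 'System.Suspend',
    'reboot': 'System.Reboot',
    'shutdown': 'System.Shutdown',
}

def turn_off_method(action):
    # Returns the JSON-RPC method name, or None for unknown actions.
    return TURN_OFF_METHODS.get(action)

print(turn_off_method('suspend'))  # System.Suspend
```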
@asyncio.coroutine def async_volume_up(self): 'Volume up the media player.' assert ((yield from self._server.Input.ExecuteAction('volumeup')) == 'OK')
3,553,895,284,560,768,500
Volume up the media player.
homeassistant/components/media_player/kodi.py
async_volume_up
sbidoul/home-assistant
python
@asyncio.coroutine def async_volume_up(self): assert ((yield from self._server.Input.ExecuteAction('volumeup')) == 'OK')
@asyncio.coroutine def async_volume_down(self): 'Volume down the media player.' assert ((yield from self._server.Input.ExecuteAction('volumedown')) == 'OK')
-4,431,140,225,079,229,400
Volume down the media player.
homeassistant/components/media_player/kodi.py
async_volume_down
sbidoul/home-assistant
python
@asyncio.coroutine def async_volume_down(self): assert ((yield from self._server.Input.ExecuteAction('volumedown')) == 'OK')
def async_set_volume_level(self, volume): 'Set volume level, range 0..1.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self._server.Application.SetVolume(int((volume * 100)))
8,457,646,432,285,553,000
Set volume level, range 0..1. This method must be run in the event loop and returns a coroutine.
homeassistant/components/media_player/kodi.py
async_set_volume_level
sbidoul/home-assistant
python
def async_set_volume_level(self, volume): return self._server.Application.SetVolume(int((volume * 100)))
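volume_level reports Kodi's 0..100 volume on Home Assistant's 0..1 scale, and this setter converts back with int(volume * 100), which truncates rather than rounds. A tiny round trip of the two conversions:

```python
def ha_to_kodi(volume):
    return int(volume * 100)  # truncates, as in the setter above

def kodi_to_ha(volume):
    return volume / 100.0     # as in the volume_level property

assert ha_to_kodi(0.5) == 50
assert kodi_to_ha(50) == 0.5
```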
def async_mute_volume(self, mute): 'Mute (true) or unmute (false) media player.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self._server.Application.SetMute(mute)
97,019,771,065,311,620
Mute (true) or unmute (false) media player. This method must be run in the event loop and returns a coroutine.
homeassistant/components/media_player/kodi.py
async_mute_volume
sbidoul/home-assistant
python
def async_mute_volume(self, mute): 'Mute (true) or unmute (false) media player.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self._server.Application.SetMute(mute)
@asyncio.coroutine def async_set_play_state(self, state): 'Helper method for play/pause/toggle.' players = (yield from self._get_players()) if (len(players) != 0): (yield from self._server.Player.PlayPause(players[0]['playerid'], state))
6,011,301,021,335,337,000
Helper method for play/pause/toggle.
homeassistant/components/media_player/kodi.py
async_set_play_state
sbidoul/home-assistant
python
@asyncio.coroutine def async_set_play_state(self, state): players = (yield from self._get_players()) if (len(players) != 0): (yield from self._server.Player.PlayPause(players[0]['playerid'], state))
def async_media_play_pause(self): 'Pause media on media player.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self.async_set_play_state('toggle')
-7,341,939,771,844,527,000
Pause media on media player. This method must be run in the event loop and returns a coroutine.
homeassistant/components/media_player/kodi.py
async_media_play_pause
sbidoul/home-assistant
python
def async_media_play_pause(self): 'Pause media on media player.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self.async_set_play_state('toggle')
def async_media_play(self): 'Play media.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self.async_set_play_state(True)
-2,520,068,111,156,763,600
Play media. This method must be run in the event loop and returns a coroutine.
homeassistant/components/media_player/kodi.py
async_media_play
sbidoul/home-assistant
python
def async_media_play(self): 'Play media.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self.async_set_play_state(True)
def async_media_pause(self): 'Pause the media player.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self.async_set_play_state(False)
6,682,939,788,905,580,000
Pause the media player. This method must be run in the event loop and returns a coroutine.
homeassistant/components/media_player/kodi.py
async_media_pause
sbidoul/home-assistant
python
def async_media_pause(self): 'Pause the media player.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self.async_set_play_state(False)
@asyncio.coroutine def async_media_stop(self): 'Stop the media player.' players = (yield from self._get_players()) if (len(players) != 0): (yield from self._server.Player.Stop(players[0]['playerid']))
-6,347,300,543,494,530,000
Stop the media player.
homeassistant/components/media_player/kodi.py
async_media_stop
sbidoul/home-assistant
python
@asyncio.coroutine def async_media_stop(self): players = (yield from self._get_players()) if (len(players) != 0): (yield from self._server.Player.Stop(players[0]['playerid']))
@asyncio.coroutine def _goto(self, direction): 'Helper method used for previous/next track.' players = (yield from self._get_players()) if (len(players) != 0): if (direction == 'previous'): (yield from self._server.Player.Seek(players[0]['playerid'], 0)) (yield from self._server.Player.GoTo(players[0]['playerid'], direction))
-3,141,966,149,428,018,700
Helper method used for previous/next track.
homeassistant/components/media_player/kodi.py
_goto
sbidoul/home-assistant
python
@asyncio.coroutine def _goto(self, direction): players = (yield from self._get_players()) if (len(players) != 0): if (direction == 'previous'): (yield from self._server.Player.Seek(players[0]['playerid'], 0)) (yield from self._server.Player.GoTo(players[0]['playerid'], direction))
def async_media_next_track(self): 'Send next track command.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self._goto('next')
4,079,830,510,610,262,000
Send next track command. This method must be run in the event loop and returns a coroutine.
homeassistant/components/media_player/kodi.py
async_media_next_track
sbidoul/home-assistant
python
def async_media_next_track(self): 'Send next track command.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self._goto('next')
def async_media_previous_track(self): 'Send previous track command.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self._goto('previous')
-763,769,962,323,606,400
Send previous track command. This method must be run in the event loop and returns a coroutine.
homeassistant/components/media_player/kodi.py
async_media_previous_track
sbidoul/home-assistant
python
def async_media_previous_track(self): 'Send previous track command.\n\n This method must be run in the event loop and returns a coroutine.\n ' return self._goto('previous')
@asyncio.coroutine def async_media_seek(self, position): 'Send seek command.' players = (yield from self._get_players()) time = {} time['milliseconds'] = int(((position % 1) * 1000)) position = int(position) time['seconds'] = int((position % 60)) position /= 60 time['minutes'] = int((position % 60)) position /= 60 time['hours'] = int(position) if (len(players) != 0): (yield from self._server.Player.Seek(players[0]['playerid'], time))
-2,504,506,522,176,259,600
Send seek command.
homeassistant/components/media_player/kodi.py
async_media_seek
sbidoul/home-assistant
python
@asyncio.coroutine def async_media_seek(self, position): players = (yield from self._get_players()) time = {} time['milliseconds'] = int(((position % 1) * 1000)) position = int(position) time['seconds'] = int((position % 60)) position /= 60 time['minutes'] = int((position % 60)) position /= 60 time['hours'] = int(position) if (len(players) != 0): (yield from self._server.Player.Seek(players[0]['playerid'], time))
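The seek handler above decomposes a float position in seconds into the hours/minutes/seconds/milliseconds dict that Kodi's JSON-RPC Player.Seek expects. A self-contained sketch of the same arithmetic, using floor division where the original wraps true division in int():

def to_kodi_time(position):
    # position is a float number of seconds, e.g. 3725.5 == 1h 2m 5.5s
    time = {'milliseconds': int((position % 1) * 1000)}
    position = int(position)
    time['seconds'] = position % 60
    position //= 60
    time['minutes'] = position % 60
    position //= 60
    time['hours'] = position
    return time

assert to_kodi_time(3725.5) == {'hours': 1, 'minutes': 2, 'seconds': 5, 'milliseconds': 500}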
def async_play_media(self, media_type, media_id, **kwargs): 'Send the play_media command to the media player.\n\n This method must be run in the event loop and returns a coroutine.\n ' if (media_type == 'CHANNEL'): return self._server.Player.Open({'item': {'channelid': int(media_id)}}) else: return self._server.Player.Open({'item': {'file': str(media_id)}})
1,485,067,972,021,763,000
Send the play_media command to the media player. This method must be run in the event loop and returns a coroutine.
homeassistant/components/media_player/kodi.py
async_play_media
sbidoul/home-assistant
python
def async_play_media(self, media_type, media_id, **kwargs): 'Send the play_media command to the media player.\n\n This method must be run in the event loop and returns a coroutine.\n ' if (media_type == 'CHANNEL'): return self._server.Player.Open({'item': {'channelid': int(media_id)}}) else: return self._server.Player.Open({'item': {'file': str(media_id)}})
def gradients(output_node, node_list, scheduler_policy=None): 'Take gradient of output node with respect to each node in node_list.\n\n Parameters\n ----------\n output_node: output node that we are taking derivative of.\n node_list: list of nodes that we are taking derivative wrt.\n\n Returns\n -------\n A list of gradient values, one for each node in node_list respectively.\n\n ' from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [OnesLike.oneslike_op(output_node)] node_to_output_grad = {} reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) for i in range(len(node.inputs)): if (node.inputs[i] not in node_to_output_grads_list): node_to_output_grads_list[node.inputs[i]] = [] node_to_output_grads_list[node.inputs[i]].append(input_grads_list[i]) if (scheduler_policy == 'swap'): for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] return grad_node_list
-7,271,513,188,193,601,000
Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively.
python/athena/gpu_ops/StreamExecutor.py
gradients
DMALab/TSplit
python
def gradients(output_node, node_list, scheduler_policy=None): 'Take gradient of output node with respect to each node in node_list.\n\n Parameters\n ----------\n output_node: output node that we are taking derivative of.\n node_list: list of nodes that we are taking derivative wrt.\n\n Returns\n -------\n A list of gradient values, one for each node in node_list respectively.\n\n ' from . import OnesLike node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [OnesLike.oneslike_op(output_node)] node_to_output_grad = {} reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) for i in range(len(node.inputs)): if (node.inputs[i] not in node_to_output_grads_list): node_to_output_grads_list[node.inputs[i]] = [] node_to_output_grads_list[node.inputs[i]].append(input_grads_list[i]) if (scheduler_policy == 'swap'): for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [node_to_output_grad[node] for node in node_list] return grad_node_list
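The gradient builder above is reverse-mode autodiff: walk the graph in reverse topological order and, for each node, sum the partial gradients contributed by every consumer before propagating to its inputs. A tiny scalar analogue of that accumulation pattern (the Node class and ops here are hypothetical stand-ins for the Athena graph types, not its real API):

class Node(object):
    def __init__(self, value=None, op=None, inputs=()):
        self.value, self.op, self.inputs = value, op, list(inputs)

def add(a, b): return Node(a.value + b.value, 'add', [a, b])
def mul(a, b): return Node(a.value * b.value, 'mul', [a, b])

def partials(node, out_grad):
    # local gradient of each input, given the output gradient
    if node.op == 'add':
        return [out_grad, out_grad]
    a, b = node.inputs                      # 'mul'
    return [out_grad * b.value, out_grad * a.value]

def grads(output, wrt_list):
    contribs = {output: [1.0]}              # dL/dL = 1, like oneslike_op
    order, seen = [], set()
    def dfs(n):
        if n in seen: return
        seen.add(n)
        for i in n.inputs: dfs(i)
        order.append(n)
    dfs(output)
    total = {}
    for node in reversed(order):            # reverse topological order
        g = sum(contribs[node])             # like sum_node_list
        total[node] = g
        if node.op is None:                 # leaf: nothing to propagate
            continue
        for inp, pg in zip(node.inputs, partials(node, g)):
            contribs.setdefault(inp, []).append(pg)
    return [total[n] for n in wrt_list]

x, y = Node(3.0), Node(4.0)
z = add(mul(x, y), x)                       # z = x*y + x
assert grads(z, [x, y]) == [5.0, 3.0]       # dz/dx = y + 1, dz/dy = x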
def distributed_gradients(output_node, node_list, scheduler_policy=None): 'Take gradient of output node with respect to each node in node_list.\n\n Parameters\n ----------\n output_node: output node that we are taking derivative of.\n node_list: list of nodes that we are taking derivative wrt.\n\n Returns\n -------\n A list of gradient values, one for each node in node_list respectively.\n\n ' from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) for i in range(len(node.inputs)): if (node.inputs[i] not in node_to_output_grads_list): node_to_output_grads_list[node.inputs[i]] = [] node_to_output_grads_list[node.inputs[i]].append(input_grads_list[i]) if (scheduler_policy == 'swap'): for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list
-8,072,602,044,061,928,000
Take gradient of output node with respect to each node in node_list. Parameters ---------- output_node: output node that we are taking derivative of. node_list: list of nodes that we are taking derivative wrt. Returns ------- A list of gradient values, one for each node in node_list respectively.
python/athena/gpu_ops/StreamExecutor.py
distributed_gradients
DMALab/TSplit
python
def distributed_gradients(output_node, node_list, scheduler_policy=None): 'Take gradient of output node with respect to each node in node_list.\n\n Parameters\n ----------\n output_node: output node that we are taking derivative of.\n node_list: list of nodes that we are taking derivative wrt.\n\n Returns\n -------\n A list of gradient values, one for each node in node_list respectively.\n\n ' from .OnesLike import oneslike_op node_to_output_grads_list = {} node_to_output_grads_list[output_node] = [oneslike_op(output_node)] node_to_output_grad = {} reverse_topo_order = reversed(find_topo_sort([output_node])) for node in reverse_topo_order: output_grad = sum_node_list(node_to_output_grads_list[node]) node_to_output_grad[node] = output_grad input_grads_list = node.op.gradient(node, output_grad) for i in range(len(node.inputs)): if (node.inputs[i] not in node_to_output_grads_list): node_to_output_grads_list[node.inputs[i]] = [] node_to_output_grads_list[node.inputs[i]].append(input_grads_list[i]) if (scheduler_policy == 'swap'): for node in node_list: if node.swap: node_to_output_grad[node].swap = True grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list] return grad_node_list
def find_topo_sort(node_list): 'Given a list of nodes, return a topo ordering of nodes ending in them.\n\n A simple algorithm is to do a post-order DFS traversal on the given nodes,\n going backwards based on input edges. Since a node is added to the ordering\n after all its predecessors are traversed due to post-order DFS, we get a\n topological sort.\n\n ' visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order
-6,221,163,888,668,146,000
Given a list of nodes, return a topo ordering of nodes ending in them. A simple algorithm is to do a post-order DFS traversal on the given nodes, going backwards based on input edges. Since a node is added to the ordering after all its predecessors are traversed due to post-order DFS, we get a topological sort.
python/athena/gpu_ops/StreamExecutor.py
find_topo_sort
DMALab/TSplit
python
def find_topo_sort(node_list): 'Given a list of nodes, return a topo ordering of nodes ending in them.\n\n A simple algorithm is to do a post-order DFS traversal on the given nodes,\n going backwards based on input edges. Since a node is added to the ordering\n after all its predecessors are traversed due to post-order DFS, we get a\n topological sort.\n\n ' visited = set() topo_order = [] for node in node_list: topo_sort_dfs(node, visited, topo_order) return topo_order
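To see the ordering guarantee concretely, here is the same post-order DFS inlined with a minimal stand-in node class so the snippet runs on its own; every node ends up after all of its inputs:

class N(object):
    def __init__(self, name, inputs=()):
        self.name, self.inputs = name, list(inputs)

a, b = N('a'), N('b')
c = N('c', [a, b])
d = N('d', [c, a])          # diamond: d depends on a both directly and via c

def topo(node_list):
    visited, order = set(), []
    def dfs(node):
        if node in visited: return
        visited.add(node)
        for n in node.inputs: dfs(n)
        order.append(node)  # appended only after all of its inputs
    for node in node_list: dfs(node)
    return order

names = [n.name for n in topo([d])]
assert names == ['a', 'b', 'c', 'd']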
def topo_sort_dfs(node, visited, topo_order): 'Post-order DFS' if (node in visited): return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node)
-1,676,523,686,901,708,000
Post-order DFS
python/athena/gpu_ops/StreamExecutor.py
topo_sort_dfs
DMALab/TSplit
python
def topo_sort_dfs(node, visited, topo_order): if (node in visited): return visited.add(node) for n in node.inputs: topo_sort_dfs(n, visited, topo_order) topo_order.append(node)
def sum_node_list(node_list): 'Custom sum function to avoid the redundant nodes created by the built-in sum().' from operator import add from functools import reduce return reduce(add, node_list)
-2,496,900,066,984,713,700
Custom sum function to avoid the redundant nodes created by the built-in sum().
python/athena/gpu_ops/StreamExecutor.py
sum_node_list
DMALab/TSplit
python
def sum_node_list(node_list): from operator import add from functools import reduce return reduce(add, node_list)
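reduce(add, ...) matters because the builtin sum() seeds its accumulator with the integer 0, so its first step is 0 + node, which on a graph type either fails or manufactures a spurious extra node. A toy overloaded class makes the difference visible (Sym is purely illustrative):

from functools import reduce
from operator import add

class Sym(object):
    def __init__(self, expr): self.expr = expr
    def __add__(self, other): return Sym('(%s + %s)' % (self.expr, other.expr))
    def __radd__(self, other): return Sym('(%s + %s)' % (other, self.expr))

nodes = [Sym('a'), Sym('b'), Sym('c')]
assert reduce(add, nodes).expr == '((a + b) + c)'
assert sum(nodes).expr == '(((0 + a) + b) + c)'   # extra node from the 0 seed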
def broadcast_rule(shape_a, shape_b): 'Return output shape of broadcast shape_a, shape_b.\n e.g. broadcast_rule((3,2), (4,3,2))\n returns output_shape = (4,3,2)\n\n Check out explanations and more examples at\n https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html\n http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/\n ' assert isinstance(shape_a, tuple) assert isinstance(shape_b, tuple) if (len(shape_a) > len(shape_b)): (longer_shape, shorter_shape) = (shape_a, shape_b) else: (longer_shape, shorter_shape) = (shape_b, shape_a) len_diff = (len(longer_shape) - len(shorter_shape)) for i in range(len_diff): shorter_shape = ((1,) + shorter_shape) assert (len(shorter_shape) == len(longer_shape)) output_shape = list(longer_shape) for i in range(len(output_shape)): assert ((shorter_shape[i] == longer_shape[i]) or (shorter_shape[i] == 1) or (longer_shape[i] == 1)) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
-5,697,759,041,333,517,000
Return output shape of broadcast shape_a, shape_b. e.g. broadcast_rule((3,2), (4,3,2)) returns output_shape = (4,3,2) Check out explanations and more examples at https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/
python/athena/gpu_ops/StreamExecutor.py
broadcast_rule
DMALab/TSplit
python
def broadcast_rule(shape_a, shape_b): 'Return output shape of broadcast shape_a, shape_b.\n e.g. broadcast_rule((3,2), (4,3,2))\n returns output_shape = (4,3,2)\n\n Check out explanations and more examples at\n https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html\n http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/\n ' assert isinstance(shape_a, tuple) assert isinstance(shape_b, tuple) if (len(shape_a) > len(shape_b)): (longer_shape, shorter_shape) = (shape_a, shape_b) else: (longer_shape, shorter_shape) = (shape_b, shape_a) len_diff = (len(longer_shape) - len(shorter_shape)) for i in range(len_diff): shorter_shape = ((1,) + shorter_shape) assert (len(shorter_shape) == len(longer_shape)) output_shape = list(longer_shape) for i in range(len(output_shape)): assert ((shorter_shape[i] == longer_shape[i]) or (shorter_shape[i] == 1) or (longer_shape[i] == 1)) output_shape[i] = max(shorter_shape[i], longer_shape[i]) return tuple(output_shape)
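A few worked cases of the rule, assuming broadcast_rule above is in scope; the shorter shape is left-padded with 1s, then each size must match its partner or be 1:

assert broadcast_rule((3, 2), (4, 3, 2)) == (4, 3, 2)   # (3,2) padded to (1,3,2)
assert broadcast_rule((5, 1, 3), (4, 3)) == (5, 4, 3)   # 1s stretch to the other size
assert broadcast_rule((1,), (7, 2)) == (7, 2)
# broadcast_rule((3, 2), (3, 3)) fails its assert: 2 vs 3, and neither is 1.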
def __init__(self, eval_node_list, ctx=None, stream=None, policy=None): '\n Parameters\n ----------\n eval_node_list: list of nodes whose values need to be computed.\n ctx: runtime DLContext, default is None which means np.ndarray on cpu\n topo_order: list of nodes in topological order\n node_to_shape_map: dict from node to shape of the node\n node_to_arr_map: dict from node to ndarray.NDArray allocated for node\n feed_shapes: shapes of feed_dict from last run(...)\n ' self.eval_node_list = eval_node_list self.ctx = ctx if (stream is None): self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if (self.policy == 'swap'): self.swap_queue = []
6,018,590,634,589,362,000
Parameters ---------- eval_node_list: list of nodes whose values need to be computed. ctx: runtime DLContext, default is None which means np.ndarray on cpu topo_order: list of nodes in topological order node_to_shape_map: dict from node to shape of the node node_to_arr_map: dict from node to ndarray.NDArray allocated for node feed_shapes: shapes of feed_dict from last run(...)
python/athena/gpu_ops/StreamExecutor.py
__init__
DMALab/TSplit
python
def __init__(self, eval_node_list, ctx=None, stream=None, policy=None): '\n Parameters\n ----------\n eval_node_list: list of nodes whose values need to be computed.\n ctx: runtime DLContext, default is None which means np.ndarray on cpu\n topo_order: list of nodes in topological order\n node_to_shape_map: dict from node to shape of the node\n node_to_arr_map: dict from node to ndarray.NDArray allocated for node\n feed_shapes: shapes of feed_dict from last run(...)\n ' self.eval_node_list = eval_node_list self.ctx = ctx if (stream is None): self.stream = create_stream_handle(ctx) else: self.stream = stream self.stream.sync() self.topo_order = find_topo_sort(self.eval_node_list) self.node_to_shape_map = None self.node_to_arr_map = None self.feed_shapes = None self.policy = policy if (self.policy == 'swap'): self.swap_queue = []
def infer_shape(self, feed_shapes): 'Given shapes of feed_dict nodes, infer shape for all nodes in graph.\n\n Implementation note:\n Iteratively calls node.op.infer_shape to infer shapes.\n Node shapes stored in self.node_to_shape_map.\n\n Parameters\n ----------\n feed_shapes: node->shapes mapping for feed_dict nodes.\n ' self.node_to_shape_map = {} for node in self.topo_order: if (node in feed_shapes): self.node_to_shape_map[node] = feed_shapes[node] else: input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape(node, input_shapes)
7,062,469,075,092,815,000
Given shapes of feed_dict nodes, infer shape for all nodes in graph. Implementation note: Iteratively calls node.op.infer_shape to infer shapes. Node shapes stored in self.node_to_shape_map. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes.
python/athena/gpu_ops/StreamExecutor.py
infer_shape
DMALab/TSplit
python
def infer_shape(self, feed_shapes): 'Given shapes of feed_dict nodes, infer shape for all nodes in graph.\n\n Implementation note:\n Iteratively calls node.op.infer_shape to infer shapes.\n Node shapes stored in self.node_to_shape_map.\n\n Parameters\n ----------\n feed_shapes: node->shapes mapping for feed_dict nodes.\n ' self.node_to_shape_map = {} for node in self.topo_order: if (node in feed_shapes): self.node_to_shape_map[node] = feed_shapes[node] else: input_shapes = [self.node_to_shape_map[n] for n in node.inputs] self.node_to_shape_map[node] = node.op.infer_shape(node, input_shapes)
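The shape-inference loop relies only on the invariant that a node's inputs precede it in topo_order, so their shapes are already in the map when needed. A toy standalone version with a hypothetical inference function in place of node.op.infer_shape:

def infer_shapes(topo_order, inputs_of, feed_shapes, infer):
    shape_map = dict(feed_shapes)
    for node in topo_order:
        if node not in shape_map:
            shape_map[node] = infer(node, [shape_map[i] for i in inputs_of[node]])
    return shape_map

inputs_of = {'x': [], 'W': [], 'y': ['x', 'W']}
matmul = lambda node, shapes: (shapes[0][0], shapes[1][1])   # y = x @ W
shapes = infer_shapes(['x', 'W', 'y'], inputs_of,
                      {'x': (32, 784), 'W': (784, 10)}, matmul)
assert shapes['y'] == (32, 10)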
def memory_plan(self, feed_shapes): 'Allocates ndarray.NDArray for every node except feed_dict nodes.\n\n Implementation note:\n Option 1: Alloc a ndarray.NDArray per node that persists across run()\n Option 2: Implement a memory pool to reuse memory for nodes of same\n shapes. See Lecture 7 for more details.\n\n For both options, self.node_to_arr_map stores node->NDArray mapping to\n allow mapping to persist across multiple executor.run().\n\n Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray.\n\n Parameters\n ----------\n feed_shapes: node->shapes mapping for feed_dict nodes.\n ' assert (self.ctx is not None) self.node_to_arr_map = {} for (node, shape) in self.node_to_shape_map.items(): if (self.policy == 'swap'): if (not node.swap): self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) elif (self.policy == 'vdnn'): self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)
4,789,048,243,198,603,000
Allocates ndarray.NDArray for every node except feed_dict nodes. Implementation note: Option 1: Alloc a ndarray.NDArray per node that persists across run() Option 2: Implement a memory pool to reuse memory for nodes of same shapes. See Lecture 7 for more details. For both options, self.node_to_arr_map stores node->NDArray mapping to allow mapping to persist across multiple executor.run(). Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray. Parameters ---------- feed_shapes: node->shapes mapping for feed_dict nodes.
python/athena/gpu_ops/StreamExecutor.py
memory_plan
DMALab/TSplit
python
def memory_plan(self, feed_shapes): 'Allocates ndarray.NDArray for every node except feed_dict nodes.\n\n Implementation note:\n Option 1: Alloc a ndarray.NDArray per node that persists across run()\n Option 2: Implement a memory pool to reuse memory for nodes of same\n shapes. See Lecture 7 for more details.\n\n For both options, self.node_to_arr_map stores node->NDArray mapping to\n allow mapping to persist across multiple executor.run().\n\n Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray.\n\n Parameters\n ----------\n feed_shapes: node->shapes mapping for feed_dict nodes.\n ' assert (self.ctx is not None) self.node_to_arr_map = {} for (node, shape) in self.node_to_shape_map.items(): if (self.policy == 'swap'): if (not node.swap): self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx) elif (self.policy == 'vdnn'): self.node_to_arr_map[node] = np.empty(shape) else: self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)
def run(self, feed_dict, convert_to_numpy_ret_vals=False): '\n Parameters\n ----------\n feed_dict: a dictionary of node->np.ndarray supplied by user.\n convert_to_numpy_ret_vals: whether to convert ret vals to np.array\n\n Returns\n -------\n A list of values for nodes in eval_node_list. NDArray or np.ndarray.\n ' def are_feed_shapes_equal(sa, sb): if ((not isinstance(sa, dict)) or (not isinstance(sb, dict))): return False unmatched_item = (set(sa.items()) ^ set(sb.items())) return (len(unmatched_item) == 0) use_numpy = (self.ctx is None) node_to_val_map = {} for (node, value) in feed_dict.items(): if use_numpy: assert isinstance(value, np.ndarray) node_to_val_map[node] = value elif isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, 'feed_dict value type not supported' feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if (node in node_to_val_map): continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if ((not use_numpy) and convert_to_numpy_ret_vals): return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list]
2,019,878,336,962,382,000
Parameters ---------- feed_dict: a dictionary of node->np.ndarray supplied by user. convert_to_numpy_ret_vals: whether to convert ret vals to np.array Returns ------- A list of values for nodes in eval_node_list. NDArray or np.ndarray.
python/athena/gpu_ops/StreamExecutor.py
run
DMALab/TSplit
python
def run(self, feed_dict, convert_to_numpy_ret_vals=False): '\n Parameters\n ----------\n feed_dict: a dictionary of node->np.ndarray supplied by user.\n convert_to_numpy_ret_vals: whether to convert ret vals to np.array\n\n Returns\n -------\n A list of values for nodes in eval_node_list. NDArray or np.ndarray.\n ' def are_feed_shapes_equal(sa, sb): if ((not isinstance(sa, dict)) or (not isinstance(sb, dict))): return False unmatched_item = (set(sa.items()) ^ set(sb.items())) return (len(unmatched_item) == 0) use_numpy = (self.ctx is None) node_to_val_map = {} for (node, value) in feed_dict.items(): if use_numpy: assert isinstance(value, np.ndarray) node_to_val_map[node] = value elif isinstance(value, np.ndarray): node_to_val_map[node] = ndarray.array(value, ctx=self.ctx) elif isinstance(value, ndarray.NDArray): node_to_val_map[node] = value else: assert False, 'feed_dict value type not supported' feed_shapes = {} for node in node_to_val_map: feed_shapes[node] = node_to_val_map[node].shape if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)): self.infer_shape(feed_shapes) self.feed_shapes = feed_shapes if (not use_numpy): self.memory_plan(self.feed_shapes) for node in self.topo_order: if (node in node_to_val_map): continue input_vals = [node_to_val_map[n] for n in node.inputs] if use_numpy: node_val = np.empty(shape=self.node_to_shape_map[node]) else: node_val = self.node_to_arr_map[node] node.op.compute(node, input_vals, node_val, use_numpy, self.stream) node_to_val_map[node] = node_val self.stream.sync() if ((not use_numpy) and convert_to_numpy_ret_vals): return [node_to_val_map[n].asnumpy() for n in self.eval_node_list] return [node_to_val_map[n] for n in self.eval_node_list]
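One detail worth noting in run(): shape inference and memory planning are skipped when the feed shapes match the previous call, and the equality test is the symmetric difference of the two dicts' item sets. A standalone sketch of that check:

sa = {'x': (32, 784), 'y': (32, 10)}
sb = {'y': (32, 10), 'x': (32, 784)}
assert len(set(sa.items()) ^ set(sb.items())) == 0   # same shapes: reuse the plan
sb['x'] = (64, 784)                                  # batch size changed
assert len(set(sa.items()) ^ set(sb.items())) != 0   # re-infer shapes, re-plan memory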
@inlineCallbacks def _callback(self): '\n This will be called repeatedly every `self.interval` seconds.\n `self.subscriptions` contain tuples of (obj, args, kwargs) for\n each subscribing object.\n\n If overloading, this callback is expected to handle all\n subscriptions when it is triggered. It should not return\n anything and should not traceback on poorly designed hooks.\n The callback should ideally work under @inlineCallbacks so it\n can yield appropriately.\n\n The `_callback` key, which is passed down through the handler via\n kwargs, is used here to identify which hook method to call.\n\n ' self._to_add = [] self._to_remove = [] self._is_ticking = True for (store_key, (args, kwargs)) in self.subscriptions.iteritems(): callback = (yield kwargs.pop('_callback', 'at_tick')) obj = (yield kwargs.pop('_obj', None)) try: if callable(callback): (yield callback(*args, **kwargs)) continue if ((not obj) or (not obj.pk)): self._to_remove.append(store_key) continue else: (yield _GA(obj, callback)(*args, **kwargs)) except ObjectDoesNotExist: log_trace('Removing ticker.') self._to_remove.append(store_key) except Exception: log_trace() finally: kwargs['_callback'] = callback kwargs['_obj'] = obj self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for (store_key, (args, kwargs)) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = []
-6,995,095,253,856,614,000
This will be called repeatedly every `self.interval` seconds. `self.subscriptions` contain tuples of (obj, args, kwargs) for each subscribing object. If overloading, this callback is expected to handle all subscriptions when it is triggered. It should not return anything and should not traceback on poorly designed hooks. The callback should ideally work under @inlineCallbacks so it can yield appropriately. The `_callback` key, which is passed down through the handler via kwargs, is used here to identify which hook method to call.
evennia/scripts/tickerhandler.py
_callback
orkim/evennia
python
@inlineCallbacks def _callback(self): '\n This will be called repeatedly every `self.interval` seconds.\n `self.subscriptions` contain tuples of (obj, args, kwargs) for\n each subscribing object.\n\n If overloading, this callback is expected to handle all\n subscriptions when it is triggered. It should not return\n anything and should not traceback on poorly designed hooks.\n The callback should ideally work under @inlineCallbacks so it\n can yield appropriately.\n\n The `_callback` key, which is passed down through the handler via\n kwargs, is used here to identify which hook method to call.\n\n ' self._to_add = [] self._to_remove = [] self._is_ticking = True for (store_key, (args, kwargs)) in self.subscriptions.iteritems(): callback = (yield kwargs.pop('_callback', 'at_tick')) obj = (yield kwargs.pop('_obj', None)) try: if callable(callback): (yield callback(*args, **kwargs)) continue if ((not obj) or (not obj.pk)): self._to_remove.append(store_key) continue else: (yield _GA(obj, callback)(*args, **kwargs)) except ObjectDoesNotExist: log_trace('Removing ticker.') self._to_remove.append(store_key) except Exception: log_trace() finally: kwargs['_callback'] = callback kwargs['_obj'] = obj self._is_ticking = False for store_key in self._to_remove: self.remove(store_key) for (store_key, (args, kwargs)) in self._to_add: self.add(store_key, *args, **kwargs) self._to_remove = [] self._to_add = []
def __init__(self, interval): '\n Set up the ticker\n\n Args:\n interval (int): The stepping interval.\n\n ' self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] self.task = ExtendedLoopingCall(self._callback)
-8,686,783,412,515,952,000
Set up the ticker Args: interval (int): The stepping interval.
evennia/scripts/tickerhandler.py
__init__
orkim/evennia
python
def __init__(self, interval): '\n Set up the ticker\n\n Args:\n interval (int): The stepping interval.\n\n ' self.interval = interval self.subscriptions = {} self._is_ticking = False self._to_remove = [] self._to_add = [] self.task = ExtendedLoopingCall(self._callback)
def validate(self, start_delay=None): '\n Start/stop the task depending on how many subscribers we have\n using it.\n\n Args:\n start_delay (int): Time to wait before starting.\n\n ' subs = self.subscriptions if self.task.running: if (not subs): self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay)
-7,546,078,601,667,794,000
Start/stop the task depending on how many subscribers we have using it. Args: start_delay (int): Time to wait before starting.
evennia/scripts/tickerhandler.py
validate
orkim/evennia
python
def validate(self, start_delay=None): '\n Start/stop the task depending on how many subscribers we have\n using it.\n\n Args:\n start_delay (int): Time to wait before starting.\n\n ' subs = self.subscriptions if self.task.running: if (not subs): self.task.stop() elif subs: self.task.start(self.interval, now=False, start_delay=start_delay)
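The start/stop-on-demand pattern above can be sketched with twisted's stock LoopingCall (ExtendedLoopingCall is evennia's subclass, assumed here to add the start_delay keyword); this is a minimal illustration, not the real Ticker:

from twisted.internet.task import LoopingCall

subscriptions = {}
task = LoopingCall(lambda: None)   # stand-in for Ticker._callback

def validate(interval=60):
    if task.running:
        if not subscriptions:
            task.stop()                   # last subscriber gone: stop ticking
    elif subscriptions:
        task.start(interval, now=False)   # first subscriber: start ticking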
def add(self, store_key, *args, **kwargs): '\n Sign up a subscriber to this ticker.\n\n Args:\n store_key (str): Unique storage hash for this ticker subscription.\n args (any, optional): Arguments to call the hook method with.\n\n Kwargs:\n _start_delay (int): If set, this will be\n used to delay the start of the trigger instead of\n `interval`.\n\n ' if self._is_ticking: self._to_add.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop('_start_delay', None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay)
-7,771,333,856,930,565,000
Sign up a subscriber to this ticker. Args: store_key (str): Unique storage hash for this ticker subscription. args (any, optional): Arguments to call the hook method with. Kwargs: _start_delay (int): If set, this will be used to delay the start of the trigger instead of `interval`.
evennia/scripts/tickerhandler.py
add
orkim/evennia
python
def add(self, store_key, *args, **kwargs): '\n Sign up a subscriber to this ticker.\n\n Args:\n store_key (str): Unique storage hash for this ticker subscription.\n args (any, optional): Arguments to call the hook method with.\n\n Kwargs:\n _start_delay (int): If set, this will be\n used to delay the start of the trigger instead of\n `interval`.\n\n ' if self._is_ticking: self._to_add.append((store_key, (args, kwargs))) else: start_delay = kwargs.pop('_start_delay', None) self.subscriptions[store_key] = (args, kwargs) self.validate(start_delay=start_delay)
def remove(self, store_key): '\n Unsubscribe object from this ticker\n\n Args:\n store_key (str): Unique store key.\n\n ' if self._is_ticking: self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate()
5,622,023,714,555,823,000
Unsubscribe object from this ticker Args: store_key (str): Unique store key.
evennia/scripts/tickerhandler.py
remove
orkim/evennia
python
def remove(self, store_key): '\n Unsubscribe object from this ticker\n\n Args:\n store_key (str): Unique store key.\n\n ' if self._is_ticking: self._to_remove.append(store_key) else: self.subscriptions.pop(store_key, False) self.validate()
def stop(self): '\n Kill the Task, regardless of subscriptions.\n\n ' self.subscriptions = {} self.validate()
-9,085,155,564,099,422,000
Kill the Task, regardless of subscriptions.
evennia/scripts/tickerhandler.py
stop
orkim/evennia
python
def stop(self): self.subscriptions = {} self.validate()
def __init__(self): '\n Initialize the pool.\n\n ' self.tickers = {}
3,492,824,550,037,405,000
Initialize the pool.
evennia/scripts/tickerhandler.py
__init__
orkim/evennia
python
def __init__(self): self.tickers = {}
def add(self, store_key, *args, **kwargs): '\n Add new ticker subscriber.\n\n Args:\n store_key (str): Unique storage hash.\n args (any, optional): Arguments to send to the hook method.\n\n ' (_, _, _, interval, _, _) = store_key if (not interval): log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if (interval not in self.tickers): self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs)
-1,308,441,702,683,874,800
Add new ticker subscriber. Args: store_key (str): Unique storage hash. args (any, optional): Arguments to send to the hook method.
evennia/scripts/tickerhandler.py
add
orkim/evennia
python
def add(self, store_key, *args, **kwargs): '\n Add new ticker subscriber.\n\n Args:\n store_key (str): Unique storage hash.\n args (any, optional): Arguments to send to the hook method.\n\n ' (_, _, _, interval, _, _) = store_key if (not interval): log_err(_ERROR_ADD_TICKER.format(store_key=store_key)) return if (interval not in self.tickers): self.tickers[interval] = self.ticker_class(interval) self.tickers[interval].add(store_key, *args, **kwargs)
def remove(self, store_key): '\n Remove subscription from pool.\n\n Args:\n store_key (str): Unique storage hash to remove\n\n ' (_, _, _, interval, _, _) = store_key if (interval in self.tickers): self.tickers[interval].remove(store_key) if (not self.tickers[interval]): del self.tickers[interval]
5,483,067,318,597,050,000
Remove subscription from pool. Args: store_key (str): Unique storage hash to remove
evennia/scripts/tickerhandler.py
remove
orkim/evennia
python
def remove(self, store_key): '\n Remove subscription from pool.\n\n Args:\n store_key (str): Unique storage hash to remove\n\n ' (_, _, _, interval, _, _) = store_key if (interval in self.tickers): self.tickers[interval].remove(store_key) if (not self.tickers[interval]): del self.tickers[interval]
def stop(self, interval=None): '\n Stop all scripts in pool. This is done at server reload since\n restoring the pool will automatically re-populate the pool.\n\n Args:\n interval (int, optional): Only stop tickers with this\n interval.\n\n ' if (interval and (interval in self.tickers)): self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop()
-8,835,157,636,026,703,000
Stop all scripts in pool. This is done at server reload since restoring the pool will automatically re-populate the pool. Args: interval (int, optional): Only stop tickers with this interval.
evennia/scripts/tickerhandler.py
stop
orkim/evennia
python
def stop(self, interval=None): '\n Stop all scripts in pool. This is done at server reload since\n restoring the pool will automatically re-populate the pool.\n\n Args:\n interval (int, optional): Only stop tickers with this\n interval.\n\n ' if (interval and (interval in self.tickers)): self.tickers[interval].stop() else: for ticker in self.tickers.values(): ticker.stop()
def __init__(self, save_name='ticker_storage'): '\n Initialize handler\n\n save_name (str, optional): The name of the ServerConfig\n instance to store the handler state persistently.\n\n ' self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class()
8,811,394,493,094,823,000
Initialize handler save_name (str, optional): The name of the ServerConfig instance to store the handler state persistently.
evennia/scripts/tickerhandler.py
__init__
orkim/evennia
python
def __init__(self, save_name='ticker_storage'): '\n Initialize handler\n\n save_name (str, optional): The name of the ServerConfig\n instance to store the handler state persistently.\n\n ' self.ticker_storage = {} self.save_name = save_name self.ticker_pool = self.ticker_pool_class()
def _get_callback(self, callback): "\n Analyze callback and determine its constituents\n\n Args:\n callback (function or method): This is either a stand-alone\n function or class method on a typeclassed entity (that is,\n an entity that can be saved to the database).\n\n Returns:\n ret (tuple): This is a tuple of the form `(obj, path, callfunc)`,\n where `obj` is the database object the callback is defined on\n if it's a method (otherwise `None`) and vice-versa, `path` is\n the python-path to the stand-alone function (`None` if a method).\n The `callfunc` is either the name of the method to call or the\n callable function object itself.\n\n " (outobj, outpath, outcallfunc) = (None, None, None) if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = ('%s.%s' % (callback.__module__, callback.func_name)) outcallfunc = callback else: raise TypeError(('%s is not a callable function or method.' % callback)) return (outobj, outpath, outcallfunc)
-7,416,390,526,428,229,000
Analyze callback and determine its constituents Args: callback (function or method): This is either a stand-alone function or class method on a typeclassed entity (that is, an entity that can be saved to the database). Returns: ret (tuple): This is a tuple of the form `(obj, path, callfunc)`, where `obj` is the database object the callback is defined on if it's a method (otherwise `None`) and vice-versa, `path` is the python-path to the stand-alone function (`None` if a method). The `callfunc` is either the name of the method to call or the callable function object itself.
evennia/scripts/tickerhandler.py
_get_callback
orkim/evennia
python
def _get_callback(self, callback): "\n Analyze callback and determine its constituents\n\n Args:\n callback (function or method): This is either a stand-alone\n function or class method on a typeclassed entity (that is,\n an entity that can be saved to the database).\n\n Returns:\n ret (tuple): This is a tuple of the form `(obj, path, callfunc)`,\n where `obj` is the database object the callback is defined on\n if it's a method (otherwise `None`) and vice-versa, `path` is\n the python-path to the stand-alone function (`None` if a method).\n The `callfunc` is either the name of the method to call or the\n callable function object itself.\n\n " (outobj, outpath, outcallfunc) = (None, None, None) if callable(callback): if inspect.ismethod(callback): outobj = callback.im_self outcallfunc = callback.im_func.func_name elif inspect.isfunction(callback): outpath = ('%s.%s' % (callback.__module__, callback.func_name)) outcallfunc = callback else: raise TypeError(('%s is not a callable function or method.' % callback)) return (outobj, outpath, outcallfunc)
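im_self, im_func and func_name are the Python 2 spellings; on Python 3 the equivalent attributes are __self__, __func__ and __name__. A runnable Python 3 translation of the same dispatch (illustrative only, not evennia's code):

import inspect

class Thing(object):
    def at_tick(self):
        pass

def standalone():
    pass

def get_callback(callback):
    obj = path = callfunc = None
    if inspect.ismethod(callback):
        obj = callback.__self__
        callfunc = callback.__func__.__name__
    elif inspect.isfunction(callback):
        path = '%s.%s' % (callback.__module__, callback.__name__)
        callfunc = callback
    else:
        raise TypeError('%s is not a callable function or method.' % callback)
    return obj, path, callfunc

t = Thing()
assert get_callback(t.at_tick) == (t, None, 'at_tick')
assert get_callback(standalone)[1].endswith('.standalone')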
def _store_key(self, obj, path, interval, callfunc, idstring='', persistent=True): '\n Tries to create a store_key for the object.\n\n Args:\n obj (Object, tuple or None): Subscribing object if any. If a tuple, this is\n a packed_obj tuple from dbserialize.\n path (str or None): Python-path to callable, if any.\n interval (int): Ticker interval.\n callfunc (callable or str): This is either the callable function or\n the name of the method to call. Note that the callable is never\n stored in the key; that is uniquely identified with the python-path.\n idstring (str, optional): Additional separator between\n different subscription types.\n persistent (bool, optional): If this ticker should survive a system\n shutdown or not.\n\n Returns:\n store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval,\n idstring, persistent)` that uniquely identifies the\n ticker. Here, `packed_obj` is the unique string representation of the\n object or `None`. The `methodname` is the string name of the method on\n `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is\n the Python-path to a non-method callable, or `None`. Finally, `interval`,\n `idstring` and `persistent` are integers, strings and bools respectively.\n\n ' interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = (callfunc if (callfunc and isinstance(callfunc, basestring)) else None) outpath = (path if (path and isinstance(path, basestring)) else None) return (packed_obj, methodname, outpath, interval, idstring, persistent)
6,292,473,907,682,195,000
Tries to create a store_key for the object. Args: obj (Object, tuple or None): Subscribing object if any. If a tuple, this is a packed_obj tuple from dbserialize. path (str or None): Python-path to callable, if any. interval (int): Ticker interval. callfunc (callable or str): This is either the callable function or the name of the method to call. Note that the callable is never stored in the key; that is uniquely identified with the python-path. idstring (str, optional): Additional separator between different subscription types. persistent (bool, optional): If this ticker should survive a system shutdown or not. Returns: store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval, idstring, persistent)` that uniquely identifies the ticker. Here, `packed_obj` is the unique string representation of the object or `None`. The `methodname` is the string name of the method on `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is the Python-path to a non-method callable, or `None`. Finally, `interval`, `idstring` and `persistent` are integers, strings and bools respectively.
evennia/scripts/tickerhandler.py
_store_key
orkim/evennia
python
def _store_key(self, obj, path, interval, callfunc, idstring='', persistent=True): '\n Tries to create a store_key for the object.\n\n Args:\n obj (Object, tuple or None): Subscribing object if any. If a tuple, this is\n a packed_obj tuple from dbserialize.\n path (str or None): Python-path to callable, if any.\n interval (int): Ticker interval.\n callfunc (callable or str): This is either the callable function or\n the name of the method to call. Note that the callable is never\n stored in the key; that is uniquely identified with the python-path.\n idstring (str, optional): Additional separator between\n different subscription types.\n persistent (bool, optional): If this ticker should survive a system\n shutdown or not.\n\n Returns:\n store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval,\n idstring, persistent)` that uniquely identifies the\n ticker. Here, `packed_obj` is the unique string representation of the\n object or `None`. The `methodname` is the string name of the method on\n `packed_obj` to call, or `None` if `packed_obj` is unset. `path` is\n the Python-path to a non-method callable, or `None`. Finally, `interval`,\n `idstring` and `persistent` are integers, strings and bools respectively.\n\n ' interval = int(interval) persistent = bool(persistent) packed_obj = pack_dbobj(obj) methodname = (callfunc if (callfunc and isinstance(callfunc, basestring)) else None) outpath = (path if (path and isinstance(path, basestring)) else None) return (packed_obj, methodname, outpath, interval, idstring, persistent)
def save(self): '\n Save ticker_storage as a serialized string into a temporary\n ServerConf field. Whereas saving is done on the fly, if called\n by server when it shuts down, the current timer of each ticker\n will be saved so it can start over from that point.\n\n ' if self.ticker_storage: start_delays = dict(((interval, ticker.task.next_call_time()) for (interval, ticker) in self.ticker_pool.tickers.items())) to_save = {store_key: (args, kwargs) for (store_key, (args, kwargs)) in self.ticker_storage.items() if ((store_key[1] and (('_obj' in kwargs) and kwargs['_obj'].pk) and hasattr(kwargs['_obj'], store_key[1])) or store_key[2])} for (store_key, (args, kwargs)) in to_save.items(): interval = store_key[3] kwargs['_start_delay'] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: ServerConfig.objects.conf(key=self.save_name, delete=True)
-2,642,650,010,556,636,700
Save ticker_storage as a serialized string into a temporary ServerConf field. Whereas saving is done on the fly, if called by server when it shuts down, the current timer of each ticker will be saved so it can start over from that point.
evennia/scripts/tickerhandler.py
save
orkim/evennia
python
def save(self): '\n Save ticker_storage as a serialized string into a temporary\n ServerConf field. Whereas saving is done on the fly, if called\n by server when it shuts down, the current timer of each ticker\n will be saved so it can start over from that point.\n\n ' if self.ticker_storage: start_delays = dict(((interval, ticker.task.next_call_time()) for (interval, ticker) in self.ticker_pool.tickers.items())) to_save = {store_key: (args, kwargs) for (store_key, (args, kwargs)) in self.ticker_storage.items() if ((store_key[1] and (('_obj' in kwargs) and kwargs['_obj'].pk) and hasattr(kwargs['_obj'], store_key[1])) or store_key[2])} for (store_key, (args, kwargs)) in to_save.items(): interval = store_key[3] kwargs['_start_delay'] = start_delays.get(interval, None) ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save)) else: ServerConfig.objects.conf(key=self.save_name, delete=True)
def restore(self, server_reload=True): '\n Restore ticker_storage from database and re-initialize the\n handler from storage. This is triggered by the server at\n restart.\n\n Args:\n server_reload (bool, optional): If this is False, it means\n the server went through a cold reboot and all\n non-persistent tickers must be killed.\n\n ' restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for (store_key, (args, kwargs)) in restored_tickers.iteritems(): try: (obj, callfunc, path, interval, idstring, persistent) = store_key if ((not persistent) and (not server_reload)): continue if (isinstance(callfunc, basestring) and (not obj)): continue store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if (obj and callfunc): kwargs['_callback'] = callfunc kwargs['_obj'] = obj elif path: (modname, varname) = path.rsplit('.', 1) callback = variable_from_module(modname, varname) kwargs['_callback'] = callback kwargs['_obj'] = None else: log_err(('Tickerhandler: Removing malformed ticker: %s' % str(store_key))) continue except Exception: log_trace(('Tickerhandler: Removing malformed ticker: %s' % str(store_key))) continue self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs)
4,263,887,369,786,780,700
Restore ticker_storage from database and re-initialize the handler from storage. This is triggered by the server at restart. Args: server_reload (bool, optional): If this is False, it means the server went through a cold reboot and all non-persistent tickers must be killed.
evennia/scripts/tickerhandler.py
restore
orkim/evennia
python
def restore(self, server_reload=True): '\n Restore ticker_storage from database and re-initialize the\n handler from storage. This is triggered by the server at\n restart.\n\n Args:\n server_reload (bool, optional): If this is False, it means\n the server went through a cold reboot and all\n non-persistent tickers must be killed.\n\n ' restored_tickers = ServerConfig.objects.conf(key=self.save_name) if restored_tickers: restored_tickers = dbunserialize(restored_tickers) self.ticker_storage = {} for (store_key, (args, kwargs)) in restored_tickers.iteritems(): try: (obj, callfunc, path, interval, idstring, persistent) = store_key if ((not persistent) and (not server_reload)): continue if (isinstance(callfunc, basestring) and (not obj)): continue store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) if (obj and callfunc): kwargs['_callback'] = callfunc kwargs['_obj'] = obj elif path: (modname, varname) = path.rsplit('.', 1) callback = variable_from_module(modname, varname) kwargs['_callback'] = callback kwargs['_obj'] = None else: log_err(('Tickerhandler: Removing malformed ticker: %s' % str(store_key))) continue except Exception: log_trace(('Tickerhandler: Removing malformed ticker: %s' % str(store_key))) continue self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs)
def add(self, interval=60, callback=None, idstring='', persistent=True, *args, **kwargs): '\n Add subscription to tickerhandler\n\n Args:\n interval (int, optional): Interval in seconds between calling\n `callback(*args, **kwargs)`\n callback (callable function or method, optional): This\n should either be a stand-alone function or a method on a\n typeclassed entity (that is, one that can be saved to the\n database).\n idstring (str, optional): Identifier for separating\n this ticker-subscription from others with the same\n interval. Allows for managing multiple calls with\n the same time interval and callback.\n persistent (bool, optional): A ticker will always survive\n a server reload. If this is unset, the ticker will be\n deleted by a server shutdown.\n args, kwargs (optional): These will be passed into the\n callback every time it is called.\n\n Notes:\n The callback will be identified by type and stored either as\n a combination of serialized database object + methodname or\n as a python-path to the module + funcname. These strings will\n be combined with `interval` and `idstring` to define a\n unique storage key for saving. These must thus all be supplied\n when wanting to modify/remove the ticker later.\n\n ' if isinstance(callback, int): raise RuntimeError('TICKER_HANDLER.add has changed: the interval is now the first argument, callback the second.') (obj, path, callfunc) = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) kwargs['_obj'] = obj kwargs['_callback'] = callfunc self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) self.save()
-9,201,289,920,555,109,000
Add subscription to tickerhandler Args: interval (int, optional): Interval in seconds between calling `callback(*args, **kwargs)` callback (callable function or method, optional): This should either be a stand-alone function or a method on a typeclassed entity (that is, one that can be saved to the database). idstring (str, optional): Identifier for separating this ticker-subscription from others with the same interval. Allows for managing multiple calls with the same time interval and callback. persistent (bool, optional): A ticker will always survive a server reload. If this is unset, the ticker will be deleted by a server shutdown. args, kwargs (optional): These will be passed into the callback every time it is called. Notes: The callback will be identified by type and stored either as a combination of serialized database object + methodname or as a python-path to the module + funcname. These strings will be combined with `interval` and `idstring` to define a unique storage key for saving. These must thus all be supplied when wanting to modify/remove the ticker later.
evennia/scripts/tickerhandler.py
add
orkim/evennia
python
def add(self, interval=60, callback=None, idstring='', persistent=True, *args, **kwargs): '\n Add subscription to tickerhandler\n\n Args:\n interval (int, optional): Interval in seconds between calling\n `callback(*args, **kwargs)`\n callback (callable function or method, optional): This\n should either be a stand-alone function or a method on a\n typeclassed entity (that is, one that can be saved to the\n database).\n idstring (str, optional): Identifier for separating\n this ticker-subscription from others with the same\n interval. Allows for managing multiple calls with\n the same time interval and callback.\n persistent (bool, optional): A ticker will always survive\n a server reload. If this is unset, the ticker will be\n deleted by a server shutdown.\n args, kwargs (optional): These will be passed into the\n callback every time it is called.\n\n Notes:\n The callback will be identified by type and stored either as\n a combination of serialized database object + methodname or\n as a python-path to the module + funcname. These strings will\n be combined with `interval` and `idstring` to define a\n unique storage key for saving. These must thus all be supplied\n when wanting to modify/remove the ticker later.\n\n ' if isinstance(callback, int): raise RuntimeError('TICKER_HANDLER.add has changed: the interval is now the first argument, callback the second.') (obj, path, callfunc) = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) kwargs['_obj'] = obj kwargs['_callback'] = callfunc self.ticker_storage[store_key] = (args, kwargs) self.ticker_pool.add(store_key, *args, **kwargs) self.save()
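Typical subscription flow, hedged: this follows evennia's documented usage of the singleton handler with a stand-alone function (which works in an initialized evennia environment); remove() must be called with the same identifying arguments used in add(), since they jointly form the storage key:

from evennia import TICKER_HANDLER

def announce():
    print('tick')   # stand-in for real game logic

# Subscribe: call announce() every 30 seconds.
TICKER_HANDLER.add(30, announce, idstring='demo', persistent=False)
# ... later, unsubscribe with the matching interval/callback/idstring/persistent:
TICKER_HANDLER.remove(30, announce, idstring='demo', persistent=False)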
def remove(self, interval=60, callback=None, idstring='', persistent=True): '\n Remove object from ticker or only remove it from tickers with\n a given interval.\n\n Args:\n interval (int, optional): Interval of ticker to remove.\n callback (callable function or method): Either a function or\n the method of a typeclassed object.\n idstring (str, optional): Identifier id of ticker to remove.\n\n ' if isinstance(callback, int): raise RuntimeError('TICKER_HANDLER.remove has changed: the interval is now the first argument, callback the second.') (obj, path, callfunc) = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) to_remove = self.ticker_storage.pop(store_key, None) if to_remove: self.ticker_pool.remove(store_key) self.save()
7,312,347,190,382,608,000
Remove object from ticker or only remove it from tickers with a given interval. Args: interval (int, optional): Interval of ticker to remove. callback (callable function or method): Either a function or the method of a typeclassed object. idstring (str, optional): Identifier id of ticker to remove.
evennia/scripts/tickerhandler.py
remove
orkim/evennia
python
def remove(self, interval=60, callback=None, idstring='', persistent=True): '\n Remove object from ticker or only remove it from tickers with\n a given interval.\n\n Args:\n interval (int, optional): Interval of ticker to remove.\n callback (callable function or method): Either a function or\n the method of a typeclassed object.\n idstring (str, optional): Identifier id of ticker to remove.\n\n ' if isinstance(callback, int): raise RuntimeError('TICKER_HANDLER.remove has changed: the interval is now the first argument, callback the second.') (obj, path, callfunc) = self._get_callback(callback) store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent) to_remove = self.ticker_storage.pop(store_key, None) if to_remove: self.ticker_pool.remove(store_key) self.save()
def clear(self, interval=None): '\n Stop/remove tickers from handler.\n\n Args:\n interval (int): Only stop tickers with this interval.\n\n Notes:\n This is the only supported way to kill tickers related to\n non-db objects.\n\n ' self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict(((store_key, val) for (store_key, val) in self.ticker_storage.items() if (store_key[1] != interval))) else: self.ticker_storage = {} self.save()
952,231,011,741,671,700
Stop/remove tickers from handler. Args: interval (int): Only stop tickers with this interval. Notes: This is the only supported way to kill tickers related to non-db objects.
evennia/scripts/tickerhandler.py
clear
orkim/evennia
python
def clear(self, interval=None): self.ticker_pool.stop(interval) if interval: self.ticker_storage = dict(((store_key, val) for (store_key, val) in self.ticker_storage.items() if (store_key[1] != interval))) else: self.ticker_storage = {} self.save()
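`clear` needs no store key at all, which is why the docstring calls it the only supported way to kill tickers whose callbacks live on non-database objects; assuming the same running-server context as the sketches above:

from evennia import TICKER_HANDLER

TICKER_HANDLER.clear(interval=30)  # stop and forget only the 30-second tickers
TICKER_HANDLER.clear()             # stop and forget every ticker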
def all(self, interval=None): '\n Get all subscriptions.\n\n Args:\n interval (int): Limit match to tickers with this interval.\n\n Returns:\n tickers (dict): If `interval` was given, a dict\n {interval: subscriptions} mapping the interval to that\n ticker's subscription dict, or None if no ticker uses\n that interval.\n tickerpool_layout (dict): If `interval` was *not* given,\n a dict {interval1: subscriptions1, interval2: subscriptions2,\n ...} covering all intervals.\n\n ' if (interval is None): return dict(((interval, ticker.subscriptions) for (interval, ticker) in self.ticker_pool.tickers.iteritems())) else: ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions}
4,919,551,878,231,397,000
Get all subscriptions. Args: interval (int): Limit match to tickers with this interval. Returns: tickers (dict): If `interval` was given, a dict {interval: subscriptions} mapping the interval to that ticker's subscription dict, or None if no ticker uses that interval. tickerpool_layout (dict): If `interval` was *not* given, a dict {interval1: subscriptions1, interval2: subscriptions2, ...} covering all intervals.
evennia/scripts/tickerhandler.py
all
orkim/evennia
python
def all(self, interval=None): if (interval is None): return dict(((interval, ticker.subscriptions) for (interval, ticker) in self.ticker_pool.tickers.iteritems())) else: ticker = self.ticker_pool.tickers.get(interval, None) if ticker: return {interval: ticker.subscriptions}
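The two return shapes of `all`, again assuming a live TICKER_HANDLER:

from evennia import TICKER_HANDLER

layout = TICKER_HANDLER.all()    # {30: subscriptions, 60: subscriptions, ...}
thirty = TICKER_HANDLER.all(30)  # {30: subscriptions}, or None if nothing
                                 # runs at a 30-second interval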
def all_display(self): '\n Get all tickers in an easily displayable form.\n\n Returns:\n store_keys (list): A list of all store keys.\n\n ' store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for ((objtup, callfunc, path, interval, idstring, persistent), (args, kwargs)) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get('_obj', None), callfunc, path, interval, idstring, persistent)) return store_keys
-7,547,110,222,363,837,000
Get all tickers in an easily displayable form. Returns: store_keys (list): A list of all store keys.
evennia/scripts/tickerhandler.py
all_display
orkim/evennia
python
def all_display(self): store_keys = [] for ticker in self.ticker_pool.tickers.itervalues(): for ((objtup, callfunc, path, interval, idstring, persistent), (args, kwargs)) in ticker.subscriptions.iteritems(): store_keys.append((kwargs.get('_obj', None), callfunc, path, interval, idstring, persistent)) return store_keys
def parse_command_line(): 'Parse the command line options' desc = 'The Stars Align - Day 10 of Advent of Code 2018' sample = 'sample: python aoc_10.py input.txt' parser = argparse.ArgumentParser(description=desc, epilog=sample) parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose', help='Print status messages to stdout') parser.add_argument('-p', '--part', action='store', default=1, type=int, dest='part', help='Puzzle Part (1 or 2)') parser.add_argument('-l', '--limit', action='store', default=0, type=int, dest='limit', help='Maximum limit (e.g., time, size, recursion) before stopping') parser.add_argument('filepath', metavar='FILENAME', action='store', type=str, help='Location of puzzle input') return parser.parse_args()
-1,921,321,716,781,806,600
Parse the command line options
2018/10_TheStarsAlign/aoc_10.py
parse_command_line
deanearlwright/AdventOfCode
python
def parse_command_line(): desc = 'The Stars Align - Day 10 of Advent of Code 2018' sample = 'sample: python aoc_10.py input.txt' parser = argparse.ArgumentParser(description=desc, epilog=sample) parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose', help='Print status messages to stdout') parser.add_argument('-p', '--part', action='store', default=1, type=int, dest='part', help='Puzzle Part (1 or 2)') parser.add_argument('-l', '--limit', action='store', default=0, type=int, dest='limit', help='Maximum limit (e.g., time, size, recursion) before stopping') parser.add_argument('filepath', metavar='FILENAME', action='store', type=str, help='Location of puzzle input') return parser.parse_args()
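A quick, self-contained check of the parser by faking sys.argv instead of invoking the script from a shell (this assumes the module-level `import argparse` the snippet relies on); the flag values here are arbitrary:

import sys

sys.argv = ["aoc_10.py", "-v", "-p", "2", "-l", "15000", "input.txt"]
args = parse_command_line()
assert args.verbose and args.part == 2 and args.limit == 15000
assert args.filepath == "input.txt"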
def part_one(args, input_lines): 'Process part one of the puzzle' solver = lights.Lights(part2=False, text=input_lines) solution = solver.part_one(verbose=args.verbose, limit=args.limit) if (solution is None): print('There is no solution') else: print(('The solution for part one is %s' % solution)) return (solution is not None)
3,090,454,062,781,860,000
Process part one of the puzzle
2018/10_TheStarsAlign/aoc_10.py
part_one
deanearlwright/AdventOfCode
python
def part_one(args, input_lines): solver = lights.Lights(part2=False, text=input_lines) solution = solver.part_one(verbose=args.verbose, limit=args.limit) if (solution is None): print('There is no solution') else: print(('The solution for part one is %s' % solution)) return (solution is not None)
def part_two(args, input_lines): 'Process part two of the puzzle' solver = lights.Lights(part2=True, text=input_lines) solution = solver.part_two(verbose=args.verbose, limit=args.limit) if (solution is None): print('There is no solution') else: print(('The solution for part two is %s' % solution)) return (solution is not None)
-7,008,301,873,587,400,000
Process part two of the puzzle
2018/10_TheStarsAlign/aoc_10.py
part_two
deanearlwright/AdventOfCode
python
def part_two(args, input_lines): solver = lights.Lights(part2=True, text=input_lines) solution = solver.part_two(verbose=args.verbose, limit=args.limit) if (solution is None): print('There is no solution') else: print(('The solution for part two is %s' % solution)) return (solution is not None)
def from_file(filepath): 'Read the file' return from_text(open(filepath).read())
5,800,071,608,339,332,000
Read the file
2018/10_TheStarsAlign/aoc_10.py
from_file
deanearlwright/AdventOfCode
python
def from_file(filepath): return from_text(open(filepath).read())
def from_text(text): 'Break the text into trimmed, non-comment lines' lines = [] for line in text.split('\n'): line = line.rstrip(' \r') if (not line): continue if line.startswith('!'): continue lines.append(line) return lines
-6,028,014,871,540,136,000
Break the text into trimmed, non-comment lines
2018/10_TheStarsAlign/aoc_10.py
from_text
deanearlwright/AdventOfCode
python
def from_text(text): lines = [] for line in text.split('\n'): line = line.rstrip(' \r') if (not line): continue if line.startswith('!'): continue lines.append(line) return lines
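A small demonstration of the filtering rules (blank lines and '!'-comments dropped, trailing spaces and carriage returns stripped, leading whitespace kept):

lines = from_text("position=< 9,  1>\n! a comment\n\n  keep me  \n")
assert lines == ["position=< 9,  1>", "  keep me"]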
def main(): 'Read the Advent of Code problem and solve it' args = parse_command_line() input_text = from_file(args.filepath) if (args.part == 1): result = part_one(args, input_text) else: result = part_two(args, input_text) if result: sys.exit(0) sys.exit(2)
3,592,193,595,729,941,500
Read the Advent of Code problem and solve it
2018/10_TheStarsAlign/aoc_10.py
main
deanearlwright/AdventOfCode
python
def main(): args = parse_command_line() input_text = from_file(args.filepath) if (args.part == 1): result = part_one(args, input_text) else: result = part_two(args, input_text) if result: sys.exit(0) sys.exit(2)
def read_country_code(): '\n Get the Chinese/English country name dictionary.\n :return:\n ' country_dict = {} for (key, val) in namemap.nameMap.items(): country_dict[val] = key return country_dict
-7,936,220,243,036,575,000
Get the Chinese/English country name dictionary. :return:
python-data-analysis/2019-nCoV-global/global_map.py
read_country_code
DearCasper/python-learning
python
def read_country_code(): country_dict = {} for (key, val) in namemap.nameMap.items(): country_dict[val] = key return country_dict
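The function just inverts namemap.nameMap; a stand-alone sketch with a two-entry stand-in for that module, assuming (as the lookups in read_csv below suggest) that it maps English names to Chinese labels:

nameMap = {"Japan": "日本", "Singapore": "新加坡"}  # stand-in for namemap.nameMap
country_dict = {val: key for key, val in nameMap.items()}
assert country_dict == {"日本": "Japan", "新加坡": "Singapore"}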
def read_csv(): '\n Read the data; return a list of English country names and a list of\n cumulative confirmed case counts.\n :return:\n ' country_dict = read_country_code() data = pd.read_csv('2019-nCoV.csv', index_col=False) countrys_names = list() confirmed_count = list() for x in range(len(data.index)): if (data['name'].iloc[x] in country_dict.keys()): countrys_names.append(country_dict[data['name'].iloc[x]]) confirmed_count.append(data['confirm'].iloc[x]) else: print(data['name'].iloc[x]) return (countrys_names, confirmed_count)
-7,097,631,232,518,895,000
Read the data and return a list of English country names and a list of cumulative confirmed case counts. :return:
python-data-analysis/2019-nCoV-global/global_map.py
read_csv
DearCasper/python-learning
python
def read_csv(): country_dict = read_country_code() data = pd.read_csv('2019-nCoV.csv', index_col=False) countrys_names = list() confirmed_count = list() for x in range(len(data.index)): if (data['name'].iloc[x] in country_dict.keys()): countrys_names.append(country_dict[data['name'].iloc[x]]) confirmed_count.append(data['confirm'].iloc[x]) else: print(data['name'].iloc[x]) return (countrys_names, confirmed_count)
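The same filter-and-translate pattern run on in-memory data instead of 2019-nCoV.csv; the rows and the two-entry country dict below are made up:

import pandas as pd
from io import StringIO

data = pd.read_csv(StringIO("name,confirm\n日本,100\n新加坡,50\n无名之地,1\n"))
country_dict = {"日本": "Japan", "新加坡": "Singapore"}

names, counts = [], []
for x in range(len(data.index)):
    name = data['name'].iloc[x]
    if name in country_dict:
        names.append(country_dict[name])
        counts.append(data['confirm'].iloc[x])
assert names == ["Japan", "Singapore"] and counts == [100, 50]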