Dataset schema (column: type, value range):
hexsha: stringlengths 40–40
size: int64 3–1.03M
ext: stringclasses 10 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3–972
max_stars_repo_name: stringlengths 6–130
max_stars_repo_head_hexsha: stringlengths 40–78
max_stars_repo_licenses: listlengths 1–10
max_stars_count: int64 1–191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24–24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24–24 ⌀
max_issues_repo_path: stringlengths 3–972
max_issues_repo_name: stringlengths 6–130
max_issues_repo_head_hexsha: stringlengths 40–78
max_issues_repo_licenses: listlengths 1–10
max_issues_count: int64 1–116k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24–24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24–24 ⌀
max_forks_repo_path: stringlengths 3–972
max_forks_repo_name: stringlengths 6–130
max_forks_repo_head_hexsha: stringlengths 40–78
max_forks_repo_licenses: listlengths 1–10
max_forks_count: int64 1–105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24–24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24–24 ⌀
content: stringlengths 3–1.03M
avg_line_length: float64 1.13–941k
max_line_length: int64 2–941k
alphanum_fraction: float64 0–1

hexsha 3a373e469546b3ab9cba2a722ab7dc51df112fb2 | size 7,167 | ext py | lang Python
max_stars: src/ops/sklearn_ops.py | pybokeh/dagster-examples | 459cfbe00585f1d123e49058685c74149efb867d | ["MIT"] | count null | events null – null
max_issues: src/ops/sklearn_ops.py | pybokeh/dagster-examples | 459cfbe00585f1d123e49058685c74149efb867d | ["MIT"] | count null | events null – null
max_forks: src/ops/sklearn_ops.py | pybokeh/dagster-examples | 459cfbe00585f1d123e49058685c74149efb867d | ["MIT"] | count null | events null – null
content:
from dagster import op, In, Out, Output, String
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
import numpy as np
import pandas as pd
import scipy
import sklearn
@op(
description="Fetch free-hand text to generic features training or test data",
out={
"df": Out(
description="pandas dataframe containing training or test data",
dagster_type=pd.DataFrame,
)
},
config_schema={
"full_path": String
}
)
def fetch_freehand_text_to_generic_data(context) -> pd.DataFrame:
df = pd.read_csv(context.op_config["full_path"])
context.log.info(f"Number of (rows, columns): {df.shape}")
yield Output(df, output_name="df")
@op(
description="Separate features from the target label.",
ins={
"df": In(
description="pandas dataframe",
)
},
out={
"X": Out(
description="Feature matrix",
dagster_type=pd.Series,
),
"y": Out(
description="Target label",
dagster_type=pd.Series,
),
}
)
def separate_features_from_target_label(context, df: pd.DataFrame):
"""
It is assumed the 1st column of the dataframe is the text features column
and the 2nd column is the target label
"""
X = df.iloc[:, 0]
y = df.iloc[:, 1]
context.log.info(f"Type of X is: {type(X)}")
context.log.info(f"Dimensions of X: {X.shape}")
context.log.info(f"Type of y is: {type(y)}")
context.log.info(f"Dimensions of y: {y.shape}")
yield Output(X, output_name="X")
yield Output(y, output_name="y")
@op(
description="Label encode the categorical target label",
ins={
"y": In(
description="Target label",
)
},
out={
"y_encoded": Out(
description="Target label that has been label encoded",
dagster_type=np.ndarray
),
"label_encoder": Out(
description="Label encoder",
dagster_type=sklearn.preprocessing._label.LabelEncoder,
),
}
)
def label_encode_target(context, y: pd.Series):
le = LabelEncoder()
    # fit_transform both fits the encoder and returns the encoded labels
    y_encoded = le.fit_transform(y)
    context.log.info(f"Dimensions of encoded y: {y_encoded.shape}")
yield Output(y_encoded, output_name="y_encoded")
yield Output(le, output_name="label_encoder")
@op(
description="Encode the features of training data: CountVectorize then TfIdTransform",
ins={
"X": In(
description="pandas series containing unencoded features/text document data",
)
},
out={
"X_encoded_train": Out(
description="Features data that has been both count vectorized and tfid transformed",
dagster_type=scipy.sparse.csr.csr_matrix
),
"count_vect": Out(
description="Fitted count vector model",
dagster_type=sklearn.feature_extraction.text.CountVectorizer
),
"tfid_vect": Out(
description="Fitted tfid model",
dagster_type=sklearn.feature_extraction.text.TfidfTransformer
),
},
)
def count_tfid_transform_train(context, X: pd.Series):
count_vect = CountVectorizer()
X_count = count_vect.fit_transform(X)
tfid_vect = TfidfTransformer()
X_tfid = tfid_vect.fit_transform(X_count)
context.log.info(f"Dimenions of encoded X: {X_tfid.shape}")
yield Output(X_tfid, output_name="X_encoded_train")
yield Output(count_vect, output_name="count_vect")
yield Output(tfid_vect, output_name="tfid_vect")
@op(
description="Encode the features of test data: CountVectorize and then TfIdTransform",
ins={
"df": In(
description="Pandas dataframe of test data",
),
"count_vect": In(
description="Count vector model already fitted on test data",
),
"tfid_vect": In(
description="tfidt vector model already fitted on test data",
),
},
out={
"X_encoded_test": Out(
description="Encoded features of test data",
dagster_type=scipy.sparse.csr.csr_matrix
)
}
)
def count_tfid_transform_test(
context,
df: pd.DataFrame,
count_vect: sklearn.feature_extraction.text.CountVectorizer,
tfid_vect: sklearn.feature_extraction.text.TfidfTransformer
):
"""
It is assumed the 1st column of the file is the text features data
"""
X_test = df.iloc[:, 0]
X_test_count_vect = count_vect.transform(X_test)
X_test_tfid = tfid_vect.transform(X_test_count_vect)
context.log.info(f"Dimensions of encoded X test: {X_test_tfid.shape}")
yield Output(X_test_tfid, output_name="X_encoded_test")
@op(
description="Create SGD Classifier model",
ins={
"X_encoded": In(
description="Feature matrix encoded with count vectorizer and tfid transform",
),
"y_encoded": In(
description="Feature matrix encoded with count vectorizer and tfid transform",
),
},
out={
"clf": Out(
description="Trained SGD classifier model",
dagster_type=sklearn.linear_model._stochastic_gradient.SGDClassifier,
)
}
)
def create_sgd_classifier_model(
context,
X_encoded: scipy.sparse.csr.csr_matrix,
y_encoded: np.ndarray
):
clf = SGDClassifier().fit(X_encoded, y_encoded)
yield Output(clf, output_name="clf")
@op(
description="Make prediction",
ins={
"X_test_encoded": In(
description="Encoded features matrix",
),
"clf": In(
description="Classification model already fitted on training data",
),
"label_encoder": In(
description="Label encoder fitted on the training data",
),
},
out={
"prediction": Out(
description="Predicted values",
dagster_type=np.ndarray
)
}
)
def predict(
context,
X_test_encoded: scipy.sparse.csr.csr_matrix,
clf,
label_encoder: sklearn.preprocessing._label.LabelEncoder
):
predicted = clf.predict(X_test_encoded)
context.log.info(f"Dimenions of encoded X test: {X_test_encoded.shape}")
context.log.info(f"Sample prediction values: {label_encoder.classes_[predicted[:5]]}")
yield Output(predicted, output_name="prediction")
@op(
description="Measure accuracy score",
ins={
"predicted_values": In(
description="predicted values from model",
),
"actual_values": In(
description="Actual values",
)
}
)
def get_accuracy_score(context, predicted_values: np.ndarray, actual_values: np.ndarray):
    # Report the fraction of predictions that match the actual labels
    accuracy = float(np.mean(predicted_values == actual_values))
    context.log.info(f"Accuracy score: {accuracy}")
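# --- Illustrative sketch (not part of the original sklearn_ops.py) ---
# A minimal example of how these ops could be wired into a Dagster job; the job name
# and the choice to stop after training are assumptions, and run config still has to
# supply "full_path" for fetch_freehand_text_to_generic_data at execution time.
from dagster import job
@job
def train_text_classifier_job():
    df = fetch_freehand_text_to_generic_data()
    X, y = separate_features_from_target_label(df)
    y_encoded, label_encoder = label_encode_target(y)
    X_encoded_train, count_vect, tfid_vect = count_tfid_transform_train(X)
    create_sgd_classifier_model(X_encoded_train, y_encoded)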
avg_line_length 29.134146 | max_line_length 98 | alphanum_fraction 0.610297

hexsha f06ab4aba44d268a0a0dc31f95fe18a717006bfe | size 40 | ext py | lang Python
max_stars: test/1.py | hopingpeople/test | 5b672c44e9b69275d45c59ab5e40b04b8e4fe105 | ["MIT"] | count null | events null – null
max_issues: test/1.py | hopingpeople/test | 5b672c44e9b69275d45c59ab5e40b04b8e4fe105 | ["MIT"] | count null | events null – null
max_forks: test/1.py | hopingpeople/test | 5b672c44e9b69275d45c59ab5e40b04b8e4fe105 | ["MIT"] | count null | events null – null
content:
a = 1
b = 2
def sum():
return a+b
avg_line_length 5.714286 | max_line_length 14 | alphanum_fraction 0.45

hexsha fae6ce1c0a9f34f9eeb48cef4b1536a78e6a44d2 | size 1,696 | ext py | lang Python
max_stars: azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/virtual_machine_agent_instance_view_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | count 1 | events 2021-09-07T18:36:04.000Z – 2021-09-07T18:36:04.000Z
max_issues: azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/virtual_machine_agent_instance_view_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | count 54 | events 2016-03-25T17:25:01.000Z – 2018-10-22T17:27:54.000Z
max_forks: azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/virtual_machine_agent_instance_view_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | count 2 | events 2017-01-20T18:25:46.000Z – 2017-05-12T21:31:47.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineAgentInstanceView(Model):
"""The instance view of the VM Agent running on the virtual machine.
:param vm_agent_version: The VM Agent full version.
:type vm_agent_version: str
:param extension_handlers: The virtual machine extension handler instance
view.
:type extension_handlers:
list[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineExtensionHandlerInstanceView]
:param statuses: The resource status information.
:type statuses:
list[~azure.mgmt.compute.v2016_03_30.models.InstanceViewStatus]
"""
_attribute_map = {
'vm_agent_version': {'key': 'vmAgentVersion', 'type': 'str'},
'extension_handlers': {'key': 'extensionHandlers', 'type': '[VirtualMachineExtensionHandlerInstanceView]'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, *, vm_agent_version: str=None, extension_handlers=None, statuses=None, **kwargs) -> None:
super(VirtualMachineAgentInstanceView, self).__init__(**kwargs)
self.vm_agent_version = vm_agent_version
self.extension_handlers = extension_handlers
self.statuses = statuses
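# --- Illustrative sketch (not part of the generated SDK file) ---
# The "_py3" model uses keyword-only constructor arguments; a minimal direct
# construction might look like this (the version string is an arbitrary example).
if __name__ == "__main__":
    agent_view = VirtualMachineAgentInstanceView(vm_agent_version="2.7.41491.901")
    print(agent_view.vm_agent_version)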
avg_line_length 42.4 | max_line_length 115 | alphanum_fraction 0.664505

hexsha e84c66c0bdd05e5a00e446840fc4e50e173527c6 | size 37,335 | ext py | lang Python
max_stars: Lib/unittest/test/testmock/testasync.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | ["CNRI-Python-GPL-Compatible"] | count 2,441 | events 2020-07-31T06:45:53.000Z – 2022-03-30T15:56:49.000Z
max_issues: Lib/unittest/test/testmock/testasync.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | ["CNRI-Python-GPL-Compatible"] | count 238 | events 2020-10-21T04:54:00.000Z – 2022-03-31T21:49:03.000Z
max_forks: Lib/unittest/test/testmock/testasync.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | ["CNRI-Python-GPL-Compatible"] | count 93 | events 2020-08-09T12:00:17.000Z – 2022-03-25T07:57:24.000Z
content:
import asyncio
import inspect
import re
import unittest
from unittest.mock import (ANY, call, AsyncMock, patch, MagicMock, Mock,
create_autospec, sentinel, _CallList)
def tearDownModule():
asyncio.set_event_loop_policy(None)
class AsyncClass:
def __init__(self):
pass
async def async_method(self):
pass
def normal_method(self):
pass
@classmethod
async def async_class_method(cls):
pass
@staticmethod
async def async_static_method():
pass
class AwaitableClass:
def __await__(self):
yield
async def async_func():
pass
async def async_func_args(a, b, *, c):
pass
def normal_func():
pass
class NormalClass(object):
def a(self):
pass
async_foo_name = f'{__name__}.AsyncClass'
normal_foo_name = f'{__name__}.NormalClass'
class AsyncPatchDecoratorTest(unittest.TestCase):
def test_is_coroutine_function_patch(self):
@patch.object(AsyncClass, 'async_method')
def test_async(mock_method):
self.assertTrue(asyncio.iscoroutinefunction(mock_method))
test_async()
def test_is_async_patch(self):
@patch.object(AsyncClass, 'async_method')
def test_async(mock_method):
m = mock_method()
self.assertTrue(inspect.isawaitable(m))
asyncio.run(m)
@patch(f'{async_foo_name}.async_method')
def test_no_parent_attribute(mock_method):
m = mock_method()
self.assertTrue(inspect.isawaitable(m))
asyncio.run(m)
test_async()
test_no_parent_attribute()
def test_is_AsyncMock_patch(self):
@patch.object(AsyncClass, 'async_method')
def test_async(mock_method):
self.assertIsInstance(mock_method, AsyncMock)
test_async()
def test_is_AsyncMock_patch_staticmethod(self):
@patch.object(AsyncClass, 'async_static_method')
def test_async(mock_method):
self.assertIsInstance(mock_method, AsyncMock)
test_async()
def test_is_AsyncMock_patch_classmethod(self):
@patch.object(AsyncClass, 'async_class_method')
def test_async(mock_method):
self.assertIsInstance(mock_method, AsyncMock)
test_async()
def test_async_def_patch(self):
@patch(f"{__name__}.async_func", return_value=1)
@patch(f"{__name__}.async_func_args", return_value=2)
async def test_async(func_args_mock, func_mock):
self.assertEqual(func_args_mock._mock_name, "async_func_args")
self.assertEqual(func_mock._mock_name, "async_func")
self.assertIsInstance(async_func, AsyncMock)
self.assertIsInstance(async_func_args, AsyncMock)
self.assertEqual(await async_func(), 1)
self.assertEqual(await async_func_args(1, 2, c=3), 2)
asyncio.run(test_async())
self.assertTrue(inspect.iscoroutinefunction(async_func))
class AsyncPatchCMTest(unittest.TestCase):
def test_is_async_function_cm(self):
def test_async():
with patch.object(AsyncClass, 'async_method') as mock_method:
self.assertTrue(asyncio.iscoroutinefunction(mock_method))
test_async()
def test_is_async_cm(self):
def test_async():
with patch.object(AsyncClass, 'async_method') as mock_method:
m = mock_method()
self.assertTrue(inspect.isawaitable(m))
asyncio.run(m)
test_async()
def test_is_AsyncMock_cm(self):
def test_async():
with patch.object(AsyncClass, 'async_method') as mock_method:
self.assertIsInstance(mock_method, AsyncMock)
test_async()
def test_async_def_cm(self):
async def test_async():
with patch(f"{__name__}.async_func", AsyncMock()):
self.assertIsInstance(async_func, AsyncMock)
self.assertTrue(inspect.iscoroutinefunction(async_func))
asyncio.run(test_async())
class AsyncMockTest(unittest.TestCase):
def test_iscoroutinefunction_default(self):
mock = AsyncMock()
self.assertTrue(asyncio.iscoroutinefunction(mock))
def test_iscoroutinefunction_function(self):
async def foo(): pass
mock = AsyncMock(foo)
self.assertTrue(asyncio.iscoroutinefunction(mock))
self.assertTrue(inspect.iscoroutinefunction(mock))
def test_isawaitable(self):
mock = AsyncMock()
m = mock()
self.assertTrue(inspect.isawaitable(m))
asyncio.run(m)
self.assertIn('assert_awaited', dir(mock))
def test_iscoroutinefunction_normal_function(self):
def foo(): pass
mock = AsyncMock(foo)
self.assertTrue(asyncio.iscoroutinefunction(mock))
self.assertTrue(inspect.iscoroutinefunction(mock))
def test_future_isfuture(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
fut = asyncio.Future()
loop.stop()
loop.close()
mock = AsyncMock(fut)
self.assertIsInstance(mock, asyncio.Future)
class AsyncAutospecTest(unittest.TestCase):
def test_is_AsyncMock_patch(self):
@patch(async_foo_name, autospec=True)
def test_async(mock_method):
self.assertIsInstance(mock_method.async_method, AsyncMock)
self.assertIsInstance(mock_method, MagicMock)
@patch(async_foo_name, autospec=True)
def test_normal_method(mock_method):
self.assertIsInstance(mock_method.normal_method, MagicMock)
test_async()
test_normal_method()
def test_create_autospec_instance(self):
with self.assertRaises(RuntimeError):
create_autospec(async_func, instance=True)
def test_create_autospec_awaitable_class(self):
awaitable_mock = create_autospec(spec=AwaitableClass())
self.assertIsInstance(create_autospec(awaitable_mock), AsyncMock)
def test_create_autospec(self):
spec = create_autospec(async_func_args)
awaitable = spec(1, 2, c=3)
async def main():
await awaitable
self.assertEqual(spec.await_count, 0)
self.assertIsNone(spec.await_args)
self.assertEqual(spec.await_args_list, [])
spec.assert_not_awaited()
asyncio.run(main())
self.assertTrue(asyncio.iscoroutinefunction(spec))
self.assertTrue(asyncio.iscoroutine(awaitable))
self.assertEqual(spec.await_count, 1)
self.assertEqual(spec.await_args, call(1, 2, c=3))
self.assertEqual(spec.await_args_list, [call(1, 2, c=3)])
spec.assert_awaited_once()
spec.assert_awaited_once_with(1, 2, c=3)
spec.assert_awaited_with(1, 2, c=3)
spec.assert_awaited()
def test_patch_with_autospec(self):
async def test_async():
with patch(f"{__name__}.async_func_args", autospec=True) as mock_method:
awaitable = mock_method(1, 2, c=3)
self.assertIsInstance(mock_method.mock, AsyncMock)
self.assertTrue(asyncio.iscoroutinefunction(mock_method))
self.assertTrue(asyncio.iscoroutine(awaitable))
self.assertTrue(inspect.isawaitable(awaitable))
# Verify the default values during mock setup
self.assertEqual(mock_method.await_count, 0)
self.assertEqual(mock_method.await_args_list, [])
self.assertIsNone(mock_method.await_args)
mock_method.assert_not_awaited()
await awaitable
self.assertEqual(mock_method.await_count, 1)
self.assertEqual(mock_method.await_args, call(1, 2, c=3))
self.assertEqual(mock_method.await_args_list, [call(1, 2, c=3)])
mock_method.assert_awaited_once()
mock_method.assert_awaited_once_with(1, 2, c=3)
mock_method.assert_awaited_with(1, 2, c=3)
mock_method.assert_awaited()
mock_method.reset_mock()
self.assertEqual(mock_method.await_count, 0)
self.assertIsNone(mock_method.await_args)
self.assertEqual(mock_method.await_args_list, [])
asyncio.run(test_async())
class AsyncSpecTest(unittest.TestCase):
def test_spec_normal_methods_on_class(self):
def inner_test(mock_type):
mock = mock_type(AsyncClass)
self.assertIsInstance(mock.async_method, AsyncMock)
self.assertIsInstance(mock.normal_method, MagicMock)
for mock_type in [AsyncMock, MagicMock]:
with self.subTest(f"test method types with {mock_type}"):
inner_test(mock_type)
def test_spec_normal_methods_on_class_with_mock(self):
mock = Mock(AsyncClass)
self.assertIsInstance(mock.async_method, AsyncMock)
self.assertIsInstance(mock.normal_method, Mock)
def test_spec_mock_type_kw(self):
def inner_test(mock_type):
async_mock = mock_type(spec=async_func)
self.assertIsInstance(async_mock, mock_type)
with self.assertWarns(RuntimeWarning):
# Will raise a warning because never awaited
self.assertTrue(inspect.isawaitable(async_mock()))
sync_mock = mock_type(spec=normal_func)
self.assertIsInstance(sync_mock, mock_type)
for mock_type in [AsyncMock, MagicMock, Mock]:
with self.subTest(f"test spec kwarg with {mock_type}"):
inner_test(mock_type)
def test_spec_mock_type_positional(self):
def inner_test(mock_type):
async_mock = mock_type(async_func)
self.assertIsInstance(async_mock, mock_type)
with self.assertWarns(RuntimeWarning):
# Will raise a warning because never awaited
self.assertTrue(inspect.isawaitable(async_mock()))
sync_mock = mock_type(normal_func)
self.assertIsInstance(sync_mock, mock_type)
for mock_type in [AsyncMock, MagicMock, Mock]:
with self.subTest(f"test spec positional with {mock_type}"):
inner_test(mock_type)
def test_spec_as_normal_kw_AsyncMock(self):
mock = AsyncMock(spec=normal_func)
self.assertIsInstance(mock, AsyncMock)
m = mock()
self.assertTrue(inspect.isawaitable(m))
asyncio.run(m)
def test_spec_as_normal_positional_AsyncMock(self):
mock = AsyncMock(normal_func)
self.assertIsInstance(mock, AsyncMock)
m = mock()
self.assertTrue(inspect.isawaitable(m))
asyncio.run(m)
def test_spec_async_mock(self):
@patch.object(AsyncClass, 'async_method', spec=True)
def test_async(mock_method):
self.assertIsInstance(mock_method, AsyncMock)
test_async()
def test_spec_parent_not_async_attribute_is(self):
@patch(async_foo_name, spec=True)
def test_async(mock_method):
self.assertIsInstance(mock_method, MagicMock)
self.assertIsInstance(mock_method.async_method, AsyncMock)
test_async()
def test_target_async_spec_not(self):
@patch.object(AsyncClass, 'async_method', spec=NormalClass.a)
def test_async_attribute(mock_method):
self.assertIsInstance(mock_method, MagicMock)
self.assertFalse(inspect.iscoroutine(mock_method))
self.assertFalse(inspect.isawaitable(mock_method))
test_async_attribute()
def test_target_not_async_spec_is(self):
@patch.object(NormalClass, 'a', spec=async_func)
def test_attribute_not_async_spec_is(mock_async_func):
self.assertIsInstance(mock_async_func, AsyncMock)
test_attribute_not_async_spec_is()
def test_spec_async_attributes(self):
@patch(normal_foo_name, spec=AsyncClass)
def test_async_attributes_coroutines(MockNormalClass):
self.assertIsInstance(MockNormalClass.async_method, AsyncMock)
self.assertIsInstance(MockNormalClass, MagicMock)
test_async_attributes_coroutines()
class AsyncSpecSetTest(unittest.TestCase):
def test_is_AsyncMock_patch(self):
@patch.object(AsyncClass, 'async_method', spec_set=True)
def test_async(async_method):
self.assertIsInstance(async_method, AsyncMock)
def test_is_async_AsyncMock(self):
mock = AsyncMock(spec_set=AsyncClass.async_method)
self.assertTrue(asyncio.iscoroutinefunction(mock))
self.assertIsInstance(mock, AsyncMock)
def test_is_child_AsyncMock(self):
mock = MagicMock(spec_set=AsyncClass)
self.assertTrue(asyncio.iscoroutinefunction(mock.async_method))
self.assertFalse(asyncio.iscoroutinefunction(mock.normal_method))
self.assertIsInstance(mock.async_method, AsyncMock)
self.assertIsInstance(mock.normal_method, MagicMock)
self.assertIsInstance(mock, MagicMock)
def test_magicmock_lambda_spec(self):
mock_obj = MagicMock()
mock_obj.mock_func = MagicMock(spec=lambda x: x)
with patch.object(mock_obj, "mock_func") as cm:
self.assertIsInstance(cm, MagicMock)
class AsyncArguments(unittest.IsolatedAsyncioTestCase):
async def test_add_return_value(self):
async def addition(self, var):
return var + 1
mock = AsyncMock(addition, return_value=10)
output = await mock(5)
self.assertEqual(output, 10)
async def test_add_side_effect_exception(self):
async def addition(var):
return var + 1
mock = AsyncMock(addition, side_effect=Exception('err'))
with self.assertRaises(Exception):
await mock(5)
async def test_add_side_effect_coroutine(self):
async def addition(var):
return var + 1
mock = AsyncMock(side_effect=addition)
result = await mock(5)
self.assertEqual(result, 6)
async def test_add_side_effect_normal_function(self):
def addition(var):
return var + 1
mock = AsyncMock(side_effect=addition)
result = await mock(5)
self.assertEqual(result, 6)
async def test_add_side_effect_iterable(self):
vals = [1, 2, 3]
mock = AsyncMock(side_effect=vals)
for item in vals:
self.assertEqual(await mock(), item)
with self.assertRaises(StopAsyncIteration) as e:
await mock()
async def test_add_side_effect_exception_iterable(self):
class SampleException(Exception):
pass
vals = [1, SampleException("foo")]
mock = AsyncMock(side_effect=vals)
self.assertEqual(await mock(), 1)
with self.assertRaises(SampleException) as e:
await mock()
async def test_return_value_AsyncMock(self):
value = AsyncMock(return_value=10)
mock = AsyncMock(return_value=value)
result = await mock()
self.assertIs(result, value)
async def test_return_value_awaitable(self):
fut = asyncio.Future()
fut.set_result(None)
mock = AsyncMock(return_value=fut)
result = await mock()
self.assertIsInstance(result, asyncio.Future)
async def test_side_effect_awaitable_values(self):
fut = asyncio.Future()
fut.set_result(None)
mock = AsyncMock(side_effect=[fut])
result = await mock()
self.assertIsInstance(result, asyncio.Future)
with self.assertRaises(StopAsyncIteration):
await mock()
async def test_side_effect_is_AsyncMock(self):
effect = AsyncMock(return_value=10)
mock = AsyncMock(side_effect=effect)
result = await mock()
self.assertEqual(result, 10)
async def test_wraps_coroutine(self):
value = asyncio.Future()
ran = False
async def inner():
nonlocal ran
ran = True
return value
mock = AsyncMock(wraps=inner)
result = await mock()
self.assertEqual(result, value)
mock.assert_awaited()
self.assertTrue(ran)
async def test_wraps_normal_function(self):
value = 1
ran = False
def inner():
nonlocal ran
ran = True
return value
mock = AsyncMock(wraps=inner)
result = await mock()
self.assertEqual(result, value)
mock.assert_awaited()
self.assertTrue(ran)
async def test_await_args_list_order(self):
async_mock = AsyncMock()
mock2 = async_mock(2)
mock1 = async_mock(1)
await mock1
await mock2
async_mock.assert_has_awaits([call(1), call(2)])
self.assertEqual(async_mock.await_args_list, [call(1), call(2)])
self.assertEqual(async_mock.call_args_list, [call(2), call(1)])
class AsyncMagicMethods(unittest.TestCase):
def test_async_magic_methods_return_async_mocks(self):
m_mock = MagicMock()
self.assertIsInstance(m_mock.__aenter__, AsyncMock)
self.assertIsInstance(m_mock.__aexit__, AsyncMock)
self.assertIsInstance(m_mock.__anext__, AsyncMock)
# __aiter__ is actually a synchronous object
# so should return a MagicMock
self.assertIsInstance(m_mock.__aiter__, MagicMock)
def test_sync_magic_methods_return_magic_mocks(self):
a_mock = AsyncMock()
self.assertIsInstance(a_mock.__enter__, MagicMock)
self.assertIsInstance(a_mock.__exit__, MagicMock)
self.assertIsInstance(a_mock.__next__, MagicMock)
self.assertIsInstance(a_mock.__len__, MagicMock)
def test_magicmock_has_async_magic_methods(self):
m_mock = MagicMock()
self.assertTrue(hasattr(m_mock, "__aenter__"))
self.assertTrue(hasattr(m_mock, "__aexit__"))
self.assertTrue(hasattr(m_mock, "__anext__"))
def test_asyncmock_has_sync_magic_methods(self):
a_mock = AsyncMock()
self.assertTrue(hasattr(a_mock, "__enter__"))
self.assertTrue(hasattr(a_mock, "__exit__"))
self.assertTrue(hasattr(a_mock, "__next__"))
self.assertTrue(hasattr(a_mock, "__len__"))
def test_magic_methods_are_async_functions(self):
m_mock = MagicMock()
self.assertIsInstance(m_mock.__aenter__, AsyncMock)
self.assertIsInstance(m_mock.__aexit__, AsyncMock)
# AsyncMocks are also coroutine functions
self.assertTrue(asyncio.iscoroutinefunction(m_mock.__aenter__))
self.assertTrue(asyncio.iscoroutinefunction(m_mock.__aexit__))
class AsyncContextManagerTest(unittest.TestCase):
class WithAsyncContextManager:
async def __aenter__(self, *args, **kwargs):
self.entered = True
return self
async def __aexit__(self, *args, **kwargs):
self.exited = True
class WithSyncContextManager:
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
pass
class ProductionCode:
# Example real-world(ish) code
def __init__(self):
self.session = None
async def main(self):
async with self.session.post('https://python.org') as response:
val = await response.json()
return val
def test_set_return_value_of_aenter(self):
def inner_test(mock_type):
pc = self.ProductionCode()
pc.session = MagicMock(name='sessionmock')
cm = mock_type(name='magic_cm')
response = AsyncMock(name='response')
response.json = AsyncMock(return_value={'json': 123})
cm.__aenter__.return_value = response
pc.session.post.return_value = cm
result = asyncio.run(pc.main())
self.assertEqual(result, {'json': 123})
for mock_type in [AsyncMock, MagicMock]:
with self.subTest(f"test set return value of aenter with {mock_type}"):
inner_test(mock_type)
def test_mock_supports_async_context_manager(self):
def inner_test(mock_type):
called = False
cm = self.WithAsyncContextManager()
cm_mock = mock_type(cm)
async def use_context_manager():
nonlocal called
async with cm_mock as result:
called = True
return result
cm_result = asyncio.run(use_context_manager())
self.assertTrue(called)
self.assertTrue(cm_mock.__aenter__.called)
self.assertTrue(cm_mock.__aexit__.called)
cm_mock.__aenter__.assert_awaited()
cm_mock.__aexit__.assert_awaited()
# We mock __aenter__ so it does not return self
self.assertIsNot(cm_mock, cm_result)
for mock_type in [AsyncMock, MagicMock]:
with self.subTest(f"test context manager magics with {mock_type}"):
inner_test(mock_type)
def test_mock_customize_async_context_manager(self):
instance = self.WithAsyncContextManager()
mock_instance = MagicMock(instance)
expected_result = object()
mock_instance.__aenter__.return_value = expected_result
async def use_context_manager():
async with mock_instance as result:
return result
self.assertIs(asyncio.run(use_context_manager()), expected_result)
def test_mock_customize_async_context_manager_with_coroutine(self):
enter_called = False
exit_called = False
async def enter_coroutine(*args):
nonlocal enter_called
enter_called = True
async def exit_coroutine(*args):
nonlocal exit_called
exit_called = True
instance = self.WithAsyncContextManager()
mock_instance = MagicMock(instance)
mock_instance.__aenter__ = enter_coroutine
mock_instance.__aexit__ = exit_coroutine
async def use_context_manager():
async with mock_instance:
pass
asyncio.run(use_context_manager())
self.assertTrue(enter_called)
self.assertTrue(exit_called)
def test_context_manager_raise_exception_by_default(self):
async def raise_in(context_manager):
async with context_manager:
raise TypeError()
instance = self.WithAsyncContextManager()
mock_instance = MagicMock(instance)
with self.assertRaises(TypeError):
asyncio.run(raise_in(mock_instance))
class AsyncIteratorTest(unittest.TestCase):
class WithAsyncIterator(object):
def __init__(self):
self.items = ["foo", "NormalFoo", "baz"]
def __aiter__(self):
return self
async def __anext__(self):
try:
return self.items.pop()
except IndexError:
pass
raise StopAsyncIteration
def test_aiter_set_return_value(self):
mock_iter = AsyncMock(name="tester")
mock_iter.__aiter__.return_value = [1, 2, 3]
async def main():
return [i async for i in mock_iter]
result = asyncio.run(main())
self.assertEqual(result, [1, 2, 3])
def test_mock_aiter_and_anext_asyncmock(self):
def inner_test(mock_type):
instance = self.WithAsyncIterator()
mock_instance = mock_type(instance)
            # Check that the mock and the real thing behave the same
# __aiter__ is not actually async, so not a coroutinefunction
self.assertFalse(asyncio.iscoroutinefunction(instance.__aiter__))
self.assertFalse(asyncio.iscoroutinefunction(mock_instance.__aiter__))
# __anext__ is async
self.assertTrue(asyncio.iscoroutinefunction(instance.__anext__))
self.assertTrue(asyncio.iscoroutinefunction(mock_instance.__anext__))
for mock_type in [AsyncMock, MagicMock]:
with self.subTest(f"test aiter and anext corourtine with {mock_type}"):
inner_test(mock_type)
def test_mock_async_for(self):
async def iterate(iterator):
accumulator = []
async for item in iterator:
accumulator.append(item)
return accumulator
expected = ["FOO", "BAR", "BAZ"]
def test_default(mock_type):
mock_instance = mock_type(self.WithAsyncIterator())
self.assertEqual(asyncio.run(iterate(mock_instance)), [])
def test_set_return_value(mock_type):
mock_instance = mock_type(self.WithAsyncIterator())
mock_instance.__aiter__.return_value = expected[:]
self.assertEqual(asyncio.run(iterate(mock_instance)), expected)
def test_set_return_value_iter(mock_type):
mock_instance = mock_type(self.WithAsyncIterator())
mock_instance.__aiter__.return_value = iter(expected[:])
self.assertEqual(asyncio.run(iterate(mock_instance)), expected)
for mock_type in [AsyncMock, MagicMock]:
with self.subTest(f"default value with {mock_type}"):
test_default(mock_type)
with self.subTest(f"set return_value with {mock_type}"):
test_set_return_value(mock_type)
with self.subTest(f"set return_value iterator with {mock_type}"):
test_set_return_value_iter(mock_type)
class AsyncMockAssert(unittest.TestCase):
def setUp(self):
self.mock = AsyncMock()
async def _runnable_test(self, *args, **kwargs):
await self.mock(*args, **kwargs)
async def _await_coroutine(self, coroutine):
return await coroutine
def test_assert_called_but_not_awaited(self):
mock = AsyncMock(AsyncClass)
with self.assertWarns(RuntimeWarning):
# Will raise a warning because never awaited
mock.async_method()
self.assertTrue(asyncio.iscoroutinefunction(mock.async_method))
mock.async_method.assert_called()
mock.async_method.assert_called_once()
mock.async_method.assert_called_once_with()
with self.assertRaises(AssertionError):
mock.assert_awaited()
with self.assertRaises(AssertionError):
mock.async_method.assert_awaited()
def test_assert_called_then_awaited(self):
mock = AsyncMock(AsyncClass)
mock_coroutine = mock.async_method()
mock.async_method.assert_called()
mock.async_method.assert_called_once()
mock.async_method.assert_called_once_with()
with self.assertRaises(AssertionError):
mock.async_method.assert_awaited()
asyncio.run(self._await_coroutine(mock_coroutine))
# Assert we haven't re-called the function
mock.async_method.assert_called_once()
mock.async_method.assert_awaited()
mock.async_method.assert_awaited_once()
mock.async_method.assert_awaited_once_with()
def test_assert_called_and_awaited_at_same_time(self):
with self.assertRaises(AssertionError):
self.mock.assert_awaited()
with self.assertRaises(AssertionError):
self.mock.assert_called()
asyncio.run(self._runnable_test())
self.mock.assert_called_once()
self.mock.assert_awaited_once()
def test_assert_called_twice_and_awaited_once(self):
mock = AsyncMock(AsyncClass)
coroutine = mock.async_method()
with self.assertWarns(RuntimeWarning):
# The first call will be awaited so no warning there
# But this call will never get awaited, so it will warn here
mock.async_method()
with self.assertRaises(AssertionError):
mock.async_method.assert_awaited()
mock.async_method.assert_called()
asyncio.run(self._await_coroutine(coroutine))
mock.async_method.assert_awaited()
mock.async_method.assert_awaited_once()
def test_assert_called_once_and_awaited_twice(self):
mock = AsyncMock(AsyncClass)
coroutine = mock.async_method()
mock.async_method.assert_called_once()
asyncio.run(self._await_coroutine(coroutine))
with self.assertRaises(RuntimeError):
# Cannot reuse already awaited coroutine
asyncio.run(self._await_coroutine(coroutine))
mock.async_method.assert_awaited()
def test_assert_awaited_but_not_called(self):
with self.assertRaises(AssertionError):
self.mock.assert_awaited()
with self.assertRaises(AssertionError):
self.mock.assert_called()
with self.assertRaises(TypeError):
# You cannot await an AsyncMock, it must be a coroutine
asyncio.run(self._await_coroutine(self.mock))
with self.assertRaises(AssertionError):
self.mock.assert_awaited()
with self.assertRaises(AssertionError):
self.mock.assert_called()
def test_assert_has_calls_not_awaits(self):
kalls = [call('foo')]
with self.assertWarns(RuntimeWarning):
# Will raise a warning because never awaited
self.mock('foo')
self.mock.assert_has_calls(kalls)
with self.assertRaises(AssertionError):
self.mock.assert_has_awaits(kalls)
def test_assert_has_mock_calls_on_async_mock_no_spec(self):
with self.assertWarns(RuntimeWarning):
# Will raise a warning because never awaited
self.mock()
kalls_empty = [('', (), {})]
self.assertEqual(self.mock.mock_calls, kalls_empty)
with self.assertWarns(RuntimeWarning):
# Will raise a warning because never awaited
self.mock('foo')
self.mock('baz')
mock_kalls = ([call(), call('foo'), call('baz')])
self.assertEqual(self.mock.mock_calls, mock_kalls)
def test_assert_has_mock_calls_on_async_mock_with_spec(self):
a_class_mock = AsyncMock(AsyncClass)
with self.assertWarns(RuntimeWarning):
# Will raise a warning because never awaited
a_class_mock.async_method()
kalls_empty = [('', (), {})]
self.assertEqual(a_class_mock.async_method.mock_calls, kalls_empty)
self.assertEqual(a_class_mock.mock_calls, [call.async_method()])
with self.assertWarns(RuntimeWarning):
# Will raise a warning because never awaited
a_class_mock.async_method(1, 2, 3, a=4, b=5)
method_kalls = [call(), call(1, 2, 3, a=4, b=5)]
mock_kalls = [call.async_method(), call.async_method(1, 2, 3, a=4, b=5)]
self.assertEqual(a_class_mock.async_method.mock_calls, method_kalls)
self.assertEqual(a_class_mock.mock_calls, mock_kalls)
def test_async_method_calls_recorded(self):
with self.assertWarns(RuntimeWarning):
# Will raise warnings because never awaited
self.mock.something(3, fish=None)
self.mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(self.mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
self.assertEqual(self.mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
def test_async_arg_lists(self):
def assert_attrs(mock):
names = ('call_args_list', 'method_calls', 'mock_calls')
for name in names:
attr = getattr(mock, name)
self.assertIsInstance(attr, _CallList)
self.assertIsInstance(attr, list)
self.assertEqual(attr, [])
assert_attrs(self.mock)
with self.assertWarns(RuntimeWarning):
# Will raise warnings because never awaited
self.mock()
self.mock(1, 2)
self.mock(a=3)
self.mock.reset_mock()
assert_attrs(self.mock)
a_mock = AsyncMock(AsyncClass)
with self.assertWarns(RuntimeWarning):
# Will raise warnings because never awaited
a_mock.async_method()
a_mock.async_method(1, a=3)
a_mock.reset_mock()
assert_attrs(a_mock)
def test_assert_awaited(self):
with self.assertRaises(AssertionError):
self.mock.assert_awaited()
asyncio.run(self._runnable_test())
self.mock.assert_awaited()
def test_assert_awaited_once(self):
with self.assertRaises(AssertionError):
self.mock.assert_awaited_once()
asyncio.run(self._runnable_test())
self.mock.assert_awaited_once()
asyncio.run(self._runnable_test())
with self.assertRaises(AssertionError):
self.mock.assert_awaited_once()
def test_assert_awaited_with(self):
msg = 'Not awaited'
with self.assertRaisesRegex(AssertionError, msg):
self.mock.assert_awaited_with('foo')
asyncio.run(self._runnable_test())
msg = 'expected await not found'
with self.assertRaisesRegex(AssertionError, msg):
self.mock.assert_awaited_with('foo')
asyncio.run(self._runnable_test('foo'))
self.mock.assert_awaited_with('foo')
asyncio.run(self._runnable_test('SomethingElse'))
with self.assertRaises(AssertionError):
self.mock.assert_awaited_with('foo')
def test_assert_awaited_once_with(self):
with self.assertRaises(AssertionError):
self.mock.assert_awaited_once_with('foo')
asyncio.run(self._runnable_test('foo'))
self.mock.assert_awaited_once_with('foo')
asyncio.run(self._runnable_test('foo'))
with self.assertRaises(AssertionError):
self.mock.assert_awaited_once_with('foo')
def test_assert_any_wait(self):
with self.assertRaises(AssertionError):
self.mock.assert_any_await('foo')
asyncio.run(self._runnable_test('baz'))
with self.assertRaises(AssertionError):
self.mock.assert_any_await('foo')
asyncio.run(self._runnable_test('foo'))
self.mock.assert_any_await('foo')
asyncio.run(self._runnable_test('SomethingElse'))
self.mock.assert_any_await('foo')
def test_assert_has_awaits_no_order(self):
calls = [call('foo'), call('baz')]
with self.assertRaises(AssertionError) as cm:
self.mock.assert_has_awaits(calls)
self.assertEqual(len(cm.exception.args), 1)
asyncio.run(self._runnable_test('foo'))
with self.assertRaises(AssertionError):
self.mock.assert_has_awaits(calls)
asyncio.run(self._runnable_test('foo'))
with self.assertRaises(AssertionError):
self.mock.assert_has_awaits(calls)
asyncio.run(self._runnable_test('baz'))
self.mock.assert_has_awaits(calls)
asyncio.run(self._runnable_test('SomethingElse'))
self.mock.assert_has_awaits(calls)
def test_assert_has_awaits_ordered(self):
calls = [call('foo'), call('baz')]
with self.assertRaises(AssertionError):
self.mock.assert_has_awaits(calls, any_order=True)
asyncio.run(self._runnable_test('baz'))
with self.assertRaises(AssertionError):
self.mock.assert_has_awaits(calls, any_order=True)
asyncio.run(self._runnable_test('bamf'))
with self.assertRaises(AssertionError):
self.mock.assert_has_awaits(calls, any_order=True)
asyncio.run(self._runnable_test('foo'))
self.mock.assert_has_awaits(calls, any_order=True)
asyncio.run(self._runnable_test('qux'))
self.mock.assert_has_awaits(calls, any_order=True)
def test_assert_not_awaited(self):
self.mock.assert_not_awaited()
asyncio.run(self._runnable_test())
with self.assertRaises(AssertionError):
self.mock.assert_not_awaited()
def test_assert_has_awaits_not_matching_spec_error(self):
async def f(x=None): pass
self.mock = AsyncMock(spec=f)
asyncio.run(self._runnable_test(1))
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape('Awaits not found.\n'
'Expected: [call()]\n'
'Actual: [call(1)]'))) as cm:
self.mock.assert_has_awaits([call()])
self.assertIsNone(cm.exception.__cause__)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape(
'Error processing expected awaits.\n'
"Errors: [None, TypeError('too many positional "
"arguments')]\n"
'Expected: [call(), call(1, 2)]\n'
'Actual: [call(1)]'))) as cm:
self.mock.assert_has_awaits([call(), call(1, 2)])
self.assertIsInstance(cm.exception.__cause__, TypeError)
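# --- Illustrative sketch (not part of the CPython test suite) ---
# A condensed example of the pattern exercised above: patch an async method with an
# AsyncMock, await it, then assert on the await. "Service" and "fetch" are invented
# names; unittest, AsyncMock and patch are already imported at the top of this file.
class Service:
    async def fetch(self, key):
        raise NotImplementedError
class ServiceFetchTest(unittest.IsolatedAsyncioTestCase):
    async def test_fetch_is_mocked(self):
        with patch.object(Service, "fetch", new_callable=AsyncMock, return_value=42):
            result = await Service().fetch("answer")
            self.assertEqual(result, 42)
            Service.fetch.assert_awaited_once_with("answer")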
avg_line_length 35.693117 | max_line_length 84 | alphanum_fraction 0.648078

hexsha d8d59c0ac46f2ab2bc9dd825cf7819b4f192a62d | size 9,842 | ext py | lang Python
max_stars: Examples/Python/models/customer_create.py | Duett-AS/DuettAPI | ffd2f6060d6cfcc62e18a1c3b00ba1db58d5d289 | ["MIT"] | count 4 | events 2021-11-25T10:25:02.000Z – 2021-12-01T09:37:55.000Z
max_issues: Examples/Python/models/customer_create.py | Duett-AS/DuettAPI | ffd2f6060d6cfcc62e18a1c3b00ba1db58d5d289 | ["MIT"] | count null | events null – null
max_forks: Examples/Python/models/customer_create.py | Duett-AS/DuettAPI | ffd2f6060d6cfcc62e18a1c3b00ba1db58d5d289 | ["MIT"] | count 2 | events 2021-11-26T11:23:26.000Z – 2021-12-09T10:44:34.000Z
content:
# coding: utf-8
"""
OpenApi
##  _How to take the Duett api in use_ To be able to use the API, you must have an interaction key and a client key. Eksample curl: ```swift curl -X 'GET' \\ 'https://api.duett.no/article/v1 \\ -H 'accept: application/json' \\ -H 'X-Api-Integration-Key: 89ff1c42-9d90-435a-8a94-20207bc06e1a' \\ -H 'X-Api-Client-Key: 7896feb3-aaa2-4fd2-aaa2-c69de5fd1e5f' ``` ##### [Metode description][1] ##### [Filtering data in openApi][2] ### Use a code generator as to auto-generate api client: ##### [NSwagStudio](https://github.com/RicoSuter/NSwag/wiki/NSwagStudio) ##### [Visual studio add-in](https://marketplace.visualstudio.com/items?itemName=ChristianResmaHelle.ApiClientCodeGenerator) \\ ##### [Online code generator for many languages and versions](https://github.com/swagger-api/swagger-codegen) *** [1]: ../metods-help.html [2]: ../query-help.html # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CustomerCreate(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'external_id': 'str',
'vat_number': 'str',
'bank_account_number': 'str',
'invoice_email': 'str',
'hour_attestation_mobile': 'str',
'address': 'StreetAddress',
'contact_info': 'Contact',
'contact_person': 'ContactPerson'
}
attribute_map = {
'external_id': 'externalId',
'vat_number': 'vatNumber',
'bank_account_number': 'bankAccountNumber',
'invoice_email': 'invoiceEmail',
'hour_attestation_mobile': 'hourAttestationMobile',
'address': 'address',
'contact_info': 'contactInfo',
'contact_person': 'contactPerson'
}
def __init__(self, external_id=None, vat_number=None, bank_account_number=None, invoice_email=None, hour_attestation_mobile=None, address=None, contact_info=None, contact_person=None): # noqa: E501
"""CustomerCreate - a model defined in Swagger""" # noqa: E501
self._external_id = None
self._vat_number = None
self._bank_account_number = None
self._invoice_email = None
self._hour_attestation_mobile = None
self._address = None
self._contact_info = None
self._contact_person = None
self.discriminator = None
self.external_id = external_id
if vat_number is not None:
self.vat_number = vat_number
if bank_account_number is not None:
self.bank_account_number = bank_account_number
if invoice_email is not None:
self.invoice_email = invoice_email
if hour_attestation_mobile is not None:
self.hour_attestation_mobile = hour_attestation_mobile
self.address = address
self.contact_info = contact_info
if contact_person is not None:
self.contact_person = contact_person
@property
def external_id(self):
"""Gets the external_id of this CustomerCreate. # noqa: E501
:return: The external_id of this CustomerCreate. # noqa: E501
:rtype: str
"""
return self._external_id
@external_id.setter
def external_id(self, external_id):
"""Sets the external_id of this CustomerCreate.
:param external_id: The external_id of this CustomerCreate. # noqa: E501
:type: str
"""
if external_id is None:
raise ValueError("Invalid value for `external_id`, must not be `None`") # noqa: E501
self._external_id = external_id
@property
def vat_number(self):
"""Gets the vat_number of this CustomerCreate. # noqa: E501
:return: The vat_number of this CustomerCreate. # noqa: E501
:rtype: str
"""
return self._vat_number
@vat_number.setter
def vat_number(self, vat_number):
"""Sets the vat_number of this CustomerCreate.
:param vat_number: The vat_number of this CustomerCreate. # noqa: E501
:type: str
"""
self._vat_number = vat_number
@property
def bank_account_number(self):
"""Gets the bank_account_number of this CustomerCreate. # noqa: E501
:return: The bank_account_number of this CustomerCreate. # noqa: E501
:rtype: str
"""
return self._bank_account_number
@bank_account_number.setter
def bank_account_number(self, bank_account_number):
"""Sets the bank_account_number of this CustomerCreate.
:param bank_account_number: The bank_account_number of this CustomerCreate. # noqa: E501
:type: str
"""
self._bank_account_number = bank_account_number
@property
def invoice_email(self):
"""Gets the invoice_email of this CustomerCreate. # noqa: E501
:return: The invoice_email of this CustomerCreate. # noqa: E501
:rtype: str
"""
return self._invoice_email
@invoice_email.setter
def invoice_email(self, invoice_email):
"""Sets the invoice_email of this CustomerCreate.
:param invoice_email: The invoice_email of this CustomerCreate. # noqa: E501
:type: str
"""
self._invoice_email = invoice_email
@property
def hour_attestation_mobile(self):
"""Gets the hour_attestation_mobile of this CustomerCreate. # noqa: E501
:return: The hour_attestation_mobile of this CustomerCreate. # noqa: E501
:rtype: str
"""
return self._hour_attestation_mobile
@hour_attestation_mobile.setter
def hour_attestation_mobile(self, hour_attestation_mobile):
"""Sets the hour_attestation_mobile of this CustomerCreate.
:param hour_attestation_mobile: The hour_attestation_mobile of this CustomerCreate. # noqa: E501
:type: str
"""
self._hour_attestation_mobile = hour_attestation_mobile
@property
def address(self):
"""Gets the address of this CustomerCreate. # noqa: E501
:return: The address of this CustomerCreate. # noqa: E501
:rtype: StreetAddress
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this CustomerCreate.
:param address: The address of this CustomerCreate. # noqa: E501
:type: StreetAddress
"""
if address is None:
raise ValueError("Invalid value for `address`, must not be `None`") # noqa: E501
self._address = address
@property
def contact_info(self):
"""Gets the contact_info of this CustomerCreate. # noqa: E501
:return: The contact_info of this CustomerCreate. # noqa: E501
:rtype: Contact
"""
return self._contact_info
@contact_info.setter
def contact_info(self, contact_info):
"""Sets the contact_info of this CustomerCreate.
:param contact_info: The contact_info of this CustomerCreate. # noqa: E501
:type: Contact
"""
if contact_info is None:
raise ValueError("Invalid value for `contact_info`, must not be `None`") # noqa: E501
self._contact_info = contact_info
@property
def contact_person(self):
"""Gets the contact_person of this CustomerCreate. # noqa: E501
:return: The contact_person of this CustomerCreate. # noqa: E501
:rtype: ContactPerson
"""
return self._contact_person
@contact_person.setter
def contact_person(self, contact_person):
"""Sets the contact_person of this CustomerCreate.
:param contact_person: The contact_person of this CustomerCreate. # noqa: E501
:type: ContactPerson
"""
self._contact_person = contact_person
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CustomerCreate, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CustomerCreate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
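# --- Illustrative sketch (not part of the generated model) ---
# Build a CustomerCreate and serialize it with to_dict(). external_id, address and
# contact_info must not be None; plain dicts stand in for the StreetAddress and
# Contact models defined elsewhere in the SDK, and all field values are made up.
if __name__ == "__main__":
    customer = CustomerCreate(
        external_id="CUST-1001",
        invoice_email="invoice@example.com",
        address={"addressLine1": "Example street 1", "postalCode": "0001"},
        contact_info={"email": "contact@example.com"},
    )
    print(customer.to_dict())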
avg_line_length 33.25 | max_line_length 970 | alphanum_fraction 0.625787

hexsha b7f984436e21305aa509bcada6d51ec1ae4531d0 | size 763 | ext py | lang Python
max_stars: lesson1/exercise2_1.py | mfeindt0705/ansiblenetmf | 8e1935e030c8adf621b8a7b5866f7ebc65420f09 | ["Apache-2.0"] | count null | events null – null
max_issues: lesson1/exercise2_1.py | mfeindt0705/ansiblenetmf | 8e1935e030c8adf621b8a7b5866f7ebc65420f09 | ["Apache-2.0"] | count 5 | events 2020-02-26T20:13:55.000Z – 2021-12-13T19:56:22.000Z
max_forks: lesson1/exercise2_1.py | mfeindt0705/ansiblenetmf | 8e1935e030c8adf621b8a7b5866f7ebc65420f09 | ["Apache-2.0"] | count null | events null – null
content:
#!/usr/bin/env python3
"""
define base functions for yaml file
read-file and write_file
"""
import yaml
from pprint import pprint
def read_yaml(filename):
"""
yaml file processing
function has filename as input and returns the file data processed by yaml
"""
with open(filename, 'r') as f:
        # safe_load needs no explicit Loader and will not construct arbitrary Python objects
        return yaml.safe_load(f)
def write_yaml(vdata, filename):
"""
yaml file processing
function has data structure and filename as input
data is written to file filename as a yaml formatted file
"""
    with open(filename, 'w') as f:
        yaml.dump(vdata, f, default_flow_style=False)
    return None
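# --- Illustrative sketch (not part of the original exercise) ---
# Round trip using the two helpers above: dump a small structure to disk and read it
# back. The file name and the router data are arbitrary example values.
def roundtrip_demo(tmp_filename="example_roundtrip.yml"):
    data = {"routers": [{"name": "r1", "mgmt_ip": "10.0.0.1"},
                        {"name": "r2", "mgmt_ip": "10.0.0.2"}]}
    write_yaml(data, tmp_filename)
    pprint(read_yaml(tmp_filename))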
if __name__ == "__main__":
filename = input("Bitte geben Sie den Dateinamen ein : ")
vdata = read_yaml(filename)
pprint(vdata)
avg_line_length 20.078947 | max_line_length 78 | alphanum_fraction 0.68152

hexsha de7416d92ccdf00fa437ba1a1fba0498623ed6ec | size 16,422 | ext py | lang Python
max_stars: backend/main.py | johnr0/LanguageRule | f97d0a0c560e8feffe4e4aa9eb88eeacccc3e11a | ["MIT"] | count null | events null – null
max_issues: backend/main.py | johnr0/LanguageRule | f97d0a0c560e8feffe4e4aa9eb88eeacccc3e11a | ["MIT"] | count null | events null – null
max_forks: backend/main.py | johnr0/LanguageRule | f97d0a0c560e8feffe4e4aa9eb88eeacccc3e11a | ["MIT"] | count null | events null – null
content:
from sanic import Sanic
from sanic.response import json, file, text
from nltk.corpus import wordnet as wn
import numpy as np
import stanfordnlp
import nltk
from nltk.stem import WordNetLemmatizer
from empath import Empath
import json as js
from .db import db_connect, init_db
from .random_code import get_random_code
con=db_connect()
cur=con.cursor()
# init_db(cur)
nlp = stanfordnlp.Pipeline()
wordtags = nltk.ConditionalFreqDist((w.lower(), t) for w, t in nltk.corpus.brown.tagged_words(tagset="universal"))
Lemmatizer = WordNetLemmatizer()
emp = Empath()
app = Sanic()
app.static('/bundle.js', 'dist/bundle.js')
#app.static('/', 'src')
@app.route('/')
async def index(request):
return await file('dist/index.html')
# possible TODO ? we might need to add related parameters in the route...
@app.route('/<path:path>')
async def index2(request, path):
return await file('dist/index.html')
@app.route('/query_keyphrase', methods=['POST'])
def query_keyphrase(request):
keyphrase_id = request.json['keyphrase_id']
keyphrase = con.execute('SELECT * FROM keyphrase WHERE keyphrase_id=?', (keyphrase_id,))
keyphrase = keyphrase.fetchall()
if len(keyphrase)>0:
keyphrase=keyphrase[0]
return json({'keyphrase':keyphrase[1]})
else:
return json({'keyphrase': False})
def extract_examples(concept):
hypo_list = []
for i in concept.hyponyms():
hypo_list.append(i.name())
for i in concept.hyponyms():
hypo_list = hypo_list + extract_examples(i)
hypo_list = list(set(hypo_list))
return hypo_list
def extract_examples_short(concept):
hypo_list = []
for i in concept.hyponyms():
hypo_list.append(i.name())
if len(hypo_list)>9:
return list(set(hypo_list))
for i in concept.hyponyms():
hypo_list = hypo_list + extract_examples_short(i)
if len(hypo_list)>9:
break
hypo_list = list(set(hypo_list))
return hypo_list
def example_dist_sorted(concept, distance_words, short=False):
if short==True:
examples = extract_examples_short(concept)
else:
examples = extract_examples(concept)
hypo_dict = {}
for hypo in examples:
cur_h = wn.synset(hypo)
dists = []
for distance_word in distance_words:
dists_sub=[]
cur_ds = wn.synsets(distance_word)
for cur_d in cur_ds:
dist = cur_d.path_similarity(cur_h)
# print(dist)
if dist==None:
dist = 0
dists_sub.append(dist)
dists.append(np.max(dists_sub))
hypo_dict[hypo]=np.mean(dists)
to_return = sorted(hypo_dict, key=hypo_dict.get, reverse=False)
# print(hypo_dict)
# print(to_return)
return to_return
def extract_candidate_common_parents(words):
print('start extracting candidates!')
result_common_hypernyms = []
hypernym_dict = {}
for idx1 in range(len(words)):
for idx2 in range(idx1+1, len(words)):
word_synsets1 = wn.synsets(words[idx1])
word_synsets2 = wn.synsets(words[idx2])
for word_synset1 in word_synsets1:
for word_synset2 in word_synsets2:
common_hypernyms = word_synset1.lowest_common_hypernyms(word_synset2)
for common_hypernym in common_hypernyms:
if common_hypernym.name() not in hypernym_dict:
hypernym_dict[common_hypernym.name()]=set()
hypernym_dict[common_hypernym.name()].add(word_synset1.name())
hypernym_dict[common_hypernym.name()].add(word_synset2.name())
result_common_hypernyms = result_common_hypernyms + common_hypernyms
# print(hypernym_dict)
result_common_hypernyms = list(set(result_common_hypernyms))
print(result_common_hypernyms)
# print(result_common_hypernyms)
to_return_common_hypernyms = result_common_hypernyms.copy()
# for idx1 in range(len(result_common_hypernyms)):
# for idx2 in range(idx1+1, len(result_common_hypernyms)):
# word_synset1 = result_common_hypernyms[idx1]
# word_synset2 = result_common_hypernyms[idx2]
# if len(hypernym_dict[word_synset1.name()] & hypernym_dict[word_synset2.name()])>0:
# common_hypernyms = word_synset1.lowest_common_hypernyms(word_synset2)
# for common_hypernym in common_hypernyms:
# if common_hypernym.name()==word_synset1.name():
# if word_synset2 in to_return_common_hypernyms:
# to_return_common_hypernyms.remove(word_synset2)
# elif common_hypernym.name()==word_synset2.name():
# if word_synset1 in to_return_common_hypernyms:
# to_return_common_hypernyms.remove(word_synset1)
# else:
# if word_synset1 in to_return_common_hypernyms:
# to_return_common_hypernyms.remove(word_synset1)
# if word_synset2 in to_return_common_hypernyms:
# to_return_common_hypernyms.remove(word_synset2)
# print('from', word_synset1, word_synset2, 'result', common_hypernyms)
# to_return_common_hypernyms = to_return_common_hypernyms + common_hypernyms
# to_return_common_hypernyms = list(set(to_return_common_hypernyms))
data_dict = {}
hypo_dict = {}
for hypo in to_return_common_hypernyms:
cur_h = hypo
dists = []
for distance_word in words:
dists_sub=[]
cur_ds = wn.synsets(distance_word)
for cur_d in cur_ds:
dist = cur_d.path_similarity(cur_h)
# print(dist)
if dist==None:
dist = 0
dists_sub.append(dist)
dists.append(np.max(dists_sub))
hypo_dict[hypo.name()]=np.mean(dists)
# print(hypo_dict)
to_return = sorted(hypo_dict, key=hypo_dict.get, reverse=True)
print(to_return)
return_dict_list = []
for item in to_return:
s=wn.synset(item)
each_dict = {
'name': s.name(),
'definition': s.definition(),
'examples': example_dist_sorted(s, words, True),
}
return_dict_list.append(each_dict)
print(len(return_dict_list))
return return_dict_list
@app.route('/query_pos', methods=["POST"],)
def query_pos(request):
word = request.json['word']
pos_list = list(wordtags[word])
print(pos_list)
wnword = wn.synsets(word)
for w in wnword:
cur_pos = w.pos()
if cur_pos=='n':
pos_list.append('NOUN')
elif cur_pos=='v':
pos_list.append('VERB')
elif cur_pos=='a':
pos_list.append('ADJ')
elif cur_pos=='r':
pos_list.append('ADV')
pos_list = list(set(pos_list))
return json({'query_result': pos_list})
@app.route('/query_empath_examples', methods=["POST"],)
def query_empath_examples(request):
words_passed = request.json['words']
pos = request.json['pos']
name = words_passed[len(words_passed)-1]+'_similar'
examples = emp.create_category(name, words_passed)
examples_in_pos = []
for example in examples:
pos_list = list(wordtags[example])
wnword = wn.synsets(example)
for w in wnword:
cur_pos = w.pos()
if cur_pos=='n':
pos_list.append('NOUN')
elif cur_pos=='v':
pos_list.append('VERB')
elif cur_pos=='a':
pos_list.append('ADJ')
elif cur_pos=='r':
pos_list.append('ADV')
pos_list = list(set(pos_list))
if pos in pos_list:
if pos=='VERB':
examples_in_pos.append(Lemmatizer.lemmatize(example.lower(), pos='v'))
elif pos=='NOUN':
examples_in_pos.append(Lemmatizer.lemmatize(example.lower(), pos='n'))
elif pos=='ADJ':
examples_in_pos.append(Lemmatizer.lemmatize(example.lower(), pos='a'))
elif pos=='ADV':
examples_in_pos.append(Lemmatizer.lemmatize(example.lower(), pos='r'))
else:
examples_in_pos.append(example.lower())
examples_in_pos = words_passed+examples_in_pos
examples_in_pos = list(set(examples_in_pos))
return json({'query_result': examples_in_pos})
@app.route('/query_wordnet_word', methods=["POST",])
def query_wordnet_word(request):
print("result!")
word = request.json['word']
synset = wn.synsets(word)
distance_words = [word]
return_list = []
for s in synset:
each_dict = {
'name': s.name(),
'definition': s.definition(),
'examples': example_dist_sorted(s, distance_words),
}
return_list.append(each_dict)
return json({'query_result': return_list})
@app.route("/query_wordnet_hypernym", methods=["POST",])
def query_wordnet_hypernym(request):
word = request.json['word']
word = wn.synset(word)
hypernyms = word.hypernyms()
print(request.json)
if 'example_words' in request.json:
distance_words = request.json['example_words']
distance_words.append(request.json['origin_word'])
else:
distance_words = [request.json['origin_word']]
print(distance_words)
return_list = []
for s in hypernyms:
each_dict = {
'name': s.name(),
'definition': s.definition(),
'examples': example_dist_sorted(s, distance_words),
}
return_list.append(each_dict)
return json({'query_result': return_list})
@app.route("/query_wordnet_resorted_examples", methods=["POST",])
def query_wordnet_resorted_examples(request):
word_list = request.json['list']
distance_words = request.json['example_words']
distance_words.append(request.json['origin_word'])
for idx1, sub_list in enumerate(word_list):
for idx2, item in enumerate(sub_list):
word = wn.synset(item['name'])
new_example = example_dist_sorted(word, distance_words)
word_list[idx1][idx2]['examples'] = new_example
return json({"resorted_list": word_list})
@app.route("/query_wordnet_common_hypernym", methods=["POST",])
def query_wordnet_common_hypernym(request):
words = request.json['words']
return_list = extract_candidate_common_parents(words)
return json({'query_result': return_list})
def concept_dfs(cur_concept, path, target_concept):
# print(cur_concept, path)
if target_concept.name()==cur_concept.name():
path.append(cur_concept)
return path
elif len(cur_concept.hypernyms())>0:
cur_results = []
arg_min_list = []
for hyper in cur_concept.hypernyms():
path.append(cur_concept)
# print(path_)
result = concept_dfs(hyper, path, target_concept)
# print(result)
if len(result)!=0:
cur_results.append(result)
arg_min_list.append(len(result))
if len(cur_results)>0:
# print('cur_results', cur_results)
return cur_results[np.argmin(arg_min_list)]
# print('went through all')
return []
else:
return []
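# Illustrative note: concept_dfs walks upward through the hypernym hierarchy starting from
# cur_concept and is intended to return a list of synsets forming a path that reaches
# target_concept (preferring the shortest branch explored); it returns [] when
# target_concept is not an ancestor of cur_concept.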
def find_concept_path(init_word, examples, choosen_word):
examples.append(init_word)
init_word_synsets = wn.synsets(init_word)
choosen_word_synset = wn.synset(choosen_word)
return_list = []
item_dict= {}
for init_word_synset in init_word_synsets:
dist = init_word_synset.path_similarity(choosen_word_synset)
# print(dist)
        if dist is not None:
result = concept_dfs(init_word_synset, [], choosen_word_synset)
if len(result)>0:
# print(result)
single_list = []
for r in result:
single_list.append(r.name())
if r.name() not in item_dict:
item_dict[r.name()] = {
'name':r.name(),
'definition':r.definition(),
'examples': example_dist_sorted(r,examples),
}
while len(r.hypernyms())!=0:
r = r.hypernyms()[0]
single_list.append(r.name())
if r.name() not in item_dict:
item_dict[r.name()] = {
'name':r.name(),
'definition':r.definition(),
'examples': example_dist_sorted(r,examples),
}
return_list.append(single_list)
# print(return_list)
# print(item_dict)
return return_list, item_dict
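# Illustrative note: find_concept_path collects, for every sense of init_word related to the
# chosen synset, a chain of hypernym names that passes through choosen_word and continues
# roughly up to the WordNet root (e.g. ending in something like 'entity.n.01'); item_dict
# maps each synset name on those chains to its definition and distance-sorted example words.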
@app.route("/query_wordnet_common_hypernym_path", methods=["POST",])
def query_wordnet_common_hypernym_path(request):
init_word = request.json['init_word']
examples = request.json['examples']
choosen_word = request.json['choosen_word']
# choosen_word =
# find_concept_path()
return_list, item_dict = find_concept_path(init_word, examples, choosen_word)
return json({'return_result': return_list, 'item_dict': item_dict})
def return_dependency(words, selected_indexes, original_text):
doc = nlp(original_text)
indexes={}
cur_order = 0
for idx1, sentence in enumerate(doc.sentences):
for idx2, word in enumerate(sentence.words):
if word.pos not in ['.', ',']:
indexes[str(idx1)+'_'+str(idx2)]=cur_order
cur_order = cur_order+1
print(indexes)
dependencies = []
for sen_idx, sentence in enumerate(doc.sentences):
for dependency in sentence.dependencies:
or_cur_idx = str(sen_idx)+'_'+str(int(dependency[0].index)-1)
or_head_idx = str(sen_idx)+'_'+str(int(dependency[2].index)-1)
if or_cur_idx in indexes and or_head_idx in indexes:
cur_idx = indexes[or_cur_idx]
head_idx = indexes[or_head_idx]
if cur_idx in selected_indexes and head_idx in selected_indexes:
dep_dic = {
'head': cur_idx,
'word': head_idx,
'dependency':dependency[1]
}
# print(dep_dic)
dependencies.append(dep_dic)
return dependencies
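# Illustrative sketch: return_dependency re-parses original_text with the loaded `nlp`
# pipeline (assumed here to expose Stanza-style sentences and dependencies), maps word
# positions to a flat index while skipping punctuation tokens, and keeps only the
# dependency arcs whose head and dependent both appear in selected_indexes, yielding
# entries shaped like {'head': 2, 'word': 3, 'dependency': 'amod'} (values hypothetical).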
@app.route("/query_dependency_original_text", methods=["POST",])
def query_dependency_original_text(request):
words = request.json['words']
selected_indexes = request.json['selected_indexes']
original_text = request.json['original_text']
dependencies = return_dependency(words, selected_indexes, original_text)
return json({'dependencies': dependencies})
@app.route("/query_dependencies_from_examples", methods=["POST",])
def query_dependencies_from_examples(request):
examples = request.json['examples']
query_results = []
for example in examples:
words = example['words']
selected_indexes = example['selected_indexes']
original_text = example['original_text']
dependencies = return_dependency(words, selected_indexes, original_text)
query_results.append(dependencies[0]['dependency'])
dependency = max(query_results, key=query_results.count)
return json({'dependency': dependency})
@app.route("/query_rule_storage", methods=["POST",])
def query_rule_storage(request):
input_condition = request.json['input_condition']
output_condition = request.json['output_condition']
backend_condition = request.json['backend_condition']
user_id = request.json['user_id']
keyphrase_id = request.json['keyphrase_id']
rule = request.json['rule']
rule_id = get_random_code(lambda rule_id: len(con.execute(
"SELECT * FROM rule where rule_id=?", (rule_id,)
).fetchall()) == 0)
con.execute(
'INSERT INTO rule (rule_id, user_id, input_condition, output_condition, backend_condition, rule, keyphrase)'\
'VALUES (?, ?, ?, ?, ?, json(?), ?)',
(rule_id,user_id,input_condition,output_condition,backend_condition, js.dumps(rule), keyphrase_id)
)
con.commit()
return json({'done':'done'})
@app.route("/input_keyphrase", methods=["POST",])
def input_keyphrase(request):
phrase = request.json['phrase']
keyphrase_id = get_random_code(lambda keyphrase_id: len(con.execute(
"SELECT * FROM keyphrase where keyphrase_id=?", (keyphrase_id,)
).fetchall())==0)
con.execute(
'INSERT INTO keyphrase (keyphrase_id, keyphrase_text)'\
'VALUES (?, ?)',
(keyphrase_id, phrase)
)
con.commit()
return json({'keyphrase_id':keyphrase_id})
@app.route("/example_post", methods=["POST",])
def create_user(request):
return text("POST data: %s" % request.body)
@app.route("/example_json")
def post_json(request):
return json({ "received": True, "data": request.json })
@app.route("/query_string")
def query_string(request):
return json({ "parsed": True, "args": request.args, "url": request.url,
"query_string": request.query_string })
@app.websocket('/ws_data')
async def feed(request, ws):
while True:
data = 'hello!'
print('Sending: ' + data)
await ws.send(data)
data = await ws.recv()
print('Received: ' + data)
| 33.175758 | 114 | 0.6635 |
87be6b625896e405fac4d799b160f7f509ba6ce8
| 31,600 |
py
|
Python
|
apps/life_sci/python/dgllife/utils/featurizers.py
|
shyustc/dgl
|
bdf1bb52e6cb7514e57d648bcba8ed660c11ca9c
|
[
"Apache-2.0"
] | 1 |
2020-04-10T04:22:01.000Z
|
2020-04-10T04:22:01.000Z
|
apps/life_sci/python/dgllife/utils/featurizers.py
|
shyustc/dgl
|
bdf1bb52e6cb7514e57d648bcba8ed660c11ca9c
|
[
"Apache-2.0"
] | null | null | null |
apps/life_sci/python/dgllife/utils/featurizers.py
|
shyustc/dgl
|
bdf1bb52e6cb7514e57d648bcba8ed660c11ca9c
|
[
"Apache-2.0"
] | null | null | null |
"""Node and edge featurization for molecular graphs."""
import dgl.backend as F
import itertools
import numpy as np
from collections import defaultdict
from rdkit import Chem
__all__ = ['one_hot_encoding',
'atom_type_one_hot',
'atomic_number_one_hot',
'atomic_number',
'atom_degree_one_hot',
'atom_degree',
'atom_total_degree_one_hot',
'atom_total_degree',
'atom_explicit_valence_one_hot',
'atom_explicit_valence',
'atom_implicit_valence_one_hot',
'atom_implicit_valence',
'atom_hybridization_one_hot',
'atom_total_num_H_one_hot',
'atom_total_num_H',
'atom_formal_charge_one_hot',
'atom_formal_charge',
'atom_num_radical_electrons_one_hot',
'atom_num_radical_electrons',
'atom_is_aromatic_one_hot',
'atom_is_aromatic',
'atom_is_in_ring_one_hot',
'atom_is_in_ring',
'atom_chiral_tag_one_hot',
'atom_mass',
'ConcatFeaturizer',
'BaseAtomFeaturizer',
'CanonicalAtomFeaturizer',
'bond_type_one_hot',
'bond_is_conjugated_one_hot',
'bond_is_conjugated',
'bond_is_in_ring_one_hot',
'bond_is_in_ring',
'bond_stereo_one_hot',
'BaseBondFeaturizer',
'CanonicalBondFeaturizer']
def one_hot_encoding(x, allowable_set, encode_unknown=False):
"""One-hot encoding.
Parameters
----------
x
Value to encode.
allowable_set : list
The elements of the allowable_set should be of the
same type as x.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element.
Returns
-------
list
List of boolean values where at most one value is True.
The list is of length ``len(allowable_set)`` if ``encode_unknown=False``
and ``len(allowable_set) + 1`` otherwise.
"""
if encode_unknown and (allowable_set[-1] is not None):
allowable_set.append(None)
if encode_unknown and (x not in allowable_set):
x = None
return list(map(lambda s: x == s, allowable_set))
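# Minimal illustration of the helper above:
#   one_hot_encoding('C', ['C', 'N', 'O'])                       -> [True, False, False]
#   one_hot_encoding('S', ['C', 'N', 'O'], encode_unknown=True)  -> [False, False, False, True]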
#################################################################
# Atom featurization
#################################################################
def atom_type_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the type of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of str
Atom types to consider. Default: ``C``, ``N``, ``O``, ``S``, ``F``, ``Si``, ``P``,
``Cl``, ``Br``, ``Mg``, ``Na``, ``Ca``, ``Fe``, ``As``, ``Al``, ``I``, ``B``, ``V``,
``K``, ``Tl``, ``Yb``, ``Sb``, ``Sn``, ``Ag``, ``Pd``, ``Co``, ``Se``, ``Ti``, ``Zn``,
``H``, ``Li``, ``Ge``, ``Cu``, ``Au``, ``Ni``, ``Cd``, ``In``, ``Mn``, ``Zr``, ``Cr``,
``Pt``, ``Hg``, ``Pb``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca',
'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb', 'Sb', 'Sn',
'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge', 'Cu', 'Au',
'Ni', 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb']
return one_hot_encoding(atom.GetSymbol(), allowable_set, encode_unknown)
def atomic_number_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the atomic number of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Atomic numbers to consider. Default: ``1`` - ``100``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = list(range(1, 101))
return one_hot_encoding(atom.GetAtomicNum(), allowable_set, encode_unknown)
def atomic_number(atom):
"""Get the atomic number for an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
"""
return [atom.GetAtomicNum()]
def atom_degree_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the degree of an atom.
Note that the result will be different depending on whether the Hs are
explicitly modeled in the graph.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Atom degrees to consider. Default: ``0`` - ``10``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
atom_total_degree_one_hot
"""
if allowable_set is None:
allowable_set = list(range(11))
return one_hot_encoding(atom.GetDegree(), allowable_set, encode_unknown)
def atom_degree(atom):
"""Get the degree of an atom.
Note that the result will be different depending on whether the Hs are
explicitly modeled in the graph.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
See Also
--------
atom_total_degree
"""
return [atom.GetDegree()]
def atom_total_degree_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the degree of an atom including Hs.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list
Total degrees to consider. Default: ``0`` - ``5``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
See Also
--------
atom_degree_one_hot
"""
if allowable_set is None:
allowable_set = list(range(6))
return one_hot_encoding(atom.GetTotalDegree(), allowable_set, encode_unknown)
def atom_total_degree(atom):
"""The degree of an atom including Hs.
See Also
--------
atom_degree
Returns
-------
list
List containing one int only.
"""
return [atom.GetTotalDegree()]
def atom_explicit_valence_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the explicit valence of an aotm.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Atom explicit valences to consider. Default: ``1`` - ``6``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = list(range(1, 7))
return one_hot_encoding(atom.GetExplicitValence(), allowable_set, encode_unknown)
def atom_explicit_valence(atom):
"""Get the explicit valence of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
"""
return [atom.GetExplicitValence()]
def atom_implicit_valence_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the implicit valence of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Atom implicit valences to consider. Default: ``0`` - ``6``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = list(range(7))
return one_hot_encoding(atom.GetImplicitValence(), allowable_set, encode_unknown)
def atom_implicit_valence(atom):
"""Get the implicit valence of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
    Returns
    -------
list
List containing one int only.
"""
return [atom.GetImplicitValence()]
def atom_hybridization_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the hybridization of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of rdkit.Chem.rdchem.HybridizationType
Atom hybridizations to consider. Default: ``Chem.rdchem.HybridizationType.SP``,
``Chem.rdchem.HybridizationType.SP2``, ``Chem.rdchem.HybridizationType.SP3``,
``Chem.rdchem.HybridizationType.SP3D``, ``Chem.rdchem.HybridizationType.SP3D2``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2]
return one_hot_encoding(atom.GetHybridization(), allowable_set, encode_unknown)
def atom_total_num_H_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the total number of Hs of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Total number of Hs to consider. Default: ``0`` - ``4``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = list(range(5))
return one_hot_encoding(atom.GetTotalNumHs(), allowable_set, encode_unknown)
def atom_total_num_H(atom):
"""Get the total number of Hs of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
"""
return [atom.GetTotalNumHs()]
def atom_formal_charge_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the formal charge of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Formal charges to consider. Default: ``-2`` - ``2``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = list(range(-2, 3))
return one_hot_encoding(atom.GetFormalCharge(), allowable_set, encode_unknown)
def atom_formal_charge(atom):
"""Get formal charge for an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
"""
return [atom.GetFormalCharge()]
def atom_num_radical_electrons_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the number of radical electrons of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Number of radical electrons to consider. Default: ``0`` - ``4``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = list(range(5))
return one_hot_encoding(atom.GetNumRadicalElectrons(), allowable_set, encode_unknown)
def atom_num_radical_electrons(atom):
"""Get the number of radical electrons for an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
"""
return [atom.GetNumRadicalElectrons()]
def atom_is_aromatic_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for whether the atom is aromatic.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of bool
Conditions to consider. Default: ``False`` and ``True``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = [False, True]
return one_hot_encoding(atom.GetIsAromatic(), allowable_set, encode_unknown)
def atom_is_aromatic(atom):
"""Get whether the atom is aromatic.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one bool only.
"""
return [atom.GetIsAromatic()]
def atom_is_in_ring_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for whether the atom is in ring.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of bool
Conditions to consider. Default: ``False`` and ``True``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = [False, True]
return one_hot_encoding(atom.IsInRing(), allowable_set, encode_unknown)
def atom_is_in_ring(atom):
"""Get whether the atom is in ring.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one bool only.
"""
return [atom.IsInRing()]
def atom_chiral_tag_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the chiral tag of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of rdkit.Chem.rdchem.ChiralType
Chiral tags to consider. Default: ``rdkit.Chem.rdchem.ChiralType.CHI_UNSPECIFIED``,
``rdkit.Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW``,
``rdkit.Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW``,
``rdkit.Chem.rdchem.ChiralType.CHI_OTHER``.
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER]
return one_hot_encoding(atom.GetChiralTag(), allowable_set, encode_unknown)
def atom_mass(atom, coef=0.01):
"""Get the mass of an atom and scale it.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
coef : float
The mass will be multiplied by ``coef``.
Returns
-------
list
List containing one float only.
"""
return [atom.GetMass() * coef]
class ConcatFeaturizer(object):
"""Concatenate the evaluation results of multiple functions as a single feature.
Parameters
----------
func_list : list
List of functions for computing molecular descriptors from objects of a same
particular data type, e.g. ``rdkit.Chem.rdchem.Atom``. Each function is of signature
``func(data_type) -> list of float or bool or int``. The resulting order of
the features will follow that of the functions in the list.
"""
def __init__(self, func_list):
self.func_list = func_list
def __call__(self, x):
"""Featurize the input data.
Parameters
----------
x :
Data to featurize.
Returns
-------
list
List of feature values, which can be of type bool, float or int.
"""
return list(itertools.chain.from_iterable(
[func(x) for func in self.func_list]))
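# Illustrative usage (output values approximate): concatenating two atom descriptors
#   featurizer = ConcatFeaturizer([atom_mass, atom_is_aromatic])
#   featurizer(atom)  # e.g. roughly [0.12, False] for a non-aromatic carbon atom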
class BaseAtomFeaturizer(object):
"""An abstract class for atom featurizers.
Loop over all atoms in a molecule and featurize them with the ``featurizer_funcs``.
**We assume the resulting DGLGraph will not contain any virtual nodes.**
Parameters
----------
featurizer_funcs : dict
Mapping feature name to the featurization function.
Each function is of signature ``func(rdkit.Chem.rdchem.Atom) -> list or 1D numpy array``.
feat_sizes : dict
Mapping feature name to the size of the corresponding feature. If None, they will be
computed when needed. Default: None.
Examples
--------
    >>> from dgllife.utils import BaseAtomFeaturizer, atom_mass, atom_degree_one_hot
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('CCO')
>>> atom_featurizer = BaseAtomFeaturizer({'mass': atom_mass, 'degree': atom_degree_one_hot})
>>> atom_featurizer(mol)
{'mass': tensor([[0.1201],
[0.1201],
[0.1600]]),
'degree': tensor([[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])}
"""
def __init__(self, featurizer_funcs, feat_sizes=None):
self.featurizer_funcs = featurizer_funcs
if feat_sizes is None:
feat_sizes = dict()
self._feat_sizes = feat_sizes
def feat_size(self, feat_name):
"""Get the feature size for ``feat_name``.
Returns
-------
int
Feature size for the feature with name ``feat_name``.
"""
if feat_name not in self.featurizer_funcs:
            raise ValueError('Expect feat_name to be in {}, got {}'.format(
list(self.featurizer_funcs.keys()), feat_name))
if feat_name not in self._feat_sizes:
atom = Chem.MolFromSmiles('C').GetAtomWithIdx(0)
self._feat_sizes[feat_name] = len(self.featurizer_funcs[feat_name](atom))
return self._feat_sizes[feat_name]
def __call__(self, mol):
"""Featurize all atoms in a molecule.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance.
Returns
-------
dict
For each function in self.featurizer_funcs with the key ``k``, store the computed
feature under the key ``k``. Each feature is a tensor of dtype float32 and shape
(N, M), where N is the number of atoms in the molecule.
"""
num_atoms = mol.GetNumAtoms()
atom_features = defaultdict(list)
# Compute features for each atom
for i in range(num_atoms):
atom = mol.GetAtomWithIdx(i)
for feat_name, feat_func in self.featurizer_funcs.items():
atom_features[feat_name].append(feat_func(atom))
# Stack the features and convert them to float arrays
processed_features = dict()
for feat_name, feat_list in atom_features.items():
feat = np.stack(feat_list)
processed_features[feat_name] = F.zerocopy_from_numpy(feat.astype(np.float32))
return processed_features
class CanonicalAtomFeaturizer(BaseAtomFeaturizer):
"""A default featurizer for atoms.
The atom features include:
* **One hot encoding of the atom type**. The supported atom types include
``C``, ``N``, ``O``, ``S``, ``F``, ``Si``, ``P``, ``Cl``, ``Br``, ``Mg``,
``Na``, ``Ca``, ``Fe``, ``As``, ``Al``, ``I``, ``B``, ``V``, ``K``, ``Tl``,
``Yb``, ``Sb``, ``Sn``, ``Ag``, ``Pd``, ``Co``, ``Se``, ``Ti``, ``Zn``,
``H``, ``Li``, ``Ge``, ``Cu``, ``Au``, ``Ni``, ``Cd``, ``In``, ``Mn``, ``Zr``,
``Cr``, ``Pt``, ``Hg``, ``Pb``.
* **One hot encoding of the atom degree**. The supported possibilities
include ``0 - 10``.
* **One hot encoding of the number of implicit Hs on the atom**. The supported
possibilities include ``0 - 6``.
* **Formal charge of the atom**.
* **Number of radical electrons of the atom**.
* **One hot encoding of the atom hybridization**. The supported possibilities include
``SP``, ``SP2``, ``SP3``, ``SP3D``, ``SP3D2``.
* **Whether the atom is aromatic**.
* **One hot encoding of the number of total Hs on the atom**. The supported possibilities
include ``0 - 4``.
**We assume the resulting DGLGraph will not contain any virtual nodes.**
Parameters
----------
atom_data_field : str
Name for storing atom features in DGLGraphs, default to be 'h'.
"""
def __init__(self, atom_data_field='h'):
super(CanonicalAtomFeaturizer, self).__init__(
featurizer_funcs={atom_data_field: ConcatFeaturizer(
[atom_type_one_hot,
atom_degree_one_hot,
atom_implicit_valence_one_hot,
atom_formal_charge,
atom_num_radical_electrons,
atom_hybridization_one_hot,
atom_is_aromatic,
atom_total_num_H_one_hot]
)})
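# Illustrative usage (doctest-style; shapes assume the canonical 74-dim atom features):
#   >>> mol = Chem.MolFromSmiles('CCO')
#   >>> atom_featurizer = CanonicalAtomFeaturizer(atom_data_field='feat')
#   >>> atom_featurizer(mol)['feat'].shape   # expected: (3, 74), one row per atom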
def bond_type_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for the type of a bond.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of Chem.rdchem.BondType
Bond types to consider. Default: ``Chem.rdchem.BondType.SINGLE``,
``Chem.rdchem.BondType.DOUBLE``, ``Chem.rdchem.BondType.TRIPLE``,
``Chem.rdchem.BondType.AROMATIC``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
return one_hot_encoding(bond.GetBondType(), allowable_set, encode_unknown)
def bond_is_conjugated_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for whether the bond is conjugated.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of bool
Conditions to consider. Default: ``False`` and ``True``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = [False, True]
return one_hot_encoding(bond.GetIsConjugated(), allowable_set, encode_unknown)
def bond_is_conjugated(bond):
"""Get whether the bond is conjugated.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
Returns
-------
list
List containing one bool only.
"""
return [bond.GetIsConjugated()]
def bond_is_in_ring_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for whether the bond is in a ring of any size.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of bool
Conditions to consider. Default: ``False`` and ``True``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = [False, True]
return one_hot_encoding(bond.IsInRing(), allowable_set, encode_unknown)
def bond_is_in_ring(bond):
"""Get whether the bond is in a ring of any size.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
Returns
-------
list
List containing one bool only.
"""
return [bond.IsInRing()]
def bond_stereo_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for the stereo configuration of a bond.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of rdkit.Chem.rdchem.BondStereo
Stereo configurations to consider. Default: ``rdkit.Chem.rdchem.BondStereo.STEREONONE``,
``rdkit.Chem.rdchem.BondStereo.STEREOANY``, ``rdkit.Chem.rdchem.BondStereo.STEREOZ``,
``rdkit.Chem.rdchem.BondStereo.STEREOE``, ``rdkit.Chem.rdchem.BondStereo.STEREOCIS``,
``rdkit.Chem.rdchem.BondStereo.STEREOTRANS``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.BondStereo.STEREONONE,
Chem.rdchem.BondStereo.STEREOANY,
Chem.rdchem.BondStereo.STEREOZ,
Chem.rdchem.BondStereo.STEREOE,
Chem.rdchem.BondStereo.STEREOCIS,
Chem.rdchem.BondStereo.STEREOTRANS]
return one_hot_encoding(bond.GetStereo(), allowable_set, encode_unknown)
class BaseBondFeaturizer(object):
"""An abstract class for bond featurizers.
Loop over all bonds in a molecule and featurize them with the ``featurizer_funcs``.
We assume the constructed ``DGLGraph`` is a bi-directed graph where the **i** th bond in the
molecule, i.e. ``mol.GetBondWithIdx(i)``, corresponds to the **(2i)**-th and **(2i+1)**-th edges
in the DGLGraph.
**We assume the resulting DGLGraph will be created with :func:`smiles_to_bigraph` without
self loops.**
Parameters
----------
featurizer_funcs : dict
Mapping feature name to the featurization function.
Each function is of signature ``func(rdkit.Chem.rdchem.Bond) -> list or 1D numpy array``.
feat_sizes : dict
Mapping feature name to the size of the corresponding feature. If None, they will be
computed when needed. Default: None.
Examples
--------
    >>> from dgllife.utils import BaseBondFeaturizer, bond_type_one_hot, bond_is_in_ring
>>> from rdkit import Chem
>>> mol = Chem.MolFromSmiles('CCO')
>>> bond_featurizer = BaseBondFeaturizer({'bond_type': bond_type_one_hot, 'in_ring': bond_is_in_ring})
>>> bond_featurizer(mol)
{'bond_type': tensor([[1., 0., 0., 0.],
[1., 0., 0., 0.],
[1., 0., 0., 0.],
[1., 0., 0., 0.]]),
'in_ring': tensor([[0.], [0.], [0.], [0.]])}
"""
def __init__(self, featurizer_funcs, feat_sizes=None):
self.featurizer_funcs = featurizer_funcs
if feat_sizes is None:
feat_sizes = dict()
self._feat_sizes = feat_sizes
def feat_size(self, feat_name):
"""Get the feature size for ``feat_name``.
Returns
-------
int
Feature size for the feature with name ``feat_name``.
"""
if feat_name not in self.featurizer_funcs:
            raise ValueError('Expect feat_name to be in {}, got {}'.format(
list(self.featurizer_funcs.keys()), feat_name))
if feat_name not in self._feat_sizes:
bond = Chem.MolFromSmiles('CO').GetBondWithIdx(0)
self._feat_sizes[feat_name] = len(self.featurizer_funcs[feat_name](bond))
return self._feat_sizes[feat_name]
def __call__(self, mol):
"""Featurize all bonds in a molecule.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance.
Returns
-------
dict
For each function in self.featurizer_funcs with the key ``k``, store the computed
feature under the key ``k``. Each feature is a tensor of dtype float32 and shape
            (N, M), where N is twice the number of bonds in the molecule, since each bond
            corresponds to a pair of directed edges in the DGLGraph.
"""
num_bonds = mol.GetNumBonds()
bond_features = defaultdict(list)
# Compute features for each bond
for i in range(num_bonds):
bond = mol.GetBondWithIdx(i)
for feat_name, feat_func in self.featurizer_funcs.items():
feat = feat_func(bond)
bond_features[feat_name].extend([feat, feat.copy()])
# Stack the features and convert them to float arrays
processed_features = dict()
for feat_name, feat_list in bond_features.items():
feat = np.stack(feat_list)
processed_features[feat_name] = F.zerocopy_from_numpy(feat.astype(np.float32))
return processed_features
class CanonicalBondFeaturizer(BaseBondFeaturizer):
"""A default featurizer for bonds.
The bond features include:
* **One hot encoding of the bond type**. The supported bond types include
``SINGLE``, ``DOUBLE``, ``TRIPLE``, ``AROMATIC``.
    * **Whether the bond is conjugated.**
* **Whether the bond is in a ring of any size.**
* **One hot encoding of the stereo configuration of a bond**. The supported bond stereo
configurations include ``STEREONONE``, ``STEREOANY``, ``STEREOZ``, ``STEREOE``,
``STEREOCIS``, ``STEREOTRANS``.
    **We assume the resulting DGLGraph will be created with :func:`smiles_to_bigraph` without
    self loops.**
    Parameters
    ----------
    bond_data_field : str
        Name for storing bond features in DGLGraphs, default to be 'e'.
    """
def __init__(self, bond_data_field='e'):
super(CanonicalBondFeaturizer, self).__init__(
featurizer_funcs={bond_data_field: ConcatFeaturizer(
[bond_type_one_hot,
bond_is_conjugated,
bond_is_in_ring,
bond_stereo_one_hot]
)})
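# Illustrative usage (doctest-style; shapes assume the canonical 12-dim bond features):
#   >>> mol = Chem.MolFromSmiles('CCO')
#   >>> bond_featurizer = CanonicalBondFeaturizer(bond_data_field='feat')
#   >>> bond_featurizer(mol)['feat'].shape   # expected: (4, 12), two directed edges per bond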
| 33.228181 | 106 | 0.609051 |
8fd5390ed2abaa6fd89508263c284fcd63395ca6
| 41,707 |
py
|
Python
|
var/spack/repos/builtin/packages/py-tensorflow/package.py
|
lcnzg/spack
|
5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 |
2018-11-16T02:42:57.000Z
|
2019-06-06T19:18:50.000Z
|
var/spack/repos/builtin/packages/py-tensorflow/package.py
|
lcnzg/spack
|
5b9f60f9bb159113bfd8a0c8f3f4a8a0c2f55d7e
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 |
2020-12-15T17:29:20.000Z
|
2022-03-21T15:08:31.000Z
|
var/spack/repos/builtin/packages/py-tensorflow/package.py
|
hainest/spack
|
c592e17d49118f253b552ef6165745ded8ac0ea5
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 |
2021-07-19T20:31:27.000Z
|
2021-07-19T21:14:14.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import tempfile
class PyTensorflow(Package, CudaPackage):
"""TensorFlow is an Open Source Software Library for Machine Intelligence
"""
homepage = "https://www.tensorflow.org"
url = "https://github.com/tensorflow/tensorflow/archive/v2.3.1.tar.gz"
maintainers = ['adamjstewart', 'aweits']
import_modules = ['tensorflow']
version('2.4.1', sha256='f681331f8fc0800883761c7709d13cda11942d4ad5ff9f44ad855e9dc78387e0')
version('2.4.0', sha256='26c833b7e1873936379e810a39d14700281125257ddda8cd822c89111db6f6ae')
version('2.3.2', sha256='21a703d2e68cd0677f6f9ce329198c24fd8203125599d791af9f1de61aadf31f')
version('2.3.1', sha256='ee534dd31a811f7a759453567257d1e643f216d8d55a25c32d2fbfff8153a1ac')
version('2.3.0', sha256='2595a5c401521f20a2734c4e5d54120996f8391f00bb62a57267d930bce95350')
version('2.2.2', sha256='fb4b5d26c5b983350f7ce8297b71176a86a69e91faf66e6ebb1e58538ad3bb51')
version('2.2.1', sha256='e6a28e64236d729e598dbeaa02152219e67d0ac94d6ed22438606026a02e0f88')
version('2.2.0', sha256='69cd836f87b8c53506c4f706f655d423270f5a563b76dc1cfa60fbc3184185a3')
version('2.1.3', sha256='cfa66cce372f486e95a42beb1aacfefdaf0092c5efaaaa92459b381fde931fb8')
version('2.1.2', sha256='3f941cda0ed12dfef5472e46f1d0238ea85da7583d73f1132d2ef050fda6e8ad')
version('2.1.1', sha256='a200bc16e4b630db3ac7225bcb6f239a76841967b0aec1d7d7bbe44dc5661318')
version('2.1.0', sha256='638e541a4981f52c69da4a311815f1e7989bf1d67a41d204511966e1daed14f7')
version('2.0.4', sha256='6ca3ce1255da8d655080a89db10da03f72c361d7faecc9a35e6af26ff12c06e6')
version('2.0.3', sha256='6314299a723441bd9892e5c2af182c2be7d2256e20e71026e1cb1264cb497f33')
version('2.0.2', sha256='a548742bbafd302eec51e2794d7687674a64f6b10ce1414073858cb83c0cefc2')
version('2.0.1', sha256='29197d30923b9670992ee4b9c6161f50c7452e9a4158c720746e846080ac245a')
version('2.0.0', sha256='49b5f0495cd681cbcb5296a4476853d4aea19a43bdd9f179c928a977308a0617')
version('1.15.5', sha256='4c4d23e311093ded2d2e287b18d7c45b07b5984ab88a1d2f91f8f13c886123db')
version('1.15.4', sha256='e18c55e771ad136f9bf3a70ea8f0e2d36662b2ba7c890f9eaf7950554557c7fa')
version('1.15.3', sha256='9ab1d92e58eb813922b040acc7622b32d73c2d8d971fe6491a06f9df4c778151')
version('1.15.2', sha256='d95d75d26a298211b5e802842e87fda5b8b14f6ad83719377b391e5fb71b8746')
version('1.15.1', sha256='19b6e72bc8675937f618cede364d7228a71c2eeaffc42801bcefd98dda7ca056')
version('1.15.0', sha256='a5d49c00a175a61da7431a9b289747d62339be9cf37600330ad63b611f7f5dc9')
version('1.14.0', sha256='aa2a6a1daafa3af66807cfe0bc77bfe1144a9a53df9a96bab52e3e575b3047ed')
version('1.13.2', sha256='abe3bf0c47845a628b7df4c57646f41a10ee70f914f1b018a5c761be75e1f1a9')
version('1.13.1', sha256='7cd19978e6bc7edc2c847bce19f95515a742b34ea5e28e4389dade35348f58ed')
version('1.12.3', sha256='b9e5488e84f4a133ed20b18605f0cd6301f11d356bd959712db4e7b9301d0462')
version('1.12.2', sha256='90ffc7cf1df5e4b8385c9108db18d5d5034ec423547c0e167d44f5746a20d06b')
version('1.12.1', sha256='7b559a3ae56322b7a7e4307f45f9fce96022c533a98b32c18bfdff8c5838271d')
version('1.12.0', sha256='3c87b81e37d4ed7f3da6200474fa5e656ffd20d8811068572f43610cae97ca92')
version('1.11.0', sha256='f49ce3f1d04cee854bc9f74fa9696991140b34a2e2447f35f01391b72c8bfa9f')
version('1.10.1', sha256='83092d709800e2d93d4d4b1bcacaeb74f2f328962ed764cb35bbee20402879c6')
version('1.10.0', sha256='ee9cb98d9e0d8106f2f4ed52a38fe89399324af303e1401567e5b64a9f86744b')
version('1.9.0', sha256='ffc3151b06823d57b4a408261ba8efe53601563dfe93af0866751d4f6ca5068c')
version('1.8.0', sha256='47646952590fd213b747247e6870d89bb4a368a95ae3561513d6c76e44f92a75')
version('1.7.1', sha256='3147f8c60d1f30da23a831bcf732e74b935dcee7c62e4b8b85f0f093030b52c8')
version('1.7.0', sha256='c676a96fc8700722816b2b98c85578b2f99fac7a7b2484c9c7f0641484f8d50d')
version('1.6.0', sha256='03cf1423446abbead6bd8c3cf6e6affa7d99746cd119691b012aac9a1795f4fb')
version('1.5.1', sha256='cab2157783905e12a7a3baae3264edfb739dd92d5658019a131fff4b14190240')
version('1.5.0', sha256='0642781c3a3a8c2c4834b91b86aec385f0b2ada7d721571458079478cc5b29c8')
version('1.4.1', sha256='1f75e463318419a1b3ae076d5a92697c1d3a85e8377c946a5510b651ff5c0d60')
version('1.4.0', sha256='8a0ad8d61f8f6c0282c548661994a5ab83ac531bac496c3041dedc1ab021107b')
version('1.3.1', sha256='ded509c209f8a1d390df8a2f44be5b5c29963172b0e0f095304efb59765d0523')
version('1.3.0', sha256='e1af1bb767b57c3416de0d43a5f74d174c42b85231dffd36f3630173534d4307')
version('1.2.1', sha256='f2baf09b1a9a0500907b4d5cb5473070b3ecede06ed6e8d1096873c91922fb9e')
version('1.2.0', sha256='03dbf7548d1fc1c11ed58da5fa68616f795c819f868f43478cbcaa26abed374f')
version('1.1.0', sha256='aad4470f52fa59f54de7b9a2da727429e6755d91d756f245f952698c42a60027')
version('1.0.1', sha256='deea3c65e0703da96d9c3f1162e464c51d37659dd129396af134e9e8f1ea8c05')
version('1.0.0', sha256='db8b3b8f4134b7c9c1b4165492ad5d5bb78889fcd99ffdffc325e97da3e8c677')
version('0.12.0', sha256='13a1d4e98c82eae7e26fe75384de1517d6126f63ba5d302392ec02ac3ae4b1b9')
version('0.11.0', sha256='24242ff696234bb1e58d09d45169b148525ccb706f980a4a92ddd3b82c7546dc')
version('0.10.0', sha256='f32df04e8f7186aaf6723fc5396733b2f6c2fd6fe4a53a54a68b80f3ec855680')
version('0.9.0', sha256='3128c396af19518c642d3e590212291e1d93c5b047472a10cf3245b53adac9c9')
version('0.8.0', sha256='f201ba7fb7609a6416968d4e1920d87d67be693b5bc7d34b6b4a79860a9a8a4e')
version('0.7.1', sha256='ef34121432f7a522cf9f99a56cdd86e370cc5fa3ee31255ca7cb17f36b8dfc0d')
version('0.7.0', sha256='43dd3051f947aa66e6fc09dac2f86a2efe2e019736bbd091c138544b86d717ce')
version('0.6.0', sha256='f86ace45e99053b09749cd55ab79c57274d8c7460ae763c5e808d81ffbc3b657')
variant('mkl', default=False, description='Build with MKL support')
variant('jemalloc', default=False, description='Build with jemalloc as malloc support')
variant('gcp', default=False, description='Build with Google Cloud Platform support')
variant('hdfs', default=False, description='Build with Hadoop File System support')
variant('aws', default=False, description='Build with Amazon AWS Platform support')
variant('kafka', default=False, description='Build with Apache Kafka Platform support')
variant('ignite', default=False, description='Build with Apache Ignite support')
variant('xla', default=False, description='Build with XLA JIT support')
variant('gdr', default=False, description='Build with GDR support')
variant('verbs', default=False, description='Build with libverbs support')
variant('ngraph', default=False, description='Build with Intel nGraph support')
variant('opencl', default=False, description='Build with OpenCL SYCL support')
variant('computecpp', default=False, description='Build with ComputeCPP support')
variant('rocm', default=False, description='Build with ROCm support')
variant('tensorrt', default=False, description='Build with TensorRT support')
variant('cuda', default=sys.platform != 'darwin', description='Build with CUDA support')
variant('nccl', default=sys.platform.startswith('linux'), description='Enable NVIDIA NCCL support')
variant('mpi', default=False, description='Build with MPI support')
variant('android', default=False, description='Configure for Android builds')
variant('ios', default=False, description='Build with iOS support (macOS only)')
variant('monolithic', default=False, description='Static monolithic build')
variant('numa', default=False, description='Build with NUMA support')
variant('dynamic_kernels', default=False, description='Build kernels into separate shared objects')
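    # Illustrative spec only: a CUDA-enabled build could be requested with something like
    # `spack install py-tensorflow@2.4.1 +cuda cuda_arch=70 +nccl ~mpi`; which variants are
    # appropriate depends on the target system and toolchain.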
extends('python')
depends_on('python@3:', type=('build', 'run'), when='@2.1:')
# python 3.8 support in tensorflow 2.2
# see tensorflow issue #33374
depends_on('python@:3.7', type=('build', 'run'), when='@:2.2')
# TODO: Older versions of TensorFlow don't list the viable version range,
# just the minimum version of bazel that will work. The latest version of
# bazel doesn't seem to work, so for now we force them to use min version.
# Need to investigate further.
# See _TF_MIN_BAZEL_VERSION and _TF_MAX_BAZEL_VERSION in configure.py
depends_on('bazel@3.1.0:3.99.0', type='build', when='@2.3:')
depends_on('bazel@2.0.0', type='build', when='@2.2.0:2.2.999')
depends_on('bazel@0.27.1:0.29.1', type='build', when='@2.1.0:2.1.999')
depends_on('bazel@0.24.1:0.26.1', type='build', when='@1.15:2.0')
# See call to check_bazel_version in configure.py
depends_on('bazel@0.24.1:0.25.2', type='build', when='@1.14.0')
depends_on('bazel@0.19.0:0.21.0', type='build', when='@1.13.0:1.13.2')
depends_on('bazel@0.24.1:0.25.0', type='build', when='@1.12.1')
depends_on('bazel@0.15.0', type='build', when='@1.10:1.12.0,1.12.2:1.12.3')
depends_on('bazel@0.10.0', type='build', when='@1.7:1.9')
# See call to check_version in tensorflow/workspace.bzl
depends_on('bazel@0.5.4', type='build', when='@1.4:1.6')
# See MIN_BAZEL_VERSION in configure
depends_on('bazel@0.4.5', type='build', when='@1.2:1.3')
# See call to check_version in WORKSPACE
depends_on('bazel@0.4.2', type='build', when='@1.0:1.1')
depends_on('bazel@0.3.2', type='build', when='@0.12.0:0.12.1')
depends_on('bazel@0.3.0', type='build', when='@0.11.0')
depends_on('bazel@0.2.0', type='build', when='@0.9:0.10')
depends_on('bazel@0.1.4', type='build', when='@0.7:0.8')
depends_on('bazel@0.1.1', type='build', when='@0.5:0.6')
depends_on('swig', type='build')
depends_on('py-setuptools', type='build')
depends_on('py-future', type='build', when='^python@:2')
# Listed under REQUIRED_PACKAGES in tensorflow/tools/pip_package/setup.py
depends_on('py-absl-py@0.10:0.999', type=('build', 'run'), when='@2.4.0:')
depends_on('py-absl-py@0.7.0:', type=('build', 'run'), when='@1.12.1,1.14:2.3')
depends_on('py-absl-py@0.1.6:', type=('build', 'run'), when='@1.5:1.11')
depends_on('py-astunparse@1.6.3:1.6.999', type=('build', 'run'), when='@2.4.0:')
depends_on('py-astunparse@1.6.3', type=('build', 'run'), when='@2.2:2.3')
depends_on('py-astor@0.6.0:', type=('build', 'run'), when='@1.6:2.1')
depends_on('py-backports-weakref@1.0:', type=('build', 'run'), when='@1.3: ^python@:3.3')
depends_on('py-backports-weakref@1.0rc1', type=('build', 'run'), when='@1.2.0:1.2.1')
depends_on('py-enum34@1.1.6:', type=('build', 'run'), when='@1.5: ^python@:3.3')
depends_on('py-enum34@1.1.6:', type=('build', 'run'), when='@1.4.0:1.4.1')
depends_on('py-gast@0.3.3', type=('build', 'run'), when='@2.2:')
depends_on('py-gast@0.2.2', type=('build', 'run'), when='@1.15:2.1')
depends_on('py-gast@0.2.0:', type=('build', 'run'), when='@1.6:1.14')
depends_on('py-google-pasta@0.2:0.999', type=('build', 'run'), when='@2.4.0:')
depends_on('py-google-pasta@0.1.8:', type=('build', 'run'), when='@2.1:2.3')
depends_on('py-google-pasta@0.1.6:', type=('build', 'run'), when='@1.14:2.0')
depends_on('py-google-pasta@0.1.2:', type=('build', 'run'), when='@1.12.1')
# propagate the mpi variant setting for h5py/hdf5 to avoid unexpected crashes
depends_on('py-h5py@2.10.0:2.10.999+mpi', type=('build', 'run'), when='@2.2:+mpi')
depends_on('py-h5py@2.10.0:2.10.999~mpi', type=('build', 'run'), when='@2.2:~mpi')
depends_on('hdf5+mpi', type='build', when='@2.2:+mpi')
depends_on('hdf5~mpi', type='build', when='@2.2:~mpi')
depends_on('py-keras-applications@1.0.8:', type=('build', 'run'), when='@1.15:2.1')
depends_on('py-keras-applications@1.0.6:', type=('build', 'run'), when='@1.12:1.14')
depends_on('py-keras-applications@1.0.5:', type=('build', 'run'), when='@1.11.0:1.11.999')
depends_on('py-keras-preprocessing@1.1.2:1.1.999', type=('build', 'run'), when='@2.4:')
depends_on('py-keras-preprocessing@1.1.1:1.999', type=('build', 'run'), when='@2.3:2.3.999')
depends_on('py-keras-preprocessing@1.1.0:', type=('build', 'run'), when='@2.1:2.2')
depends_on('py-keras-preprocessing@1.0.5:', type=('build', 'run'), when='@1.12:2.0')
depends_on('py-keras-preprocessing@1.0.3:', type=('build', 'run'), when='@1.11:1.11.999')
# https://github.com/tensorflow/tensorflow/issues/40688
depends_on('py-numpy@1.19.2:1.19.999', type=('build', 'run'), when='@2.4.0:')
depends_on('py-numpy@1.16.0:1.18', type=('build', 'run'), when='@1.13.2,1.15:2.3')
depends_on('py-numpy@1.14.5:1.18', type=('build', 'run'), when='@1.12.1,1.14.0')
depends_on('py-numpy@1.13.3:1.14.5', type=('build', 'run'), when='@1.10.0:1.10.1')
depends_on('py-numpy@1.13.3:', type=('build', 'run'), when='@1.6:1.9')
depends_on('py-numpy@1.12.1:', type=('build', 'run'), when='@1.4:1.5')
depends_on('py-numpy@1.11.0:', type=('build', 'run'), when='@0.11:1.3')
depends_on('py-numpy@1.10.1:', type=('build', 'run'), when='@0.7.1:0.7.999 platform=darwin')
depends_on('py-numpy@1.8.2:', type=('build', 'run'), when='@0.6:0.10')
depends_on('py-numpy@1.9.2:', type=('build', 'run'), when='@0.5.0')
depends_on('py-opt-einsum@3.3.0:3.3.999', type=('build', 'run'), when='@2.4.0:')
depends_on('py-opt-einsum@2.3.2:', type=('build', 'run'), when='@1.15:2.3')
depends_on('py-protobuf@3.9.2:', type=('build', 'run'), when='@2.3:')
depends_on('py-protobuf@3.8.0:', type=('build', 'run'), when='@2.1:2.2')
depends_on('py-protobuf@3.6.1:', type=('build', 'run'), when='@1.12:2.0')
depends_on('py-protobuf@3.6.0:', type=('build', 'run'), when='@1.10:1.11')
depends_on('py-protobuf@3.4.0:', type=('build', 'run'), when='@1.5:1.9')
depends_on('py-protobuf@3.3.0:', type=('build', 'run'), when='@1.3:1.4')
depends_on('py-protobuf@3.2.0:', type=('build', 'run'), when='@1.1:1.2')
depends_on('py-protobuf@3.1.0:', type=('build', 'run'), when='@0.12.1:1.0')
depends_on('py-protobuf@3.1.0', type=('build', 'run'), when='@0.12.0')
depends_on('py-protobuf@3.0.0', type=('build', 'run'), when='@0.11.0')
depends_on('py-protobuf@3.0.0b2', type=('build', 'run'), when='@0.7.1:0.10')
depends_on('py-protobuf@3.0.0a3', type=('build', 'run'), when='@0.6:0.7.0')
depends_on('protobuf')
depends_on('flatbuffers+python@1.12.0:1.12.999', type=('build', 'run'), when='@2.4.0:')
# tensorboard
# tensorflow-estimator
depends_on('py-termcolor@1.1.0:1.1.999', type=('build', 'run'), when='@2.4.0:')
depends_on('py-termcolor@1.1.0:', type=('build', 'run'), when='@1.6:2.3')
depends_on('py-wrapt@1.12.1:1.12.999', type=('build', 'run'), when='@2.4.0:')
depends_on('py-wrapt@1.11.1:', type=('build', 'run'), when='@1.12.1,1.14:2.3')
depends_on('py-wheel', type=('build', 'run'), when='@0.6:2.3')
depends_on('py-wheel@0.26:', type=('build', 'run'), when='@0.6:2.3 ^python@3:')
depends_on('py-wheel@0.35:0.999', type=('build', 'run'), when='@2.4.0: ^python@3:')
depends_on('py-mock@2.0.0:', type=('build', 'run'), when='@0.10: ^python@:2')
depends_on('py-functools32@3.2.3:', type=('build', 'run'), when='@1.15: ^python@:2')
depends_on('py-six@1.15.0:1.15.999', type=('build', 'run'), when='@2.4.0:')
depends_on('py-six@1.12.0:', type=('build', 'run'), when='@2.1:2.3')
depends_on('py-six@1.10.0:', type=('build', 'run'), when='@:2.0')
depends_on('py-scipy@1.2.2', type=('build', 'run'), when='@2.1.0:2.1.1,2.2.0,2.3.0 ^python@:2')
depends_on('py-scipy@1.4.1', type=('build', 'run'), when='@2.1.0:2.1.1,2.2.0,2.3.0 ^python@3:')
depends_on('py-grpcio@1.8.6:', type=('build', 'run'), when='@1.6:1.7')
depends_on('py-typing-extensions@3.7.4:3.7.999', type=('build', 'run'), when='@2.4.0:')
if sys.byteorder == 'little':
# Only builds correctly on little-endian machines
depends_on('py-grpcio@1.8.6:', type=('build', 'run'), when='@1.8:2.3')
depends_on('py-grpcio@1.32.0:1.32.999', type=('build', 'run'), when='@2.4:')
# TODO: add packages for some of these dependencies
depends_on('mkl', when='+mkl')
depends_on('curl', when='+gcp')
# depends_on('computecpp', when='+opencl+computecpp')
# depends_on('trisycl', when='+opencl~computepp')
depends_on('cuda@:10.2', when='+cuda @:2.3')
depends_on('cuda@:11.1', when='+cuda @2.4.0:')
depends_on('cudnn', when='+cuda')
depends_on('cudnn@6.5', when='@0.5:0.6 +cuda')
# depends_on('tensorrt', when='+tensorrt')
depends_on('nccl', when='+nccl')
depends_on('mpi', when='+mpi')
# depends_on('android-ndk@10:18', when='+android')
# depends_on('android-sdk', when='+android')
# Check configure and configure.py to see when these variants are supported
conflicts('+mkl', when='@:1.0')
conflicts('+mkl', when='platform=darwin', msg='Darwin is not yet supported')
conflicts('+jemalloc', when='@:0')
    conflicts('+jemalloc', when='platform=darwin', msg='Currently jemalloc is only supported on Linux platform')
    conflicts('+jemalloc', when='platform=cray', msg='Currently jemalloc is only supported on Linux platform')
conflicts('+gcp', when='@:0.8')
conflicts('+hdfs', when='@:0.10')
conflicts('+aws', when='@:1.3')
conflicts('+kafka', when='@:1.5,2.1:')
conflicts('+ignite', when='@:1.11,2.1:')
conflicts('+xla', when='@:0')
conflicts('+gdr', when='@:1.3')
conflicts('+verbs', when='@:1.1')
conflicts('+ngraph', when='@:1.10')
conflicts('+opencl', when='@:0.11')
conflicts('+computecpp', when='@:0.11')
conflicts('+computecpp', when='~opencl')
conflicts('+rocm', when='@:1.11')
conflicts('+cuda', when='platform=darwin', msg='There is no GPU support for macOS')
conflicts('cuda_arch=none', when='+cuda', msg='Must specify CUDA compute capabilities of your GPU, see https://developer.nvidia.com/cuda-gpus')
conflicts('cuda_arch=20', when='@1.12.1,1.14:', msg='TensorFlow only supports compute capabilities >= 3.5')
conflicts('cuda_arch=30', when='@1.12.1,1.14:', msg='TensorFlow only supports compute capabilities >= 3.5')
conflicts('cuda_arch=32', when='@1.12.1,1.14:', msg='TensorFlow only supports compute capabilities >= 3.5')
conflicts('cuda_arch=20', when='@1.4:1.12.0,1.12.2:1.12.3', msg='Only compute capabilities 3.0 or higher are supported')
conflicts('+tensorrt', when='@:1.5')
conflicts('+tensorrt', when='~cuda')
conflicts('+tensorrt', when='platform=darwin', msg='Currently TensorRT is only supported on Linux platform')
conflicts('+tensorrt', when='platform=cray', msg='Currently TensorRT is only supported on Linux platform')
conflicts('+nccl', when='@:1.7')
conflicts('+nccl', when='~cuda')
conflicts('+nccl', when='platform=darwin', msg='Currently NCCL is only supported on Linux platform')
conflicts('+nccl', when='platform=cray', msg='Currently NCCL is only supported on Linux platform')
conflicts('+mpi', when='@:1.2')
conflicts('+android', when='@:1.4')
conflicts('+ios', when='@:1.12.0,1.12.2:1.13')
conflicts('+ios', when='platform=linux', msg='iOS support only available on macOS')
conflicts('+ios', when='platform=cray', msg='iOS support only available on macOS')
conflicts('+monolithic', when='@:1.3')
conflicts('+numa', when='@:1.12.0,1.12.2:1.13')
conflicts('+dynamic_kernels', when='@:1.12.0,1.12.2:1.12.3')
# TODO: why is this needed?
patch('url-zlib.patch', when='@0.10.0')
# TODO: why is this needed?
patch('crosstool.patch', when='@0.10.0+cuda')
# Avoid build error: "no such package '@io_bazel_rules_docker..."
patch('io_bazel_rules_docker2.patch', when='@1.15:2.0')
    # Avoid build error: "name 'new_http_archive' is not defined"
patch('http_archive.patch', when='@1.12.3')
# Backport of 837c8b6b upstream
# "Remove contrib cloud bigtable and storage ops/kernels."
# Allows 2.0.* releases to build with '--config=nogcp'
patch('0001-Remove-contrib-cloud-bigtable-and-storage-ops-kernel.patch',
when='@2.0.0:2.0.999')
# for fcc
patch('1-1_fcc_tf_patch.patch', when='@2.1.0:2.1.99%fj')
# do not import contrib.cloud if not available
patch('https://github.com/tensorflow/tensorflow/commit/ed62ac8203999513dfae03498e871ea35eb60cc4.patch',
sha256='c37d14622a86b164e2411ea45a04f756ac61b2044d251f19ab17733c508e5305', when='@1.14.0')
# import_contrib_cloud patch for older versions
patch('contrib_cloud_1.10.patch', when='@1.10:1.13')
patch('contrib_cloud_1.9.patch', when='@1.9')
patch('contrib_cloud_1.4.patch', when='@1.4:1.8')
patch('contrib_cloud_1.1.patch', when='@1.1:1.3')
phases = ['configure', 'build', 'install']
# https://www.tensorflow.org/install/source
def setup_build_environment(self, env):
spec = self.spec
# Please specify the location of python
env.set('PYTHON_BIN_PATH', spec['python'].command.path)
# Please input the desired Python library path to use
env.set('PYTHON_LIB_PATH', site_packages_dir)
# Ensure swig is in PATH or set SWIG_PATH
env.set('SWIG_PATH', spec['swig'].prefix.bin.swig)
# Do you wish to build TensorFlow with MKL support?
if '+mkl' in spec:
env.set('TF_NEED_MKL', '1')
# Do you wish to download MKL LIB from the web?
env.set('TF_DOWNLOAD_MKL', '0')
# Please specify the location where MKL is installed
env.set('MKL_INSTALL_PATH', spec['mkl'].prefix)
else:
env.set('TF_NEED_MKL', '0')
# Do you wish to build TensorFlow with jemalloc as malloc support?
if '+jemalloc' in spec:
env.set('TF_NEED_JEMALLOC', '1')
else:
env.set('TF_NEED_JEMALLOC', '0')
# Do you wish to build TensorFlow with Google Cloud Platform support?
if '+gcp' in spec:
env.set('TF_NEED_GCP', '1')
else:
env.set('TF_NEED_GCP', '0')
# Do you wish to build TensorFlow with Hadoop File System support?
if '+hdfs' in spec:
env.set('TF_NEED_HDFS', '1')
else:
env.set('TF_NEED_HDFS', '0')
# Do you wish to build TensorFlow with Amazon AWS Platform support?
if '+aws' in spec:
env.set('TF_NEED_AWS', '1')
env.set('TF_NEED_S3', '1')
else:
env.set('TF_NEED_AWS', '0')
env.set('TF_NEED_S3', '0')
# Do you wish to build TensorFlow with Apache Kafka Platform support?
if '+kafka' in spec:
env.set('TF_NEED_KAFKA', '1')
else:
env.set('TF_NEED_KAFKA', '0')
# Do you wish to build TensorFlow with Apache Ignite support?
if '+ignite' in spec:
env.set('TF_NEED_IGNITE', '1')
else:
env.set('TF_NEED_IGNITE', '0')
# Do you wish to build TensorFlow with XLA JIT support?
if '+xla' in spec:
env.set('TF_ENABLE_XLA', '1')
else:
env.set('TF_ENABLE_XLA', '0')
# Do you wish to build TensorFlow with GDR support?
if '+gdr' in spec:
env.set('TF_NEED_GDR', '1')
else:
env.set('TF_NEED_GDR', '0')
# Do you wish to build TensorFlow with VERBS support?
if '+verbs' in spec:
env.set('TF_NEED_VERBS', '1')
else:
env.set('TF_NEED_VERBS', '0')
# Do you wish to build TensorFlow with nGraph support?
if '+ngraph' in spec:
env.set('TF_NEED_NGRAPH', '1')
else:
env.set('TF_NEED_NGRAPH', '0')
# Do you wish to build TensorFlow with OpenCL SYCL support?
if '+opencl' in spec:
env.set('TF_NEED_OPENCL_SYCL', '1')
env.set('TF_NEED_OPENCL', '1')
# Please specify which C++ compiler should be used as the host
# C++ compiler
env.set('HOST_CXX_COMPILER', spack_cxx)
# Please specify which C compiler should be used as the host
# C compiler
env.set('HOST_C_COMPILER', spack_cc)
# Do you wish to build TensorFlow with ComputeCPP support?
if '+computecpp' in spec:
env.set('TF_NEED_COMPUTECPP', '1')
# Please specify the location where ComputeCpp is installed
env.set('COMPUTECPP_TOOLKIT_PATH', spec['computecpp'].prefix)
else:
env.set('TF_NEED_COMPUTECPP', '0')
# Please specify the location of the triSYCL include directory
env.set('TRISYCL_INCLUDE_DIR', spec['trisycl'].prefix.include)
else:
env.set('TF_NEED_OPENCL_SYCL', '0')
env.set('TF_NEED_OPENCL', '0')
# Do you wish to build TensorFlow with ROCm support?
if '+rocm' in spec:
env.set('TF_NEED_ROCM', '1')
else:
env.set('TF_NEED_ROCM', '0')
# Do you wish to build TensorFlow with CUDA support?
if '+cuda' in spec:
env.set('TF_NEED_CUDA', '1')
# Do you want to use clang as CUDA compiler?
env.set('TF_CUDA_CLANG', '0')
# Please specify which gcc nvcc should use as the host compiler
env.set('GCC_HOST_COMPILER_PATH', spack_cc)
cuda_paths = [
spec['cuda'].prefix,
spec['cudnn'].prefix,
]
# Do you wish to build TensorFlow with TensorRT support?
if '+tensorrt' in spec:
env.set('TF_NEED_TENSORRT', '1')
cuda_paths.append(spec['tensorrt'].prefix)
# Please specify the TensorRT version you want to use
env.set('TF_TENSORRT_VERSION',
spec['tensorrt'].version.up_to(1))
# Please specify the location where TensorRT is installed
env.set('TENSORRT_INSTALL_PATH', spec['tensorrt'].prefix)
else:
env.set('TF_NEED_TENSORRT', '0')
env.unset('TF_TENSORRT_VERSION')
# Please specify the CUDA SDK version you want to use
env.set('TF_CUDA_VERSION', spec['cuda'].version.up_to(2))
# Please specify the cuDNN version you want to use
env.set('TF_CUDNN_VERSION', spec['cudnn'].version.up_to(1))
if '+nccl' in spec:
cuda_paths.append(spec['nccl'].prefix)
# Please specify the locally installed NCCL version to use
env.set('TF_NCCL_VERSION', spec['nccl'].version.up_to(1))
# Please specify the location where NCCL is installed
env.set('NCCL_INSTALL_PATH', spec['nccl'].prefix)
env.set('NCCL_HDR_PATH', spec['nccl'].prefix.include)
else:
env.unset('TF_NCCL_VERSION')
# Please specify the comma-separated list of base paths to
# look for CUDA libraries and headers
env.set('TF_CUDA_PATHS', ','.join(cuda_paths))
# Please specify the location where CUDA toolkit is installed
env.set('CUDA_TOOLKIT_PATH', spec['cuda'].prefix)
# Please specify the location where CUDNN library is installed
env.set('CUDNN_INSTALL_PATH', spec['cudnn'].prefix)
# Please specify a list of comma-separated CUDA compute
# capabilities you want to build with. You can find the compute
# capability of your device at:
# https://developer.nvidia.com/cuda-gpus.
# Please note that each additional compute capability significantly
# increases your build time and binary size, and that TensorFlow
# only supports compute capabilities >= 3.5
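# For example, a cuda_arch value of '70' is rendered by the code below as
# the string '7.0'.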
capabilities = ','.join('{0:.1f}'.format(
float(i) / 10.0) for i in spec.variants['cuda_arch'].value)
env.set('TF_CUDA_COMPUTE_CAPABILITIES', capabilities)
else:
env.set('TF_NEED_CUDA', '0')
# Do you wish to download a fresh release of clang? (Experimental)
env.set('TF_DOWNLOAD_CLANG', '0')
# Do you wish to build TensorFlow with MPI support?
if '+mpi' in spec:
env.set('TF_NEED_MPI', '1')
# Please specify the MPI toolkit folder
env.set('MPI_HOME', spec['mpi'].prefix)
else:
env.set('TF_NEED_MPI', '0')
env.unset('MPI_HOME')
# Please specify optimization flags to use during compilation when
# bazel option '--config=opt' is specified
env.set('CC_OPT_FLAGS', spec.target.optimization_flags(
spec.compiler.name, spec.compiler.version))
# Would you like to interactively configure ./WORKSPACE for
# Android builds?
if '+android' in spec:
env.set('TF_SET_ANDROID_WORKSPACE', '1')
# Please specify the home path of the Android NDK to use
env.set('ANDROID_NDK_HOME', spec['android-ndk'].prefix)
env.set('ANDROID_NDK_API_LEVEL', spec['android-ndk'].version)
# Please specify the home path of the Android SDK to use
env.set('ANDROID_SDK_HOME', spec['android-sdk'].prefix)
env.set('ANDROID_SDK_API_LEVEL', spec['android-sdk'].version)
# Please specify the Android SDK API level to use
env.set('ANDROID_API_LEVEL', spec['android-sdk'].version)
# Please specify an Android build tools version to use
env.set('ANDROID_BUILD_TOOLS_VERSION', spec['android-sdk'].version)
else:
env.set('TF_SET_ANDROID_WORKSPACE', '0')
# Do you wish to build TensorFlow with iOS support?
if '+ios' in spec:
env.set('TF_CONFIGURE_IOS', '1')
else:
env.set('TF_CONFIGURE_IOS', '0')
# set tmpdir to a non-NFS filesystem
# (because bazel uses ~/.cache/bazel)
# TODO: This should verify that the chosen path is on a non-NFS
# filesystem; the current best idea is to check that
# subprocess.call([
#     'stat', '--file-system', '--format=%T', tmp_path
# ])
# does not report nfs. That check is only valid on Linux, and we would
# like to stay at least macOS compatible as well.
tmp_path = tempfile.mkdtemp(prefix='spack')
env.set('TEST_TMPDIR', tmp_path)
env.set('TF_SYSTEM_LIBS', 'com_google_protobuf')
# NOTE: INCLUDEDIR is not just relevant to protobuf
# see third_party/systemlibs/jsoncpp.BUILD
env.set('INCLUDEDIR', spec['protobuf'].prefix.include)
def patch(self):
if self.spec.satisfies('@2.3.0:'):
filter_file('deps = protodeps + well_known_proto_libs(),',
'deps = protodeps,',
'tensorflow/core/platform/default/build_config.bzl',
string=True)
if self.spec.satisfies('@2.4.0:'):
text = '''
def protobuf_deps():
pass
'''
with open('third_party/systemlibs/protobuf_deps.bzl', 'w') as f:
f.write(text)
filter_file(
'"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",',
'"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",\n'
'"//third_party/systemlibs:protobuf_deps.bzl": "protobuf_deps.bzl",', # noqa: E501
'tensorflow/workspace.bzl',
string=True)
def configure(self, spec, prefix):
# NOTE: configure script is interactive. If you set the appropriate
# environment variables, this interactivity is skipped. If you don't,
# Spack hangs during the configure phase. Use `spack build-env` to
# determine which environment variables must be set for a particular
# version.
configure()
@run_after('configure')
def post_configure_fixes(self):
spec = self.spec
# make sure xla is actually turned off
if spec.satisfies('~xla'):
filter_file(r'--define with_xla_support=true',
r'--define with_xla_support=false',
'.tf_configure.bazelrc')
if spec.satisfies('@1.5.0: ~android'):
# env variable is somehow ignored -> brute force
# TODO: find a better solution
filter_file(r'if workspace_has_any_android_rule\(\)',
r'if True',
'configure.py')
# version dependent fixes
if spec.satisfies('@1.3.0:1.5.0'):
# checksum for protobuf that bazel downloads (@github) changed
filter_file(r'sha256 = "6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93"',
r'sha256 = "e5fdeee6b28cf6c38d61243adff06628baa434a22b5ebb7432d2a7fbabbdb13d"',
'tensorflow/workspace.bzl')
# starting with tensorflow 1.3, tensorboard becomes a dependency
# (...but is not really needed? Tensorboard should depend on
# tensorflow, not the other way!)
# -> remove from list of required packages
filter_file(r"'tensorflow-tensorboard",
r"#'tensorflow-tensorboard",
'tensorflow/tools/pip_package/setup.py')
if spec.satisfies('@1.5.0: ~gcp'):
# Google Cloud support seems to be installed by default, leading to a
# boringssl error; manually set the flag to false to avoid installing
# GCP support
# https://github.com/tensorflow/tensorflow/issues/20677#issuecomment-404634519
filter_file(r'--define with_gcp_support=true',
r'--define with_gcp_support=false',
'.tf_configure.bazelrc')
if spec.satisfies('@1.6.0:'):
# tensorboard name changed
filter_file(r"'tensorboard >=",
r"#'tensorboard >=",
'tensorflow/tools/pip_package/setup.py')
if spec.satisfies('@1.8.0: ~opencl'):
# 1.8.0 and 1.9.0 abort with a numpy import error during python_api
# generation; somehow the wrong PYTHONPATH is used...
# set --distinct_host_configuration=false as a workaround
# https://github.com/tensorflow/tensorflow/issues/22395#issuecomment-431229451
filter_file('build --action_env TF_NEED_OPENCL_SYCL="0"',
'build --action_env TF_NEED_OPENCL_SYCL="0"\n'
'build --distinct_host_configuration=false\n'
'build --action_env PYTHONPATH="{0}"'.format(
env['PYTHONPATH']),
'.tf_configure.bazelrc')
if spec.satisfies('@1.13.1'):
# tensorflow_estimator is an API for tensorflow
# tensorflow-estimator imports tensorflow during build, so
# tensorflow has to be set up first
filter_file(r"'tensorflow_estimator >=",
r"#'tensorflow_estimator >=",
'tensorflow/tools/pip_package/setup.py')
if spec.satisfies('@2.0.0:'):
# now it depends on the nightly versions...
filter_file(r"'tf-estimator-nightly >=",
r"#'tf-estimator-nightly >=",
'tensorflow/tools/pip_package/setup.py')
filter_file(r"REQUIRED_PACKAGES\[i\] = 'tb-nightly >=",
r"pass #REQUIRED_PACKAGES\[i\] = 'tb-nightly >=",
'tensorflow/tools/pip_package/setup.py')
filter_file(r"'tb-nightly >=",
r"#'tb-nightly >=",
'tensorflow/tools/pip_package/setup.py')
if spec.satisfies('@1.13.1 +nccl'):
filter_file(
r'^build --action_env NCCL_INSTALL_PATH=.*',
r'build --action_env NCCL_INSTALL_PATH="' +
spec['nccl'].libs.directories[0] + '"',
'.tf_configure.bazelrc')
filter_file(
r'^build --action_env NCCL_HDR_PATH=.*',
r'build --action_env NCCL_HDR_PATH="' +
spec['nccl'].prefix.include + '"',
'.tf_configure.bazelrc')
# see tensorflow issue #31187 on github
if spec.satisfies('@2.0.0:2.0.999'):
filter_file(r'\#define RUY_DONOTUSEDIRECTLY_AVX512 1',
'#define RUY_DONOTUSEDIRECTLY_AVX512 0',
'tensorflow/lite/experimental/ruy/platform.h')
if spec.satisfies('+cuda'):
libs = spec['cuda'].libs.directories
libs.extend(spec['cudnn'].libs.directories)
if '+nccl' in spec:
libs.extend(spec['nccl'].libs.directories)
if '+tensorrt' in spec:
libs.extend(spec['tensorrt'].libs.directories)
slibs = ':'.join(libs)
filter_file('build --action_env TF_NEED_OPENCL_SYCL="0"',
'build --action_env TF_NEED_OPENCL_SYCL="0"\n'
'build --action_env LD_LIBRARY_PATH="' + slibs + '"',
'.tf_configure.bazelrc')
filter_file('build:opt --copt=-march=native', '',
'.tf_configure.bazelrc')
filter_file('build:opt --host_copt=-march=native', '',
'.tf_configure.bazelrc')
def build(self, spec, prefix):
tmp_path = env['TEST_TMPDIR']
# https://docs.bazel.build/versions/master/command-line-reference.html
args = [
# Don't allow user or system .bazelrc to override build settings
'--nohome_rc',
'--nosystem_rc',
# Bazel does not work properly on NFS, switch to /tmp
'--output_user_root=' + tmp_path,
'build',
# Spack logs don't handle colored output well
'--color=no',
'--jobs={0}'.format(make_jobs),
'--config=opt',
# Enable verbose output for failures
'--verbose_failures',
# Show (formatted) subcommands being executed
'--subcommands=pretty_print',
# Ask bazel to explain what it's up to
# Needs a filename as argument
'--explain=explainlogfile.txt',
# Increase verbosity of explanations
'--verbose_explanations',
]
if spec.satisfies('^bazel@:3.5'):
# removed in bazel 3.6
args.append('--incompatible_no_support_tools_in_action_inputs=false')
# See .bazelrc for when each config flag is supported
if spec.satisfies('@1.12.1:'):
if '+mkl' in spec:
args.append('--config=mkl')
if '+monolithic' in spec:
args.append('--config=monolithic')
if '+gdr' in spec:
args.append('--config=gdr')
if '+verbs' in spec:
args.append('--config=verbs')
if '+ngraph' in spec:
args.append('--config=ngraph')
if '+dynamic_kernels' in spec:
args.append('--config=dynamic_kernels')
if '+cuda' in spec:
args.append('--config=cuda')
if '~aws' in spec:
args.append('--config=noaws')
if '~gcp' in spec:
args.append('--config=nogcp')
if '~hdfs' in spec:
args.append('--config=nohdfs')
if '~nccl' in spec:
args.append('--config=nonccl')
if spec.satisfies('@1.12.1:2.0'):
if '~ignite' in spec:
args.append('--config=noignite')
if '~kafka' in spec:
args.append('--config=nokafka')
if spec.satisfies('@1.12.1,1.14:'):
if '+numa' in spec:
args.append('--config=numa')
if spec.satisfies('@2:'):
args.append('--config=v2')
args.append('//tensorflow/tools/pip_package:build_pip_package')
bazel(*args)
build_pip_package = Executable(
'bazel-bin/tensorflow/tools/pip_package/build_pip_package')
buildpath = join_path(self.stage.source_path, 'spack-build')
build_pip_package('--src', buildpath)
def install(self, spec, prefix):
tmp_path = env['TEST_TMPDIR']
buildpath = join_path(self.stage.source_path, 'spack-build')
with working_dir(buildpath):
setup_py('install', '--prefix={0}'.format(prefix),
'--single-version-externally-managed', '--root=/')
remove_linked_tree(tmp_path)
def test(self):
"""Attempts to import modules of the installed package."""
# Make sure we are importing the installed modules,
# not the ones in the source directory
for module in self.import_modules:
self.run_test(self.spec['python'].command.path,
['-c', 'import {0}'.format(module)],
purpose='checking import of {0}'.format(module),
work_dir='spack-test')
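# Illustrative (hypothetical) spec for installing this package with Spack,
# assuming a CUDA-capable toolchain; the actual package name and available
# variants are defined by the local Spack repository, e.g.:
#
#     spack install tensorflow@2.3.0 +cuda cuda_arch=70 ~aws ~gcp ~hdfs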
| 51.745658 | 147 | 0.61968 |
81e1e20ce65e2894ad2e497fa414faf67a68f00c
| 14,103 |
py
|
Python
|
tests/analyses/reaching_definitions/test_dep_graph.py
|
mikenawrocki/angr
|
57f5593e902f5ad58709bc8f4ce7859134300ffb
|
[
"BSD-2-Clause"
] | 6,132 |
2015-08-06T23:24:47.000Z
|
2022-03-31T21:49:34.000Z
|
tests/analyses/reaching_definitions/test_dep_graph.py
|
mikenawrocki/angr
|
57f5593e902f5ad58709bc8f4ce7859134300ffb
|
[
"BSD-2-Clause"
] | 2,272 |
2015-08-10T08:40:07.000Z
|
2022-03-31T23:46:44.000Z
|
tests/analyses/reaching_definitions/test_dep_graph.py
|
mikenawrocki/angr
|
57f5593e902f5ad58709bc8f4ce7859134300ffb
|
[
"BSD-2-Clause"
] | 1,155 |
2015-08-06T23:37:39.000Z
|
2022-03-31T05:54:11.000Z
|
# pylint: disable=no-self-use
from random import randrange
from unittest import mock, TestCase
import networkx
import claripy
from angr.code_location import CodeLocation
from angr.knowledge_plugins.key_definitions.atoms import Atom, MemoryLocation, Register
from angr.knowledge_plugins.key_definitions.definition import Definition
from angr.analyses.reaching_definitions.dep_graph import DepGraph
from angr.analyses.reaching_definitions.external_codeloc import ExternalCodeLocation
_PAST_N = set()
def unique_randrange(range_):
n = randrange(range_)
while n in _PAST_N:
n = randrange(range_)
_PAST_N.add(n)
return n
def _a_mock_definition(atom: Atom=None):
# Randomise code locations to forcefully produce "different" <Definition>s.
statement_index = unique_randrange(1000)
code_location = CodeLocation(0x42, statement_index)
return Definition(atom, code_location)
class TestDepGraph(TestCase):
class ArchMock:
def __init__(self): pass
@property
def bits(self): return 32
class CFGMock:
def __init__(self, memory_data): self._memory_data = memory_data
@property
def memory_data(self): return self._memory_data
class MemoryDataMock:
def __init__(self, address, content, size, sort):
self._address = address
self._content = content
self._size = size
self._sort = sort
@property
def address(self): return self._address
@property
def content(self): return self._content
@property
def size(self): return self._size
@property
def sort(self): return self._sort
class SectionMock:
def __init__(self, is_writable): self._is_writable = is_writable
@property
def is_writable(self): return self._is_writable
class MainObjectMock:
def __init__(self, section): self._section = section
def find_section_containing(self, _): return self._section
class LoaderMock:
def __init__(self, main_object): self._main_object = main_object
@property
def main_object(self): return self._main_object
def setUp(self):
self.memory_address = 0x42424242
self.string_in_memory = 'some string of data in memory'
self.string_in_memory_length = len(self.string_in_memory + '\x00')
def test_dep_graph_has_a_default_graph(self):
dep_graph = DepGraph()
self.assertEqual(isinstance(dep_graph.graph, networkx.DiGraph), True)
def test_dep_graph_refuses_to_instanciate_with_an_inadequate_graph(self):
a_graph = networkx.DiGraph([(1, 2)])
self.assertRaises(TypeError, DepGraph, a_graph)
def test_delegate_add_node_to_the_underlying_graph_object(self):
with mock.patch.object(networkx.DiGraph, 'add_node') as digraph_add_node_mock:
definition = _a_mock_definition()
dep_graph = DepGraph()
dep_graph.add_node(definition)
digraph_add_node_mock.assert_called_once_with(definition)
def test_delegate_nodes_to_the_underlying_graph_object(self):
with mock.patch.object(networkx.DiGraph, 'nodes') as digraph_nodes_mock:
dep_graph = DepGraph()
dep_graph.nodes()
digraph_nodes_mock.assert_called_once()
def test_delegate_predecessors_to_the_underlying_graph_object(self):
with mock.patch.object(networkx.DiGraph, 'predecessors') as digraph_predecessors_mock:
definition = _a_mock_definition()
dep_graph = DepGraph()
dep_graph.predecessors(definition)
digraph_predecessors_mock.assert_called_once_with(definition)
def test_delegate_add_edge_to_the_underlying_graph_object(self):
with mock.patch.object(networkx.DiGraph, 'add_edge') as digraph_add_edge_mock:
use = (_a_mock_definition(), _a_mock_definition())
labels = { 'attribute1': 'value1', 'attribute2': 'value2' }
dep_graph = DepGraph()
dep_graph.add_edge(*use, **labels)
digraph_add_edge_mock.assert_called_once_with(*use, **labels)
def test_transitive_closure_of_a_node(self):
dep_graph = DepGraph()
# A -> B, B -> D, C -> D
A = _a_mock_definition()
B = _a_mock_definition()
C = _a_mock_definition()
D = _a_mock_definition()
uses = [
(A, B),
(B, D),
(C, D),
]
for use in uses:
dep_graph.add_edge(*use)
result = dep_graph.transitive_closure(D)
result_nodes = set(result.nodes)
result_edges = set(result.edges)
self.assertSetEqual(result_nodes, {D, B, C, A})
self.assertSetEqual(result_edges, {(B, D), (C, D), (A, B)})
def test_transitive_closure_includes_beginning_node_with_memoized_content(self):
dep_graph = DepGraph()
# A -> B
# B -> C
# C -> D
A = _a_mock_definition()
B = _a_mock_definition()
C = _a_mock_definition()
D = _a_mock_definition()
uses = [
(A, B),
(B, C),
(C, D)
]
for use in uses:
dep_graph.add_edge(*use)
closure_0 = dep_graph.transitive_closure(C)
self.assertNotIn(D, closure_0)
closure_1 = dep_graph.transitive_closure(D)
self.assertIn(D, closure_1)
self.assertTrue(closure_1.has_edge(A, B))
self.assertTrue(closure_1.has_edge(B, C))
self.assertTrue(closure_1.has_edge(C, D))
def test_transitive_closure_of_a_node_should_copy_labels_from_original_graph(self):
dep_graph = DepGraph()
# A -> B
A = _a_mock_definition()
B = _a_mock_definition()
uses = [(A, B)]
for use in uses:
dep_graph.add_edge(*use, label='some data')
result = dep_graph.transitive_closure(B).get_edge_data(A, B)['label']
self.assertEqual(result, 'some data')
def test_transitive_closure_of_a_node_on_a_graph_with_loops_should_still_terminate(self):
dep_graph = DepGraph()
# A -> B, B -> C, C -> D, D -> A
A = _a_mock_definition()
B = _a_mock_definition()
C = _a_mock_definition()
D = _a_mock_definition()
uses = [
(A, B),
(B, C),
(C, D),
(D, A),
]
for use in uses:
dep_graph.add_edge(*use)
result = dep_graph.transitive_closure(C)
result_nodes = set(result.nodes)
result_edges = set(result.edges)
self.assertSetEqual(result_nodes, {A, B, C, D})
self.assertSetEqual(result_edges, {(A, B), (B, C), (C, D), (D, A)})
def test_contains_atom_returns_true_if_the_dependency_graph_contains_a_definition_of_the_given_atom(self):
dep_graph = DepGraph()
r0 = Register(8, 4)
# A -> B
A = _a_mock_definition(r0)
B = _a_mock_definition()
uses = [ (A, B) ]
for use in uses:
dep_graph.add_edge(*use)
result = dep_graph.contains_atom(r0)
self.assertTrue(result)
def test_contains_atom_returns_false_if_the_dependency_graph_does_not_contain_a_definition_of_the_given_atom(self):
dep_graph = DepGraph()
# A -> B
A = _a_mock_definition()
B = _a_mock_definition()
uses = [ (A, B) ]
for use in uses:
dep_graph.add_edge(*use)
result = dep_graph.contains_atom(Register(8, 4))
self.assertFalse(result)
def test_add_dependencies_for_concrete_pointers_of_fails_if_the_given_definition_is_not_in_the_graph(self):
dependency_graph = DepGraph()
definition = Definition(
Register(0, 4),
CodeLocation(0x42, 0),
)
with self.assertRaises(AssertionError) as cm:
dependency_graph.add_dependencies_for_concrete_pointers_of([claripy.BVS('TOP', 32)], definition, None, None)
ex = cm.exception
self.assertEqual(str(ex), 'The given Definition must be present in the given graph.')
def test_add_dependencies_for_concrete_pointers_of_adds_a_definition_for_data_pointed_to_by_given_definition(self):
arch = self.ArchMock()
loader = self.LoaderMock(self.MainObjectMock(self.SectionMock(True)))
memory_datum = self.MemoryDataMock(
self.memory_address,
str.encode(self.string_in_memory),
len(self.string_in_memory),
'string'
)
cfg = self.CFGMock({ self.memory_address: memory_datum })
register_definition = Definition(
Register(0, 4),
None,
)
dependency_graph = DepGraph()
dependency_graph.add_node(register_definition)
dependency_graph.add_dependencies_for_concrete_pointers_of([claripy.BVV(self.memory_address, arch.bits)],
register_definition, cfg, loader)
memory_definition = Definition(
MemoryLocation(self.memory_address, self.string_in_memory_length),
ExternalCodeLocation(),
)
nodes = list(dependency_graph.nodes())
predecessors = list(dependency_graph.graph.predecessors(register_definition))
self.assertEqual(nodes, [register_definition, memory_definition])
self.assertListEqual(predecessors, [memory_definition])
def test_add_dependencies_for_concrete_pointers_of_does_nothing_if_data_pointed_to_by_definition_is_already_in_dependency_graph(self):
arch = self.ArchMock()
loader = self.LoaderMock(self.MainObjectMock(self.SectionMock(True)))
memory_datum = self.MemoryDataMock(
self.memory_address,
str.encode(self.string_in_memory),
len(self.string_in_memory),
'string'
)
cfg = self.CFGMock({ self.memory_address: memory_datum })
memory_location_definition = Definition(
MemoryLocation(self.memory_address, self.string_in_memory_length),
CodeLocation(0, 0),
)
register_definition = Definition(
Register(0, 4),
CodeLocation(0x42, 0),
)
dependency_graph = DepGraph(networkx.DiGraph([
(memory_location_definition, register_definition)
]))
nodes_before_call = dependency_graph.nodes()
dependency_graph.add_dependencies_for_concrete_pointers_of(
[claripy.BVV(self.memory_address, arch.bits)],
register_definition,
cfg,
loader
)
self.assertEqual(nodes_before_call, dependency_graph.nodes())
def test_add_dependencies_for_concrete_pointers_of_does_nothing_if_pointer_is_not_concrete(self):
arch = self.ArchMock()
cfg = self.CFGMock({})
loader = self.LoaderMock(self.MainObjectMock(self.SectionMock(True)))
register_definition = Definition(
Register(0, 4),
CodeLocation(0x42, 0),
)
dependency_graph = DepGraph()
dependency_graph.add_node(register_definition)
nodes_before_call = dependency_graph.nodes()
dependency_graph.add_dependencies_for_concrete_pointers_of(
[claripy.BVS("TOP", arch.bits)],
register_definition,
cfg,
loader,
)
self.assertEqual(nodes_before_call, dependency_graph.nodes())
def test_add_dependencies_for_concrete_pointers_of_create_memory_location_with_undefined_data_if_data_pointed_to_by_definition_is_not_known(self):
arch = self.ArchMock()
loader = self.LoaderMock(self.MainObjectMock(self.SectionMock(True)))
datum_content = None
datum_size = 0x4242
memory_datum = self.MemoryDataMock(
self.memory_address,
datum_content,
datum_size,
'unknown'
)
cfg = self.CFGMock({ self.memory_address: memory_datum })
memory_definition = Definition(
MemoryLocation(self.memory_address, datum_size),
ExternalCodeLocation(),
)
register_definition = Definition(
Register(0, 4),
CodeLocation(0x42, 0),
)
dependency_graph = DepGraph()
dependency_graph.add_node(register_definition)
dependency_graph.add_dependencies_for_concrete_pointers_of(
[claripy.BVV(self.memory_address, arch.bits)],
register_definition,
cfg,
loader,
)
nodes = list(dependency_graph.nodes())
predecessors = list(dependency_graph.graph.predecessors(register_definition))
self.assertEqual(nodes, [register_definition, memory_definition])
self.assertListEqual(predecessors, [memory_definition])
def test_add_dependencies_for_concrete_pointers_of_adds_a_definition_with_codelocation_in_binary_if_data_in_readonly_memory(self):
arch = self.ArchMock()
writable = False
loader = self.LoaderMock(self.MainObjectMock(self.SectionMock(writable)))
memory_datum = self.MemoryDataMock(
self.memory_address,
str.encode(self.string_in_memory),
len(self.string_in_memory),
'string'
)
cfg = self.CFGMock({ self.memory_address: memory_datum })
register_definition = Definition(
Register(0, 4),
CodeLocation(0x42, 0),
)
dependency_graph = DepGraph()
dependency_graph.add_node(register_definition)
dependency_graph.add_dependencies_for_concrete_pointers_of(
[claripy.BVV(self.memory_address, arch.bits)],
register_definition,
cfg,
loader,
)
origin_codelocation = CodeLocation(0, 0, info={'readonly': True})
predecessor = list(dependency_graph.graph.predecessors(register_definition))[0]
self.assertEqual(predecessor.codeloc, origin_codelocation)
| 33.739234 | 150 | 0.647735 |
96e125ec5aa02313a1615a9630dae1a661e5c4fd
| 2,339 |
py
|
Python
|
srgan_pytorch/utils/transform.py
|
nisargshah1999/SRGAN-PyTorch
|
093fba8ee4e571d71ac9644350bdd03a1a547765
|
[
"Apache-2.0"
] | 2 |
2021-08-22T06:27:48.000Z
|
2021-08-22T06:36:43.000Z
|
srgan_pytorch/utils/transform.py
|
ekstra26/SRGAN-PyTorch
|
3cc4c034362070ba1e02549acca2088572fd7ec2
|
[
"Apache-2.0"
] | null | null | null |
srgan_pytorch/utils/transform.py
|
ekstra26/SRGAN-PyTorch
|
3cc4c034362070ba1e02549acca2088572fd7ec2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import PIL.BmpImagePlugin
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
__all__ = [
"opencv2pil", "opencv2tensor", "pil2opencv", "process_image"
]
def opencv2pil(image: np.ndarray) -> PIL.BmpImagePlugin.BmpImageFile:
""" OpenCV Convert to PIL.Image format.
Returns:
PIL.Image.
"""
image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
return image
def opencv2tensor(image: np.ndarray, gpu: int) -> torch.Tensor:
""" OpenCV Convert to torch.Tensor format.
Returns:
torch.Tensor.
"""
# BGR (OpenCV) -> RGB, scale to [0, 1], and add a batch dimension (NHWC)
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
nhwc_image = torch.from_numpy(rgb_image).div(255.0).unsqueeze(0)
# NHWC -> NCHW, the layout expected by PyTorch models
input_tensor = nhwc_image.permute(0, 3, 1, 2)
if gpu is not None:
input_tensor = input_tensor.cuda(gpu, non_blocking=True)
return input_tensor
def pil2opencv(image: PIL.BmpImagePlugin.BmpImageFile) -> np.ndarray:
""" PIL.Image Convert to OpenCV format.
Returns:
np.ndarray.
"""
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
return image
def process_image(image: PIL.BmpImagePlugin.BmpImageFile, gpu: int = None) -> torch.Tensor:
""" PIL.Image Convert to PyTorch format.
Args:
image (PIL.BmpImagePlugin.BmpImageFile): File read by PIL.Image.
gpu (int): GPU device index to move the tensor to; None keeps it on the CPU.
Returns:
torch.Tensor.
"""
tensor = transforms.ToTensor()(image)
input_tensor = tensor.unsqueeze(0)
if gpu is not None:
input_tensor = input_tensor.cuda(gpu, non_blocking=True)
return input_tensor
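# Minimal usage sketch (not part of the original module); "example.bmp" is a
# hypothetical file and the default gpu=None keeps the tensor on the CPU:
#
#     if __name__ == "__main__":
#         img = Image.open("example.bmp")
#         tensor = process_image(img)  # shape [1, C, H, W], values in [0, 1]
#         print(tensor.shape)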
| 30.376623 | 91 | 0.68106 |
90ce6b7b58ee0c317f2b5cda64d37a1a3c153449
| 9,068 |
py
|
Python
|
mail/api.py
|
mitodl/odl-video-service
|
9a292c88aa475f2ce720a93a769f7b46bcd8fa62
|
[
"BSD-3-Clause"
] | 3 |
2017-08-19T02:00:28.000Z
|
2022-01-11T20:53:37.000Z
|
mail/api.py
|
mitodl/odl-video-service
|
9a292c88aa475f2ce720a93a769f7b46bcd8fa62
|
[
"BSD-3-Clause"
] | 915 |
2017-04-18T15:46:40.000Z
|
2022-03-23T17:47:03.000Z
|
mail/api.py
|
mitodl/odl-video-service
|
9a292c88aa475f2ce720a93a769f7b46bcd8fa62
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Provides functions for sending and retrieving data about in-app email
"""
import json
import re
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.urls import reverse
from rest_framework import status
from mail.exceptions import SendBatchException
from mail.utils import chunks
from odl_video import logging
log = logging.getLogger(__name__)
class MailgunClient:
"""
Provides functions for communicating with the Mailgun REST API.
"""
_basic_auth_credentials = ("api", settings.MAILGUN_KEY)
@staticmethod
def default_params():
"""
Default params for Mailgun request. This is a method instead of an attribute to allow for the
overriding of settings values.
Returns:
dict: A dict of default parameters for the Mailgun API
"""
return {"from": settings.EMAIL_SUPPORT}
@classmethod
def _mailgun_request( # pylint: disable=too-many-arguments
cls, request_func, endpoint, params, sender_name=None, raise_for_status=True
):
"""
Sends a request to the Mailgun API
Args:
request_func (function): requests library HTTP function (get/post/etc.)
endpoint (str): Mailgun endpoint (eg: 'messages', 'events')
params (dict): Dict of params to add to the request as 'data'
raise_for_status (bool): If true, check the status and raise for non-2xx statuses
Returns:
requests.Response: HTTP response
"""
mailgun_url = "{}/{}".format(settings.MAILGUN_URL, endpoint)
email_params = cls.default_params()
email_params.update(params)
# Update 'from' address if sender_name was specified
if sender_name is not None:
email_params["from"] = "{sender_name} <{email}>".format(
sender_name=sender_name, email=email_params["from"]
)
response = request_func(
mailgun_url, auth=cls._basic_auth_credentials, data=email_params
)
if response.status_code == status.HTTP_401_UNAUTHORIZED:
message = "Mailgun API keys not properly configured."
log.error(message)
raise ImproperlyConfigured(message)
if raise_for_status:
response.raise_for_status()
return response
@classmethod
def send_batch( # pylint:disable=too-many-arguments,too-many-locals
cls,
subject,
html_body,
text_body,
recipients,
sender_address=None,
sender_name=None,
chunk_size=settings.MAILGUN_BATCH_CHUNK_SIZE,
raise_for_status=True,
):
"""
Sends a text email to a list of recipients (one email per recipient) via batch.
Args:
subject (str): email subject
html_body (str): email html body
text_body (str): email text body
recipients (iterable of (recipient, context)):
A list where each tuple is:
(recipient, context)
Where the recipient is an email address and context is a dict of variables for templating
sender_address (str): Sender email address
sender_name (str): Sender name
chunk_size (int): The maximum amount of emails to be sent at the same time
raise_for_status (bool): If true, raise for non 2xx statuses
Returns:
list:
List of responses which are HTTP responses from Mailgun.
Raises:
SendBatchException:
If there is at least one exception, this exception is raised with all other exceptions in a list
along with recipients we failed to send to.
"""
# Convert null contexts to empty dicts
recipients = ((email, context or {}) for email, context in recipients)
if settings.MAILGUN_RECIPIENT_OVERRIDE is not None:
# This is used for debugging only
recipients = [(settings.MAILGUN_RECIPIENT_OVERRIDE, {})]
responses = []
exception_pairs = []
for chunk in chunks(recipients, chunk_size=chunk_size):
chunk_dict = {email: context for email, context in chunk}
emails = list(chunk_dict.keys())
params = {
"to": emails,
"subject": subject,
"html": html_body,
"text": text_body,
"recipient-variables": json.dumps(chunk_dict),
}
if sender_address:
params["from"] = sender_address
try:
response = cls._mailgun_request(
requests.post,
"messages",
params,
sender_name=sender_name,
raise_for_status=raise_for_status,
)
responses.append(response)
except ImproperlyConfigured:
raise
except Exception as exception: # pylint: disable=broad-except
exception_pairs.append((emails, exception))
if exception_pairs:
raise SendBatchException(exception_pairs)
return responses
@classmethod
def send_individual_email( # pylint:disable=too-many-arguments
cls,
subject,
html_body,
text_body,
recipient,
recipient_variables=None,
sender_address=None,
sender_name=None,
raise_for_status=True,
):
"""
Sends a text email to a single recipient.
Args:
subject (str): email subject
html_body (str): email html body
text_body (str): email text body
recipient (str): email recipient
recipient_variables (dict): A dict of template variables to use (may be None for empty)
sender_address (str): Sender email address
sender_name (str): Sender name
raise_for_status (bool): If true, raise for non-2xx statuses
Returns:
requests.Response: response from Mailgun
"""
# Since .send_batch() returns a list, we need to return the first in the list
responses = cls.send_batch(
subject,
html_body,
text_body,
[(recipient, recipient_variables)],
sender_address=sender_address,
sender_name=sender_name,
raise_for_status=raise_for_status,
)
return responses[0]
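# Illustrative sketch of how MailgunClient.send_batch might be called; the
# addresses and template variables below are hypothetical:
#
#     responses = MailgunClient.send_batch(
#         subject="Your video is ready",
#         html_body="<p>Hi %recipient.name%</p>",
#         text_body="Hi %recipient.name%",
#         recipients=[("user@example.com", {"name": "User"})],
#     )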
def render_email_templates(template_name, context):
"""
Renders the email templates for the email
Args:
template_name (str): name of the template, this should match a directory in mail/templates
context (dict): context data for the email
Returns:
(str, str, str): tuple of the templates for subject, text_body, html_body
"""
subject_text = render_to_string(
"{}/subject.txt".format(template_name), context
).rstrip()
context.update({"subject": subject_text})
html_text = render_to_string("{}/body.html".format(template_name), context)
# pynliner internally uses bs4, so we can reuse the inlined HTML to build a
# plaintext version; this avoids parsing the body twice with bs4
soup = BeautifulSoup(html_text, "html5lib")
for link in soup.find_all("a"):
link.replace_with("{} ({})".format(link.string, link.attrs["href"]))
# clear any surviving style and title tags, so their contents don't get printed
for style in soup.find_all(["style", "title"]):
style.clear() # clear contents, just removing the tag isn't enough
fallback_text = soup.get_text().strip()
# normalize runs of blank lines to exactly three consecutive newlines
fallback_text = re.sub(r"\n\s*\n", "\n\n\n", fallback_text)
# strip leading spaces from each line
fallback_text = re.sub(
r"^([ ]+)([\s\\X])", r"\2", fallback_text, flags=re.MULTILINE
)
return subject_text, fallback_text, html_text
def context_for_video(video):
"""
Returns an email context for the given video
Args:
video (Video): video this email is about
Returns:
dict: the context for this user
"""
context = {
"video_url": urljoin(
settings.ODL_VIDEO_BASE_URL,
reverse("video-detail", kwargs={"video_key": video.hexkey}),
),
"video_title": video.title,
"collection_title": video.collection.title,
"collection_url": urljoin(
settings.ODL_VIDEO_BASE_URL,
reverse(
"collection-react-view",
kwargs={"collection_key": video.collection.hexkey},
),
),
"support_email": settings.EMAIL_SUPPORT,
"static_url": urljoin(settings.ODL_VIDEO_BASE_URL, settings.STATIC_URL),
}
return context
| 34.090226 | 111 | 0.615792 |
9fb7b4d0cad67db2d2d4b56e43d8837b8160cdb0
| 68,302 |
py
|
Python
|
python/paddle/fluid/layers/control_flow.py
|
skylarch/Paddle
|
d58d8df6f5f7aa6fd2f0780f87475055db57a80d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/layers/control_flow.py
|
skylarch/Paddle
|
d58d8df6f5f7aa6fd2f0780f87475055db57a80d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/layers/control_flow.py
|
skylarch/Paddle
|
d58d8df6f5f7aa6fd2f0780f87475055db57a80d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from .layer_function_generator import autodoc, templatedoc
from .tensor import assign, fill_constant
from .. import core
from ..framework import Program, Variable, Operator
from ..layer_helper import LayerHelper, unique_name
from ..initializer import force_init_on_cpu
from .ops import logical_and, logical_not, logical_or
import numpy
import warnings
from functools import reduce
__all__ = [
'While',
'Switch',
'increment',
'array_write',
'create_array',
'less_than',
'equal',
'array_read',
'array_length',
'IfElse',
'DynamicRNN',
'StaticRNN',
'reorder_lod_tensor_by_rank',
'ParallelDo',
'Print',
'is_empty',
]
def split_lod_tensor(input, mask, level=0):
"""
This function takes in an input that contains the complete lod information,
and takes in a mask which is used to mask certain parts of the input.
The output is the true branch and the false branch with the mask applied to
the input at a certain level in the tensor. Mainly used in IfElse to split
data into two parts.
Args:
input(tuple|list|None): The input tensor that contains complete
lod information needed to construct the output.
mask(list): A bool column vector which masks the input.
level(int): The specific lod level to split.
Returns:
tuple(Variable, Variable):
The true branch of tensor as per the mask applied to input.
The false branch of tensor as per the mask applied to input.
Examples:
.. code-block:: python
x = fluid.layers.data(name='x', shape=[1])
x.persistable = True
y = fluid.layers.data(name='y', shape=[1])
y.persistable = True
out_true, out_false = fluid.layers.split_lod_tensor(
input=x, mask=y, level=0)
"""
helper = LayerHelper('split_lod_tensor', **locals())
out_true = helper.create_tmp_variable(dtype=input.dtype)
out_false = helper.create_tmp_variable(dtype=input.dtype)
helper.append_op(
type='split_lod_tensor',
inputs={
'X': input,
'Mask': mask,
},
outputs={'OutTrue': out_true,
'OutFalse': out_false},
attrs={'level': level})
return out_true, out_false
def merge_lod_tensor(in_true, in_false, x, mask, level=0):
"""
**merge_lod_tensor**
This function takes in an input :math:`x`, the True branch, the False
branch and a binary :math:`mask`. Using this information, this function
merges the True and False branches of the tensor into a single tensor as
output at a certain lod level indicated by :math:`level`. Used in IfElse
to merge the outputs of the True block and the False block.
Args:
in_true(tuple|list|None): The True branch to be merged.
in_false(tuple|list|None): The False branch to be merged.
x(tuple|list|None): The input tensor that contains complete
lod information needed to construct the output.
mask(list): A bool column vector which masks the input.
level(int): The specific lod level to merge.
Returns:
Variable: The merged output tensor.
Examples:
.. code-block:: python
x = layers.data(
name='x', shape=[1], dtype='float32', stop_gradient=False)
y = layers.data(
name='y', shape=[1], dtype='bool', stop_gradient=False)
level = 0
out_true, out_false = layers.split_lod_tensor(
input=x, mask=y, level=level)
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
"""
helper = LayerHelper('merge_lod_tensor', **locals())
out = helper.create_tmp_variable(dtype=in_true.dtype)
helper.append_op(
type='merge_lod_tensor',
inputs={'X': x,
'Mask': mask,
'InTrue': in_true,
'InFalse': in_false},
outputs={'Out': out},
attrs={'level': level})
return out
def Print(input,
first_n=-1,
message=None,
summarize=-1,
print_tensor_name=True,
print_tensor_type=True,
print_tensor_shape=True,
print_tensor_lod=True,
print_phase='both'):
'''
**Print operator**
This creates a print op that will print when a tensor is accessed.
Wraps the tensor passed in so that whenever the tensor is accessed,
the message `message` is printed, along with the current value of the
tensor.
Args:
input (Variable): A Tensor to print.
summarize (int): Print this number of elements in the tensor; prints
all elements if set to a negative value.
message (str): A string message to print as a prefix.
first_n (int): Only log `first_n` number of times.
print_tensor_name (bool): Print the tensor name.
print_tensor_type (bool): Print the tensor type.
print_tensor_shape (bool): Print the tensor shape.
print_tensor_lod (bool): Print the tensor lod.
print_phase (str): Which phase to display, including 'forward',
'backward' and 'both'. If set to 'backward' or 'both', will
print the gradients of input tensor.
Returns:
Variable: Output tensor, same data with input tensor.
Examples:
.. code-block:: python
value = some_layer(...)
Print(value, summarize=10,
message="The content of some_layer: ")
'''
helper = LayerHelper('print', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype())
helper.append_op(
type='print',
inputs={'In': input},
attrs={
'first_n': first_n,
'summarize': summarize,
'message': message or "",
'print_tensor_name': print_tensor_name,
'print_tensor_type': print_tensor_type,
'print_tensor_shape': print_tensor_shape,
'print_tensor_lod': print_tensor_lod,
'print_phase': print_phase.upper()
},
outputs={'Out': out})
return out
class BlockGuard(object):
"""
BlockGuard class.
BlockGuard class is used to create a sub-block in a program by
using the Python `with` keyword.
"""
def __init__(self, main_program):
if not isinstance(main_program, Program):
raise TypeError("BlockGuard takes a program")
self.main_program = main_program
def __enter__(self):
self.main_program.create_block()
def __exit__(self, exc_type, exc_val, exc_tb):
self.main_program.rollback()
if exc_type is not None:
return False # re-raise exception
return True
class ParallelDo(object):
"""
ParallelDo is used to represent multi-thread data parallel processing.
Its vanilla implementation can be shown as the following (:math:`|` means
single thread and :math:`||||` means multiple threads)
.. code-block:: text
In the forward pass
| Split input onto different devices
| Copy parameter onto different devices
|||| Compute forward pass in parallel
| Merge output from different devices
In the backward pass
| Split output@grad onto different devices
|||| Compute backward pass in parallel
| accumulate param@grad from different devices to the first device
| Merge input@grad from different devices
| Copy param@grad to the place of parallel_do_op
Examples:
.. code-block:: python
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# ParallelDo version & Single-thread version
if thread_num > 1:
places = fluid.layers.get_places(thread_num)
pd = fluid.layers.ParallelDo(places)
with pd.do():
images = pd.read_input(images)
label = pd.read_input(label)
predict = cnn_model(images)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
pd.write_output(avg_cost)
avg_cost = pd()
avg_cost = fluid.layers.mean(avg_cost)
else:
predict = cnn_model(images)
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
.. warning::
It will be soon deprecated, please use ParallelExecutor instead.
"""
def __init__(self, places, use_nccl=False, name=None):
warnings.warn(
"API ParallelDo is deprecated since 0.15.0. Please use ParallelExecutor instead.",
Warning)
self.helper = LayerHelper("parallel_do", name=name)
self.inputs = []
self.places = places
self.outputs = []
self.status = StaticRNN.BEFORE_RNN_BLOCK
self.use_nccl = use_nccl
def do(self):
return BlockGuardWithCompletion(self)
def parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def __call__(self, *args, **kwargs):
if self.status != StaticRNN.AFTER_RNN_BLOCK:
raise ValueError("RNN output can only be retrieved after rnn block")
if len(self.outputs) == 0:
raise ValueError("RNN has no output")
elif len(self.outputs) == 1:
return self.outputs[0]
else:
return self.outputs
def read_input(self, var):
self.inputs.append(var)
return var
def write_output(self, var):
self.outputs.append(var)
def get_parameters(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
local_inputs = set()
params = list()
for var in self.inputs:
local_inputs.add(var.name)
for op in current_block.ops:
for iname in op.input_names:
for in_var_name in op.input(iname):
if in_var_name not in local_inputs:
params.append(in_var_name)
for oname in op.output_names:
for out_var_name in op.output(oname):
local_inputs.add(out_var_name)
params = list(set(params))
return [parent_block.var(name) for name in params]
def _complete_op(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
step_scope = parent_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
self.outputs = [
parent_block.create_var(
name=o.name,
shape=o.shape,
dtype=o.dtype,
lod_level=o.lod_level,
persistable=o.persistable,
stop_gradient=o.stop_gradient) for o in self.outputs
]
inputs = [parent_block.var(i.name) for i in self.inputs]
outputs = [parent_block.var(o.name) for o in self.outputs]
parent_block.append_op(
type='parallel_do',
inputs={
'inputs': inputs,
'parameters': self.get_parameters(),
'places': self.places
},
outputs={'outputs': outputs,
'parallel_scopes': [step_scope]},
attrs={'sub_block': current_block,
'use_nccl': self.use_nccl})
class BlockGuardWithCompletion(BlockGuard):
"""
BlockGuardWithCompletion class.
BlockGuardWithCompletion class is used to create an op with a block in a program.
"""
def __init__(self, rnn):
if not (isinstance(rnn, StaticRNN) or isinstance(rnn, ParallelDo)):
raise TypeError(
"BlockGuardWithCompletion takes a StaticRNN or ParallelDo")
super(BlockGuardWithCompletion, self).__init__(rnn.helper.main_program)
self.rnn = rnn
def __enter__(self):
self.rnn.status = StaticRNN.IN_RNN_BLOCK
return super(BlockGuardWithCompletion, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
self.rnn._complete_op()
return super(BlockGuardWithCompletion, self).__exit__(exc_type, exc_val,
exc_tb)
class StaticRNNMemoryLink(object):
"""
StaticRNNMemoryLink class.
StaticRNNMemoryLink class is used to create a link between two
memory cells of a StaticRNN.
NOTE: This is an internal data structure of a very low-level API.
Please use StaticRNN instead.
Args:
init(Variable): the initial variable for Memory.
pre_mem(Variable): the memory variable in previous time step.
mem(Variable): the memory variable in current time step.
"""
def __init__(self, init, pre_mem, mem=None):
self.init = init
self.pre_mem = pre_mem
self.mem = mem
class StaticRNN(object):
"""
StaticRNN class.
StaticRNN class is used to create a StaticRNN. The RNN will have its
own parameters like inputs, outputs, memories, status and length.
"""
BEFORE_RNN_BLOCK = 0
IN_RNN_BLOCK = 1
AFTER_RNN_BLOCK = 2
def __init__(self, name=None):
self.helper = LayerHelper("static_rnn", name=name)
self.memories = {} # memory map, from pre_mem.name --> MemoryLink
self.inputs = [] # input variable list in current block
self.outputs = [] # output variable list in parent block
self.status = StaticRNN.BEFORE_RNN_BLOCK # status flag.
# sequence length; since this is a static RNN, the sequence length is fixed.
self.seq_len = None
def step(self):
return BlockGuardWithCompletion(self)
def _assert_in_rnn_block_(self, method):
if self.status != StaticRNN.IN_RNN_BLOCK:
raise ValueError("You must invoke {0} in rnn block".format(method))
def memory(self,
init=None,
shape=None,
batch_ref=None,
init_value=0.0,
init_batch_dim_idx=0,
ref_batch_dim_idx=1):
"""
Args:
init: boot memory; if not set, both shape and batch_ref must be provided
shape: shape of the boot memory
batch_ref: batch size reference variable
init_value: the init value of boot memory
init_batch_dim_idx: the index of batch size in init's dimension
ref_batch_dim_idx: the index of batch size in batch_ref's dimension
"""
self._assert_in_rnn_block_('memory')
if init is None:
if shape is None or batch_ref is None:
raise ValueError(
"if init is None, memory at least need shape and batch_ref")
parent_block = self._parent_block()
var_name = unique_name.generate("@".join(
[self.helper.name, "memory_boot"]))
boot_var = parent_block.create_var(
name=var_name,
shape=shape,
dtype=batch_ref.dtype,
persistable=False)
parent_block.append_op(
type="fill_constant_batch_size_like",
inputs={'Input': [batch_ref]},
outputs={'Out': [boot_var]},
attrs={
'value': init_value,
'shape': boot_var.shape,
'dtype': boot_var.dtype,
'input_dim_idx': ref_batch_dim_idx,
'output_dim_idx': init_batch_dim_idx
})
return self.memory(init=boot_var)
else:
pre_mem = self.helper.create_variable(
name=unique_name.generate("@".join([self.helper.name, "mem"])),
dtype=init.dtype,
shape=init.shape)
self.memories[pre_mem.name] = StaticRNNMemoryLink(
init=init, pre_mem=pre_mem)
return pre_mem
def step_input(self, x):
self._assert_in_rnn_block_('step_input')
if not isinstance(x, Variable):
raise TypeError("step input takes a Variable")
if self.seq_len is None:
self.seq_len = x.shape[0]
elif self.seq_len != x.shape[0]:
raise ValueError("Static RNN only take fix seq_len input")
ipt = self.helper.create_variable(
name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type)
self.inputs.append(ipt)
return ipt
def step_output(self, o):
self._assert_in_rnn_block_('step_output')
if not isinstance(o, Variable):
raise TypeError("step output takes a Variable")
tmp_o = self.helper.create_tmp_variable(dtype=o.dtype)
self.helper.append_op(
type='rnn_memory_helper',
inputs={'X': [o]},
outputs={'Out': tmp_o},
attrs={'dtype': o.dtype})
out_var = self._parent_block().create_var(
name=tmp_o.name,
shape=[self.seq_len] + list(tmp_o.shape),
dtype=tmp_o.dtype)
self.outputs.append(out_var)
def output(self, *outputs):
for each in outputs:
self.step_output(each)
def update_memory(self, mem, var):
if not isinstance(mem, Variable) or not isinstance(var, Variable):
raise TypeError("update memory should take variables")
self.memories[mem.name].mem = var
def _parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def __call__(self, *args, **kwargs):
if self.status != StaticRNN.AFTER_RNN_BLOCK:
raise ValueError("RNN output can only be retrieved after rnn block")
if len(self.outputs) == 0:
raise ValueError("RNN has no output")
elif len(self.outputs) == 1:
return self.outputs[0]
else:
return self.outputs
def _complete_op(self):
main_program = self.helper.main_program
rnn_block = main_program.current_block()
parent_block = self._parent_block()
local_inputs = set()
for op in rnn_block.ops:
assert isinstance(op, Operator)
for oname in op.output_names:
for out_var_name in op.output(oname):
local_inputs.add(out_var_name)
for var in self.inputs:
local_inputs.add(var.name)
for m in self.memories:
local_inputs.add(m)
params = list()
for op in rnn_block.ops:
assert isinstance(op, Operator)
for iname in op.input_names:
for in_var_name in op.input(iname):
if in_var_name not in local_inputs:
params.append(in_var_name)
parameters = [parent_block.var(name) for name in params]
step_scope = parent_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
inlinks = [parent_block.var(i.name) for i in self.inputs]
outlinks = self.outputs
boot_memories = []
pre_memories = []
memories = []
for _, mem in list(self.memories.items()):
boot_memories.append(mem.init)
pre_memories.append(mem.pre_mem.name)
mem_var = rnn_block.var(mem.mem.name)
assert isinstance(mem_var, Variable)
new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype)
rnn_block.append_op(
type='rnn_memory_helper',
inputs={'X': [mem_var]},
outputs={'Out': [new_mem]},
attrs={'dtype': mem_var.dtype})
memories.append(new_mem.name)
parent_block.append_op(
type='recurrent',
inputs={
'inputs': inlinks,
'initial_states': boot_memories,
'parameters': parameters
},
outputs={'outputs': outlinks,
'step_scopes': [step_scope]},
attrs={
'ex_states': pre_memories,
'states': memories,
'sub_block': rnn_block
})
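# Illustrative sketch of StaticRNN usage (the sizes and the `fluid.layers.fc`
# call are hypothetical); only the StaticRNN methods defined above are assumed:
#
#     rnn = StaticRNN()
#     with rnn.step():
#         x_t = rnn.step_input(x)  # x has shape [seq_len, batch, dim]
#         prev = rnn.memory(shape=[-1, hidden_dim], batch_ref=x_t)
#         h_t = fluid.layers.fc(input=[x_t, prev], size=hidden_dim, act='relu')
#         rnn.update_memory(prev, h_t)
#         rnn.step_output(h_t)
#     out = rnn()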
class WhileGuard(BlockGuard):
def __init__(self, while_op):
if not isinstance(while_op, While):
raise TypeError("WhileGuard takes a while op")
super(WhileGuard, self).__init__(while_op.helper.main_program)
self.while_op = while_op
def __enter__(self):
self.while_op.status = While.IN_WHILE_BLOCK
return super(WhileGuard, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.while_op.status = While.AFTER_WHILE_BLOCK
self.while_op._complete()
return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
class While(object):
"""
while loop control flow.
Args:
cond (Variable): condition used to compare.
name (str): The name of this layer.
Examples:
.. code-block:: python
d0 = layers.data("d0", shape=[10], dtype='float32')
data_array = layers.array_write(x=d0, i=i)
array_len = layers.fill_constant(shape=[1],dtype='int64', value=3)
cond = layers.less_than(x=i, y=array_len)
while_op = layers.While(cond=cond)
with while_op.block():
d = layers.array_read(array=data_array, i=i)
i = layers.increment(x=i, in_place=True)
layers.array_write(d, i=i, array=data_array)
layers.less_than(x=i, y=array_len, cond=cond)
"""
BEFORE_WHILE_BLOCK = 0
IN_WHILE_BLOCK = 1
AFTER_WHILE_BLOCK = 2
def __init__(self, cond, name=None):
self.helper = LayerHelper("while", name=name)
self.status = While.BEFORE_WHILE_BLOCK
if not isinstance(cond, Variable):
raise TypeError("condition should be a variable")
assert isinstance(cond, Variable)
if cond.dtype != core.VarDesc.VarType.BOOL:
raise TypeError("condition should be a bool variable")
if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
raise TypeError("condition should be a bool scalar")
self.cond_var = cond
def block(self):
return WhileGuard(self)
def _complete(self):
main_program = self.helper.main_program
while_block = main_program.current_block()
parent_block = main_program.block(main_program.current_block()
.parent_idx)
inner_outputs = {self.cond_var.name}
x_name_list = set()
for op in while_block.ops:
for iname in op.input_names:
for in_var_name in op.input(iname):
if in_var_name not in inner_outputs:
x_name_list.add(in_var_name)
for oname in op.output_names:
for out_var_name in op.output(oname):
inner_outputs.add(out_var_name)
out_vars = []
for inner_out_name in inner_outputs:
if inner_out_name in parent_block.vars:
out_vars.append(parent_block.var(inner_out_name))
step_scope = parent_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
parent_block.append_op(
type='while',
inputs={
'X': [
parent_block._var_recursive(x_name)
for x_name in x_name_list
],
'Condition': [self.cond_var]
},
outputs={'Out': out_vars,
'StepScopes': [step_scope]},
attrs={'sub_block': while_block})
def lod_rank_table(x, level=0):
"""LoD Rank Table Operator. Given an input variable **x** and a level number
of LoD, this layer creates a LodRankTable object. A LoDRankTable object
contains a list of bi-element tuples. Each tuple consists of an index and
a length, both of which are int type. Referring to the specified level of LoD,
the index is the sequence index number and the length represents the
sequence length. Please note that the list is ranked in descending order by
the length. The following is an example:
.. code-block:: text
x is a LoDTensor:
x.lod = [[2, 1],
[5, 1, 1]]
x.data = [a, b, c, d, e, f, g]
1. set level to 0:
Create lod rank table:
lod_rank_table_obj = lod_rank_table(x, level=0)
Get:
lod_rank_table_obj.items() = [(0, 2), (1, 1)]
2. set level to 1:
Create lod rank table:
lod_rank_table_obj = lod_rank_table(x, level=1)
Get:
lod_rank_table_obj.items() = [(0, 5), (1, 1), (2, 1)]
Args:
x (Variable): Input variable, a LoDTensor based which to create the lod
rank table.
level (int): Specify the LoD level, on which to create the lod rank
table.
Returns:
Variable: The created LoDRankTable object.
Examples:
.. code-block:: python
x = fluid.layers.data(name='x', shape=[10],
dtype='float32', lod_level=1)
out = layers.lod_rank_table(x=x, level=0)
"""
helper = LayerHelper("lod_rank_table", **locals())
table = helper.create_variable(
type=core.VarDesc.VarType.LOD_RANK_TABLE,
name=unique_name.generate("lod_rank_table"))
helper.append_op(
type='lod_rank_table',
inputs={'X': x},
outputs={'Out': table},
attrs={'level': level})
return table
@templatedoc()
def max_sequence_len(rank_table):
"""
${comment}
>>> import paddle.fluid as fluid
>>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
>>> lod_level=1)
>>> rank_table = layers.lod_rank_table(x=x, level=0)
>>> max_seq_len = layers.max_sequence_len(rank_table)
Args:
rank_table(${rank_table_type}): ${rank_table_comment}.
Returns:
${out_comment}.
"""
helper = LayerHelper("max_seqence_len", **locals())
res = helper.create_tmp_variable(dtype="int64")
helper.append_op(
type="max_sequence_len",
inputs={"RankTable": rank_table},
outputs={"Out": res})
return res
def lod_tensor_to_array(x, table):
"""
Convert a LoDTensor to a LoDTensorArray.
    This function splits a LoDTensor into a LoDTensorArray according to its LoD
    information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
    PaddlePaddle. The LoDTensorArray generated by this function can be further read
or written by `read_from_array()` and `write_to_array()` operators. However,
this function is generally an internal component of PaddlePaddle `DynamicRNN`.
Users should not use it directly.
Args:
x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
table (ParamAttr|list): The variable that stores the level of lod
which is ordered by sequence length in
descending order. It is generally generated
by `layers.lod_rank_table()` API.
Returns:
Variable: The LoDTensorArray that has been converted from the input tensor.
Examples:
.. code-block:: python
x = fluid.layers.data(name='x', shape=[10])
table = fluid.layers.lod_rank_table(x, level=0)
array = fluid.layers.lod_tensor_to_array(x, table)
"""
helper = LayerHelper("lod_tensor_to_array", **locals())
array = helper.create_variable(
name=unique_name.generate("lod_tensor_to_array"),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
helper.append_op(
type='lod_tensor_to_array',
inputs={'X': x,
'RankTable': table},
outputs={'Out': array})
return array
def array_to_lod_tensor(x, table):
"""Convert a LoD_Tensor_Aarry to an LoDTensor.
Args:
x (Variable|list): The lod tensor array to be converted to a tensor.
table (ParamAttr|list): The variable that stores the level of lod
which is ordered by sequence length in
descending order.
Returns:
Variable: The variable of type tensor that has been converted
from an array.
Examples:
.. code-block:: python
x = fluid.layers.data(name='x', shape=[10])
table = fluid.layers.lod_rank_table(x, level=0)
array = fluid.layers.lod_tensor_to_array(x, table)
lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
"""
helper = LayerHelper("array_to_lod_tensor", **locals())
tmp = helper.create_tmp_variable(dtype=x.dtype)
helper.append_op(
type="array_to_lod_tensor",
inputs={'X': x,
'RankTable': table},
outputs={'Out': tmp})
return tmp
def increment(x, value=1.0, in_place=True):
"""
This function performs an operation that increments each value in the
input :math:`x` by an amount: :math:`value` as mentioned in the input
parameter. This operation is performed in-place by default.
Args:
x (Variable|list): The tensor that has the input values.
value (float): The amount by which the values should be incremented.
in_place (bool): If the increment should be performed in-place.
Returns:
Variable: The elementwise-incremented object.
Examples:
.. code-block:: python
data = fluid.layers.data(name='data', shape=[32, 32], dtype='float32')
data = fluid.layers.increment(x=data, value=3.0, in_place=True)
"""
helper = LayerHelper("increment", **locals())
if not in_place:
out = helper.create_tmp_variable(dtype=x.dtype)
else:
out = x
helper.append_op(
type='increment',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'step': float(value)})
return out
def array_write(x, i, array=None):
"""
This function writes the given input variable to the specified position
    indicated by the array index to an output LOD_TENSOR_ARRAY. If the
    output LOD_TENSOR_ARRAY is not given (None), a new one will be created and
returned.
Args:
x (Variable|list): The input tensor from which the data will be read.
i (Variable|list): The index of the output LOD_TENSOR_ARRAY, pointing to
the position to which the input tensor will be
written.
array (Variable|list): The output LOD_TENSOR_ARRAY to which the input
tensor will be written. If this parameter is
NONE, a new LOD_TENSOR_ARRAY will be created and
returned.
Returns:
Variable: The output LOD_TENSOR_ARRAY where the input tensor is written.
Examples:
.. code-block:: python
tmp = fluid.layers.zeros(shape=[10], dtype='int32')
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
arr = layers.array_write(tmp, i=i)
"""
helper = LayerHelper('array_write', **locals())
if array is None:
array = helper.create_variable(
name="{0}.out".format(helper.name),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
helper.append_op(
type='write_to_array',
inputs={'X': [x],
'I': [i]},
outputs={'Out': [array]})
return array
def create_array(dtype):
"""
**Create LoDTensorArray**
    This function creates an array of type LOD_TENSOR_ARRAY. It is mainly used to
implement RNN with array_write, array_read and While.
Args:
dtype (int|float): The data type of the elements in the lod_tensor_array.
Returns:
Variable: The lod_tensor_array variable storing the elements of data type.
Examples:
.. code-block:: python
data = fluid.layers.create_array(dtype='float32')
"""
helper = LayerHelper("array", **locals())
return helper.create_variable(
name="{0}.out".format(helper.name),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=dtype)
@templatedoc()
def less_than(x, y, force_cpu=None, cond=None, **ignored):
"""
${comment}
>>> import paddle.fluid as fluid
>>> less = fluid.layers.less_than(x=label, y=limit)
Args:
x(${x_type}): ${x_comment}.
y(${y_type}): ${y_comment}.
force_cpu(${force_cpu_type}): ${force_cpu_comment}.
cond(Variable|None): Optional output variable to store the result of *less_than*
Returns:
${out_comment}.
"""
helper = LayerHelper("less_than", **locals())
if cond is None:
cond = helper.create_tmp_variable(dtype='bool')
cond.stop_gradient = True
attrs = dict()
if force_cpu is not None:
attrs['force_cpu'] = force_cpu
elif force_init_on_cpu():
attrs['force_cpu'] = force_init_on_cpu()
helper.append_op(
type='less_than',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs)
return cond
def equal(x, y, cond=None, **ignored):
"""
**equal**
This layer returns the truth value of :math:`x == y` elementwise.
Args:
x(Variable): First operand of *equal*
y(Variable): Second operand of *equal*
cond(Variable|None): Optional output variable to store the result of *equal*
Returns:
Variable: The tensor variable storing the output of *equal*.
Examples:
.. code-block:: python
less = fluid.layers.equal(x=label, y=limit)
"""
helper = LayerHelper("equal", **locals())
if cond is None:
cond = helper.create_tmp_variable(dtype='bool')
cond.stop_gradient = True
helper.append_op(
type='equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [cond]})
return cond
def array_read(array, i):
"""
    This function reads the data at the given index from the input
    LOD_TENSOR_ARRAY.
.. code-block:: text
Given:
array = [0.6, 0.1, 0.3, 0.1]
And:
i = 2
Then:
output = 0.3
Args:
        array (Variable|list): The input tensor array that stores the data to be read.
i (Variable|list): The index of the data to be read from input array.
Returns:
Variable: The tensor type variable that has the data written to it.
Examples:
.. code-block:: python
tmp = fluid.layers.zeros(shape=[10], dtype='int32')
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
arr = layers.array_read(tmp, i=i)
"""
helper = LayerHelper('array_read', **locals())
if not isinstance(
array,
Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
raise TypeError("array should be tensor array vairable")
out = helper.create_tmp_variable(dtype=array.dtype)
helper.append_op(
type='read_from_array',
inputs={'X': [array],
'I': [i]},
outputs={'Out': [out]})
return out
def shrink_memory(x, i, table):
"""
This function creates an operator to shrink rnn memory using the RankTable
as mentioned in the input parameter.
NOTE: This API is very low-level API. It is used by DynamicRNN only.
    Since the dynamic RNN implements RNN in a no-padding way, the sequences
    will be sorted by length, and the length of the valid memory will be shrunk
    after each time step.
Args:
x(Variable): The memory object in the previous time step.
        i(Variable): The step count variable. An int scalar as LoDTensor.
table(Variable): The RNNRankTable object.
Returns:
the memory variable after shrink.
Examples:
Since this API is very low level API. The example is not provided.
Please reference the implementation of class DynamicRNN for detail
usage.
"""
helper = LayerHelper('shrink_memory', **locals())
out = helper.create_tmp_variable(dtype=x.dtype)
helper.append_op(
type='shrink_rnn_memory',
inputs={'X': [x],
'I': [i],
'RankTable': [table]},
outputs={'Out': [out]},
attrs={})
return out
def array_length(array):
"""
**Get the Length of Input LoDTensorArray**
This function performs the operation to find the length of the input
LOD_TENSOR_ARRAY.
Related API: array_read, array_write, While.
Args:
array (LOD_TENSOR_ARRAY): The input array that will be used
to compute the length.
Returns:
Variable: The length of the input LoDTensorArray.
Examples:
.. code-block:: python
tmp = fluid.layers.zeros(shape=[10], dtype='int32')
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
arr = fluid.layers.array_write(tmp, i=i)
arr_len = fluid.layers.array_length(arr)
"""
helper = LayerHelper('array_length', **locals())
tmp = helper.create_tmp_variable(dtype='int64')
tmp.stop_gradient = True
helper.append_op(
type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]})
return tmp
class ConditionalBlockGuard(BlockGuard):
"""
ConditionalBlockGuard is derived from BlockGuard. It is dedicated for
    holding a ConditionalBlock, and helping users enter and exit the
    ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
is generally an internal component of IfElse, users should not use it directly.
"""
def __init__(self, block):
if not isinstance(block, ConditionalBlock):
raise TypeError("block should be conditional block")
super(ConditionalBlockGuard, self).__init__(block.helper.main_program)
self.block = block
def __enter__(self):
return super(ConditionalBlockGuard, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self.block.complete()
return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val,
exc_tb)
class ConditionalBlock(object):
'''
**ConditionalBlock**
    ConditionalBlock is an operator that binds a block to a specific condition.
    If the condition matches, the corresponding block will be executed.
Args:
inputs (Variable): bool conditions.
        is_scalar_condition (bool): whether the branch is controlled by a scalar.
name(str): name of this ConditionalBlock.
Examples:
.. code-block:: python
cond = layers.less_than(x=label, y=limit)
true_image, false_image = layers.split_lod_tensor(
input=image, mask=cond)
            true_cond = layers.ConditionalBlock([true_image])
            false_cond = layers.ConditionalBlock([false_image])
with true_cond.block():
...
with false_cond.block():
...
'''
def __init__(self, inputs, is_scalar_condition=False, name=None):
for each_input in inputs:
if not isinstance(each_input, Variable):
raise TypeError("Each input should be variable")
self.inputs = inputs
self.is_scalar_condition = is_scalar_condition
self.helper = LayerHelper('conditional_block', name=name)
def block(self):
return ConditionalBlockGuard(self)
def complete(self):
inside_block = self.helper.main_program.current_block()
parent_block = self.helper.main_program.block(inside_block.parent_idx)
intermediate = set()
params = set()
for each_op in inside_block.ops:
assert isinstance(each_op, Operator)
for iname in each_op.input_names:
for in_var_name in each_op.input(iname):
if in_var_name not in intermediate:
params.add(in_var_name)
for oname in each_op.output_names:
for out_var_name in each_op.output(oname):
intermediate.add(out_var_name)
input_set = set([ipt.name for ipt in self.inputs])
param_list = [
parent_block._var_recursive(each_name) for each_name in params
if each_name not in input_set
]
out_list = [
parent_block.var(var_name) for var_name in parent_block.vars
if var_name in intermediate
]
step_scope = parent_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
parent_block.append_op(
type='conditional_block',
inputs={
'X': self.inputs,
'Params': param_list,
},
outputs={'Out': out_list,
'Scope': [step_scope]},
attrs={
'sub_block': inside_block,
'is_scalar_condition': self.is_scalar_condition
})
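# Illustrative sketch of driving ``ConditionalBlock`` directly, following the
# pattern in its docstring above. It assumes ``import paddle.fluid as fluid``
# and pre-existing Variables ``image``, ``label`` and ``limit``; kept in
# comments only so the module's behaviour is unchanged.
#
#     cond = fluid.layers.less_than(x=label, y=limit)
#     true_image, false_image = fluid.layers.split_lod_tensor(
#         input=image, mask=cond)
#     true_cond = ConditionalBlock([true_image])
#     false_cond = ConditionalBlock([false_image])
#     with true_cond.block():
#         hidden_t = fluid.layers.fc(input=true_image, size=100, act='tanh')
#     with false_cond.block():
#         hidden_f = fluid.layers.fc(input=false_image, size=100, act='tanh')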
class Switch(object):
"""
    The Switch class works just like an `if-elif-else` chain. It can be used, for
    example, in a learning rate scheduler to modify the learning rate.
The Semantics:
1. A `switch` control-flow checks cases one-by-one.
2. The condition of each case is a boolean value, which is a scalar Variable.
3. It runs the first matched case, or the default case if there is one.
4. Once it matches a case, it runs the corresponding branch and only that branch.
Examples:
.. code-block:: python
lr = fluid.layers.tensor.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
persistable=True,
name="learning_rate")
            zero_var = fluid.layers.tensor.fill_constant(
                shape=[1], dtype='float32', value=0.0)
            one_var = fluid.layers.tensor.fill_constant(
                shape=[1], dtype='float32', value=1.0)
            two_var = fluid.layers.tensor.fill_constant(
                shape=[1], dtype='float32', value=2.0)
with fluid.layers.control_flow.Switch() as switch:
with switch.case(global_step == zero_var):
fluid.layers.tensor.assign(input=one_var, output=lr)
with switch.default():
fluid.layers.tensor.assign(input=two_var, output=lr)
"""
def __init__(self, name=None):
self.helper = LayerHelper('switch', name=name)
self.inside_scope = False
self.pre_not_conditions = []
def case(self, condition):
"""create a new block for this condition
"""
if not self.inside_scope:
raise ValueError("case should be called inside with")
if len(self.pre_not_conditions) == 0:
cond_block = ConditionalBlock([condition], is_scalar_condition=True)
not_cond = logical_not(x=condition)
self.pre_not_conditions.append(not_cond)
else:
pre_cond_num = len(self.pre_not_conditions)
pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
new_not_cond = logical_and(
x=pre_not_cond, y=logical_not(x=condition))
self.pre_not_conditions.append(new_not_cond)
cond_block = ConditionalBlock(
[logical_and(
x=pre_not_cond, y=condition)],
is_scalar_condition=True)
return ConditionalBlockGuard(cond_block)
def default(self):
"""
create a default case for this switch
"""
pre_cond_num = len(self.pre_not_conditions)
if pre_cond_num == 0:
raise ValueError("there should be at least one condition")
cond_block = ConditionalBlock(
[self.pre_not_conditions[pre_cond_num - 1]],
is_scalar_condition=True)
return ConditionalBlockGuard(cond_block)
def __enter__(self):
"""
set flag that now is inside switch.block {}
:return:
"""
self.inside_scope = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.inside_scope = False
if exc_type is not None:
return False # re-raise exception
return True
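# Illustrative sketch of ``Switch`` selecting a learning rate. It assumes
# ``import paddle.fluid as fluid`` and that ``global_step`` is an existing
# float32 scalar Variable maintained by the training program; kept in
# comments only.
#
#     lr = fluid.layers.create_global_var(
#         shape=[1], value=0.0, dtype='float32', persistable=True,
#         name="learning_rate")
#     zero_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.0)
#     one_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1.0)
#     two_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)
#     with Switch() as switch:
#         with switch.case(fluid.layers.equal(x=global_step, y=zero_var)):
#             fluid.layers.assign(input=one_var, output=lr)
#         with switch.default():
#             fluid.layers.assign(input=two_var, output=lr)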
class IfElseBlockGuard(object):
def __init__(self, is_true, ifelse):
if not isinstance(ifelse, IfElse):
raise TypeError("ifelse must be an instance of IfElse class")
if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
raise ValueError("You cannot invoke IfElse.block() inside a block")
self.is_true = is_true
self.ie = ifelse
if is_true:
self.cond_block = ifelse.conditional_true_block
else:
self.cond_block = ifelse.conditional_false_block
if not isinstance(self.cond_block, ConditionalBlock):
raise TypeError("Unexpected situation")
self.cond_block = self.cond_block.block()
def __enter__(self):
self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS
self.cond_block.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
# re-raise inside exception
return False
if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
raise ValueError("Must set output inside block")
self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
class IfElse(object):
"""
if-else control flow.
Args:
cond (Variable): condition used to compare.
name (str, default None): The name of this layer.
Examples:
.. code-block:: python
limit = fluid.layers.fill_constant_batch_size_like(
input=label, dtype='int64', shape=[1], value=5.0)
cond = fluid.layers.less_than(x=label, y=limit)
ie = fluid.layers.IfElse(cond)
with ie.true_block():
true_image = ie.input(image)
hidden = fluid.layers.fc(input=true_image, size=100, act='tanh')
prob = fluid.layers.fc(input=hidden, size=10, act='softmax')
ie.output(prob)
with ie.false_block():
false_image = ie.input(image)
hidden = fluid.layers.fc(
input=false_image, size=200, act='tanh')
prob = fluid.layers.fc(input=hidden, size=10, act='softmax')
ie.output(prob)
prob = ie()
"""
OUT_IF_ELSE_BLOCKS = 0
IN_IF_ELSE_TRUE_BLOCKS = 1
IN_IF_ELSE_FALSE_BLOCKS = 2
def __init__(self, cond, name=None):
if not isinstance(cond, Variable):
raise TypeError("cond must be a Variable")
self.helper = LayerHelper('ifelse', name=name)
self.cond = cond
self.input_table = {}
self.status = IfElse.OUT_IF_ELSE_BLOCKS
self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
self.output_table = ([], []) # (true_outs, false_outs)
def input(self, x):
if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
raise ValueError("input must in true/false blocks")
if id(x) not in self.input_table:
parent_block = self._parent_block()
out_true = parent_block.create_var(
name=unique_name.generate('ifelse_input' + self.helper.name),
dtype=x.dtype)
out_false = parent_block.create_var(
name=unique_name.generate('ifelse_input' + self.helper.name),
dtype=x.dtype)
parent_block.append_op(
type='split_lod_tensor',
inputs={
'X': x,
'Mask': self.cond,
},
outputs={'OutTrue': out_true,
'OutFalse': out_false},
attrs={'level': 0})
self.input_table[id(x)] = (out_true, out_false)
else:
out_true, out_false = self.input_table[id(x)]
if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
return out_true
else:
return out_false
def _parent_block(self):
current_block = self.helper.main_program.current_block()
return self.helper.main_program.block(current_block.parent_idx)
def true_block(self):
return IfElseBlockGuard(True, self)
def false_block(self):
return IfElseBlockGuard(False, self)
def output(self, *outs):
if self.status == self.OUT_IF_ELSE_BLOCKS:
raise ValueError("output can only be invoked in the sub-block")
out_table = self.output_table[1 if self.status ==
self.IN_IF_ELSE_TRUE_BLOCKS else 0]
parent_block = self._parent_block()
for each_out in outs:
if not isinstance(each_out, Variable):
raise TypeError("Each output should be a variable")
# create outside tensor
outside_out = parent_block.create_var(
name=unique_name.generate("_".join(
[self.helper.name, 'output'])),
dtype=each_out.dtype)
out_table.append(outside_out)
# assign local var to outside
assign(input=each_out, output=outside_out)
def __call__(self):
if self.status != self.OUT_IF_ELSE_BLOCKS:
raise ValueError("IfElse::__call__ must be out of sub-block")
false_len, true_len = list(map(len, self.output_table))
if false_len == 0 and true_len == 0:
raise ValueError("Must invoke true_block/false_block before "
"__call__")
elif false_len != true_len and false_len != 0 and true_len != 0:
raise ValueError("The output side must be same")
elif false_len == 0 or true_len == 0:
return self.output_table[0 if false_len != 0 else 1]
# else none of false_len/true_len is zero
# merge together
rlist = []
for false_var, true_var in zip(*self.output_table):
rlist.append(
merge_lod_tensor(
in_true=true_var,
in_false=false_var,
mask=self.cond,
x=self.cond,
level=0))
return rlist
class DynamicRNN(object):
"""
The dynamic RNN can process a batch of sequence data. The length of each
    sample sequence can be different. This API automatically processes them in
    a batch.
The input lod must be set. Please reference `lod_tensor`
>>> import paddle.fluid as fluid
>>> data = fluid.layers.data(name='sentence', dtype='int64', lod_level=1)
>>> embedding = fluid.layers.embedding(input=data, size=[65535, 32],
>>> is_sparse=True)
>>>
>>> drnn = fluid.layers.DynamicRNN()
>>> with drnn.block():
>>> word = drnn.step_input(embedding)
>>> prev = drnn.memory(shape=[200])
>>> hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
>>> drnn.update_memory(prev, hidden) # set prev to hidden
>>> drnn.output(hidden)
>>>
>>> # last is the last time step of rnn. It is the encoding result.
>>> last = fluid.layers.sequence_last_step(drnn())
    The dynamic RNN will unfold the sequence into time steps. Users need to define
    how to process each time step during the :code:`with` block.
    The `memory` is used for staging data across time steps. The initial value of
    the memory can be zero or another variable.
The dynamic RNN can mark multiple variables as its output. Use `drnn()` to
get the output sequence.
"""
BEFORE_RNN = 0
IN_RNN = 1
AFTER_RNN = 2
def __init__(self, name=None):
self.helper = LayerHelper('dynamic_rnn', name=name)
self.status = DynamicRNN.BEFORE_RNN
self.lod_rank_table = None
self.max_seq_len = None
self.step_idx = None
self.zero_idx = fill_constant(
shape=[1], value=0, dtype='int64', force_cpu=True)
self.mem_dict = dict()
self.output_array = []
self.outputs = []
self.cond = self.helper.create_tmp_variable(dtype='bool')
self.cond.stop_gradient = False
self.while_op = While(self.cond)
self.input_array = []
self.mem_link = []
def step_input(self, x):
"""
Mark a sequence as a dynamic RNN input.
Args:
x(Variable): The input sequence.
Returns:
The current timestep in the input sequence.
"""
self._assert_in_rnn_block_("step_input")
if not isinstance(x, Variable):
raise TypeError(
"step_input() can only take a Variable as its input.")
parent_block = self._parent_block_()
if self.lod_rank_table is None:
self.lod_rank_table = parent_block.create_var(
name=unique_name.generate('lod_rank_table'),
type=core.VarDesc.VarType.LOD_RANK_TABLE)
self.lod_rank_table.stop_gradient = True
parent_block.append_op(
type='lod_rank_table',
inputs={"X": x},
outputs={"Out": self.lod_rank_table})
self.max_seq_len = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_max_seq_len'),
dtype='int64')
self.max_seq_len.stop_gradient = False
parent_block.append_op(
type='max_sequence_len',
inputs={'RankTable': self.lod_rank_table},
outputs={"Out": self.max_seq_len})
self.cond.stop_gradient = True
parent_block.append_op(
type='less_than',
inputs={'X': self.step_idx,
'Y': self.max_seq_len},
outputs={'Out': self.cond},
attrs={'force_cpu': True})
input_array = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_input_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
self.input_array.append((input_array, x.dtype))
parent_block.append_op(
type='lod_tensor_to_array',
inputs={'X': x,
'RankTable': self.lod_rank_table},
outputs={'Out': input_array})
return array_read(array=input_array, i=self.step_idx)
def static_input(self, x):
"""
        Mark a variable as an RNN input. The input will not be scattered into
time steps.
Args:
x(Variable): The input variable.
Returns:
The input variable that can access in RNN.
"""
self._assert_in_rnn_block_("static_input")
if not isinstance(x, Variable):
raise TypeError(
"static_input() can only take a Variable as its input")
if self.lod_rank_table is None:
raise RuntimeError(
"static_input() must be called after step_input().")
parent_block = self._parent_block_()
x_reordered = parent_block.create_var(
name=unique_name.generate("dynamic_rnn_static_input_reordered"),
type=core.VarDesc.VarType.LOD_TENSOR,
dtype=x.dtype)
parent_block.append_op(
type='reorder_lod_tensor_by_rank',
inputs={'X': [x],
'RankTable': [self.lod_rank_table]},
outputs={'Out': [x_reordered]})
return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table)
@contextlib.contextmanager
def block(self):
"""
The block for user to define operators in RNN. See the class docstring
for more details.
"""
if self.status != DynamicRNN.BEFORE_RNN:
raise ValueError("rnn.block() can only be invoke once")
self.step_idx = fill_constant(
shape=[1], dtype='int64', value=0, force_cpu=True)
self.step_idx.stop_gradient = False
self.status = DynamicRNN.IN_RNN
with self.while_op.block():
yield
increment(x=self.step_idx, value=1.0, in_place=True)
for new_mem, mem_array in self.mem_link:
array_write(x=new_mem, i=self.step_idx, array=mem_array)
less_than(
x=self.step_idx,
y=self.max_seq_len,
force_cpu=True,
cond=self.cond)
self.status = DynamicRNN.AFTER_RNN
for each_array in self.output_array:
self.outputs.append(
array_to_lod_tensor(
x=each_array, table=self.lod_rank_table))
def __call__(self, *args, **kwargs):
"""
Get the output of RNN. This API should only be invoked after RNN.block()
"""
if self.status != DynamicRNN.AFTER_RNN:
raise ValueError(("Output of the dynamic RNN can only be visited "
"outside the rnn block."))
if len(self.outputs) == 1:
return self.outputs[0]
else:
return self.outputs
def memory(self,
init=None,
shape=None,
value=0.0,
need_reorder=False,
dtype='float32'):
"""
Create a memory variable for dynamic rnn.
If the :code:`init` is not None, :code:`memory` will be initialized by
        this variable. The :code:`need_reorder` flag is used to reorder the memory
        so that it follows the order of the input samples. It should be set to True
        when the initialized memory depends on the input sample.
For example,
>>> import paddle.fluid as fluid
>>> sentence = fluid.layers.data(
>>> name='sentence', dtype='float32', shape=[32])
>>> boot_memory = fluid.layers.data(
>>> name='boot', dtype='float32', shape=[10])
>>>
>>> drnn = fluid.layers.DynamicRNN()
>>> with drnn.block():
>>> word = drnn.step_input(sentence)
>>> memory = drnn.memory(init=boot_memory, need_reorder=True)
>>> hidden = fluid.layers.fc(
>>> input=[word, memory], size=10, act='tanh')
>>> drnn.update_memory(ex_mem=memory, new_mem=hidden)
>>> drnn.output(hidden)
>>> rnn_output = drnn()
Otherwise, if :code:`shape`, :code:`value`, :code:`dtype` are set, the
:code:`memory` will be initialized by this :code:`value`.
For example,
>>> import paddle.fluid as fluid
>>> sentence = fluid.layers.data(
>>> name='sentence', dtype='float32', shape=[32])
>>>
>>> drnn = fluid.layers.DynamicRNN()
>>> with drnn.block():
>>> word = drnn.step_input(sentence)
>>> memory = drnn.memory(shape=[10], dtype='float32', value=0)
>>> hidden = fluid.layers.fc(
>>> input=[word, memory], size=10, act='tanh')
>>> drnn.update_memory(ex_mem=memory, new_mem=hidden)
>>> drnn.output(hidden)
>>> rnn_output = drnn()
Args:
init(Variable|None): The initialized variable.
shape(list|tuple): The memory shape. NOTE the shape does not contain
batch_size.
            value(float): the initialized value.
need_reorder(bool): True if the initialized memory depends on the
input sample.
dtype(str|numpy.dtype): The data type of the initialized memory.
Returns:
the memory variable.
"""
self._assert_in_rnn_block_('memory')
if init is not None:
if not isinstance(init, Variable):
raise TypeError(
"The input arg `init` of memory() must be a Variable")
parent_block = self._parent_block_()
init_tensor = init
if need_reorder == True:
if self.lod_rank_table is None:
raise ValueError(
                        'If need_reorder is set to True, make sure step_input is '
                        'invoked before '
                        'memory(init=init, need_reorder=True, ...).')
init_reordered = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
type=core.VarDesc.VarType.LOD_TENSOR,
dtype=init.dtype)
parent_block.append_op(
type='reorder_lod_tensor_by_rank',
inputs={
'X': [init_tensor],
'RankTable': [self.lod_rank_table]
},
outputs={'Out': [init_reordered]})
init_tensor = init_reordered
mem_array = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_mem_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=init.dtype)
parent_block.append_op(
type='write_to_array',
inputs={'X': init_tensor,
'I': self.zero_idx},
outputs={'Out': mem_array})
retv = array_read(array=mem_array, i=self.step_idx)
retv = shrink_memory(
x=retv, i=self.step_idx, table=self.lod_rank_table)
self.mem_dict[retv.name] = mem_array
return retv
else:
if len(self.input_array) == 0:
raise ValueError(
"step_input should be invoked before memory(shape=..., value=...)"
)
parent_block = self._parent_block_()
init = parent_block.create_var(
name=unique_name.generate('mem_init'), dtype=dtype)
arr, dtype = self.input_array[0]
in0 = parent_block.create_var(
name=unique_name.generate('in0'), dtype=dtype)
parent_block.append_op(
type='read_from_array',
inputs={'X': [arr],
'I': [self.zero_idx]},
outputs={'Out': [in0]})
parent_block.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': [in0]},
outputs={'Out': [init]},
attrs={
'shape': [-1] + shape,
'value': float(value),
'dtype': init.dtype
})
return self.memory(init=init)
def update_memory(self, ex_mem, new_mem):
"""
Update the memory from ex_mem to new_mem. NOTE that the shape and data
        type of :code:`ex_mem` and :code:`new_mem` must be the same.
Args:
ex_mem(Variable): the memory variable.
new_mem(Variable): the plain variable generated in RNN block.
Returns:
None
"""
self._assert_in_rnn_block_('update_memory')
if not isinstance(ex_mem, Variable):
raise TypeError("The input arg `ex_mem` of update_memory() must "
"be a Variable")
if not isinstance(new_mem, Variable):
raise TypeError("The input arg `new_mem` of update_memory() must "
"be a Variable")
mem_array = self.mem_dict.get(ex_mem.name, None)
if mem_array is None:
raise ValueError("Please invoke memory before update_memory")
if self.lod_rank_table is None:
raise ValueError("Please invoke step_input before update_memory")
self.mem_link.append((new_mem, mem_array))
def output(self, *outputs):
"""
        Mark the RNN output variables.
Args:
outputs: The output variables.
Returns:
None
"""
self._assert_in_rnn_block_('output')
parent_block = self._parent_block_()
for each in outputs:
outside_array = parent_block.create_var(
name=unique_name.generate("_".join(
[self.helper.name, "output_array", each.name])),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=each.dtype)
array_write(x=each, i=self.step_idx, array=outside_array)
self.output_array.append(outside_array)
def _parent_block_(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def _assert_in_rnn_block_(self, method):
if self.status != DynamicRNN.IN_RNN:
raise ValueError("{0} can only be invoked inside rnn block.".format(
method))
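# Illustrative sketch combining ``step_input``, ``static_input`` and ``memory``
# in a ``DynamicRNN``. It assumes ``import paddle.fluid as fluid`` and
# pre-existing Variables ``embedding`` (a LoD sequence) and ``encoder_last``
# (one row per sequence); kept in comments only.
#
#     drnn = DynamicRNN()
#     with drnn.block():
#         word = drnn.step_input(embedding)        # one time step per iteration
#         enc = drnn.static_input(encoder_last)    # reordered/shrunk to match
#         prev = drnn.memory(shape=[200], value=0.0)
#         hidden = fluid.layers.fc(input=[word, enc, prev], size=200, act='relu')
#         drnn.update_memory(prev, hidden)
#         drnn.output(hidden)
#     last = fluid.layers.sequence_last_step(drnn())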
@autodoc()
def reorder_lod_tensor_by_rank(x, rank_table):
helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())
helper.is_instance('x', Variable)
helper.is_instance('rank_table', Variable)
out = helper.create_tmp_variable(dtype=x.dtype)
helper.append_op(
type='reorder_lod_tensor_by_rank',
inputs={'X': [x],
'RankTable': [rank_table]},
outputs={'Out': [out]})
return out
def is_empty(x, cond=None, **ignored):
"""
Test whether a Variable is empty.
Args:
x (Variable): The Variable to be tested.
cond (Variable|None): Output parameter. Returns the test result
of given 'x'. Default: None
Returns:
Variable: A bool scalar. True if 'x' is an empty Variable.
Raises:
TypeError: If input cond is not a variable, or cond's dtype is
not bool.
Examples:
.. code-block:: python
res = fluid.layers.is_empty(x=input)
# or:
fluid.layers.is_empty(x=input, cond=res)
"""
helper = LayerHelper("is_empty", **locals())
if cond is None:
cond = helper.create_tmp_variable(dtype='bool')
cond.stop_gradient = True
elif not isinstance(cond, Variable):
raise TypeError("cond takes a variable")
elif cond.dtype != 'bool':
raise TypeError("The data type of cond must be bool")
helper.append_op(
type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
return cond
| 34.759288 | 106 | 0.589763 |
d3fdeec8165e84bf7a43059c3c5c541ee72ccb58
| 68,892 |
py
|
Python
|
lib/taurus/qt/qtgui/taurusgui/appsettingswizard.py
|
mrosanes/taurus_deb
|
119bf27193af0bbaaececf054eefb78beb6f117a
|
[
"CC-BY-3.0"
] | null | null | null |
lib/taurus/qt/qtgui/taurusgui/appsettingswizard.py
|
mrosanes/taurus_deb
|
119bf27193af0bbaaececf054eefb78beb6f117a
|
[
"CC-BY-3.0"
] | null | null | null |
lib/taurus/qt/qtgui/taurusgui/appsettingswizard.py
|
mrosanes/taurus_deb
|
119bf27193af0bbaaececf054eefb78beb6f117a
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
###########################################################################
"""
This Wizard provides functionality for creating, from scratch, a configuration
directory for a TaurusGUI based application.
The files in the configuration dir determine the default, permanent, pre-defined
contents of the GUI. While the user may add/remove more elements at run
time and those customizations will also be stored, this file defines what a
user will find when launching the GUI for the first time.
"""
__all__ = ["AppSettingsWizard", "ExternalAppEditor"]
import os
import re
import sys
import shutil
import copy
import datetime
import glob
from lxml import etree
from taurus import tauruscustomsettings
from taurus.external.qt import Qt
import taurus.qt.qtgui.panel
import taurus.qt.qtgui.taurusgui.paneldescriptionwizard
import taurus.qt.qtgui.input
from taurus.core.util.enumeration import Enumeration
from taurus.qt.qtgui.util import ExternalAppAction
class BooleanWidget(Qt.QWidget):
"""
    This class represents a simple boolean widget with two RadioButtons,
    true and false. Its value can be read and changed with the getValue and
    setValue methods (getDefaultValue returns the default, which is False).
"""
valueChangedSignal = Qt.pyqtSignal(bool, bool)
def __init__(self, parent=None):
Qt.QWidget.__init__(self, parent)
self._formLayout = Qt.QHBoxLayout(self)
self.trueButton = Qt.QRadioButton(self)
self._formLayout.addWidget(self.trueButton)
self.falseButton = Qt.QRadioButton(self)
self._formLayout.addWidget(self.falseButton)
self.trueButton.setText("Yes")
self.falseButton.setText("No")
self.trueButton.clicked.connect(self.valueChanged)
self.falseButton.clicked.connect(self.valueChanged)
self.setValue(self.getDefaultValue())
def valueChanged(self):
if not (self.trueButton.isChecked() == self._actualValue):
self.valueChangedSignal.emit(self._actualValue, not self._actualValue)
self._actualValue = self.trueButton.isChecked()
def setValue(self, value):
if value is None:
value = self.getDefaultValue()
self.trueButton.setChecked(value)
self.falseButton.setChecked(not value)
self._actualValue = value
def getValue(self):
return self.trueButton.isChecked()
@classmethod
def getDefaultValue(self):
return False
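# Illustrative sketch of reacting to BooleanWidget value changes; ``onToggled``
# is a hypothetical slot name chosen for the example. Kept in comments only.
#
#     def onToggled(old_value, new_value):
#         print("changed from %s to %s" % (old_value, new_value))
#
#     w = BooleanWidget()
#     w.valueChangedSignal.connect(onToggled)
#     w.setValue(True)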
class BasePage(Qt.QWizardPage):
"""
This class represents the base page for all of the pages in the wizard
"""
completeChanged = Qt.pyqtSignal()
def __init__(self, parent=None):
Qt.QWizardPage.__init__(self, parent)
self._item_funcs = {}
self._layout = Qt.QGridLayout()
self.setLayout(self._layout)
self._setupUI()
def initializePage(self):
Qt.QWizardPage.initializePage(self)
self.checkData()
def fromXml(self, xml):
"""
:param xml: (etree.Element) root node
"""
pass
def _setupUI(self):
pass
def checkData(self):
self._valid = True
self.completeChanged.emit()
def isComplete(self):
return self._valid
def _markRed(self, label):
"""
Set the color of the given label to red
"""
palette = label.palette()
palette.setBrush(Qt.QPalette.WindowText, Qt.Qt.red)
label.update()
def _markBlack(self, label):
"""
Set the color of the given label to black
"""
palette = label.palette()
palette.setBrush(Qt.QPalette.WindowText, Qt.Qt.black)
label.update()
def setStatusLabelPalette(self, label):
"""
        Set the label to look like a status label
"""
label.setAutoFillBackground(True)
palette = label.palette()
gradient = Qt.QLinearGradient(0, 0, 0, 15)
gradient.setColorAt(0.0, Qt.QColor.fromRgb(60, 150, 255))
gradient.setColorAt(0.5, Qt.QColor.fromRgb(0, 85, 227))
gradient.setColorAt(1.0, Qt.QColor.fromRgb(60, 150, 255))
gradient.setSpread(Qt.QGradient.RepeatSpread)
palette.setBrush(Qt.QPalette.Window, Qt.QBrush(gradient))
palette.setBrush(Qt.QPalette.WindowText, Qt.Qt.white)
def __setitem__(self, name, value):
self._item_funcs[name] = value
def __getitem__(self, name):
return self._item_funcs[name]
def setNextPageId(self, id):
self._nextPageId = id
def nextId(self):
return self._nextPageId
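# Illustrative sketch of the BasePage item mechanism: a page registers getter
# callables under a key via __setitem__, and the wizard later retrieves and
# calls them to collect the page's values. ``page`` is a hypothetical
# BasePage instance. Kept in comments only.
#
#     page['guiName'] = lambda: 'MyGui'   # register a getter
#     value = page['guiName']()           # later: retrieve and call it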
class IntroPage(BasePage):
"""
Introduction page
"""
def __init__(self, parent=None):
BasePage.__init__(self, parent)
def _setupUI(self):
self.setTitle('Introduction')
self.setPixmap(Qt.QWizard.WatermarkPixmap, Qt.QIcon.fromTheme(
"document-properties").pixmap(120, 120))
label = Qt.QLabel(self.getIntroText())
label.setWordWrap(True)
self._layout.addWidget(label, 0, 0)
self._spacerItem1 = Qt.QSpacerItem(
10, 200, Qt.QSizePolicy.Minimum, Qt.QSizePolicy.Fixed)
self._layout.addItem(self._spacerItem1, 1, 0)
self.setLayout(self._layout)
def getIntroText(self):
text = 'This wizard will guide you through the process of creating a ' + \
'GUI based on TaurusGUI.\n' + \
'TaurusGui-based applications are very customizable. The user can ' + \
'add/remove elements at run time and store those customizations. So ' + \
'with this wizard you will define just the default contents of the GUI.'
return text
def setNextPageId(self, id):
self._nextPageId = id
class ProjectPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self.setTitle('Project')
self.setSubTitle(
'Choose a location for the application files (i.e., the "project directory")')
self.__setitem__('projectDir', self._getProjectDir)
def _setupUI(self):
BasePage._setupUI(self)
self._projectDirLabel = Qt.QLabel("Project Directory:")
self._projectDirLE = Qt.QLineEdit(Qt.QDir.homePath())
self._projectDirLE.setMinimumSize(150, 30)
self._projectDirLE.setToolTip(
'This directory will be used to store all files needed by the application.')
self._projectDirBT = Qt.QPushButton(Qt.QIcon.fromTheme(
"document-properties"), '...')
self._layout.addWidget(self._projectDirLabel, 1, 0)
self._layout.addWidget(self._projectDirLE, 1, 1)
self._layout.addWidget(self._projectDirBT, 1, 2)
self._projectDirBT.clicked.connect(self.onSelectDir)
def onSelectDir(self):
dirname = unicode(Qt.QFileDialog.getExistingDirectory(
self, 'Choose the project directory', self._projectDirLE.text()))
if not dirname:
return
self._projectDirLE.setText(dirname)
def validatePage(self):
dirname = unicode(self._projectDirLE.text())
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except Exception, e:
Qt.QMessageBox.warning(self, 'Error creating project directory',
'Could not create the project directory.\nReason:%s' % repr(
e),
Qt.QMessageBox.Cancel)
return False
configs_found = glob.glob(os.path.join(dirname, "tgconf_*",
self.wizard().getXmlConfigFileName()))
# fname = os.path.join(dirname, self.wizard().getXmlConfigFileName())
if len(configs_found) == 1:
fname = configs_found[0]
option = Qt.QMessageBox.question(self, 'Overwrite project?',
'The "%s" file already exists in the project directory.\n Do you want to edit the existing project?' % (
os.path.basename(fname)),
Qt.QMessageBox.Yes | Qt.QMessageBox.Cancel)
if option == Qt.QMessageBox.Yes:
try:
self.wizard().loadXml(fname)
except Exception, e:
Qt.QMessageBox.warning(self, 'Error loading project configuration',
'Could not load the existing configuration.\nReason:%s' % repr(
e),
Qt.QMessageBox.Cancel)
return False
else:
return False
elif len(os.listdir(dirname)):
option = Qt.QMessageBox.question(self, 'Non empty project dir',
'The project directory ("%s") is not empty.\nAre you sure you want to use it?' % (
os.path.basename(dirname)),
Qt.QMessageBox.Yes | Qt.QMessageBox.No)
if option != Qt.QMessageBox.Yes:
return False
# if all went ok...
return True
def _getProjectDir(self):
return unicode(self._projectDirLE.text())
class GeneralSettings(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self.setTitle('General settings')
def initializePage(self):
BasePage.initializePage(self)
self.wizard().__setitem__("guiName", self._getGUIName)
self.wizard().__setitem__("organizationName", self._getOrganizationName)
def fromXml(self, xml):
self._guiNameLineEdit.setText(
AppSettingsWizard.getValueFromNode(xml, "GUI_NAME", ''))
self._organizationCombo.setEditText(
AppSettingsWizard.getValueFromNode(xml, "ORGANIZATION", default='Taurus'))
def _getGUIName(self):
return str(self._guiNameLineEdit.text())
def _getOrganizationName(self):
if len(self._organizationCombo.currentText()) > 0:
return str(self._organizationCombo.currentText())
else:
return None
def _setupUI(self):
BasePage._setupUI(self)
self._guiNameLabel = Qt.QLabel("GUI name:")
font = Qt.QFont() # set bigger font
font.setPointSize(14)
self._label = Qt.QLabel()
self._layout.addWidget(self._label, 0, 0, 1, 2, Qt.Qt.AlignRight)
self._guiNameLineEdit = Qt.QLineEdit()
self._guiNameLineEdit.setFont(font)
self._guiNameLineEdit.setMinimumSize(150, 30)
self._layout.addWidget(self._guiNameLabel, 1, 0,
1, 1, Qt.Qt.AlignRight)
self._layout.addWidget(self._guiNameLineEdit, 1,
1, 1, 1, Qt.Qt.AlignRight)
self._organizationNameLabel = Qt.QLabel("Organization name:")
self._organizationCombo = Qt.QComboBox()
self._organizationCombo.addItems(self._getOrganizationNames())
self._organizationCombo.setMinimumSize(150, 25)
self._organizationCombo.setEditable(True)
self._layout.addWidget(self._organizationNameLabel,
2, 0, 1, 1, Qt.Qt.AlignRight)
self._layout.addWidget(self._organizationCombo,
2, 1, 1, 1, Qt.Qt.AlignRight)
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
self._status_label = Qt.QLabel()
self.setStatusLabelPalette(self._status_label)
self._layout.addWidget(self._status_label, 9, 0, 1, 3)
self._guiNameLineEdit.textChanged.connect(self.checkData)
self._organizationCombo.editTextChanged.connect(self.checkData)
self._organizationCombo.currentIndexChanged.connect(self.checkData)
def _getOrganizationNames(self):
return ["TAURUS", "ALBA", "DESY", "Elettra", "ESRF", "MAX-lab", "SOLEIL", "XFEL"]
def checkData(self):
self._valid = True
if not len(self._guiNameLineEdit.text()):
self._valid = False
self._markRed(self._guiNameLabel)
else:
self._markBlack(self._guiNameLabel)
self.completeChanged.emit()
if not self._valid:
self._setStatus("Please type the name of the GUI")
else:
self._setStatus("Press next button to continue")
def _setStatus(self, text):
self._status_label.setText(text)
class CustomLogoPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self._customLogoDefaultPath = getattr(tauruscustomsettings,
"ORGANIZATION_LOGO",
"logos:taurus.png")
self._customLogoPath = self._customLogoDefaultPath
def initializePage(self):
BasePage.initializePage(self)
self.wizard().__setitem__("customLogo", self._getCustomLogo)
self._changeImage()
def _setupUI(self):
BasePage._setupUI(self)
self.setTitle('Custom logo')
self._label = Qt.QLabel(
"\nIf you want to have a custom logo inside your application panel, please select the image file. \n")
self._label.setWordWrap(True)
self._layout.addWidget(self._label, 0, 0, 1, 4)
self._customLogoLabel = Qt.QLabel("Custom logo:")
self._customLogoLineEdit = Qt.QLineEdit()
self._customLogoLineEdit.setMinimumSize(250, 25)
self._customLogoLineEdit.setReadOnly(False)
self._customLogoButton = Qt.QPushButton()
self._customLogoButton.setToolTip("Browse...")
self._customLogoButton.setIcon(Qt.QIcon.fromTheme("folder-open"))
self._customLogoButton.setMaximumSize(80, 25)
self._spacerItem1 = Qt.QSpacerItem(
30, 30, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Fixed)
self._customLogo = Qt.QLabel(self)
self._customLogo.setAlignment(Qt.Qt.AlignCenter)
self._customLogo.setMinimumSize(120, 120)
self._customLogoDefaultButton = Qt.QPushButton()
self._customLogoDefaultButton.setToolTip("Default")
self._customLogoDefaultButton.setMaximumSize(80, 25)
self._customLogoDefaultButton.setIcon(Qt.QIcon("actions:edit-undo.svg"))
self._customLogoRemoveButton = Qt.QPushButton()
self._customLogoRemoveButton.setToolTip("Remove")
self._customLogoRemoveButton.setMaximumSize(80, 25)
self._customLogoRemoveButton.setIcon(
Qt.QIcon("emblems:emblem-unreadable.svg"))
self._spacerItem2 = Qt.QSpacerItem(
30, 30, Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Fixed)
self._layout.addWidget(self._customLogoLabel, 2, 0, Qt.Qt.AlignRight)
self._layout.addWidget(self._customLogoLineEdit,
2, 1, Qt.Qt.AlignRight)
self._layout.addWidget(self._customLogoButton, 2, 2, Qt.Qt.AlignLeft)
self._layout.addWidget(
self._customLogoDefaultButton, 2, 3, Qt.Qt.AlignLeft)
self._layout.addWidget(
self._customLogoRemoveButton, 2, 4, Qt.Qt.AlignLeft)
self._layout.addItem(self._spacerItem2, 2, 5)
self._layout.addItem(self._spacerItem1, 3, 2)
self._layout.addWidget(self._customLogo, 4, 1,
1, 1, Qt.Qt.AlignHCenter)
self._customLogoButton.clicked.connect(self._selectImage)
self._customLogoDefaultButton.clicked.connect(self._setDefaultImage)
self._customLogoRemoveButton.clicked.connect(self._removeImage)
self._customLogoLineEdit.textChanged.connect(self._changeImage)
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
self._status_label = Qt.QLabel("Press next button to continue")
self.setStatusLabelPalette(self._status_label)
self._layout.addWidget(self._status_label, 9, 0, 1, 6)
self._setNoImage()
def fromXml(self, xml):
customLogo = AppSettingsWizard.getValueFromNode(
xml, "CUSTOM_LOGO", None)
if customLogo is None:
self._setDefaultImage()
else:
self._customLogoLineEdit.setText(customLogo)
def _setDefaultImage(self):
self._customLogoLineEdit.setText(self._customLogoDefaultPath)
def _setNoImage(self):
self._customLogo.setPixmap(
Qt.QIcon.fromTheme("image-missing").pixmap(50, 50))
self._customLogoPath = None
self._customLogoRemoveButton.hide()
def _removeImage(self):
self._customLogoLineEdit.setText("")
self._setNoImage()
def _getCustomLogo(self):
if (self._customLogoPath is not None):
return str(self._customLogoPath)
else:
return None
def _selectImage(self):
fileName = Qt.QFileDialog.getOpenFileName(self, self.tr(
"Open File"), Qt.QDir.homePath(), self.tr("Images (*.png *.xpm *.jpg *.jpeg *.svg)"))
self._customLogoLineEdit.setText(fileName)
self._changeImage()
def _changeImage(self):
fileName = str(self._customLogoLineEdit.text())
if len(fileName):
if (os.path.exists(fileName)):
image = Qt.QImage()
if image.load(fileName):
self._setImage(image)
self._customLogoPath = fileName
self._setStatus("Press next button to continue")
self._customLogoRemoveButton.show()
else:
self._setNoImage()
self._setStatus("The file is invalid")
else:
self._setNoImage()
self._setStatus("The file does not exist")
else:
self._setNoImage()
self._setStatus("No image")
def _setImage(self, image):
if type(image) == Qt.QPixmap:
self._customLogo.setPixmap(
image.scaled(60, 200, Qt.Qt.KeepAspectRatio))
elif type(image) == Qt.QImage:
self._customLogo.setPixmap(Qt.QPixmap().fromImage(
image).scaled(60, 200, Qt.Qt.KeepAspectRatio))
else:
self._customLogo.setPixmap(
Qt.QPixmap("image-missing").scaled(50, 50))
self._customLogoPath = None
def _setStatus(self, text):
self._status_label.setText(text)
class SynopticPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self._synoptics = []
def fromXml(self, xml):
self._synoptics = []
synopticNodes = AppSettingsWizard.getArrayFromNode(
xml, "SYNOPTIC", default=[])
for child in synopticNodes:
if child.get("str") is not None and len(child.get("str")):
self._synoptics.append(child.get("str"))
def initializePage(self):
BasePage.initializePage(self)
self.wizard().__setitem__("synoptics", self._getSynoptics)
self._refreshSynopticList()
def _setupUI(self):
BasePage._setupUI(self)
self.setTitle('Synoptics')
self._label = Qt.QLabel(
"If you want to add one or more synoptic panels (graphical views of instruments) select the corresponding JDRAW files here\n")
self._label.setWordWrap(True)
self._layout.addWidget(self._label, 0, 0)
self.setLayout(self._layout)
self._synopticGroupBox = Qt.QGroupBox()
self._synopticGroupBox.setCheckable(False)
self._synopticGroupBox.setAlignment(Qt.Qt.AlignLeft)
self._synopticGroupBox.setStyleSheet(
" QGroupBox::title { subcontrol-position: top left; padding: 5 5px; }")
self._layout.addWidget(self._synopticGroupBox, 2, 0, 1, 1)
self._horizontalLayout = Qt.QHBoxLayout(self._synopticGroupBox)
self._synopticList = Qt.QListWidget(self._synopticGroupBox)
self._horizontalLayout.addWidget(self._synopticList)
self._verticalLayout = Qt.QVBoxLayout()
self._addButton = Qt.QPushButton("Add Synoptic")
self._addButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._addButton)
self._removeButton = Qt.QPushButton("Remove Synoptic")
self._removeButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._removeButton)
self._upButton = Qt.QPushButton("Move Up")
self._upButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._upButton)
self._downButton = Qt.QPushButton("Move Down")
self._downButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._downButton)
self._horizontalLayout.addLayout(self._verticalLayout)
self._addButton.setIcon(Qt.QIcon.fromTheme("list-add"))
self._removeButton.setIcon(Qt.QIcon.fromTheme("list-remove"))
self._upButton.setIcon(Qt.QIcon.fromTheme("go-up"))
self._downButton.setIcon(Qt.QIcon.fromTheme("go-down"))
self._addButton.clicked.connect(self._addSynoptic)
self._removeButton.clicked.connect(self._removeSynoptic)
self._upButton.clicked.connect(self._moveUp)
self._downButton.clicked.connect(self._moveDown)
#self._synopticList.itemDoubleClicked.connect(self._editSynoptic)
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
self._status_label = Qt.QLabel("Press next button to continue")
self.setStatusLabelPalette(self._status_label)
self._layout.addWidget(self._status_label, 9, 0, 1, 1)
def _addSynoptic(self):
pdir = self.wizard().__getitem__('projectDir')
fileNames = Qt.QFileDialog.getOpenFileNames(self, self.tr(
"Open File"), pdir, self.tr("JDW (*.jdw );; All files (*)"))
for fileName in fileNames:
fileName = unicode(fileName)
if fileName not in self._synoptics:
self._synoptics.append(fileName)
self._refreshSynopticList()
def _editSynoptic(self):
# edit
self._refreshSynopticList()
def _removeSynoptic(self):
if len(self._synopticList.selectedIndexes()) > 0:
self._synoptic_id = self._synopticList.selectedIndexes()[0].row()
self._synoptics.remove(self._synoptics[self._synoptic_id])
self._refreshSynopticList()
def _moveUp(self):
if len(self._synopticList.selectedIndexes()) > 0:
self._synoptic_id = self._synopticList.selectedIndexes()[0].row()
if self._synoptic_id > 0:
tmp = self._synoptics[self._synoptic_id]
self._synoptics[self._synoptic_id] = self._synoptics[
self._synoptic_id - 1]
self._synoptics[self._synoptic_id - 1] = tmp
self._refreshSynopticList()
self._synopticList.setCurrentIndex(self._synopticList.indexFromItem(
self._synopticList.item(self._synoptic_id - 1)))
def _moveDown(self):
if len(self._synopticList.selectedIndexes()) > 0:
self._synoptic_id = self._synopticList.selectedIndexes()[0].row()
if self._synoptic_id < self._synopticList.count() - 1:
tmp = self._synoptics[self._synoptic_id]
self._synoptics[self._synoptic_id] = self._synoptics[
self._synoptic_id + 1]
self._synoptics[self._synoptic_id + 1] = tmp
self._refreshSynopticList()
self._synopticList.setCurrentIndex(self._synopticList.indexFromItem(
self._synopticList.item(self._synoptic_id + 1)))
def _refreshSynopticList(self):
self._synopticList.clear()
for name in self._synoptics:
self._synopticList.addItem(name)
def _getSynoptics(self):
if len(self._synoptics) <= 0:
return None
else:
return self._synoptics
def checkData(self):
BasePage.checkData(self)
self._valid = True
def _setStatus(self, text):
self._status_label.setText(text)
class MacroServerInfoPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
def initializePage(self):
BasePage.initializePage(self)
self._label.setText("\n <b>%s</b> can communicate with a Sardana's Macro Server and Pool.\nYou can enable and configure them here:\n" %
self.wizard().__getitem__("guiName"))
self.wizard().__setitem__("macroServerName", self._getMacroServerName)
self.wizard().__setitem__("doorName", self._getDoorName)
def _setupUI(self):
BasePage._setupUI(self)
self.setTitle('Macro Server Info')
self._label = Qt.QLabel()
self._label.setWordWrap(True)
self._macroGroupBox = Qt.QGroupBox()
self._macroGroupBox.setTitle("Enable Sardana communication")
self._macroGroupBox.setCheckable(True)
self._macroGroupBox.setChecked(False)
self._macroGroupBox.setAlignment(Qt.Qt.AlignLeft)
self._macroGroupBox.setStyleSheet(
" QGroupBox::title { subcontrol-position: top left; padding: 5 5px; }")
self._horizontalLayout = Qt.QHBoxLayout(self._macroGroupBox)
from sardana.taurus.qt.qtgui.extra_macroexecutor.common import \
TaurusMacroConfigurationDialog
self._confWidget = TaurusMacroConfigurationDialog(self)
self._confWidget.setWindowFlags(Qt.Qt.Widget)
self._confWidget.setModal(False)
self._confWidget.setVisible(True)
self._confWidget.buttonBox.setVisible(False)
self._horizontalLayout.addWidget(self._confWidget)
self._layout.addWidget(self._label, 0, 0, 1, 1)
self._layout.addWidget(self._macroGroupBox, 1, 0, 1, 1)
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
self._status_label = Qt.QLabel("Press next button to continue")
self.setStatusLabelPalette(self._status_label)
self._layout.addWidget(self._status_label, 9, 0, 1, 1)
self._confWidget.macroServerComboBox.currentIndexChanged.connect(
self.checkData)
self._confWidget.doorComboBox.currentIndexChanged.connect(
self.checkData)
self._macroGroupBox.toggled.connect(self.checkData)
def fromXml(self, xml):
macroserverName = AppSettingsWizard.getValueFromNode(
xml, "MACROSERVER_NAME", default="")
doorName = AppSettingsWizard.getValueFromNode(
xml, "DOOR_NAME", default="")
macroEditorsPath = AppSettingsWizard.getValueFromNode(
xml, "MACROEDITORS_PATH", default="")
id = self._confWidget.macroServerComboBox.findText(
macroserverName, Qt.Qt.MatchExactly)
if id >= 0:
self._confWidget.macroServerComboBox.setCurrentIndex(id)
self._macroGroupBox.setChecked(True)
else:
self._macroGroupBox.setChecked(False)
return
id = self._confWidget.doorComboBox.findText(
doorName, Qt.Qt.MatchExactly)
if id >= 0:
self._confWidget.doorComboBox.setCurrentIndex(id)
def checkData(self):
BasePage.checkData(self)
if (self._macroGroupBox.isChecked()) and len(self._confWidget.macroServerComboBox.currentText()):
self.setNextPageId(self.wizard().currentId() + 1)
else:
self.setNextPageId(self.wizard().currentId() + 2)
def _getMacroServerName(self):
if (self._macroGroupBox.isChecked()) and len(self._confWidget.macroServerComboBox.currentText()):
return str(self._confWidget.macroServerComboBox.currentText())
else:
return None
def _getDoorName(self):
if (self._macroGroupBox.isChecked()) and len(self._confWidget.macroServerComboBox.currentText()):
return str(self._confWidget.doorComboBox.currentText())
else:
return None
def _setStatus(self, text):
self._status_label.setText(text)
class InstrumentsPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
def initializePage(self):
BasePage.initializePage(self)
self.wizard().__setitem__("instruments", self._getInstruments)
        self._label.setText("<b>%s</b> can use instrument information stored in the Sardana Pool to create instrument panels." %
self.wizard().__getitem__("guiName"))
def _setupUI(self):
BasePage._setupUI(self)
self.setTitle('Instruments from Pool:')
self._label = Qt.QLabel()
self._label.setWordWrap(True)
self._layout.addWidget(self._label, 0, 0, 1, 3)
self._instrumentsLabel = Qt.QLabel("Generate panels from Pool Info?")
self._intstrumentsBoolean = BooleanWidget()
self._intstrumentsBoolean.setMinimumSize(150, 25)
self._layout.addWidget(self._instrumentsLabel, 5,
0, 1, 1, Qt.Qt.AlignRight)
self._layout.addWidget(self._intstrumentsBoolean,
5, 1, 1, 1, Qt.Qt.AlignRight)
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
self._status_label = Qt.QLabel("Press next button to continue")
self.setStatusLabelPalette(self._status_label)
self._layout.addWidget(self._status_label, 9, 0, 1, 3)
def fromXml(self, xml):
instruments = AppSettingsWizard.getValueFromNode(
xml, "INSTRUMENTS_FROM_POOL", default="False")
if str(instruments).lower() == "true":
self._intstrumentsBoolean.setValue(True)
else:
self._intstrumentsBoolean.setValue(False)
def _getInstruments(self):
return str(self._intstrumentsBoolean.getValue())
def checkData(self):
self._valid = True
def _setStatus(self, text):
self._status_label.setText(text)
class PanelsPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self._panels = []
def initializePage(self):
BasePage.initializePage(self)
self.wizard().__setitem__("panels", self._getPanels)
self._refreshPanelList()
def _setupUI(self):
BasePage._setupUI(self)
self.setTitle('Panels editor')
        self._label = Qt.QLabel(
            "If you want extra panels, add them to this list\n")
self._layout.addWidget(self._label, 0, 0)
self.setLayout(self._layout)
self._panelGroupBox = Qt.QGroupBox()
self._panelGroupBox.setCheckable(False)
self._panelGroupBox.setAlignment(Qt.Qt.AlignLeft)
self._panelGroupBox.setStyleSheet(
" QGroupBox::title { subcontrol-position: top left; padding: 5 5px; }")
self._layout.addWidget(self._panelGroupBox, 2, 0, 1, 1)
self._horizontalLayout = Qt.QHBoxLayout(self._panelGroupBox)
self._panelList = Qt.QListWidget(self._panelGroupBox)
self._horizontalLayout.addWidget(self._panelList)
self._verticalLayout = Qt.QVBoxLayout()
self._addButton = Qt.QPushButton("Add Panel")
self._addButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._addButton)
self._removeButton = Qt.QPushButton("Remove Panel")
self._removeButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._removeButton)
self._upButton = Qt.QPushButton("Move Up")
self._upButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._upButton)
self._downButton = Qt.QPushButton("Move Down")
self._downButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._downButton)
self._horizontalLayout.addLayout(self._verticalLayout)
self._addButton.setIcon(Qt.QIcon.fromTheme("list-add"))
self._removeButton.setIcon(Qt.QIcon.fromTheme("list-remove"))
self._upButton.setIcon(Qt.QIcon.fromTheme("go-up"))
self._downButton.setIcon(Qt.QIcon.fromTheme("go-down"))
self._addButton.clicked.connect(self._addPanel)
self._removeButton.clicked.connect(self._removePanel)
self._upButton.clicked.connect(self._moveUp)
self._downButton.clicked.connect(self._moveDown)
self._panelList.itemDoubleClicked.connect(self._editPanel)
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
self._status_label = Qt.QLabel("Press next button to continue")
self.setStatusLabelPalette(self._status_label)
self._layout.addWidget(self._status_label, 9, 0, 1, 1)
def fromXml(self, xml):
self._panels = []
panelNodes = AppSettingsWizard.getArrayFromNode(
xml, "PanelDescriptions", default=[])
for child in panelNodes:
name = AppSettingsWizard.getValueFromNode(
child, "name", default=None)
if name:
self._panels.append((name, etree.tostring(child)))
def _addPanel(self):
paneldesc, ok = taurus.qt.qtgui.taurusgui.paneldescriptionwizard.PanelDescriptionWizard.getDialog(
self)
if ok:
w = paneldesc.getWidget()
self._panels.append((paneldesc.name, paneldesc.toXml()))
self._refreshPanelList()
def _editPanel(self):
        # editing of an existing panel is not implemented yet; just refresh the list
self._refreshPanelList()
def _removePanel(self):
if len(self._panelList.selectedIndexes()) > 0:
self._panel_id = self._panelList.selectedIndexes()[0].row()
self._panels.remove(self._panels[self._panel_id])
self._refreshPanelList()
def _moveUp(self):
if len(self._panelList.selectedIndexes()) > 0:
self._panel_id = self._panelList.selectedIndexes()[0].row()
if self._panel_id > 0:
tmp = self._panels[self._panel_id]
self._panels[self._panel_id] = self._panels[self._panel_id - 1]
self._panels[self._panel_id - 1] = tmp
self._refreshPanelList()
self._panelList.setCurrentIndex(self._panelList.indexFromItem(
self._panelList.item(self._panel_id - 1)))
def _moveDown(self):
if len(self._panelList.selectedIndexes()) > 0:
self._panel_id = self._panelList.selectedIndexes()[0].row()
if self._panel_id < self._panelList.count() - 1:
tmp = self._panels[self._panel_id]
self._panels[self._panel_id] = self._panels[self._panel_id + 1]
self._panels[self._panel_id + 1] = tmp
self._refreshPanelList()
self._panelList.setCurrentIndex(self._panelList.indexFromItem(
self._panelList.item(self._panel_id + 1)))
def _refreshPanelList(self):
self._panelList.clear()
for panel in self._panels:
name, xml = panel
self._panelList.addItem(name)
def _getPanels(self):
if len(self._panels) <= 0:
return None
else:
return self._panels
def checkData(self):
BasePage.checkData(self)
self._valid = True
def _setStatus(self, text):
self._status_label.setText(text)
class ExternalAppEditor(Qt.QDialog):
'''
    A dialog for configuring an external application action for a TaurusMainWindow.
'''
#@todo: this class should be made more generic (e.g. provide a getter for an ExternalAppAction) and then moved elsewhere
def __init__(self, parent=None):
Qt.QDialog.__init__(self, parent)
self.setModal(True)
self.setWindowTitle('External Application Editor')
self._dlgBox = Qt.QDialogButtonBox(
Qt.QDialogButtonBox.Ok | Qt.QDialogButtonBox.Cancel)
self._layout = Qt.QVBoxLayout()
self._layout1 = Qt.QGridLayout()
self._layout2 = Qt.QHBoxLayout()
self._layout.addLayout(self._layout1)
self._layout.addLayout(self._layout2)
self._layout.addWidget(self._dlgBox)
self.setLayout(self._layout)
self._icon = None
self._label = Qt.QLabel(
"\n On this page you can define an external application. \n")
self._label.setWordWrap(True)
self._layout1.addWidget(self._label, 0, 0, 1, 4)
self._execFileLabel = Qt.QLabel("Command:")
self._execFileLineEdit = Qt.QLineEdit()
self._execFileLineEdit.setMinimumSize(150, 25)
# self._execFileLineEdit.setReadOnly(True)
self._execFileButton = Qt.QPushButton()
self._execFileButton.setIcon(Qt.QIcon.fromTheme("folder-open"))
self._execFileButton.setToolTip("Browse...")
self._execFileButton.setMaximumSize(80, 25)
self._layout1.addWidget(self._execFileLabel, 2, 0, Qt.Qt.AlignRight)
self._layout1.addWidget(self._execFileLineEdit, 2, 1, Qt.Qt.AlignRight)
self._layout1.addWidget(self._execFileButton, 2, 2, Qt.Qt.AlignLeft)
self._paramsLabel = Qt.QLabel("Parameters:")
self._paramsLineEdit = Qt.QLineEdit()
self._paramsLineEdit.setMinimumSize(150, 25)
self._layout1.addWidget(self._paramsLabel, 3, 0, Qt.Qt.AlignRight)
self._layout1.addWidget(self._paramsLineEdit, 3, 1, Qt.Qt.AlignRight)
self._textLabel = Qt.QLabel("Text:")
self._textLineEdit = Qt.QLineEdit()
self._textLineEdit.setMinimumSize(150, 25)
self._layout1.addWidget(self._textLabel, 4, 0, Qt.Qt.AlignRight)
self._layout1.addWidget(self._textLineEdit, 4, 1, Qt.Qt.AlignRight)
self._iconLabel = Qt.QLabel("Icon:")
self._iconLogo = Qt.QPushButton()
self._iconLogo.setIcon(Qt.QIcon("status:image-missing.svg"))
self._iconLogo.setIconSize(Qt.QSize(60, 60))
self._layout1.addWidget(self._iconLabel, 5, 0, Qt.Qt.AlignRight)
self._layout1.addWidget(self._iconLogo, 5, 1, Qt.Qt.AlignCenter)
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout1.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
# connections
self._execFileButton.clicked.connect(self._selectExecFile)
self._execFileLineEdit.textChanged.connect(self._setDefaultText)
self._iconLogo.clicked.connect(self._selectIcon)
self._dlgBox.accepted.connect(self.accept)
self._dlgBox.rejected.connect(self.reject)
self.checkData()
self._setIcon(ExternalAppAction.DEFAULT_ICON_NAME)
def checkData(self):
if len(self._execFileLineEdit.text()) > 0:
self._dlgBox.button(Qt.QDialogButtonBox.Ok).setEnabled(True)
else:
self._dlgBox.button(Qt.QDialogButtonBox.Ok).setEnabled(False)
def _setDefaultText(self):
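        # Propose a default button text from the executable's file name, stripping
        # its extension when one is present.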
fileName = self._execFileLineEdit.text().split('/')[-1]
index = str(fileName).rfind(".")
if (index > 0):
self._textLineEdit.setText(str(fileName)[0:index])
else:
self._textLineEdit.setText(fileName)
self.checkData()
def _selectExecFile(self):
filePath = Qt.QFileDialog.getOpenFileName(self, self.tr(
"Open File"), Qt.QDir.homePath(), self.tr("All files (*)"))
if len(filePath):
self._execFileLineEdit.setText(filePath)
self._setDefaultText()
def _getExecFile(self):
return str(self._execFileLineEdit.text())
def _selectIcon(self):
from taurus.qt.qtgui.icon import QIconCatalog
catalog = QIconCatalog()
dlg = Qt.QDialog(self)
dlg.setLayout(Qt.QVBoxLayout())
dlg.layout().addWidget(catalog)
dlg.setWindowTitle('Icon Catalog')
catalog.iconSelected.connect(self._setIcon)
catalog.iconSelected.connect(dlg.accept)
dlg.exec_()
def _setIcon(self, name):
self._iconLogo.setIcon(Qt.QIcon(name))
self._iconLogo.setIconSize(Qt.QSize(60, 60))
self._iconLogo.setText("")
self._icon = name
def _getParams(self):
return str(self._paramsLineEdit.text())
# return str(self._paramsLineEdit.text()).split()
def _getText(self):
return str(self._textLineEdit.text())
def _getIcon(self):
return str(self._icon)
def _toXml(self):
root = etree.Element("ExternalApp")
command = etree.SubElement(root, "command")
command.text = self._getExecFile()
params = etree.SubElement(root, "params")
params.text = self._getParams()
text = etree.SubElement(root, "text")
text.text = self._getText()
icon = etree.SubElement(root, "icon")
icon.text = self._getIcon()
return etree.tostring(root)
@staticmethod
def getDialog():
dlg = ExternalAppEditor()
dlg.exec_()
return dlg._getExecFile(), dlg._toXml(), (dlg.result() == dlg.Accepted)
class ExternalAppPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self._externalApps = []
def initializePage(self):
BasePage.initializePage(self)
self.wizard().__setitem__("externalApps", self._getExternalApps)
self._refreshApplicationList()
def _setupUI(self):
BasePage._setupUI(self)
self.setTitle('External Applications')
self._label = Qt.QLabel(
"The GUI may include shortcuts to external applications. You can add them now.\n")
self._layout.addWidget(self._label, 0, 0)
self.setLayout(self._layout)
self._externalAppGroupBox = Qt.QGroupBox()
self._externalAppGroupBox.setCheckable(False)
self._externalAppGroupBox.setAlignment(Qt.Qt.AlignLeft)
self._externalAppGroupBox.setStyleSheet(
" QGroupBox::title { subcontrol-position: top left; padding: 5 5px; }")
self._layout.addWidget(self._externalAppGroupBox, 2, 0, 1, 1)
self._horizontalLayout = Qt.QHBoxLayout(self._externalAppGroupBox)
self._externalAppList = Qt.QListWidget(self._externalAppGroupBox)
self._horizontalLayout.addWidget(self._externalAppList)
self._verticalLayout = Qt.QVBoxLayout()
self._addButton = Qt.QPushButton("Add Application")
self._addButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._addButton)
self._removeButton = Qt.QPushButton("Remove Application")
self._removeButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._removeButton)
self._upButton = Qt.QPushButton("Move Up")
self._upButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._upButton)
self._downButton = Qt.QPushButton("Move Down")
self._downButton.setStyleSheet("text-align: left;")
self._verticalLayout.addWidget(self._downButton)
self._horizontalLayout.addLayout(self._verticalLayout)
self._addButton.setIcon(Qt.QIcon.fromTheme("list-add"))
self._removeButton.setIcon(Qt.QIcon.fromTheme("list-remove"))
self._upButton.setIcon(Qt.QIcon.fromTheme("go-up"))
self._downButton.setIcon(Qt.QIcon.fromTheme("go-down"))
self._addButton.clicked.connect(self._addApplication)
self._removeButton.clicked.connect(self._removeApplication)
self._upButton.clicked.connect(self._moveUp)
self._downButton.clicked.connect(self._moveDown)
self._externalAppList.itemDoubleClicked.connect(self._editApplication)
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
self._status_label = Qt.QLabel("Press next button to continue")
self.setStatusLabelPalette(self._status_label)
self._layout.addWidget(self._status_label, 9, 0, 1, 1)
def fromXml(self, xml):
self._externalApps = []
panelNodes = AppSettingsWizard.getArrayFromNode(
xml, "ExternalApps", default=[])
for child in panelNodes:
name = AppSettingsWizard.getValueFromNode(
child, "command", default=None)
if name:
self._externalApps.append((name, etree.tostring(child)))
def _addApplication(self):
name, xml, ok = ExternalAppEditor.getDialog()
if ok:
self._externalApps.append((name, xml))
self._refreshApplicationList()
def _editApplication(self):
        # editing of an existing application is not implemented yet; just refresh the list
self._refreshApplicationList()
def _removeApplication(self):
if len(self._externalAppList.selectedIndexes()) > 0:
self._app_id = self._externalAppList.selectedIndexes()[0].row()
self._externalApps.remove(self._externalApps[self._app_id])
self._refreshApplicationList()
def _moveUp(self):
if len(self._externalAppList.selectedIndexes()) > 0:
self._app_id = self._externalAppList.selectedIndexes()[0].row()
if self._app_id > 0:
tmp = self._externalApps[self._app_id]
self._externalApps[self._app_id] = self._externalApps[
self._app_id - 1]
self._externalApps[self._app_id - 1] = tmp
self._refreshApplicationList()
self._externalAppList.setCurrentIndex(self._externalAppList.indexFromItem(
self._externalAppList.item(self._app_id - 1)))
def _moveDown(self):
if len(self._externalAppList.selectedIndexes()) > 0:
self._app_id = self._externalAppList.selectedIndexes()[0].row()
if self._app_id < self._externalAppList.count() - 1:
tmp = self._externalApps[self._app_id]
self._externalApps[self._app_id] = self._externalApps[
self._app_id + 1]
self._externalApps[self._app_id + 1] = tmp
self._refreshApplicationList()
self._externalAppList.setCurrentIndex(self._externalAppList.indexFromItem(
self._externalAppList.item(self._app_id + 1)))
def _refreshApplicationList(self):
self._externalAppList.clear()
for panel in self._externalApps:
name, xml = panel
self._externalAppList.addItem(name)
def _getExternalApps(self):
if len(self._externalApps) <= 0:
return None
else:
return self._externalApps
def checkData(self):
BasePage.checkData(self)
self._valid = True
def _setStatus(self, text):
self._status_label.setText(text)
class MonitorPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
def initializePage(self):
BasePage.initializePage(self)
self.wizard().__setitem__("monitor", self._getMonitor)
def _setupUI(self):
BasePage._setupUI(self)
self.setTitle('Monitor List')
self._label = Qt.QLabel(
"\nIf you want to monitor some attributes, add them to the monitor list. \n")
self._label.setWordWrap(True)
self._layout.addWidget(self._label, 0, 0, 1, 4)
self._monitorLabel = Qt.QLabel("Monitor List:")
self._monitorLineEdit = Qt.QLineEdit()
self._monitorLineEdit.setToolTip(
"Comma-separated list of attribute names")
self._monitorLineEdit.setMinimumSize(400, 25)
self._monitorLineEdit.setReadOnly(False)
self._monitorButton = Qt.QPushButton()
self._monitorButton.setToolTip("Browse...")
# self._monitorButton.setIcon(Qt.QIcon.fromTheme("system-search"))
self._monitorButton.setIcon(Qt.QIcon("designer:devs_tree.png"))
self._monitorButton.setMaximumSize(80, 25)
self._monitorClearButton = Qt.QPushButton()
self._monitorClearButton.setToolTip("Clear")
self._monitorClearButton.setMaximumSize(80, 25)
self._monitorClearButton.setIcon(Qt.QIcon("actions:edit-clear.svg"))
self._layout.addWidget(self._monitorLabel, 2, 0, Qt.Qt.AlignRight)
self._layout.addWidget(self._monitorLineEdit, 2, 1, Qt.Qt.AlignRight)
self._layout.addWidget(self._monitorButton, 2, 2, Qt.Qt.AlignLeft)
self._layout.addWidget(self._monitorClearButton, 2, 3, Qt.Qt.AlignLeft)
self._monitorButton.clicked.connect(self._selectMonitor)
self._monitorClearButton.clicked.connect(self._clearMonitor)
# self._synopticClear.hide()
self._spacerItem1 = Qt.QSpacerItem(
10, 0, Qt.QSizePolicy.Fixed, Qt.QSizePolicy.Expanding)
self._layout.addItem(self._spacerItem1, 8, 0, 1, 1, Qt.Qt.AlignCenter)
self._status_label = Qt.QLabel("Press next button to continue")
self.setStatusLabelPalette(self._status_label)
self._layout.addWidget(self._status_label, 9, 0, 1, 4)
def fromXml(self, xml):
self._monitorLineEdit.setText(
AppSettingsWizard.getValueFromNode(xml, "MONITOR", default=""))
def _clearMonitor(self):
self._monitorLineEdit.clear()
# self._monitorClearButton.hide()
def _getMonitor(self):
return str(self._monitorLineEdit.text())
def _selectMonitor(self):
models, ok = taurus.qt.qtgui.panel.TaurusModelChooser.modelChooserDlg(
host=None)
if ok:
self._monitorLineEdit.setText(",".join(models))
self.checkData()
def _setStatus(self, text):
self._status_label.setText(text)
class OutroPage(BasePage):
def __init__(self, parent=None):
BasePage.__init__(self, parent)
self._valid = True
self.setTitle('Confirmation Page')
self._label1 = Qt.QLabel("XML configuration file:")
self._layout.addWidget(self._label1, 0, 0)
self._xml = Qt.QTextEdit()
self._xml.setSizePolicy(Qt.QSizePolicy(
Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding))
self._layout.addWidget(self._xml, 1, 0)
self._label2 = Qt.QLabel("Files copied")
self._layout.addWidget(self._label2, 2, 0)
self._substTable = Qt.QTableWidget()
self._substTable.setColumnCount(2)
self._substTable.setEditTriggers(self._substTable.NoEditTriggers)
self._substTable.setHorizontalHeaderLabels(
('Original file', 'File in Project dir'))
self._substTable.setSizePolicy(Qt.QSizePolicy(
Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding))
self._layout.addWidget(self._substTable, 3, 0)
def _getXml(self):
return str(self._xml.toPlainText())
def saveFile(self, fileName):
file = Qt.QFile(fileName)
if not file.open(Qt.QFile.WriteOnly | Qt.QFile.Text):
Qt.QMessageBox.warning(self, self.tr("Saving XML..."),
self.tr("Cannot write file %1:\n%2.")
.arg(fileName)
.arg(file.errorString()))
return False
file.write(str(self._xml.toPlainText()))
self._valid = True
self.checkData()
file.close()
return True
def initializePage(self):
xml, substitutions = self.wizard().generateXml()
self._xml.setText(xml)
self.wizard().__setitem__("xml", self._getXml)
self._substTable.clearContents()
self._substTable.setRowCount(len(substitutions))
for i, (dst, src) in enumerate(substitutions.items()):
item0, item1 = Qt.QTableWidgetItem(src), Qt.QTableWidgetItem(dst)
self._substTable.setItem(i, 0, item0)
self._substTable.setItem(i, 1, item1)
self._substTable.resizeColumnsToContents()
def validatePage(self):
try:
self.createProject()
except Exception, e:
Qt.QMessageBox.warning(self, 'Error creating project',
'Could not create project files. \nReason:%s' % repr(
e),
Qt.QMessageBox.Cancel)
import traceback
traceback.print_exc()
return False
return True
def createProject(self):
# prepare a log file
pdir = self.wizard().__getitem__('projectDir')
gui_name = self.wizard().__getitem__("guiName")
install_dir = os.path.join(pdir, "tgconf_{0}".format(gui_name))
if not os.path.exists(install_dir):
os.makedirs(install_dir)
logfilename = os.path.join(pdir, 'wizard.log')
logfile = open(logfilename, 'w')
logfile.write('Project created by AppSettingsWizard on %s\n' %
datetime.datetime.now().isoformat())
# copy files
for i in range(self._substTable.rowCount()):
src = unicode(self._substTable.item(i, 0).text())
dst = os.path.join(install_dir, unicode(
self._substTable.item(i, 1).text()))
if os.path.normpath(src) != os.path.normpath(dst):
shutil.copy(src, dst)
logfile.write('File copied: %s --> %s\n' % (src, dst))
# write xml config file
xmlcfgfilename = os.path.join(install_dir,
self.wizard().getXmlConfigFileName())
f = open(xmlcfgfilename, 'w')
f.write(unicode(self._xml.toPlainText()))
f.close()
logfile.write('XML Config file created: "%s"\n' % xmlcfgfilename)
# write python config file
pycfgfilename = os.path.join(install_dir,
'%s.py' % self.wizard().getConfigFilePrefix())
f = open(pycfgfilename, 'w')
f.write("XML_CONFIG = '%s'" % self.wizard().getXmlConfigFileName())
f.close()
logfile.write('Python config file created: "%s"\n' % pycfgfilename)
# write __init__.py config file
init_template = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'res', 'init.template')
f = open(init_template, 'r')
template = f.read()
f.close()
initfilename = os.path.join(install_dir, '__init__.py')
f = open(initfilename, 'w')
template = template.format(name=gui_name)
f.write(template)
f.close()
logfile.write('python init file created: "%s"\n' % initfilename)
# write setup file
setup_template = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'res', 'setup.template')
f = open(setup_template, 'r')
template = f.read()
f.close()
setup = os.path.join(pdir, "setup.py")
f = open(setup, 'w')
template = template.format(name=gui_name)
f.write(template)
f.close()
# write MANIFEST.in file
manifest_template = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'res', 'manifest.template')
f = open(manifest_template, 'r')
template = f.read()
f.close()
manifestfile = os.path.join(pdir, "MANIFEST.in")
f = open(manifestfile, 'w')
template = template.format(name=gui_name)
f.write(template)
f.close()
# if all went ok...
        msg = 'Application project was successfully created. ' +\
'You can find the files in: "%s"' % pdir
msg += '\nTip: You can install it with:\n\tpip install %s' % pdir
msg += '\nTip: And then run the application with:\n\t %s' % gui_name
details = ''
warnings = self.wizard().getProjectWarnings()
if warnings:
msg += '\n\nHowever, some fine-tuning may be needed. Please check the details:\n'
for short, long in warnings:
details += '- %s: %s\n\n' % (short, long)
logfile.write(msg + details)
logfile.close()
dlg = Qt.QMessageBox(Qt.QMessageBox.Information,
'Application project created', msg, Qt.QMessageBox.Ok, self)
dlg.setDetailedText(details)
dlg.exec_()
print
print msg + details
print
class AppSettingsWizard(Qt.QWizard):
"""
    This Wizard provides functionality for creating, from scratch, a configuration
    directory for a TaurusGUI-based application.
    The files in the configuration dir determine the default, permanent, pre-defined
    contents of the GUI. The user may add/remove more elements at run time (and
    those customizations will also be stored), but this configuration defines what a
    user will find when launching the GUI for the first time.
"""
Pages = Enumeration('Pages', ('IntroPage', 'ProjectPage', 'GeneralSettings', 'CustomLogoPage', 'SynopticPage',
'MacroServerInfo', 'InstrumentsPage', 'PanelsPage', 'ExternalAppPage', 'MonitorPage', 'OutroPage'))
SARDANA_INSTALLED = False
def __init__(self, parent=None, jdrawCommand='jdraw', configFilePrefix='config'):
Qt.QWizard.__init__(self, parent)
self.installEventFilter(self)
self._item_funcs = {}
self._pages = {}
self._jdrawCommand = jdrawCommand
self._configFilePrefix = configFilePrefix
self._loadPages()
self._substitutions = {}
self._projectWarnings = []
def getProjectWarnings(self):
return self._projectWarnings
def getConfigFilePrefix(self):
return self._configFilePrefix
def getXmlConfigFileName(self):
return "%s.xml" % self._configFilePrefix
@staticmethod
def getValueFromNode(rootNode, nodeName, default=None):
'''
        returns a value from a given node
        :param rootNode: (etree.Element) root node
        :param nodeName: the name of the node to find
        :param default: value returned if the node is None or contains an empty string
'''
node = rootNode.find(nodeName)
if (node is not None) and (node.text is not None):
return node.text
else:
return default
@staticmethod
def getArrayFromNode(rootNode, nodeName, default=None):
'''
        returns an array contained by a given node
        :param rootNode: (etree.Element) root node
        :param nodeName: the name of the node to find
        :param default: value returned if the node is None or contains an empty string
'''
array = []
node = rootNode.find(nodeName)
if (node is not None) and (node.text is not None):
for child in node:
array.append(child)
return array
else:
return default
def loadXml(self, fname):
'''
parses xml code and sets all pages according to its contents. It
raises an exception if something could not be processed
:param fname: (unicode) path to file containing xml code
'''
projectDir, cfgfile = os.path.split(fname)
f = open(fname, 'r')
xml = f.read()
root = etree.fromstring(xml)
# print self.Pages
for pageNumber in range(len(self.Pages.keys())):
self.page(pageNumber).fromXml(root)
def getXml(self):
try:
return self.__getitem__("xml")
except Exception, e:
return None
def __setitem__(self, name, value):
self._item_funcs[name] = value
def __getitem__(self, name):
for id in self.getPages():
p = self.page(id)
if isinstance(p, BasePage):
try:
return p[name]()
except Exception, e:
pass
return self._item_funcs[name]()
def addPage(self, page):
id = Qt.QWizard.addPage(self, page)
self._pages[id] = page
def setPage(self, id, page):
Qt.QWizard.setPage(self, id, page)
self._pages[id] = page
def getPages(self):
return self._pages
def _loadPages(self):
intro = IntroPage()
self.setPage(self.Pages.IntroPage, intro)
intro.setNextPageId(self.Pages.ProjectPage)
project_page = ProjectPage()
self.setPage(self.Pages.ProjectPage, project_page)
project_page.setNextPageId(self.Pages.GeneralSettings)
general_settings_page = GeneralSettings()
self.setPage(self.Pages.GeneralSettings, general_settings_page)
general_settings_page.setNextPageId(self.Pages.CustomLogoPage)
custom_logo_page = CustomLogoPage()
self.setPage(self.Pages.CustomLogoPage, custom_logo_page)
custom_logo_page.setNextPageId(self.Pages.SynopticPage)
synoptic_page = SynopticPage()
self.setPage(self.Pages.SynopticPage, synoptic_page)
try:
from sardana.taurus.qt.qtgui.extra_macroexecutor.common import \
TaurusMacroConfigurationDialog
self.SARDANA_INSTALLED = True
except:
self.SARDANA_INSTALLED = False
if self.SARDANA_INSTALLED:
synoptic_page.setNextPageId(self.Pages.MacroServerInfo)
macroserver_page = MacroServerInfoPage()
self.setPage(self.Pages.MacroServerInfo, macroserver_page)
macroserver_page.setNextPageId(self.Pages.InstrumentsPage)
instruments_page = InstrumentsPage()
self.setPage(self.Pages.InstrumentsPage, instruments_page)
instruments_page.setNextPageId(self.Pages.PanelsPage)
else:
synoptic_page.setNextPageId(self.Pages.PanelsPage)
panels_page = PanelsPage()
self.setPage(self.Pages.PanelsPage, panels_page)
panels_page.setNextPageId(self.Pages.ExternalAppPage)
external_app_page = ExternalAppPage()
self.setPage(self.Pages.ExternalAppPage, external_app_page)
external_app_page.setNextPageId(self.Pages.MonitorPage)
monitor_page = MonitorPage()
self.setPage(self.Pages.MonitorPage, monitor_page)
monitor_page.setNextPageId(self.Pages.OutroPage)
outro_page = OutroPage()
self.setPage(self.Pages.OutroPage, outro_page)
outro_page.setNextPageId(-1)
self.setOption(Qt.QWizard.CancelButtonOnLeft, True)
def generateXml(self):
'''returns the xml code corresponding to the options selected in the wizard
and a dictionary representing the paths that have been substituted.
:return: (str, dict<str,str>) The return value is a tuple whose first element
is the xml code and the second element is a dict where the keys are the
destination files and the values are the original paths.
'''
root = etree.Element("taurusgui_config")
# general settings page
guiName = etree.SubElement(root, "GUI_NAME")
guiName.text = self.__getitem__("guiName")
organizationName = etree.SubElement(root, "ORGANIZATION")
organizationName.text = self.__getitem__("organizationName")
# custom logo page
customLogo = etree.SubElement(root, "CUSTOM_LOGO")
src = self.__getitem__("customLogo")
mod_dir = os.path.join(self.__getitem__('projectDir'),
'tgconf_' + guiName.text)
mod_dir = os.path.abspath(mod_dir) # make sure mod_dir is absolute
if src is None or ":" in src:
# using registered paths
# TODO: what if they use windows paths such as "C:\foo" ?
dst = src
else:
# if src is absolute, it stays so, and if it is relative, we assume
# mod_dir as the root dir
src = os.path.join(mod_dir, src)
dst = os.path.basename(self.substitutionName(src, mod_dir))
customLogo.text = dst
# synoptic page
synopticList = self.__getitem__("synoptics")
if synopticList:
synoptics = etree.SubElement(root, "SYNOPTIC")
for src in synopticList:
src = os.path.join(mod_dir, src)
dst = self.substitutionName(src, mod_dir)
child = etree.SubElement(synoptics, "synoptic",
str=os.path.basename(dst))
# substitute any referenced files within the jdrawfiles
f = open(src, 'r')
contents = f.read()
f.close()
for ref in re.findall(r'file_name:\"(.+?)\"', contents):
# this is ok for both relative and absolute references
refsrc = os.path.join(os.path.dirname(src), ref)
refdst = self.substitutionName(refsrc, mod_dir)
if ref != refdst:
short = 'Manual editing needed in "%s"' % dst
long = ('The synoptic file "%s" references a file that '
'has been copied to the project dir in order to make the project portable. '
'Please edit "%s" and replace "%s" by "%s"') % (dst, dst, ref, refdst)
self._projectWarnings.append((short, long))
# macroserver page
if self.SARDANA_INSTALLED and self.__getitem__("macroServerName"):
macroServerName = etree.SubElement(root, "MACROSERVER_NAME")
macroServerName.text = self.__getitem__("macroServerName")
doorName = etree.SubElement(root, "DOOR_NAME")
doorName.text = self.__getitem__("doorName")
instruments = etree.SubElement(root, "INSTRUMENTS_FROM_POOL")
instruments.text = str(self.__getitem__("instruments"))
# panels page
panelList = self.__getitem__("panels")
if panelList:
panels = etree.SubElement(root, "PanelDescriptions")
for panel in panelList:
name, xml = panel
item = etree.fromstring(xml)
panels.append(item)
# external apps page
externalAppList = self.__getitem__("externalApps")
if externalAppList:
externalApps = etree.SubElement(root, "ExternalApps")
for externalApp in externalAppList:
name, xml = externalApp
item = etree.fromstring(xml)
externalApps.append(item)
# monitor page
monitor = etree.SubElement(root, "MONITOR")
monitor.text = self.__getitem__("monitor")
return etree.tostring(root, pretty_print=True), copy.copy(self._substitutions)
def substitutionName(self, src, mod_dir):
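        # Return a basename for 'src' that is unique among the files already
        # registered for copying (appending "_2", "_3", ... on clashes) and record
        # the mapping destination name -> original path in self._substitutions.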
name = os.path.basename(src)
i = 2
if os.path.dirname(os.path.abspath(src)) != os.path.abspath(mod_dir):
# do not change the name if it is the same dir!
while name in self._substitutions:
root, ext = os.path.splitext(name)
name = "%s_%i%s" % (root, i, ext)
i += 1
self._substitutions[name] = src
return name
def main():
app = Qt.QApplication([])
wizard = AppSettingsWizard()
wizard.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 41.007143 | 149 | 0.631321 |
0711b5ccedf98d759098fcf4ca4eadbc4df04e09
| 4,869 |
py
|
Python
|
measures/validation/accuracy.py
|
ryuzakyl/data-bloodhound
|
ae0413e748e55a0d2dbae35bbe96a672f313a64b
|
[
"Apache-2.0"
] | 3 |
2019-03-18T03:22:06.000Z
|
2021-04-06T07:53:51.000Z
|
measures/validation/accuracy.py
|
ryuzakyl/data-bloodhound
|
ae0413e748e55a0d2dbae35bbe96a672f313a64b
|
[
"Apache-2.0"
] | null | null | null |
measures/validation/accuracy.py
|
ryuzakyl/data-bloodhound
|
ae0413e748e55a0d2dbae35bbe96a672f313a64b
|
[
"Apache-2.0"
] | 2 |
2020-10-05T08:22:25.000Z
|
2020-10-05T08:24:02.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Victor M. Mendiola Lau <ryuzakyl@gmail.com>, February 2017
from classification.validation import knn as knn_val
from classification.validation import svm as svm_val
# ---------------------------------------------------------------
def knn_accuracy_in_euc_space(X, labels, folds=3):
    """KNN classifier accuracy in a Euclidean Space.
    Args:
        X (np.ndarray): The data array.
        labels (list, np.ndarray): The data labels.
        folds (int): Number of folds for validation.
    Returns:
        The result of the grid search procedure in a Euclidean space.
    Notes:
        * The result is a list of `_CVScoreTuple` instances. If needed, the user should iterate over it and use the relevant values for his/her analysis.
"""
return knn_val.grid_search_in_euc_space(X, labels, folds)
def knn_accuracy_in_pretopological_space(X, labels, measure, folds=3):
"""KNN classifier accuracy in a Pretopological Space.
Args:
X (np.ndarray): The data array.
labels (list, np.ndarray): The data labels.
measure (int): The type of dissimilarity to use as metric (see 'measures' module).
        folds (int): Number of folds for validation.
Returns:
The result of the grid search procedure in a pretopological space.
Notes:
        * The result is a list of `_CVScoreTuple` instances. If needed, the user should iterate over it and use the relevant values for his/her analysis.
"""
return knn_val.grid_search_in_pretopological_space(X, labels, measure, folds)
def knn_accuracy_in_dis_space(X, labels, measure, folds=3):
"""KNN classifier accuracy in a Dissimilarity Space.
Args:
X (np.ndarray): The data array.
labels (list, np.ndarray): The data labels.
measure (int): The type of dissimilarity to use as metric (see 'measures' module).
        folds (int): Number of folds for validation.
Returns:
The result of the grid search procedure in a dissimilarity space.
Notes:
        * The result is a list of `_CVScoreTuple` instances. If needed, the user should iterate over it and use the relevant values for his/her analysis.
"""
return knn_val.grid_search_in_dis_space(X, labels, measure, folds)
def svm_accuracy_in_euc_space(X, labels, folds=3):
    """SVM classifier accuracy in a Euclidean Space.
    Args:
        X (np.ndarray): The data array.
        labels (list, np.ndarray): The data labels.
        folds (int): Number of folds for validation.
    Returns:
        The result of the grid search procedure in a Euclidean space.
    Notes:
        * The result is a list of `_CVScoreTuple` instances. If needed, the user should iterate over it and use the relevant values for his/her analysis.
"""
return svm_val.grid_search_in_euc_space(X, labels, folds)
def svm_accuracy_in_euc_space_params(X, labels, params, folds=3):
    """SVM classifier accuracy in a Euclidean Space.
    Args:
        X (np.ndarray): The data array.
        labels (list, np.ndarray): The data labels.
        params (dict): Dictionary of parameters and their values.
        folds (int): Number of folds for validation.
    Returns:
        The result of the grid search procedure in a Euclidean space.
    Notes:
        * The result is a list of `_CVScoreTuple` instances. If needed, the user should iterate over it and use the relevant values for his/her analysis.
"""
return svm_val.grid_search_in_euc_space_params(X, labels, params, folds)
def svm_accuracy_in_dis_space(X, labels, measure, folds=3):
"""Grid search for SVM classifier in a Dissimilarity Space.
Args:
X (np.ndarray): The data array.
labels (list, np.ndarray): The data labels.
measure (int): The type of dissimilarity to use as metric (see 'measures' module).
        folds (int): Number of folds for validation.
Returns:
The Grid search results for the given data and labels.
"""
return svm_val.grid_search_in_dis_space(X, labels, measure, folds)
def svm_accuracy_in_dis_space_params(X, labels, measure, params, folds=3):
"""Grid search for SVM classifier in a Dissimilarity Space.
Args:
X (np.ndarray): The data array.
labels (list, np.ndarray): The data labels.
measure (int): The type of dissimilarity to use as metric (see 'measures' module).
        params (dict): Dictionary of parameters and their values.
        folds (int): Number of folds for validation.
Returns:
The Grid search results for the given data and labels.
"""
return svm_val.grid_search_in_dis_space_params(X, labels, measure, params, folds)
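# Illustrative usage sketch (an added example, not part of the original module:
# it assumes the project's `classification.validation` backends are importable
# and that the returned objects expose scikit-learn's old
# `_CVScoreTuple.mean_validation_score` attribute, which may differ by version):
#
#   results = knn_accuracy_in_euc_space(X, labels, folds=5)
#   best = max(results, key=lambda r: r.mean_validation_score)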
| 33.57931 | 150 | 0.681454 |
c289200558db6ebbacced9ff2a6427b372f8cdb6
| 498 |
py
|
Python
|
calendertest.py
|
Soonyeon-Kim/TheShadowTree_in_Seoul
|
3de33c7c9b4ce85b5fe927423b2356f2d34f1e33
|
[
"Unlicense"
] | 1 |
2019-07-08T07:11:58.000Z
|
2019-07-08T07:11:58.000Z
|
calendertest.py
|
Soonyeon-Kim/TheShadowTree_in_Seoul
|
3de33c7c9b4ce85b5fe927423b2356f2d34f1e33
|
[
"Unlicense"
] | null | null | null |
calendertest.py
|
Soonyeon-Kim/TheShadowTree_in_Seoul
|
3de33c7c9b4ce85b5fe927423b2356f2d34f1e33
|
[
"Unlicense"
] | null | null | null |
from datetime import date, timedelta
def date_cal(start_date, end_date):
d1 = date(int(start_date.split('-')[0]),int(start_date.split('-')[1]),int(start_date.split('-')[2]))
d2 = date(int(end_date.split('-')[0]),int(end_date.split('-')[1]),int(end_date.split('-')[2]))
delta = d2 - d1
datelist = []
for i in range(delta.days + 1):
a = str(d1+timedelta(days=i))
a = a.replace('-','.')
datelist.append(a)
return datelist
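# Illustrative usage (added example, not part of the original script): date_cal
# takes two 'YYYY-MM-DD' strings and returns every date in the inclusive range
# formatted as 'YYYY.MM.DD'.
if __name__ == '__main__':
    # expected output: ['2019.07.01', '2019.07.02', '2019.07.03']
    print(date_cal('2019-07-01', '2019-07-03'))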
| 27.666667 | 105 | 0.554217 |
40b769d8ab499e8451b5f892c5a33fccc467d18c
| 15,384 |
py
|
Python
|
env/Lib/site-packages/colorzero/tables.py
|
Nitinsd96/Air-Pollution-Monitoring-System
|
4870223ce7b40b4ea465620a4c3d39cf617c6eff
|
[
"MIT"
] | 10 |
2018-10-16T15:53:09.000Z
|
2020-08-19T06:06:23.000Z
|
env/Lib/site-packages/colorzero/tables.py
|
Nitinsd96/Air-Pollution-Monitoring-System
|
4870223ce7b40b4ea465620a4c3d39cf617c6eff
|
[
"MIT"
] | 23 |
2018-10-13T16:00:43.000Z
|
2019-04-27T19:08:58.000Z
|
env/Lib/site-packages/colorzero/tables.py
|
Nitinsd96/Air-Pollution-Monitoring-System
|
4870223ce7b40b4ea465620a4c3d39cf617c6eff
|
[
"MIT"
] | 2 |
2018-11-04T17:55:53.000Z
|
2018-11-18T17:33:27.000Z
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# The colorzero color library
# Copyright (c) 2016-2018 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Defines the available color names, derived from the `CSS Color Module`_ Level 3
Specification, section 4.3, along with tables for the original DOS colors, and
XTerm colors (for :func:`format` output).
.. _CSS Color Module: http://www.w3.org/TR/css3-color/#svg-color
"""
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
def _transpose(table):
# Swap keys and values in a dict, but in the case of duplicated keys, use
# the first encountered instead of the last
result = {}
for k, v in table.items():
result.setdefault(v, k)
return result
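# e.g. _transpose({0: (0, 0, 0)}) == {(0, 0, 0): 0}; hence the tables below are
# written as index -> RGB for readability but map an (R, G, B) tuple back to its
# palette entry.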
DOS_COLORS = _transpose({
# Bold, Index: (R, G, B),
(False, 0): (0, 0, 0),
(False, 1): (128, 0, 0),
(False, 2): (0, 128, 0),
(False, 3): (128, 128, 0),
(False, 4): (0, 0, 128),
(False, 5): (128, 0, 128),
(False, 6): (0, 128, 128),
(False, 7): (192, 192, 192),
(True, 0): (128, 128, 128),
(True, 1): (255, 0, 0),
(True, 2): (0, 255, 0),
(True, 3): (255, 255, 0),
(True, 4): (0, 0, 255),
(True, 5): (255, 0, 255),
(True, 6): (0, 255, 255),
(True, 7): (255, 255, 255),
})
XTERM_COLORS = _transpose({
# Index: (R, G, B)
0: (0, 0, 0),
1: (128, 0, 0),
2: (0, 128, 0),
3: (128, 128, 0),
4: (0, 0, 128),
5: (128, 0, 128),
6: (0, 128, 128),
7: (192, 192, 192),
8: (128, 128, 128),
9: (255, 0, 0),
10: (0, 255, 0),
11: (255, 255, 0),
12: (0, 0, 255),
13: (255, 0, 255),
14: (0, 255, 255),
15: (255, 255, 255),
16: (0, 0, 0),
17: (0, 0, 95),
18: (0, 0, 135),
19: (0, 0, 175),
20: (0, 0, 215),
21: (0, 0, 255),
22: (0, 95, 0),
23: (0, 95, 95),
24: (0, 95, 135),
25: (0, 95, 175),
26: (0, 95, 215),
27: (0, 95, 255),
28: (0, 135, 0),
29: (0, 135, 95),
30: (0, 135, 135),
31: (0, 135, 175),
32: (0, 135, 215),
33: (0, 135, 255),
34: (0, 175, 0),
35: (0, 175, 95),
36: (0, 175, 135),
37: (0, 175, 175),
38: (0, 175, 215),
39: (0, 175, 255),
40: (0, 215, 0),
41: (0, 215, 95),
42: (0, 215, 135),
43: (0, 215, 175),
44: (0, 215, 215),
45: (0, 215, 255),
46: (0, 255, 0),
47: (0, 255, 95),
48: (0, 255, 135),
49: (0, 255, 175),
50: (0, 255, 215),
51: (0, 255, 255),
52: (95, 0, 0),
53: (95, 0, 95),
54: (95, 0, 135),
55: (95, 0, 175),
56: (95, 0, 215),
57: (95, 0, 255),
58: (95, 95, 0),
59: (95, 95, 95),
60: (95, 95, 135),
61: (95, 95, 175),
62: (95, 95, 215),
63: (95, 95, 255),
64: (95, 135, 0),
65: (95, 135, 95),
66: (95, 135, 135),
67: (95, 135, 175),
68: (95, 135, 215),
69: (95, 135, 255),
70: (95, 175, 0),
71: (95, 175, 95),
72: (95, 175, 135),
73: (95, 175, 175),
74: (95, 175, 215),
75: (95, 175, 255),
76: (95, 215, 0),
77: (95, 215, 95),
78: (95, 215, 135),
79: (95, 215, 175),
80: (95, 215, 215),
81: (95, 215, 255),
82: (95, 255, 0),
83: (95, 255, 95),
84: (95, 255, 135),
85: (95, 255, 175),
86: (95, 255, 215),
87: (95, 255, 255),
88: (135, 0, 0),
89: (135, 0, 95),
90: (135, 0, 135),
91: (135, 0, 175),
92: (135, 0, 215),
93: (135, 0, 255),
94: (135, 95, 0),
95: (135, 95, 95),
96: (135, 95, 135),
97: (135, 95, 175),
98: (135, 95, 215),
99: (135, 95, 255),
100: (135, 135, 0),
101: (135, 135, 95),
102: (135, 135, 135),
103: (135, 135, 175),
104: (135, 135, 215),
105: (135, 135, 255),
106: (135, 175, 0),
107: (135, 175, 95),
108: (135, 175, 135),
109: (135, 175, 175),
110: (135, 175, 215),
111: (135, 175, 255),
112: (135, 215, 0),
113: (135, 215, 95),
114: (135, 215, 135),
115: (135, 215, 175),
116: (135, 215, 215),
117: (135, 215, 255),
118: (135, 255, 0),
119: (135, 255, 95),
120: (135, 255, 135),
121: (135, 255, 175),
122: (135, 255, 215),
123: (135, 255, 255),
124: (175, 0, 0),
125: (175, 0, 95),
126: (175, 0, 135),
127: (175, 0, 175),
128: (175, 0, 215),
129: (175, 0, 255),
130: (175, 95, 0),
131: (175, 95, 95),
132: (175, 95, 135),
133: (175, 95, 175),
134: (175, 95, 215),
135: (175, 95, 255),
136: (175, 135, 0),
137: (175, 135, 95),
138: (175, 135, 135),
139: (175, 135, 175),
140: (175, 135, 215),
141: (175, 135, 255),
142: (175, 175, 0),
143: (175, 175, 95),
144: (175, 175, 135),
145: (175, 175, 175),
146: (175, 175, 215),
147: (175, 175, 255),
148: (175, 215, 0),
149: (175, 215, 95),
150: (175, 215, 135),
151: (175, 215, 175),
152: (175, 215, 215),
153: (175, 215, 255),
154: (175, 255, 0),
155: (175, 255, 95),
156: (175, 255, 135),
157: (175, 255, 175),
158: (175, 255, 215),
159: (175, 255, 255),
160: (215, 0, 0),
161: (215, 0, 95),
162: (215, 0, 135),
163: (215, 0, 175),
164: (215, 0, 215),
165: (215, 0, 255),
166: (215, 95, 0),
167: (215, 95, 95),
168: (215, 95, 135),
169: (215, 95, 175),
170: (215, 95, 215),
171: (215, 95, 255),
172: (215, 135, 0),
173: (215, 135, 95),
174: (215, 135, 135),
175: (215, 135, 175),
176: (215, 135, 215),
177: (215, 135, 255),
178: (215, 175, 0),
179: (215, 175, 95),
180: (215, 175, 135),
181: (215, 175, 175),
182: (215, 175, 215),
183: (215, 175, 255),
184: (215, 215, 0),
185: (215, 215, 95),
186: (215, 215, 135),
187: (215, 215, 175),
188: (215, 215, 215),
189: (215, 215, 255),
190: (215, 255, 0),
191: (215, 255, 95),
192: (215, 255, 135),
193: (215, 255, 175),
194: (215, 255, 215),
195: (215, 255, 255),
196: (255, 0, 0),
197: (255, 0, 95),
198: (255, 0, 135),
199: (255, 0, 175),
200: (255, 0, 215),
201: (255, 0, 255),
202: (255, 95, 0),
203: (255, 95, 95),
204: (255, 95, 135),
205: (255, 95, 175),
206: (255, 95, 215),
207: (255, 95, 255),
208: (255, 135, 0),
209: (255, 135, 95),
210: (255, 135, 135),
211: (255, 135, 175),
212: (255, 135, 215),
213: (255, 135, 255),
214: (255, 175, 0),
215: (255, 175, 95),
216: (255, 175, 135),
217: (255, 175, 175),
218: (255, 175, 215),
219: (255, 175, 255),
220: (255, 215, 0),
221: (255, 215, 95),
222: (255, 215, 135),
223: (255, 215, 175),
224: (255, 215, 215),
225: (255, 215, 255),
226: (255, 255, 0),
227: (255, 255, 95),
228: (255, 255, 135),
229: (255, 255, 175),
230: (255, 255, 215),
231: (255, 255, 255),
232: (8, 8, 8),
233: (18, 18, 18),
234: (28, 28, 28),
235: (38, 38, 38),
236: (48, 48, 48),
237: (58, 58, 58),
238: (68, 68, 68),
239: (78, 78, 78),
240: (88, 88, 88),
241: (98, 98, 98),
242: (108, 108, 108),
243: (118, 118, 118),
244: (128, 128, 128),
245: (138, 138, 138),
246: (148, 148, 148),
247: (158, 158, 158),
248: (168, 168, 168),
249: (178, 178, 178),
250: (188, 188, 188),
251: (198, 198, 198),
252: (208, 208, 208),
253: (218, 218, 218),
254: (228, 228, 228),
255: (238, 238, 238),
})
NAMED_COLORS = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgreen': '#006400',
'darkgrey': '#a9a9a9',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'grey': '#808080',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgreen': '#90ee90',
'lightgrey': '#d3d3d3',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
| 31.654321 | 79 | 0.436882 |
0afb7609845a8aa4c47e7e7ac28415afba56b4a1
| 555 |
py
|
Python
|
examples/cnn_specs.py
|
edessa/RLKit
|
de257a56b7d3d1990f66c6156a127d2038940f47
|
[
"MIT"
] | null | null | null |
examples/cnn_specs.py
|
edessa/RLKit
|
de257a56b7d3d1990f66c6156a127d2038940f47
|
[
"MIT"
] | null | null | null |
examples/cnn_specs.py
|
edessa/RLKit
|
de257a56b7d3d1990f66c6156a127d2038940f47
|
[
"MIT"
] | null | null | null |
"""
padding is for image size (104, 80)
(obtained by downsample=2, crop_last_row=True)
"""
cnn_specs = dict()
# Too slow, ~150 seconds training per epoch
# Used on 84x84 size images
spec = dict(
kernel_sizes=[5,3,3,3,3],
strides=[3,1,1,2,1],
paddings=[0,1,1,1,1],
hidden_sizes=[64,64],
n_channels=[32,64,64,128,128],
)
cnn_specs["0"] = cnn_specs[0] = spec
spec = dict(
kernel_sizes=[5,3,3],
strides=[3,1,1],
paddings=[0,0,0],
hidden_sizes=[64,64],
n_channels=[32,64,64],
)
cnn_specs["1"] = cnn_specs[1] = spec
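# Illustrative lookup (both key styles refer to the same dict object):
#   assert cnn_specs[1] is cnn_specs["1"]
#   spec = cnn_specs[1]  # kernel_sizes, strides, paddings, hidden_sizes, n_channels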
| 19.137931 | 46 | 0.618018 |
92d787e99b8532361b94928245d8a65521b3b100
| 972 |
py
|
Python
|
isi_sdk_8_2_1/test/test_job_statistics_job_node_cpu.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24 |
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_1/test/test_job_statistics_job_node_cpu.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46 |
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_1/test/test_job_statistics_job_node_cpu.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29 |
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.job_statistics_job_node_cpu import JobStatisticsJobNodeCpu # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestJobStatisticsJobNodeCpu(unittest.TestCase):
"""JobStatisticsJobNodeCpu unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testJobStatisticsJobNodeCpu(self):
"""Test JobStatisticsJobNodeCpu"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_1.models.job_statistics_job_node_cpu.JobStatisticsJobNodeCpu() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.707317 | 106 | 0.726337 |
de7bfe38580a3c5ddb60cc0cbabe4897095738e3
| 1,514 |
py
|
Python
|
tests/2d_gaussian.py
|
DimitriMisiak/mcmc-red
|
caae0ce39d082e578176a5078a9184980b0851c3
|
[
"MIT"
] | null | null | null |
tests/2d_gaussian.py
|
DimitriMisiak/mcmc-red
|
caae0ce39d082e578176a5078a9184980b0851c3
|
[
"MIT"
] | null | null | null |
tests/2d_gaussian.py
|
DimitriMisiak/mcmc-red
|
caae0ce39d082e578176a5078a9184980b0851c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Handy MCMC scripts.
Test for the different fit methods (mcmc, ptmcmc, minimizer).
Author:
Dimitri Misiak (misiak@ipnl.in2p3.fr)
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import scipy.signal as sgl
from os import path
import scipy.optimize as op
import mcmc_red as mcr
# close all plots
plt.close('all')
nsample = 100
#mu1, sigma1 = 1., 1.
mu1 = np.random.uniform(-10, 10)
sigma1 = np.random.uniform(0, 10)
#mu2, sigma2 = 2., 5.
mu2 = np.random.uniform(-10, 10)
sigma2 = np.random.uniform(0, 10)
print ("Generating blob at (mu1, mu2)=({0:.2f}, {1:.2f})"
" and (sigma1, sigma2)=({2:.2f}, {3:.2f})".format(mu1, mu2, sigma1, sigma2))
blob = np.random.normal((mu1, mu2), (sigma1, sigma2), (nsample,2))
print("Checking")
print("mean =", np.mean(blob, axis=0))
print("std =", np.std(blob, axis=0))
def chi2(param):
return mcr.chi2_simple(blob, param, (sigma1, sigma2))
# XXX MCMC
# save directory
sampler_path = 'mcmc_sampler/autosave'
# running the mcmc analysis
bounds = ((-20, 20),(-20, 20))
sampler = mcr.mcmc_sampler(chi2, bounds, nsteps=1000, path=sampler_path)
# # loading the mcmc results
logd, chain, lnprob, acc = mcr.get_mcmc_sampler(sampler_path)
# LAB = ('$log\ a$', '$log\ t$', '$log\ s$')
#LAB = ('$log\ a1$', '$log\ a2$', '$log\ t1$', '$log\ t2$', '$log\ s$')
lab = ('$\mu1$','$\mu2$')
dim = int(logd['dim'])
xopt, inf, sup = mcr.mcmc_results(dim, chain, lnprob, acc, lab)
print(xopt, inf, sup)
| 24.031746 | 83 | 0.64531 |
b07e8827a295ac6401ec38dd567dd15bdf4fe7a6
| 64,385 |
py
|
Python
|
pytorch/pytorchcv/models/common.py
|
sahilparekh/imgclsmob
|
74d52457b4bf00c82d063b3f4a1a73fb6ba3863a
|
[
"MIT"
] | 1 |
2020-12-04T11:58:50.000Z
|
2020-12-04T11:58:50.000Z
|
pytorch/pytorchcv/models/common.py
|
sahilparekh/imgclsmob
|
74d52457b4bf00c82d063b3f4a1a73fb6ba3863a
|
[
"MIT"
] | null | null | null |
pytorch/pytorchcv/models/common.py
|
sahilparekh/imgclsmob
|
74d52457b4bf00c82d063b3f4a1a73fb6ba3863a
|
[
"MIT"
] | null | null | null |
"""
Common routines for models in PyTorch.
"""
__all__ = ['round_channels', 'Identity', 'Swish', 'HSigmoid', 'HSwish', 'get_activation_layer', 'SelectableDense',
'DenseBlock', 'ConvBlock1d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block',
'conv3x3_block', 'conv7x7_block', 'dwconv_block', 'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block',
'PreConvBlock', 'pre_conv1x1_block', 'pre_conv3x3_block', 'DeconvBlock', 'NormActivation',
'InterpolationBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'SABlock', 'SAConvBlock',
'saconv3x3_block', 'DucBlock', 'IBN', 'DualPathSequential', 'Concurrent', 'SequentialConcurrent',
'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass',
'MultiOutputSequential', 'ParallelConcurent', 'Flatten', 'HeatmapMaxDetBlock']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
def round_channels(channels,
divisor=8):
"""
Round weighted channel number (make divisible operation).
Parameters:
----------
channels : int or float
Original number of channels.
divisor : int, default 8
Alignment value.
Returns
-------
int
Weighted number of channels.
"""
rounded_channels = max(int(channels + divisor / 2.0) // divisor * divisor, divisor)
if float(rounded_channels) < 0.9 * channels:
rounded_channels += divisor
return rounded_channels
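# e.g. round_channels(37.5) -> 40 and round_channels(100) -> 104: values are
# aligned to the nearest multiple of `divisor` and bumped up by one `divisor`
# whenever rounding would drop below 90% of the requested channels.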
class Identity(nn.Module):
"""
Identity block.
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Swish(nn.Module):
"""
Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
"""
def forward(self, x):
return x * torch.sigmoid(x)
class HSigmoid(nn.Module):
"""
    Approximated sigmoid function, the so-called hard version of sigmoid, from 'Searching for MobileNetV3,'
https://arxiv.org/abs/1905.02244.
"""
def forward(self, x):
return F.relu6(x + 3.0, inplace=True) / 6.0
class HSwish(nn.Module):
"""
H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
Parameters:
----------
inplace : bool
Whether to use inplace version of the module.
"""
def __init__(self, inplace=False):
super(HSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_activation_layer(activation):
"""
Create activation layer from string/function.
Parameters:
----------
activation : function, or str, or nn.Module
Activation function or name of activation function.
Returns
-------
nn.Module
Activation layer.
"""
assert (activation is not None)
if isfunction(activation):
return activation()
elif isinstance(activation, str):
if activation == "relu":
return nn.ReLU(inplace=True)
elif activation == "relu6":
return nn.ReLU6(inplace=True)
elif activation == "swish":
return Swish()
elif activation == "hswish":
return HSwish(inplace=True)
elif activation == "sigmoid":
return nn.Sigmoid()
elif activation == "hsigmoid":
return HSigmoid()
elif activation == "identity":
return Identity()
else:
raise NotImplementedError()
else:
assert (isinstance(activation, nn.Module))
return activation
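# Illustrative usage:
#   act = get_activation_layer("hswish")          # -> HSwish(inplace=True)
#   act = get_activation_layer(lambda: nn.ELU())  # any zero-argument factory also works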
class SelectableDense(nn.Module):
"""
Selectable dense layer.
Parameters:
----------
in_features : int
Number of input features.
out_features : int
Number of output features.
bias : bool, default False
Whether the layer uses a bias vector.
num_options : int, default 1
Number of selectable options.
"""
def __init__(self,
in_features,
out_features,
bias=False,
num_options=1):
super(SelectableDense, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.use_bias = bias
self.num_options = num_options
self.weight = Parameter(torch.Tensor(num_options, out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(num_options, out_features))
else:
self.register_parameter("bias", None)
def forward(self, x, indices):
weight = torch.index_select(self.weight, dim=0, index=indices)
x = x.unsqueeze(-1)
x = weight.bmm(x)
x = x.squeeze(dim=-1)
if self.use_bias:
bias = torch.index_select(self.bias, dim=0, index=indices)
x += bias
return x
def extra_repr(self):
return "in_features={}, out_features={}, bias={}, num_options={}".format(
self.in_features, self.out_features, self.use_bias, self.num_options)
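# Illustrative shape check (weights are allocated but not initialized, so only shapes are meaningful):
#   dense = SelectableDense(in_features=8, out_features=4, num_options=3)
#   x = torch.randn(5, 8)
#   indices = torch.randint(0, 3, (5,))
#   y = dense(x, indices)  # -> shape (5, 4): one selected weight matrix per sample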
class DenseBlock(nn.Module):
"""
Standard dense block with Batch normalization and activation.
Parameters:
----------
in_features : int
Number of input features.
out_features : int
Number of output features.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
def __init__(self,
in_features,
out_features,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
super(DenseBlock, self).__init__()
self.activate = (activation is not None)
self.use_bn = use_bn
self.fc = nn.Linear(
in_features=in_features,
out_features=out_features,
bias=bias)
if self.use_bn:
self.bn = nn.BatchNorm1d(
num_features=out_features,
eps=bn_eps)
if self.activate:
self.activ = get_activation_layer(activation)
def forward(self, x):
x = self.fc(x)
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
class ConvBlock1d(nn.Module):
"""
Standard 1D convolution block with Batch normalization and activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size.
stride : int
Strides of the convolution.
padding : int
Padding value for convolution layer.
dilation : int
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
super(ConvBlock1d, self).__init__()
self.activate = (activation is not None)
self.use_bn = use_bn
self.conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
if self.use_bn:
self.bn = nn.BatchNorm1d(
num_features=out_channels,
eps=bn_eps)
if self.activate:
self.activ = get_activation_layer(activation)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def conv1x1(in_channels,
out_channels,
stride=1,
groups=1,
bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
groups=groups,
bias=bias)
def conv3x3(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False):
"""
Convolution 3x3 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
def depthwise_conv3x3(channels,
stride):
"""
Depthwise convolution 3x3 layer.
Parameters:
----------
channels : int
Number of input/output channels.
    stride : int or tuple/list of 2 int
Strides of the convolution.
"""
return nn.Conv2d(
in_channels=channels,
out_channels=channels,
kernel_size=3,
stride=stride,
padding=1,
groups=channels,
bias=False)
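# Note: groups=channels turns this into a per-channel (depthwise) 3x3 convolution, e.g.:
#   dw = depthwise_conv3x3(channels=32, stride=1)
#   y = dw(torch.randn(1, 32, 56, 56))  # -> (1, 32, 56, 56)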
class ConvBlock(nn.Module):
"""
Standard convolution block with Batch normalization and activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
super(ConvBlock, self).__init__()
self.activate = (activation is not None)
self.use_bn = use_bn
self.use_pad = (isinstance(padding, (list, tuple)) and (len(padding) == 4))
if self.use_pad:
self.pad = nn.ZeroPad2d(padding=padding)
padding = 0
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
if self.use_bn:
self.bn = nn.BatchNorm2d(
num_features=out_channels,
eps=bn_eps)
if self.activate:
self.activ = get_activation_layer(activation)
def forward(self, x):
if self.use_pad:
x = self.pad(x)
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
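# Illustrative usage:
#   block = ConvBlock(in_channels=3, out_channels=16, kernel_size=3, stride=2, padding=1)
#   y = block(torch.randn(1, 3, 224, 224))  # -> (1, 16, 112, 112) after conv + BN + ReLU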
def conv1x1_block(in_channels,
out_channels,
stride=1,
padding=0,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
1x1 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 0
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=padding,
groups=groups,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
def conv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
3x3 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
def conv5x5_block(in_channels,
out_channels,
stride=1,
padding=2,
dilation=1,
groups=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
5x5 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 2
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=5,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
bn_eps=bn_eps,
activation=activation)
def conv7x7_block(in_channels,
out_channels,
stride=1,
padding=3,
bias=False,
use_bn=True,
activation=(lambda: nn.ReLU(inplace=True))):
"""
7x7 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
    stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
stride=stride,
padding=padding,
bias=bias,
use_bn=use_bn,
activation=activation)
def dwconv_block(in_channels,
out_channels,
kernel_size,
stride=1,
padding=1,
dilation=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
Depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=out_channels,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
def dwconv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
3x3 depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return dwconv_block(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
bn_eps=bn_eps,
activation=activation)
def dwconv5x5_block(in_channels,
out_channels,
stride=1,
padding=2,
dilation=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
5x5 depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 2
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return dwconv_block(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=5,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
bn_eps=bn_eps,
activation=activation)
class DwsConvBlock(nn.Module):
"""
    Depthwise separable convolution block with BatchNorm and activation at each convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
dw_use_bn : bool, default True
Whether to use BatchNorm layer (depthwise convolution block).
pw_use_bn : bool, default True
Whether to use BatchNorm layer (pointwise convolution block).
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
dw_activation : function or str or None, default nn.ReLU(inplace=True)
Activation function after the depthwise convolution block.
pw_activation : function or str or None, default nn.ReLU(inplace=True)
Activation function after the pointwise convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
bias=False,
dw_use_bn=True,
pw_use_bn=True,
bn_eps=1e-5,
dw_activation=(lambda: nn.ReLU(inplace=True)),
pw_activation=(lambda: nn.ReLU(inplace=True))):
super(DwsConvBlock, self).__init__()
self.dw_conv = dwconv_block(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
use_bn=dw_use_bn,
bn_eps=bn_eps,
activation=dw_activation)
self.pw_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bias=bias,
use_bn=pw_use_bn,
bn_eps=bn_eps,
activation=pw_activation)
def forward(self, x):
x = self.dw_conv(x)
x = self.pw_conv(x)
return x
def dwsconv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
bias=False,
bn_eps=1e-5,
dw_activation=(lambda: nn.ReLU(inplace=True)),
pw_activation=(lambda: nn.ReLU(inplace=True)),
**kwargs):
"""
3x3 depthwise separable version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
dw_activation : function or str or None, default nn.ReLU(inplace=True)
Activation function after the depthwise convolution block.
pw_activation : function or str or None, default nn.ReLU(inplace=True)
Activation function after the pointwise convolution block.
"""
return DwsConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
bn_eps=bn_eps,
dw_activation=dw_activation,
pw_activation=pw_activation,
**kwargs)
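# Illustrative usage (MobileNet-style factorization):
#   block = dwsconv3x3_block(in_channels=32, out_channels=64)
#   y = block(torch.randn(1, 32, 56, 56))  # depthwise 3x3 over 32 channels, then pointwise 1x1 to 64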
class PreConvBlock(nn.Module):
"""
Convolution block with Batch normalization and ReLU pre-activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
return_preact : bool, default False
        Whether to return the pre-activation. It's used by PreResNet.
activate : bool, default True
        Whether to activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
bias=False,
use_bn=True,
return_preact=False,
activate=True):
super(PreConvBlock, self).__init__()
self.return_preact = return_preact
self.activate = activate
self.use_bn = use_bn
if self.use_bn:
self.bn = nn.BatchNorm2d(num_features=in_channels)
if self.activate:
self.activ = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
def forward(self, x):
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
if self.return_preact:
x_pre_activ = x
x = self.conv(x)
if self.return_preact:
return x, x_pre_activ
else:
return x
def pre_conv1x1_block(in_channels,
out_channels,
stride=1,
bias=False,
use_bn=True,
return_preact=False,
activate=True):
"""
1x1 version of the pre-activated convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
return_preact : bool, default False
        Whether to return the pre-activation.
activate : bool, default True
        Whether to activate the convolution block.
"""
return PreConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=0,
bias=bias,
use_bn=use_bn,
return_preact=return_preact,
activate=activate)
def pre_conv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
bias=False,
use_bn=True,
return_preact=False,
activate=True):
"""
3x3 version of the pre-activated convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
return_preact : bool, default False
        Whether to return the pre-activation.
activate : bool, default True
        Whether to activate the convolution block.
"""
return PreConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
use_bn=use_bn,
return_preact=return_preact,
activate=activate)
class DeconvBlock(nn.Module):
"""
Deconvolution block with batch normalization and activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the deconvolution.
padding : int or tuple/list of 2 int
Padding value for deconvolution layer.
ext_padding : tuple/list of 4 int, default None
Extra padding value for deconvolution layer.
out_padding : int or tuple/list of 2 int
Output padding value for deconvolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for deconvolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
ext_padding=None,
out_padding=0,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
super(DeconvBlock, self).__init__()
self.activate = (activation is not None)
self.use_bn = use_bn
self.use_pad = (ext_padding is not None)
if self.use_pad:
self.pad = nn.ZeroPad2d(padding=ext_padding)
self.conv = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=out_padding,
dilation=dilation,
groups=groups,
bias=bias)
if self.use_bn:
self.bn = nn.BatchNorm2d(
num_features=out_channels,
eps=bn_eps)
if self.activate:
self.activ = get_activation_layer(activation)
def forward(self, x):
if self.use_pad:
x = self.pad(x)
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
class NormActivation(nn.Module):
"""
Activation block with preliminary batch normalization. It's used by itself as the final block in PreResNet.
Parameters:
----------
in_channels : int
Number of input channels.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
"""
def __init__(self,
in_channels,
bn_eps=1e-5):
super(NormActivation, self).__init__()
self.bn = nn.BatchNorm2d(
num_features=in_channels,
eps=bn_eps)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.bn(x)
x = self.activ(x)
return x
class InterpolationBlock(nn.Module):
"""
Interpolation upsampling block.
Parameters:
----------
scale_factor : float
Multiplier for spatial size.
out_size : tuple of 2 int, default None
Spatial size of the output tensor for the bilinear interpolation operation.
mode : str, default 'bilinear'
Algorithm used for upsampling.
align_corners : bool, default True
Whether to align the corner pixels of the input and output tensors.
up : bool, default True
Whether to upsample or downsample.
"""
def __init__(self,
scale_factor,
out_size=None,
mode="bilinear",
align_corners=True,
up=True):
super(InterpolationBlock, self).__init__()
self.scale_factor = scale_factor
self.out_size = out_size
self.mode = mode
self.align_corners = align_corners
self.up = up
def forward(self, x, size=None):
if (self.mode == "bilinear") or (size is not None):
out_size = self.calc_out_size(x) if size is None else size
return F.interpolate(
input=x,
size=out_size,
mode=self.mode,
align_corners=self.align_corners)
else:
return F.interpolate(
input=x,
scale_factor=self.scale_factor,
mode=self.mode,
align_corners=self.align_corners)
def calc_out_size(self, x):
if self.out_size is not None:
return self.out_size
if self.up:
return tuple(s * self.scale_factor for s in x.shape[2:])
else:
return tuple(s // self.scale_factor for s in x.shape[2:])
def __repr__(self):
s = '{name}(scale_factor={scale_factor}, out_size={out_size}, mode={mode}, align_corners={align_corners}, up={up})' # noqa
return s.format(
name=self.__class__.__name__,
scale_factor=self.scale_factor,
out_size=self.out_size,
mode=self.mode,
align_corners=self.align_corners,
up=self.up)
def calc_flops(self, x):
assert (x.shape[0] == 1)
if self.mode == "bilinear":
num_flops = 9 * x.numel()
else:
num_flops = 4 * x.numel()
num_macs = 0
return num_flops, num_macs
def channel_shuffle(x,
groups):
"""
Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
x : Tensor
Input tensor.
groups : int
Number of groups.
Returns
-------
Tensor
Resulted tensor.
"""
batch, channels, height, width = x.size()
# assert (channels % groups == 0)
channels_per_group = channels // groups
x = x.view(batch, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batch, channels, height, width)
return x
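# Illustrative effect for groups=2: channels [g0c0, g0c1, g1c0, g1c1] become [g0c0, g1c0, g0c1, g1c1],
# so a following group convolution sees channels from both groups:
#   x = torch.arange(4).view(1, 4, 1, 1).float()
#   channel_shuffle(x, groups=2).flatten().tolist()  # -> [0.0, 2.0, 1.0, 3.0]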
class ChannelShuffle(nn.Module):
"""
Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups):
super(ChannelShuffle, self).__init__()
# assert (channels % groups == 0)
if channels % groups != 0:
raise ValueError('channels must be divisible by groups')
self.groups = groups
def forward(self, x):
return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
groups):
"""
Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083. The alternative version.
Parameters:
----------
x : Tensor
Input tensor.
groups : int
Number of groups.
Returns
-------
Tensor
Resulted tensor.
"""
batch, channels, height, width = x.size()
# assert (channels % groups == 0)
channels_per_group = channels // groups
x = x.view(batch, channels_per_group, groups, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batch, channels, height, width)
return x
class ChannelShuffle2(nn.Module):
"""
Channel shuffle layer. This is a wrapper over the same operation. It is designed to save the number of groups.
The alternative version.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups):
super(ChannelShuffle2, self).__init__()
# assert (channels % groups == 0)
if channels % groups != 0:
raise ValueError('channels must be divisible by groups')
self.groups = groups
def forward(self, x):
return channel_shuffle2(x, self.groups)
class SEBlock(nn.Module):
"""
Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : int
Number of channels.
reduction : int, default 16
Squeeze reduction value.
mid_channels : int or None, default None
Number of middle channels.
round_mid : bool, default False
Whether to round middle channel number (make divisible by 8).
use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    mid_activation : function, or str, or nn.Module, default 'relu'
Activation function after the first convolution.
out_activation : function, or str, or nn.Module, default 'sigmoid'
Activation function after the last convolution.
"""
def __init__(self,
channels,
reduction=16,
mid_channels=None,
round_mid=False,
use_conv=True,
mid_activation=(lambda: nn.ReLU(inplace=True)),
out_activation=(lambda: nn.Sigmoid())):
super(SEBlock, self).__init__()
self.use_conv = use_conv
if mid_channels is None:
mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
self.pool = nn.AdaptiveAvgPool2d(output_size=1)
if use_conv:
self.conv1 = conv1x1(
in_channels=channels,
out_channels=mid_channels,
bias=True)
else:
self.fc1 = nn.Linear(
in_features=channels,
out_features=mid_channels)
self.activ = get_activation_layer(mid_activation)
if use_conv:
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=channels,
bias=True)
else:
self.fc2 = nn.Linear(
in_features=mid_channels,
out_features=channels)
self.sigmoid = get_activation_layer(out_activation)
def forward(self, x):
w = self.pool(x)
if not self.use_conv:
w = w.view(x.size(0), -1)
w = self.conv1(w) if self.use_conv else self.fc1(w)
w = self.activ(w)
w = self.conv2(w) if self.use_conv else self.fc2(w)
w = self.sigmoid(w)
if not self.use_conv:
w = w.unsqueeze(2).unsqueeze(3)
x = x * w
return x
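# Illustrative usage:
#   se = SEBlock(channels=64, reduction=16)  # squeeze to 64 // 16 = 4 channels, then excite back to 64
#   y = se(torch.randn(2, 64, 28, 28))       # -> (2, 64, 28, 28), channel-wise rescaled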
class SABlock(nn.Module):
"""
Split-Attention block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
Parameters:
----------
out_channels : int
Number of output channels.
groups : int
Number of channel groups (cardinality, without radix).
radix : int
Number of splits within a cardinal group.
reduction : int, default 4
Squeeze reduction value.
min_channels : int, default 32
Minimal number of squeezed channels.
use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
"""
def __init__(self,
out_channels,
groups,
radix,
reduction=4,
min_channels=32,
use_conv=True,
bn_eps=1e-5):
super(SABlock, self).__init__()
self.groups = groups
self.radix = radix
self.use_conv = use_conv
in_channels = out_channels * radix
mid_channels = max(in_channels // reduction, min_channels)
self.pool = nn.AdaptiveAvgPool2d(output_size=1)
if use_conv:
self.conv1 = conv1x1(
in_channels=out_channels,
out_channels=mid_channels,
bias=True)
else:
self.fc1 = nn.Linear(
in_features=out_channels,
out_features=mid_channels)
self.bn = nn.BatchNorm2d(
num_features=mid_channels,
eps=bn_eps)
self.activ = nn.ReLU(inplace=True)
if use_conv:
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=in_channels,
bias=True)
else:
self.fc2 = nn.Linear(
in_features=mid_channels,
out_features=in_channels)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
batch, channels, height, width = x.size()
x = x.view(batch, self.radix, channels // self.radix, height, width)
w = x.sum(dim=1)
w = self.pool(w)
if not self.use_conv:
w = w.view(x.size(0), -1)
w = self.conv1(w) if self.use_conv else self.fc1(w)
w = self.bn(w)
w = self.activ(w)
w = self.conv2(w) if self.use_conv else self.fc2(w)
w = w.view(batch, self.groups, self.radix, -1)
w = torch.transpose(w, 1, 2).contiguous()
w = self.softmax(w)
w = w.view(batch, self.radix, -1, 1, 1)
x = x * w
x = x.sum(dim=1)
return x
class SAConvBlock(nn.Module):
"""
Split-Attention convolution block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
radix : int, default 2
Number of splits within a cardinal group.
reduction : int, default 4
Squeeze reduction value.
min_channels : int, default 32
Minimal number of squeezed channels.
use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True)),
radix=2,
reduction=4,
min_channels=32,
use_conv=True):
super(SAConvBlock, self).__init__()
self.conv = ConvBlock(
in_channels=in_channels,
out_channels=(out_channels * radix),
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=(groups * radix),
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
self.att = SABlock(
out_channels=out_channels,
groups=groups,
radix=radix,
reduction=reduction,
min_channels=min_channels,
use_conv=use_conv,
bn_eps=bn_eps)
def forward(self, x):
x = self.conv(x)
x = self.att(x)
return x
def saconv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
**kwargs):
"""
3x3 version of the Split-Attention convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
"""
return SAConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
**kwargs)
class DucBlock(nn.Module):
"""
Dense Upsampling Convolution (DUC) block from 'Understanding Convolution for Semantic Segmentation,'
https://arxiv.org/abs/1702.08502.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
scale_factor : int
Multiplier for spatial size.
"""
def __init__(self,
in_channels,
out_channels,
scale_factor):
super(DucBlock, self).__init__()
mid_channels = (scale_factor * scale_factor) * out_channels
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels)
self.pix_shuffle = nn.PixelShuffle(upscale_factor=scale_factor)
def forward(self, x):
x = self.conv(x)
x = self.pix_shuffle(x)
return x
class IBN(nn.Module):
"""
Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
channels : int
Number of channels.
    first_fraction : float, default 0.5
        Fraction of channels assigned to the first normalization.
    inst_first : bool, default True
        Whether instance normalization is applied to the first part of the channels.
"""
def __init__(self,
channels,
first_fraction=0.5,
inst_first=True):
super(IBN, self).__init__()
self.inst_first = inst_first
h1_channels = int(math.floor(channels * first_fraction))
h2_channels = channels - h1_channels
self.split_sections = [h1_channels, h2_channels]
if self.inst_first:
self.inst_norm = nn.InstanceNorm2d(
num_features=h1_channels,
affine=True)
self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)
else:
self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)
self.inst_norm = nn.InstanceNorm2d(
num_features=h2_channels,
affine=True)
def forward(self, x):
x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)
if self.inst_first:
x1 = self.inst_norm(x1.contiguous())
x2 = self.batch_norm(x2.contiguous())
else:
x1 = self.batch_norm(x1.contiguous())
x2 = self.inst_norm(x2.contiguous())
x = torch.cat((x1, x2), dim=1)
return x
class DualPathSequential(nn.Sequential):
"""
A sequential container for modules with dual inputs/outputs.
Modules will be executed in the order they are added.
Parameters:
----------
return_two : bool, default True
        Whether to return two outputs after execution.
first_ordinals : int, default 0
Number of the first modules with single input/output.
last_ordinals : int, default 0
Number of the final modules with single input/output.
dual_path_scheme : function
Scheme of dual path response for a module.
dual_path_scheme_ordinal : function
Scheme of dual path response for an ordinal module.
"""
def __init__(self,
return_two=True,
first_ordinals=0,
last_ordinals=0,
dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
super(DualPathSequential, self).__init__()
self.return_two = return_two
self.first_ordinals = first_ordinals
self.last_ordinals = last_ordinals
self.dual_path_scheme = dual_path_scheme
self.dual_path_scheme_ordinal = dual_path_scheme_ordinal
def forward(self, x1, x2=None):
length = len(self._modules.values())
for i, module in enumerate(self._modules.values()):
if (i < self.first_ordinals) or (i >= length - self.last_ordinals):
x1, x2 = self.dual_path_scheme_ordinal(module, x1, x2)
else:
x1, x2 = self.dual_path_scheme(module, x1, x2)
if self.return_two:
return x1, x2
else:
return x1
class Concurrent(nn.Sequential):
"""
A container for concatenation of modules on the base of the sequential container.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
stack : bool, default False
Whether to concatenate tensors along a new dimension.
"""
def __init__(self,
axis=1,
stack=False):
super(Concurrent, self).__init__()
self.axis = axis
self.stack = stack
def forward(self, x):
out = []
for module in self._modules.values():
out.append(module(x))
if self.stack:
out = torch.stack(tuple(out), dim=self.axis)
else:
out = torch.cat(tuple(out), dim=self.axis)
return out
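# Illustrative usage (branches share one input; outputs are concatenated on dim 1):
#   branches = Concurrent()
#   branches.add_module("branch1", conv1x1(16, 8))
#   branches.add_module("branch2", conv3x3(16, 8))
#   y = branches(torch.randn(1, 16, 32, 32))  # -> (1, 16, 32, 32): 8 + 8 channels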
class SequentialConcurrent(nn.Sequential):
"""
A sequential container with concatenated outputs.
Modules will be executed in the order they are added.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
stack : bool, default False
Whether to concatenate tensors along a new dimension.
cat_input : bool, default True
Whether to concatenate input tensor.
"""
def __init__(self,
axis=1,
stack=False,
cat_input=True):
super(SequentialConcurrent, self).__init__()
self.axis = axis
self.stack = stack
self.cat_input = cat_input
def forward(self, x):
out = [x] if self.cat_input else []
for module in self._modules.values():
x = module(x)
out.append(x)
if self.stack:
out = torch.stack(tuple(out), dim=self.axis)
else:
out = torch.cat(tuple(out), dim=self.axis)
return out
class ParametricSequential(nn.Sequential):
"""
A sequential container for modules with parameters.
Modules will be executed in the order they are added.
"""
def __init__(self, *args):
super(ParametricSequential, self).__init__(*args)
def forward(self, x, **kwargs):
for module in self._modules.values():
x = module(x, **kwargs)
return x
class ParametricConcurrent(nn.Sequential):
"""
A container for concatenation of modules with parameters.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
"""
def __init__(self, axis=1):
super(ParametricConcurrent, self).__init__()
self.axis = axis
def forward(self, x, **kwargs):
out = []
for module in self._modules.values():
out.append(module(x, **kwargs))
out = torch.cat(tuple(out), dim=self.axis)
return out
class Hourglass(nn.Module):
"""
    An hourglass block.
Parameters:
----------
down_seq : nn.Sequential
Down modules as sequential.
up_seq : nn.Sequential
Up modules as sequential.
skip_seq : nn.Sequential
Skip connection modules as sequential.
merge_type : str, default 'add'
Type of concatenation of up and skip outputs.
return_first_skip : bool, default False
        Whether to return the first skip connection output. Used in ResAttNet.
"""
def __init__(self,
down_seq,
up_seq,
skip_seq,
merge_type="add",
return_first_skip=False):
super(Hourglass, self).__init__()
self.depth = len(down_seq)
assert (merge_type in ["add"])
assert (len(up_seq) == self.depth)
assert (len(skip_seq) in (self.depth, self.depth + 1))
self.merge_type = merge_type
self.return_first_skip = return_first_skip
self.extra_skip = (len(skip_seq) == self.depth + 1)
self.down_seq = down_seq
self.up_seq = up_seq
self.skip_seq = skip_seq
def forward(self, x, **kwargs):
y = None
down_outs = [x]
for down_module in self.down_seq._modules.values():
x = down_module(x)
down_outs.append(x)
for i in range(len(down_outs)):
if i != 0:
y = down_outs[self.depth - i]
skip_module = self.skip_seq[self.depth - i]
y = skip_module(y)
if (y is not None) and (self.merge_type == "add"):
x = x + y
if i != len(down_outs) - 1:
if (i == 0) and self.extra_skip:
skip_module = self.skip_seq[self.depth]
x = skip_module(x)
up_module = self.up_seq[self.depth - 1 - i]
x = up_module(x)
if self.return_first_skip:
return x, y
else:
return x
class SesquialteralHourglass(nn.Module):
"""
A sesquialteral hourglass block.
Parameters:
----------
down1_seq : nn.Sequential
The first down modules as sequential.
skip1_seq : nn.Sequential
The first skip connection modules as sequential.
up_seq : nn.Sequential
Up modules as sequential.
skip2_seq : nn.Sequential
The second skip connection modules as sequential.
down2_seq : nn.Sequential
The second down modules as sequential.
merge_type : str, default 'cat'
Type of concatenation of up and skip outputs.
"""
def __init__(self,
down1_seq,
skip1_seq,
up_seq,
skip2_seq,
down2_seq,
merge_type="cat"):
super(SesquialteralHourglass, self).__init__()
assert (len(down1_seq) == len(up_seq))
assert (len(down1_seq) == len(down2_seq))
assert (len(skip1_seq) == len(skip2_seq))
assert (len(down1_seq) == len(skip1_seq) - 1)
assert (merge_type in ["cat", "add"])
self.merge_type = merge_type
self.depth = len(down1_seq)
self.down1_seq = down1_seq
self.skip1_seq = skip1_seq
self.up_seq = up_seq
self.skip2_seq = skip2_seq
self.down2_seq = down2_seq
def _merge(self, x, y):
if y is not None:
if self.merge_type == "cat":
x = torch.cat((x, y), dim=1)
elif self.merge_type == "add":
x = x + y
return x
def forward(self, x, **kwargs):
y = self.skip1_seq[0](x)
skip1_outs = [y]
for i in range(self.depth):
x = self.down1_seq[i](x)
y = self.skip1_seq[i + 1](x)
skip1_outs.append(y)
x = skip1_outs[self.depth]
y = self.skip2_seq[0](x)
skip2_outs = [y]
for i in range(self.depth):
x = self.up_seq[i](x)
y = skip1_outs[self.depth - 1 - i]
x = self._merge(x, y)
y = self.skip2_seq[i + 1](x)
skip2_outs.append(y)
x = self.skip2_seq[self.depth](x)
for i in range(self.depth):
x = self.down2_seq[i](x)
y = skip2_outs[self.depth - 1 - i]
x = self._merge(x, y)
return x
class MultiOutputSequential(nn.Sequential):
"""
A sequential container with multiple outputs.
Modules will be executed in the order they are added.
Parameters:
----------
multi_output : bool, default True
        Whether to return multiple outputs.
dual_output : bool, default False
Whether to return dual output.
return_last : bool, default True
Whether to forcibly return last value.
"""
def __init__(self,
multi_output=True,
dual_output=False,
return_last=True):
super(MultiOutputSequential, self).__init__()
self.multi_output = multi_output
self.dual_output = dual_output
self.return_last = return_last
def forward(self, x):
outs = []
for module in self._modules.values():
x = module(x)
if hasattr(module, "do_output") and module.do_output:
outs.append(x)
elif hasattr(module, "do_output2") and module.do_output2:
assert (type(x) == tuple)
outs.extend(x[1])
x = x[0]
if self.multi_output:
return [x] + outs if self.return_last else outs
elif self.dual_output:
return x, outs
else:
return x
class ParallelConcurent(nn.Sequential):
"""
A sequential container with multiple inputs and multiple outputs.
Modules will be executed in the order they are added.
"""
def __init__(self):
super(ParallelConcurent, self).__init__()
def forward(self, x):
out = []
for module, xi in zip(self._modules.values(), x):
out.append(module(xi))
return out
class Flatten(nn.Module):
"""
Simple flatten module.
"""
def forward(self, x):
return x.view(x.size(0), -1)
class HeatmapMaxDetBlock(nn.Module):
"""
Heatmap maximum detector block (for human pose estimation task).
"""
def __init__(self):
super(HeatmapMaxDetBlock, self).__init__()
def forward(self, x):
heatmap = x
vector_dim = 2
batch = heatmap.shape[0]
channels = heatmap.shape[1]
in_size = x.shape[2:]
heatmap_vector = heatmap.view(batch, channels, -1)
scores, indices = heatmap_vector.max(dim=vector_dim, keepdims=True)
scores_mask = (scores > 0.0).float()
pts_x = (indices % in_size[1]) * scores_mask
pts_y = (indices // in_size[1]) * scores_mask
pts = torch.cat((pts_x, pts_y, scores), dim=vector_dim)
for b in range(batch):
for k in range(channels):
hm = heatmap[b, k, :, :]
px = int(pts[b, k, 0])
py = int(pts[b, k, 1])
if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1):
pts[b, k, 0] += (hm[py, px + 1] - hm[py, px - 1]).sign() * 0.25
pts[b, k, 1] += (hm[py + 1, px] - hm[py - 1, px]).sign() * 0.25
return pts
@staticmethod
def calc_flops(x):
assert (x.shape[0] == 1)
num_flops = x.numel() + 26 * x.shape[1]
num_macs = 0
return num_flops, num_macs
| 31.468719 | 130 | 0.577262 |
92b83c2702b1993bd3da2408e821dd3a7aad14c4
| 2,807 |
py
|
Python
|
mybot/lebowski/actions.py
|
osboo/sharegood
|
76cca0fa8a65256d6694de66929dca9f21ebdbf9
|
[
"MIT"
] | null | null | null |
mybot/lebowski/actions.py
|
osboo/sharegood
|
76cca0fa8a65256d6694de66929dca9f21ebdbf9
|
[
"MIT"
] | 22 |
2021-08-08T15:33:50.000Z
|
2021-09-24T01:12:54.000Z
|
mybot/lebowski/actions.py
|
osboo/sharegood
|
76cca0fa8a65256d6694de66929dca9f21ebdbf9
|
[
"MIT"
] | null | null | null |
import logging
from azure.storage.table import TableService
from lebowski.azure_connections import AKVConnector
from lebowski.db import DBHelper
from lebowski.enums import CCY, Tables
from lebowski.external_api import get_eur_rates, get_gas_quotes
from lebowski.stat import convert_spendings_to_eur, get_total_mileage, get_total_spending_eur
def add_gas_action(args: list, storage: TableService, user_id: int, akv: AKVConnector) -> str:
[amount, ccy, volume] = args
db = DBHelper(storage)
if volume is None:
try:
price_eur = get_gas_quotes(akv.get_gas_quotes_api_token())
if ccy != CCY.EUR:
rate = get_eur_rates(akv.get_fx_quotes_api_token())[ccy]
else:
rate = 1.0
volume = amount / rate / price_eur
except Exception as e:
logging.error(e)
volume = None
return db.add_gas_record(user_id, amount, ccy, volume)
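# Illustrative numbers (assuming get_gas_quotes returns EUR per litre and get_eur_rates
# returns units of CCY per EUR): amount=100 BYN, rate=2.93 BYN/EUR, price_eur=1.50 EUR/l
# gives volume = 100 / 2.93 / 1.50 ~ 22.75 litres.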
def add_mileage_action(args: list, storage: TableService, user_id: int, akv: AKVConnector) -> str:
[mileage] = args
db = DBHelper(storage)
adding_mileage_report = db.add_mileage_record(user_id, mileage)
result_list = []
result_list.append(adding_mileage_report)
reminders = db.list_reminders(user_id)
for r in reminders:
        if "Уже наступило" in r:  # "Уже наступило" ~ "already due": only surface reminders that have come due
result_list.append(r)
return "\n".join(result_list)
def add_car_goods_action(args: list, storage: TableService, user_id, akv: AKVConnector) -> str:
[amount, ccy, description] = args
db = DBHelper(storage)
return db.add_car_goods_record(user_id, amount, ccy, description)
def add_car_repair_action(args: list, storage: TableService, user_id, akv: AKVConnector) -> str:
[amount, ccy, description] = args
db = DBHelper(storage)
return db.add_car_repair_record(user_id, amount, ccy, description)
def add_mileage_reminder_action(args: list, storage: TableService, user_id: int, akv: AKVConnector) -> str:
[target_mileage, description] = args
db = DBHelper(storage)
return db.add_mileage_reminder_record(user_id, target_mileage, description)
def compute_stat_action(dataset: dict, akv: AKVConnector) -> dict:
result = {}
result['total_mileage'] = get_total_mileage(dataset[Tables.MILEAGE])
try:
rates = get_eur_rates(akv.get_fx_quotes_api_token())
except Exception as e:
logging.error(e)
rates = {
CCY.BYN: 2.93,
CCY.RUB: 85.30,
CCY.USD: 1.17
}
df_spendings_eur = convert_spendings_to_eur(dataset[Tables.SPENDINGS], rates)
result['total_spending'] = get_total_spending_eur(df_spendings_eur)
result['km_cost'] = result['total_spending'] / result['total_mileage'] if result['total_mileage'] > 0 else 0.0
return result
| 35.0875 | 114 | 0.694336 |
305af257e6d6ffba6ff0ed6653fa408f2830507f
| 4,451 |
py
|
Python
|
dynaconf/vendor_src/ruamel/yaml/scalarint.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 2,293 |
2015-08-14T22:39:31.000Z
|
2022-03-31T12:44:49.000Z
|
dynaconf/vendor_src/ruamel/yaml/scalarint.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 676 |
2015-08-20T19:29:56.000Z
|
2022-03-31T13:45:51.000Z
|
dynaconf/vendor_src/ruamel/yaml/scalarint.py
|
sephiartlist/dynaconf
|
9c5f60b289c1f0fa3f899f1962a8fe5712c74eab
|
[
"MIT"
] | 255 |
2015-12-02T21:16:33.000Z
|
2022-03-20T22:03:46.000Z
|
# coding: utf-8
from __future__ import print_function, absolute_import, division, unicode_literals
from .compat import no_limit_int # NOQA
from .anchor import Anchor
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt']
class ScalarInt(no_limit_int):
def __new__(cls, *args, **kw):
# type: (Any, Any, Any) -> Any
width = kw.pop('width', None) # type: ignore
underscore = kw.pop('underscore', None) # type: ignore
anchor = kw.pop('anchor', None) # type: ignore
v = no_limit_int.__new__(cls, *args, **kw) # type: ignore
v._width = width
v._underscore = underscore
if anchor is not None:
v.yaml_set_anchor(anchor, always_dump=True)
return v
def __iadd__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self + a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
def __ifloordiv__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self // a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
def __imul__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self * a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
def __ipow__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self ** a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
def __isub__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self - a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
@property
def anchor(self):
# type: () -> Any
if not hasattr(self, Anchor.attrib):
setattr(self, Anchor.attrib, Anchor())
return getattr(self, Anchor.attrib)
def yaml_anchor(self, any=False):
# type: (bool) -> Any
if not hasattr(self, Anchor.attrib):
return None
if any or self.anchor.always_dump:
return self.anchor
return None
def yaml_set_anchor(self, value, always_dump=False):
# type: (Any, bool) -> None
self.anchor.value = value
self.anchor.always_dump = always_dump
class BinaryInt(ScalarInt):
def __new__(cls, value, width=None, underscore=None, anchor=None):
# type: (Any, Any, Any, Any) -> Any
return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
class OctalInt(ScalarInt):
def __new__(cls, value, width=None, underscore=None, anchor=None):
# type: (Any, Any, Any, Any) -> Any
return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
# mixed casing of A-F is not supported, when loading the first non digit
# determines the case
class HexInt(ScalarInt):
"""uses lower case (a-f)"""
def __new__(cls, value, width=None, underscore=None, anchor=None):
# type: (Any, Any, Any, Any) -> Any
return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
class HexCapsInt(ScalarInt):
"""uses upper case (A-F)"""
def __new__(cls, value, width=None, underscore=None, anchor=None):
# type: (Any, Any, Any, Any) -> Any
return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
class DecimalInt(ScalarInt):
"""needed if anchor"""
def __new__(cls, value, width=None, underscore=None, anchor=None):
# type: (Any, Any, Any, Any) -> Any
return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
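# Illustrative round-trip behaviour (a sketch; these instances are normally built by the
# ruamel round-trip loader rather than by hand):
#   x = ScalarInt(5, width=4)
#   x += 3               # __iadd__ returns a new ScalarInt(8) still carrying width=4
#   isinstance(x, int)   # True: ScalarInt subclasses no_limit_int (plain int on Python 3)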
| 33.977099 | 95 | 0.601887 |
9f0fea9c716565cd883bbca9fbb3b0d1e0a72301
| 1,123 |
py
|
Python
|
setup.py
|
liorcohen5/replay-monitor
|
086ec57ccdd446b56da92fb4e735186e9e639f15
|
[
"MIT"
] | null | null | null |
setup.py
|
liorcohen5/replay-monitor
|
086ec57ccdd446b56da92fb4e735186e9e639f15
|
[
"MIT"
] | null | null | null |
setup.py
|
liorcohen5/replay-monitor
|
086ec57ccdd446b56da92fb4e735186e9e639f15
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="replay-monitor", # Replace with your own username
version="0.0.5",
author="Leor Cohen",
author_email="liorcohen5@gmail.com",
description="A tool for easy data exploration in reinforcement learning environments.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/liorcohen5/replay-monitor",
download_url="https://github.com/liorcohen5/replay-monitor/archive/0.0.5.tar.gz",
keywords=['reinforcement learning', 'tool', 'data exploration', 'replay', 'monitor', 'analytical tool'],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=['gym', 'bokeh', 'tensorflow', 'numpy', 'tables'],
entry_points={
'console_scripts': ['replay-monitor=replay_monitor.server:start_server'],
},
python_requires='>=3.6',
)
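# Illustrative install (assumption: the standard setuptools/pip workflow):
#   pip install .      # or: pip install replay-monitor
#   replay-monitor     # console script registered via entry_points above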
| 40.107143 | 108 | 0.682992 |
7c4a7567c47a098a5987f328ba3ed4ed5a6f2bb5
| 2,685 |
py
|
Python
|
tests/random_expression.py
|
Dobatymo/quepy
|
d56a5ae1310ef7c3e6e292a03a84ef922f192788
|
[
"BSD-3-Clause"
] | null | null | null |
tests/random_expression.py
|
Dobatymo/quepy
|
d56a5ae1310ef7c3e6e292a03a84ef922f192788
|
[
"BSD-3-Clause"
] | null | null | null |
tests/random_expression.py
|
Dobatymo/quepy
|
d56a5ae1310ef7c3e6e292a03a84ef922f192788
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from builtins import chr
from future.utils import python_2_unicode_compatible
import random
from quepy.expression import Expression
def get_random_unichar():
# returns only encodable unicode chars
while True:
x = random.random()
if 0.1 > x:
c = random.choice(" ./\n")
elif 0.50 > x:
c = chr(random.randint(65, 122))
elif 0.85 > x:
c = chr(random.randint(0, 127))
else:
c = chr(random.randint(0, 65535))
try:
c.encode("utf-8")
return c
except UnicodeEncodeError:
pass
def random_data(only_ascii=False):
data = []
first = True
while first or 1 / 20 < random.random():
first = False
if only_ascii:
c = chr(random.randint(33, 126))
data.append(c)
else:
c = get_random_unichar()
data.append(c)
return "".join(data)
def random_relation(only_ascii=False):
data = random_data(only_ascii)
data = data.replace(" ", "")
if random.random() > 0.05:
return data
@python_2_unicode_compatible
class UnicodeableDummy(object):
def __str__(self):
return data
return UnicodeableDummy()
def random_expression(only_ascii=False):
"""
operations: new node, add data, decapitate, merge
"""
mean_size = 20
xs = [40, 30, 50, 20]
xs = [x * (1 - random.random()) for x in xs]
assert all(x != 0. for x in xs)
new_node, add_data, decapitate, _ = [x / sum(xs) for x in xs]
expressions = [Expression(), Expression(), Expression(), Expression()]
while len(expressions) != 1:
if (1 / mean_size) < random.random():
# Will start to merge more and will not create new nodes
new_node = 0.
# Choose action
r = random.random()
if r < new_node:
# New expression
expressions.append(Expression())
elif r < add_data + new_node:
# Add data
e = random.choice(expressions)
e.add_data(random_relation(only_ascii), random_data(only_ascii))
elif r < decapitate + add_data + new_node:
# Decapitate
e = random.choice(expressions)
e.decapitate(random_relation(only_ascii),
reverse=(0.25 < random.random()))
elif len(expressions) != 1:
# Merge
random.shuffle(expressions)
e2 = expressions.pop()
e1 = expressions[-1]
e1 += e2
return expressions[0]
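# Minimal usage sketch (assumption: added for illustration; seeding the module-level
# `random` makes the generated Expression reproducible for a quick smoke test):
if __name__ == "__main__":
    random.seed(0)
    expr = random_expression(only_ascii=True)
    print(type(expr).__name__)  # -> Expression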
| 28.56383 | 76 | 0.565736 |
a71189bbc184432881e09cba8270d1a57eece967
| 1,536 |
py
|
Python
|
exp/python_c3_class_mro/python_c3_mro_anler_run.py
|
nicolasessisbreton/fython
|
988f5a94cee8b16b0000501a22239195c73424a1
|
[
"Apache-2.0"
] | 41 |
2016-01-21T05:14:45.000Z
|
2021-11-24T20:37:21.000Z
|
exp/python_c3_class_mro/python_c3_mro_anler_run.py
|
nicolasessisbreton/fython
|
988f5a94cee8b16b0000501a22239195c73424a1
|
[
"Apache-2.0"
] | 5 |
2016-01-21T05:36:37.000Z
|
2016-08-22T19:26:51.000Z
|
exp/python_c3_class_mro/python_c3_mro_anler_run.py
|
nicolasessisbreton/fython
|
988f5a94cee8b16b0000501a22239195c73424a1
|
[
"Apache-2.0"
] | 3 |
2016-01-23T04:03:44.000Z
|
2016-08-21T15:58:38.000Z
|
import itertools
cls = 'd'
mro_lists = [ ['d', 'c'], ['c', 'b'], ['b', 'a'], ['a', 'o'] ]
# Make a copy so we don't change existing content
mro_lists = [list(mro_list[:]) for mro_list in mro_lists]
print(1, mro_lists)
# Start with an empty MRO; the class itself is the head of the first list and is added first
mro = []
print(2, cls)
# The real algorithm goes here
while True:
# Reset for the next round of tests
candidate_found = False
for mro_list in mro_lists:
print(3, mro_list)
if not len(mro_list):
# Any empty lists are of no use to the algorithm
continue
# Get the first item as a potential candidate for the MRO
candidate = mro_list[0]
if candidate_found:
# Candidates promoted to the MRO are no longer of use
if candidate in mro:
mro_list.pop(0)
# Don't bother checking any more candidates if one was found
continue
        # See if it's in any position other than first in any of the other lists
print(4, list(itertools.chain(*(x[1:] for x in mro_lists))))
if candidate in itertools.chain(*(x[1:] for x in mro_lists)):
# Isn't a valid candidate yet and we need to move on to the first class
# in the next list
continue
else:
# The candidate is valid and should be promoted to the MRO
mro.append(candidate)
mro_list.pop(0)
candidate_found = True
if not sum(len(mro_list) for mro_list in mro_lists):
# There are no MROs to cycle through, so we're all done
break
if not candidate_found:
# No valid candidate was available, so we have to bail out
raise TypeError("Inconsistent MRO")
print(mro)
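# Cross-check (assumption: the merge lists above encode the hierarchy D(C), C(B),
# B(A), A(O)); Python's built-in C3 linearization of the same classes should agree
# with the result printed above: ['d', 'c', 'b', 'a', 'o'].
class O: pass
class A(O): pass
class B(A): pass
class C(B): pass
class D(C): pass
print([k.__name__.lower() for k in D.__mro__ if k is not object])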
| 27.428571 | 74 | 0.690755 |
94ec6bb8a553e341c64bb05758629ca5d89785fd
| 1,397 |
py
|
Python
|
misc/python/materialize/feature_benchmark/executor.py
|
bobbyiliev/materialize
|
44e3bcae151179075232ad436ae72f5883361fd1
|
[
"MIT"
] | 1 |
2022-03-19T21:08:19.000Z
|
2022-03-19T21:08:19.000Z
|
misc/python/materialize/feature_benchmark/executor.py
|
bobbyiliev/materialize
|
44e3bcae151179075232ad436ae72f5883361fd1
|
[
"MIT"
] | 203 |
2022-01-04T00:16:23.000Z
|
2022-03-30T17:34:01.000Z
|
misc/python/materialize/feature_benchmark/executor.py
|
guswynn/materialize
|
f433173ed71f511d91311769ec58c2d427dd6c3b
|
[
"MIT"
] | null | null | null |
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from typing import Any, Callable, List
from materialize.mzcompose import Composition
class Executor:
def Lambda(self, _lambda: Callable[["Executor"], float]) -> float:
return _lambda(self)
class Docker(Executor):
def __init__(
self,
composition: Composition,
seed: int,
) -> None:
self._composition = composition
self._seed = seed
def RestartMz(self) -> None:
self._composition.kill("materialized")
self._composition.up("materialized")
return None
def Td(self, input: str) -> Any:
return self._composition.exec(
"testdrive",
"--no-reset",
f"--seed={self._seed}",
"--initial-backoff=10ms",
"--backoff-factor=0",
stdin=input,
capture=True,
).stdout
def Kgen(self, topic: str, args: List[str]) -> Any:
return self._composition.run(
"kgen", f"--topic=testdrive-{topic}-{self._seed}", *args
)
| 28.510204 | 70 | 0.621331 |
757c7481f4c6d15b2fe5ea28f08116ae8ebbd7dd
| 3,774 |
py
|
Python
|
ci/appveyor-download.py
|
metrasynth/sunvosc
|
65ccd1015d55e6e91e6ac3d33ca7677ce4fbeb15
|
[
"MIT"
] | 2 |
2017-02-27T06:05:50.000Z
|
2017-07-31T12:14:35.000Z
|
ci/appveyor-download.py
|
metrasynth/SunVOSC
|
65ccd1015d55e6e91e6ac3d33ca7677ce4fbeb15
|
[
"MIT"
] | 1 |
2016-10-22T17:37:05.000Z
|
2016-10-22T17:37:05.000Z
|
ci/appveyor-download.py
|
metrasynth/sunvosc
|
65ccd1015d55e6e91e6ac3d33ca7677ce4fbeb15
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Use the AppVeyor API to download Windows artifacts.
Taken from: https://bitbucket.org/ned/coveragepy/src/tip/ci/download_appveyor.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""
import argparse
import os
import zipfile
import requests
def make_auth_headers():
"""Make the authentication headers needed to use the Appveyor API."""
path = os.path.expanduser("~/.appveyor.token")
if not os.path.exists(path):
raise RuntimeError(
"Please create a file named `.appveyor.token` in your home directory. "
"You can get the token from https://ci.appveyor.com/api-token"
)
with open(path) as f:
token = f.read().strip()
headers = {
'Authorization': 'Bearer {}'.format(token),
}
return headers
def download_latest_artifacts(account_project, build_id):
"""Download all the artifacts from the latest build."""
if build_id is None:
url = "https://ci.appveyor.com/api/projects/{}".format(account_project)
else:
url = "https://ci.appveyor.com/api/projects/{}/build/{}".format(account_project, build_id)
build = requests.get(url, headers=make_auth_headers()).json()
jobs = build['build']['jobs']
print(u"Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs)))
for job in jobs:
name = job['name']
print(u" {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job))
url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts".format(job['jobId'])
response = requests.get(url, headers=make_auth_headers())
artifacts = response.json()
for artifact in artifacts:
is_zip = artifact['type'] == "Zip"
filename = artifact['fileName']
print(u" {0}, {1} bytes".format(filename, artifact['size']))
url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts/{}".format(job['jobId'], filename)
download_url(url, filename, make_auth_headers())
if is_zip:
unpack_zipfile(filename)
os.remove(filename)
def ensure_dirs(filename):
"""Make sure the directories exist for `filename`."""
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
def download_url(url, filename, headers):
"""Download a file from `url` to `filename`."""
ensure_dirs(filename)
response = requests.get(url, headers=headers, stream=True)
if response.status_code == 200:
with open(filename, 'wb') as f:
for chunk in response.iter_content(16 * 1024):
f.write(chunk)
else:
print(u" Error downloading {}: {}".format(url, response))
def unpack_zipfile(filename):
"""Unpack a zipfile, using the names in the zip."""
with open(filename, 'rb') as fzip:
z = zipfile.ZipFile(fzip)
for name in z.namelist():
print(u" extracting {}".format(name))
ensure_dirs(name)
z.extract(name)
parser = argparse.ArgumentParser(description='Download artifacts from AppVeyor.')
parser.add_argument('--id',
metavar='PROJECT_ID',
default='metrasynth/sunvosc',
help='Project ID in AppVeyor.')
parser.add_argument('build',
nargs='?',
metavar='BUILD_ID',
help='Build ID in AppVeyor. Eg: master-123')
if __name__ == "__main__":
# import logging
# logging.basicConfig(level="DEBUG")
args = parser.parse_args()
download_latest_artifacts(args.id, args.build)
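# Example invocation (assumption: a valid API token is stored in ~/.appveyor.token):
#   python appveyor-download.py --id metrasynth/sunvosc master-123
# downloads every artifact of that build, unzipping (and then deleting) any Zip archives.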
| 34.944444 | 104 | 0.622682 |
a26eb550098198378abf34ad7d980227fc486a8b
| 2,332 |
py
|
Python
|
pythonprog/code/13/13.2/table.py
|
davidyshuang/python3
|
5200638f6c4450998d97a9ac24bcba0872786fb2
|
[
"Unlicense"
] | null | null | null |
pythonprog/code/13/13.2/table.py
|
davidyshuang/python3
|
5200638f6c4450998d97a9ac24bcba0872786fb2
|
[
"Unlicense"
] | null | null | null |
pythonprog/code/13/13.2/table.py
|
davidyshuang/python3
|
5200638f6c4450998d97a9ac24bcba0872786fb2
|
[
"Unlicense"
] | null | null | null |
# table.py
import sys
from abc import ABCMeta, abstractmethod
def print_table(objects, colnames, formatter):
'''
Make a nicely formatted table showing attributes from a list of objects
'''
if not isinstance(formatter, TableFormatter):
raise TypeError('formatter must be a TableFormatter')
formatter.headings(colnames)
for obj in objects:
rowdata = [str(getattr(obj, colname)) for colname in colnames ]
formatter.row(rowdata)
_formatters = { }
class TableMeta(ABCMeta):
def __init__(cls, clsname, bases, methods):
super().__init__(clsname, bases, methods)
if hasattr(cls, 'name'):
_formatters[cls.name] = cls
class TableFormatter(metaclass=TableMeta):
def __init__(self, outfile=None):
if outfile == None:
outfile = sys.stdout
self.outfile = outfile
# Serves a design spec for making tables (use inheritance to customize)
@abstractmethod
def headings(self, headers):
pass
@abstractmethod
def row(self, rowdata):
pass
class TextTableFormatter(TableFormatter):
name = 'text'
def __init__(self, outfile=None, width=10):
super().__init__(outfile) # Initialize parent
self.width = width
def headings(self, headers):
for header in headers:
print('{:>{}s}'.format(header, self.width), end=' ', file=self.outfile)
print(file=self.outfile)
def row(self, rowdata):
for item in rowdata:
print('{:>{}s}'.format(item, self.width), end=' ', file=self.outfile)
print(file=self.outfile)
class CSVTableFormatter(TableFormatter):
name = 'csv'
def headings(self, headers):
print(','.join(headers))
def row(self, rowdata):
print(','.join(rowdata))
class HTMLTableFormatter(TableFormatter):
name = 'html'
def headings(self, headers):
print('<tr>', end='')
for h in headers:
print('<th>{}</th>'.format(h), end='')
print('</tr>')
def row(self, rowdata):
print('<tr>', end='')
for d in rowdata:
print('<td>{}</td>'.format(d), end='')
print('</tr>')
class QuotedMixin(object):
def row(self, rowdata):
quoted = [ '"{}"'.format(d) for d in rowdata ]
super().row(quoted)
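# Minimal usage sketch (assumption: Stock and the sample rows are invented for
# illustration; every formatter subclass is registered in _formatters by TableMeta
# under its `name` attribute and can be looked up by that string):
if __name__ == '__main__':
    from collections import namedtuple
    Stock = namedtuple('Stock', ['name', 'shares', 'price'])
    portfolio = [Stock('AA', 100, 32.2), Stock('IBM', 50, 91.1)]
    formatter = _formatters['csv']()  # CSVTableFormatter, found via the registry
    print_table(portfolio, ['name', 'shares', 'price'], formatter)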
| 28.096386 | 83 | 0.607204 |
6cd0c96185ad106f0b0f0bc9cddd05e459c2d191
| 914 |
py
|
Python
|
Libraries/matrix.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | 2 |
2022-01-22T07:56:58.000Z
|
2022-01-24T00:29:37.000Z
|
Libraries/matrix.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
Libraries/matrix.py
|
tonko2/AtCoder
|
5d617072517881d226d7c8af09cb88684d41af7e
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
from copy import deepcopy
MOD = 1000000000
# 2x2 matrix product A * B
def multiply(A, B):
global MOD
C = [[0, 0], [0, 0]]
for i in range(2):
for j in range(2):
for k in range(2):
C[i][j] += A[i][k] * B[k][j]
C[i][j] %= MOD
return C
# A raised to the n-th power
def power(A, n):
P = deepcopy(A)
Q = [[0, 0], [0, 0]]
flag = False
for i in range(60):
if n & 1 << i != 0:
if flag == False:
Q = deepcopy(P)
flag = True
else:
Q = deepcopy(multiply(Q, P))
P = deepcopy(multiply(P, P))
return Q
def main():
    # Read input and compute the matrix power (note: only works correctly for N >= 2)
N = int(input())
A = [[1, 1], [1, 0]]
B = power(A, N - 1)
    # Compute and print the answer (note: when the 9th digit from the bottom is 0, the result is printed without that leading zero)
answer = (B[1][0] + B[1][1]) % MOD
print(answer)
if __name__ == '__main__':
main()
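# Worked example (assumption: added for illustration, not part of the original script):
# for A = [[1, 1], [1, 0]], power(A, N - 1) = [[F(N), F(N-1)], [F(N-1), F(N-2)]], so
# B[1][0] + B[1][1] = F(N-1) + F(N-2) = F(N); e.g. N = 10 gives F(10) = 55.
def _fibonacci_example():
    B = power([[1, 1], [1, 0]], 9)
    return (B[1][0] + B[1][1]) % MOD  # 55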
| 21.255814 | 58 | 0.447484 |
dd5afae3f3e7fad9b3722ff3288cf2e11d34de11
| 5,683 |
py
|
Python
|
CPG_core/controllers/CPG_controller_snake_sin.py
|
Jerryxiaoyu/maml_rl_v2
|
6091f996ff1be8e80d80331e510087868461b8e6
|
[
"MIT"
] | null | null | null |
CPG_core/controllers/CPG_controller_snake_sin.py
|
Jerryxiaoyu/maml_rl_v2
|
6091f996ff1be8e80d80331e510087868461b8e6
|
[
"MIT"
] | null | null | null |
CPG_core/controllers/CPG_controller_snake_sin.py
|
Jerryxiaoyu/maml_rl_v2
|
6091f996ff1be8e80d80331e510087868461b8e6
|
[
"MIT"
] | null | null | null |
from CPG_core.CPG_Sin_osillator import sin_oscillator
from CPG_core.CPG_Sin_osillator import CPG_Sinneutron
import math
class CPG_network(object):
def __init__(self, CPG_node_num, position_vector):
kf = position_vector[0]
        self.CPG_node_num = CPG_node_num  # excluding the placemarker node
if len(position_vector) != self.CPG_node_num *3+1:
assert "Position vector out of range!"
GAIN,BIAS,PHASE = [],[],[]
for i in range(self.CPG_node_num):
GAIN.append(position_vector[i+1])
BIAS.append(position_vector[self.CPG_node_num+i+1])
PHASE.append(position_vector[2 * self.CPG_node_num+i+1])
parm_list = {
0: [0.0, 0.0, 0.0, 1.0, 0.0, 0],
}
for i in range(self.CPG_node_num):
parm ={i+1:[0.0, 0.0, 0.0, GAIN[i], BIAS[i], PHASE[i]]}
parm_list.update(parm)
#print(parm_list)
self.kf = position_vector[0]
self.num_CPG = len(parm_list)
self.CPG_list =[]
self.w_ms_list = [None, 1,1,1, 1, 1, 1, 1, 1, 1, ]
self.master_list = [None, 0,1,2,3,4,5,6,7,8 ]
for i in range(self.num_CPG):
if i == 0:
self.CPG_list.append(CPG_Sinneutron(0, master_nuron = None, param=parm_list[0] ,kf= self.kf, w_ms = 0))
else:
self.CPG_list.append(CPG_Sinneutron(i, master_nuron=self.CPG_list[self.master_list[i]],
param=parm_list[i], kf=self.kf, w_ms= self.w_ms_list[i]))
def output(self, state):
output_list = []
for cpg_n in self.CPG_list:
cpg_n.next_output(f1=0, f2=0)
output_list.append(cpg_n.parm['o'])
return output_list
# Example (kept as a comment; the constructor takes the node count first, and the
# hard-coded master/weight tables above support at most 9 driven nodes):
# import numpy as np
# position_vector = np.zeros(3 * 9 + 1)
# position_vector[0] = 1
# for i in range(1, 10):
#     position_vector[i] = 1
# CPG_network(9, position_vector)
class CPG_network5(object):
def __init__(self, CPG_node_num, position_vector):
kf = position_vector[0]
        self.CPG_node_num = CPG_node_num  # excluding the placemarker node
if len(position_vector) != self.CPG_node_num * 4 + 1:
assert "Position vector out of range!"
GAIN, BIAS, PHASE , WEIGHT= [], [], [], []
for i in range(self.CPG_node_num):
GAIN.append(position_vector[i + 1])
BIAS.append(position_vector[self.CPG_node_num + i + 1])
PHASE.append(position_vector[2 * self.CPG_node_num + i + 1])
WEIGHT.append(position_vector[3 * self.CPG_node_num + i + 1])
parm_list = {
0: [0.0, 0.0, 0.0, 1.0, 0.0, 0],
}
for i in range(self.CPG_node_num):
parm = {i + 1: [0.0, 0.0, 0.0, GAIN[i], BIAS[i], PHASE[i]]}
parm_list.update(parm)
# print(parm_list)
self.kf = position_vector[0]
self.num_CPG = len(parm_list)
self.CPG_list = []
self.w_ms_list = [None, WEIGHT[0], WEIGHT[1], WEIGHT[2], WEIGHT[3], WEIGHT[4], WEIGHT[5], WEIGHT[6], WEIGHT[7], WEIGHT[8], ]
self.master_list = [None, 0, 1, 2, 3, 4, 5, 6, 7, 8]
for i in range(self.num_CPG):
if i == 0:
self.CPG_list.append(CPG_Sinneutron(0, master_nuron=None, param=parm_list[0], kf=self.kf, w_ms=0))
else:
self.CPG_list.append(CPG_Sinneutron(i, master_nuron=self.CPG_list[self.master_list[i]],
param=parm_list[i], kf=self.kf, w_ms=self.w_ms_list[i]))
def output(self, state):
output_list = []
for cpg_n in self.CPG_list:
cpg_n.next_output(f1=0, f2=0)
output_list.append(cpg_n.parm['o'])
return output_list
class CPG_network_2gb(object):
def __init__(self, CPG_node_num, position_vector):
kf = position_vector[0]
N = 2
        self.CPG_node_num = CPG_node_num  # excluding the placemarker node
if len(position_vector) != self.CPG_node_num * 2 + 1:
assert "Position vector out of range!"
GAIN, BIAS, PHASE = [], [], []
for i in range(self.CPG_node_num):
GAIN.append(position_vector[i + 1])
BIAS.append(position_vector[self.CPG_node_num + i + 1])
PHASE.append(2*math.pi*N/float(self.CPG_node_num))
parm_list = {
0: [0.0, 0.0, 0.0, 1.0, 0.0, 0],
}
for i in range(self.CPG_node_num):
parm = {i + 1: [0.0, 0.0, 0.0, GAIN[i], BIAS[i], PHASE[i]]}
parm_list.update(parm)
# print(parm_list)
self.kf = position_vector[0]
self.num_CPG = len(parm_list)
self.CPG_list = []
self.w_ms_list = [None, 1, 1, 1, 1, 1, 1, 1, 1, 1, ]
self.master_list = [None, 0, 1, 2, 3, 4, 5, 6, 7, 8]
for i in range(self.num_CPG):
if i == 0:
self.CPG_list.append(CPG_Sinneutron(0, master_nuron=None, param=parm_list[0], kf=self.kf, w_ms=0))
else:
self.CPG_list.append(CPG_Sinneutron(i, master_nuron=self.CPG_list[self.master_list[i]],
param=parm_list[i], kf=self.kf, w_ms=self.w_ms_list[i]))
def output(self, state):
output_list = []
for cpg_n in self.CPG_list:
cpg_n.next_output(f1=0, f2=0)
output_list.append(cpg_n.parm['o'])
return output_list
| 36.197452 | 132 | 0.535633 |
892abbd826ef8c71aafc1bac57eb09f0cf32c912
| 12,691 |
py
|
Python
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/tokenization_xlm_roberta.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 96 |
2021-06-16T09:06:52.000Z
|
2022-03-26T09:56:32.000Z
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/tokenization_xlm_roberta.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 16 |
2021-07-01T05:34:48.000Z
|
2022-03-28T09:40:15.000Z
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/tokenization_xlm_roberta.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 24 |
2021-06-19T15:58:31.000Z
|
2022-03-14T09:17:19.000Z
|
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for XLM-RoBERTa model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from .tokenization_utils import PreTrainedTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-spanish": "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll03-english": "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll03-german": "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
"""
    Adapted from :class:`~transformers.RobertaTokenizer` and :class:`~transformers.XLNetTokenizer`. Based on
`SentencePiece <https://github.com/google/sentencepiece>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every
conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
**kwargs
):
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.fairseq_offset = 1
self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM-RoBERTa sequence has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s></s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
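    # Worked example of the layouts above (assumption: illustrative ids, using the
    # fairseq alignment from __init__ where "<s>" = 0 and "</s>" = 2):
    #   build_inputs_with_special_tokens([10, 11])        -> [0, 10, 11, 2]
    #   build_inputs_with_special_tokens([10, 11], [12])  -> [0, 10, 11, 2, 2, 12, 2]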
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
return self.sp_model.EncodeAsPieces(text)
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
| 44.529825 | 161 | 0.635332 |
a4edb60df9d6b652f3452c195cf7213519056769
| 4,672 |
py
|
Python
|
tests/bugs/core_4484_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_4484_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_4484_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
#coding:utf-8
#
# id: bugs.core_4484
# title: Description (COMMENT ON) for package procedures and functions, and its parameters
# description: Test verifies ability to store comments and also to encode them in UTF8
# tracker_id: CORE-4484
# min_versions: ['3.0']
# versions: 3.0
# qmid:
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = [('TEXT_BLOB.*', '')]
init_script_1 = """"""
db_1 = db_factory(charset='UTF8', sql_dialect=3, init=init_script_1)
test_script_1 = """
set term ^;
create or alter package pg_test
as
begin
procedure sp_test(i_x int) returns (o_z int);
function fn_test(i_x int) returns int;
end
^
recreate package body pg_test
as
begin
procedure sp_test(i_x int) returns (o_z int) as
begin
o_z = i_x * 2;
end
function fn_test(i_x int) returns int as
begin
return i_x * 2;
end
end
^
set term ;^
commit;
comment on package pg_test is 'MITÄ TÄMÄN';
comment on procedure pg_test.sp_test is 'ÁÉÍÓÚÝ';
comment on function pg_test.fn_test is 'ÂÊÎÔÛ';
comment on procedure parameter pg_test.sp_test.i_x is 'ÃÑÕ ÄËÏÖÜŸ';
comment on procedure parameter pg_test.sp_test.o_z is 'ÇŠ ΔΘΛΞΣΨΩ';
comment on function parameter pg_test.fn_test.i_x is 'ĄĘŁŹŻ ЙЁ ЊЋЏ ĂŞŢ';
commit;
set list on;
set blob all;
select 'package itself' descr_for_what,rp.rdb$package_name obj_name, rp.rdb$description text_blob
from rdb$packages rp
where rp.rdb$package_name=upper('pg_test')
union all
select *
from (
select 'package proc' descr_for_what, pp.rdb$procedure_name, pp.rdb$description
from rdb$procedures pp
where pp.rdb$package_name=upper('pg_test')
order by pp.rdb$procedure_name
)
union all
select *
from (
select 'package func' descr_for_what, pf.rdb$function_name, pf.rdb$description
from rdb$functions pf
where pf.rdb$package_name = upper('pg_test')
order by pf.rdb$function_name
)
union all
select 'package proc pars' descr_for_what, p_name, p_memo
from (
select p2.rdb$parameter_name p_name, p2.rdb$description p_memo
from rdb$procedure_parameters p2
where p2.rdb$package_name = upper('pg_test')
order by p2.rdb$parameter_name
)
union all
select 'package func args', a_name, f_memo
from (
select f2.rdb$argument_name a_name, f2.rdb$description f_memo
from rdb$function_arguments f2
where f2.rdb$package_name = upper('pg_test') and f2.rdb$argument_name is not null
order by f2.rdb$argument_name
)
;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
DESCR_FOR_WHAT package itself
OBJ_NAME PG_TEST
TEXT_BLOB 0:3
MITÄ TÄMÄN
DESCR_FOR_WHAT package proc
OBJ_NAME SP_TEST
TEXT_BLOB 0:6
ÁÉÍÓÚÝ
DESCR_FOR_WHAT package func
OBJ_NAME FN_TEST
TEXT_BLOB 0:9
ÂÊÎÔÛ
DESCR_FOR_WHAT package proc pars
OBJ_NAME I_X
TEXT_BLOB 0:c
ÃÑÕ ÄËÏÖÜŸ
DESCR_FOR_WHAT package proc pars
OBJ_NAME O_Z
TEXT_BLOB 0:f
ÇŠ ΔΘΛΞΣΨΩ
DESCR_FOR_WHAT package func args
OBJ_NAME I_X
TEXT_BLOB 0:12
ĄĘŁŹŻ ЙЁ ЊЋЏ ĂŞŢ
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| 31.782313 | 157 | 0.514769 |
fe144ee6367fc8208f408c8ed22d2e081abcda47
| 1,039 |
py
|
Python
|
leetcode/054-Spiral-Matrix/SpiralMatrix_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/054-Spiral-Matrix/SpiralMatrix_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/054-Spiral-Matrix/SpiralMatrix_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
# @param {integer[][]} matrix
# @return {integer[]}
def spiralOrder(self, matrix):
if len(matrix) == 0 or len(matrix[0]) == 0:
return []
res = []
top, bottom = 0, len(matrix) - 1
left, right = 0, len(matrix[0]) - 1
while left < right and top < bottom:
for j in range(left, right + 1):
res.append(matrix[top][j])
for i in range(top + 1, bottom):
res.append(matrix[i][right])
for j in range(right, left - 1, -1):
res.append(matrix[bottom][j])
for i in range(bottom - 1, top, -1):
res.append(matrix[i][left])
left += 1
right -= 1
top += 1
bottom -= 1
if top == bottom:
for j in range(left, right + 1):
res.append(matrix[top][j])
elif left == right:
for i in range(top, bottom + 1):
res.append(matrix[i][left])
return res
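# Quick check (assumption: sample matrix added for illustration):
if __name__ == '__main__':
    # Expected clockwise spiral order: [1, 2, 3, 6, 9, 8, 7, 4, 5]
    print(Solution().spiralOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))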
| 33.516129 | 51 | 0.455245 |
4ae91a99e63fd655034f5d722e78c72c3992e260
| 894 |
py
|
Python
|
ultron8/api/resource.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ultron8/api/resource.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | 43 |
2019-06-01T23:08:32.000Z
|
2022-02-07T22:24:53.000Z
|
ultron8/api/resource.py
|
bossjones/ultron8
|
45db73d32542a844570d44bc83defa935e15803f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
Web Services Interface used by command-line clients and web frontend to
view current state, event history and send commands to trond.
"""
import collections
import datetime
import logging
import ujson as json
log = logging.getLogger(__name__)
class LogAdapter(object):
def __init__(self, logger):
self.logger = logger
def write(self, line):
self.logger.info(line.rstrip(b"\n"))
def close(self):
pass
def UltronSite():
"""Web server"""
class JSONEncoder(json.JSONEncoder):
"""Custom JSON for certain objects"""
def default(self, o):
if isinstance(o, datetime.datetime):
return o.strftime("%Y-%m-%d %H:%M:%S")
if isinstance(o, datetime.date):
return o.isoformat()
if isinstance(o, collections.KeysView):
return list(o)
return super(JSONEncoder, self).default(o)
| 20.790698 | 71 | 0.645414 |
9cc282306f9cf31c6751620ab290c5227ba792d2
| 1,186 |
py
|
Python
|
pyMacID.py
|
pudquick/pyMacID
|
d55a18fc30398813d9ed22411c573b80d1ffc3b0
|
[
"MIT",
"Unlicense"
] | 1 |
2021-04-16T12:09:29.000Z
|
2021-04-16T12:09:29.000Z
|
pyMacID.py
|
pudquick/pyMacID
|
d55a18fc30398813d9ed22411c573b80d1ffc3b0
|
[
"MIT",
"Unlicense"
] | null | null | null |
pyMacID.py
|
pudquick/pyMacID
|
d55a18fc30398813d9ed22411c573b80d1ffc3b0
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Get the Computer Name from System Preferences -> Sharing
from SystemConfiguration import SCDynamicStoreCopyComputerName
try:
computer_name = SCDynamicStoreCopyComputerName(None, None)[0].encode('utf-8')
except:
computer_name = None
# Get the Bonjour .local name
from socket import gethostname
try:
bonjour_name = gethostname()
except:
bonjour_name = None
# Get detailed hardware information
from subprocess import Popen, PIPE
from plistlib import readPlistFromString
system_profiler_xml = Popen(["/usr/sbin/system_profiler", "-xml", "SPHardwareDataType", "SPNetworkDataType"], stdout= PIPE).communicate()[0]
cpu_info, net_info = readPlistFromString(system_profiler_xml)
# System serial number
mac_serial = cpu_info._items[0].serial_number
# Machine model
mac_model = cpu_info._items[0].machine_model
# Interface names (in ifconfig), excluding localhost
net_interface_names = [interface.interface for interface in net_info._items]
# Interface names (in System Preferences -> Network)
net_interface_snames = [interface._name for interface in net_info._items]
# IPv4 addresses
net_interface_ipv4s = [interface.IPv4.Addresses[0] for interface in net_info._items]
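# Example of how the collected lists line up (assumption: illustrative only; as above,
# every listed service is expected to report at least one IPv4 address):
# for sname, name, ip in zip(net_interface_snames, net_interface_names, net_interface_ipv4s):
#     print("{} ({}): {}".format(sname, name, ip))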
| 32.054054 | 140 | 0.79511 |
ef21e0efeaca211c3e46a7442f24a21bff6835a0
| 2,788 |
py
|
Python
|
CIM15/IEC61970/Informative/InfGMLSupport/GmlFeatureType.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 58 |
2015-04-22T10:41:03.000Z
|
2022-03-29T16:04:34.000Z
|
CIM15/IEC61970/Informative/InfGMLSupport/GmlFeatureType.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 12 |
2015-08-26T03:57:23.000Z
|
2020-12-11T20:14:42.000Z
|
CIM15/IEC61970/Informative/InfGMLSupport/GmlFeatureType.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 35 |
2015-01-10T12:21:03.000Z
|
2020-09-09T08:18:16.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class GmlFeatureType(IdentifiedObject):
"""Type classification of feature.Type classification of feature.
"""
def __init__(self, GmlFeatureStyles=None, *args, **kw_args):
"""Initialises a new 'GmlFeatureType' instance.
@param GmlFeatureStyles:
"""
self._GmlFeatureStyles = []
self.GmlFeatureStyles = [] if GmlFeatureStyles is None else GmlFeatureStyles
super(GmlFeatureType, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["GmlFeatureStyles"]
_many_refs = ["GmlFeatureStyles"]
def getGmlFeatureStyles(self):
return self._GmlFeatureStyles
def setGmlFeatureStyles(self, value):
for p in self._GmlFeatureStyles:
filtered = [q for q in p.GmlFeatureTypes if q != self]
            p._GmlFeatureTypes = filtered
for r in value:
if self not in r._GmlFeatureTypes:
r._GmlFeatureTypes.append(self)
self._GmlFeatureStyles = value
GmlFeatureStyles = property(getGmlFeatureStyles, setGmlFeatureStyles)
def addGmlFeatureStyles(self, *GmlFeatureStyles):
for obj in GmlFeatureStyles:
if self not in obj._GmlFeatureTypes:
obj._GmlFeatureTypes.append(self)
self._GmlFeatureStyles.append(obj)
def removeGmlFeatureStyles(self, *GmlFeatureStyles):
for obj in GmlFeatureStyles:
if self in obj._GmlFeatureTypes:
obj._GmlFeatureTypes.remove(self)
self._GmlFeatureStyles.remove(obj)
| 39.267606 | 84 | 0.708034 |
36fe3b35dfaf120d17288c8dc1d14a96a1fb9707
| 37,111 |
py
|
Python
|
libcxx/utils/generate_feature_test_macro_components.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
libcxx/utils/generate_feature_test_macro_components.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
libcxx/utils/generate_feature_test_macro_components.py
|
keryell/llvm-2
|
4dc23a26d1bd6ced23969c0525dedbddf8c6fddc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
from builtins import range
from functools import reduce
def get_libcxx_paths():
utils_path = os.path.dirname(os.path.abspath(__file__))
script_name = os.path.basename(__file__)
assert os.path.exists(utils_path)
src_root = os.path.dirname(utils_path)
include_path = os.path.join(src_root, 'include')
assert os.path.exists(include_path)
docs_path = os.path.join(src_root, 'docs')
assert os.path.exists(docs_path)
macro_test_path = os.path.join(src_root, 'test', 'std', 'language.support',
'support.limits', 'support.limits.general')
assert os.path.exists(macro_test_path)
assert os.path.exists(os.path.join(macro_test_path, 'version.version.pass.cpp'))
return script_name, src_root, include_path, docs_path, macro_test_path
script_name, source_root, include_path, docs_path, macro_test_path = get_libcxx_paths()
def has_header(h):
h_path = os.path.join(include_path, h)
return os.path.exists(h_path)
def add_version_header(tc):
tc["headers"].append("version")
return tc
# ================ ============================================================
# Field Description
# ================ ============================================================
# name The name of the feature-test macro.
# values A dict whose keys are C++ versions and whose values are the
# value of the feature-test macro for that C++ version.
# (TODO: This isn't a very clean model for feature-test
# macros affected by multiple papers.)
# headers An array with the headers that should provide the
# feature-test macro.
# test_suite_guard An optional string field. When this field is provided,
# `libcxx_guard` must also be provided. This field is used
# only to generate the unit tests for the feature-test macros.
# It can't depend on macros defined in <__config> because the
# `test/std/` parts of the test suite are intended to be
# portable to any C++ standard library implementation, not
# just libc++. It may depend on
# * macros defined by the compiler itself, or
# * macros generated by CMake.
# In some cases we add
# `&& !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM_...)`
# in order to make libc++ pass the tests on OSX; see D94983.
# libcxx_guard An optional string field. When this field is provided,
# `test_suite_guard` must also be provided. This field is used
# only to guard the feature-test macro in <version>. It may
# be the same as `test_suite_guard`, or it may depend on
# macros defined in <__config>.
# unimplemented An optional Boolean field with the value `True`. This field
# is only used when a feature isn't fully implemented. Once
# you've fully implemented the feature, you should remove
# this field.
# ================ ============================================================
feature_test_macros = [ add_version_header(x) for x in [
{
"name": "__cpp_lib_addressof_constexpr",
"values": { "c++17": 201603 },
"headers": ["memory"],
"test_suite_guard": "TEST_HAS_BUILTIN(__builtin_addressof) || TEST_GCC_VER >= 700",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_BUILTIN_ADDRESSOF)",
}, {
"name": "__cpp_lib_allocator_traits_is_always_equal",
"values": { "c++17": 201411 },
"headers": ["deque", "forward_list", "list", "map", "memory", "scoped_allocator", "set", "string", "unordered_map", "unordered_set", "vector"],
}, {
"name": "__cpp_lib_any",
"values": { "c++17": 201606 },
"headers": ["any"],
}, {
"name": "__cpp_lib_apply",
"values": { "c++17": 201603 },
"headers": ["tuple"],
}, {
"name": "__cpp_lib_array_constexpr",
"values": { "c++17": 201603, "c++20": 201811 },
"headers": ["array", "iterator"],
}, {
"name": "__cpp_lib_as_const",
"values": { "c++17": 201510 },
"headers": ["utility"],
}, {
"name": "__cpp_lib_assume_aligned",
"values": { "c++20": 201811 },
"headers": ["memory"],
"unimplemented": True,
}, {
"name": "__cpp_lib_atomic_flag_test",
"values": { "c++20": 201907 },
"headers": ["atomic"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
}, {
"name": "__cpp_lib_atomic_float",
"values": { "c++20": 201711 },
"headers": ["atomic"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"unimplemented": True,
}, {
"name": "__cpp_lib_atomic_is_always_lock_free",
"values": { "c++17": 201603 },
"headers": ["atomic"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
}, {
"name": "__cpp_lib_atomic_lock_free_type_aliases",
"values": { "c++20": 201907 },
"headers": ["atomic"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
}, {
"name": "__cpp_lib_atomic_ref",
"values": { "c++20": 201806 },
"headers": ["atomic"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"unimplemented": True,
}, {
"name": "__cpp_lib_atomic_shared_ptr",
"values": { "c++20": 201711 },
"headers": ["atomic"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"unimplemented": True,
}, {
"name": "__cpp_lib_atomic_value_initialization",
"values": { "c++20": 201911 },
"headers": ["atomic", "memory"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"unimplemented": True,
}, {
"name": "__cpp_lib_atomic_wait",
"values": { "c++20": 201907 },
"headers": ["atomic"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_atomic_wait)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_atomic_wait)",
}, {
"name": "__cpp_lib_barrier",
"values": { "c++20": 201907 },
"headers": ["barrier"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_barrier)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_barrier)",
}, {
"name": "__cpp_lib_bind_front",
"values": { "c++20": 201907 },
"headers": ["functional"],
}, {
"name": "__cpp_lib_bit_cast",
"values": { "c++20": 201806 },
"headers": ["bit"],
"unimplemented": True,
}, {
"name": "__cpp_lib_bitops",
"values": { "c++20": 201907 },
"headers": ["bit"],
"unimplemented": True,
}, {
"name": "__cpp_lib_bool_constant",
"values": { "c++17": 201505 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_bounded_array_traits",
"values": { "c++20": 201902 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_boyer_moore_searcher",
"values": { "c++17": 201603 },
"headers": ["functional"],
"unimplemented": True,
}, {
"name": "__cpp_lib_byte",
"values": { "c++17": 201603 },
"headers": ["cstddef"],
}, {
"name": "__cpp_lib_char8_t",
"values": { "c++20": 201811 },
"headers": ["atomic", "filesystem", "istream", "limits", "locale", "ostream", "string", "string_view"],
"test_suite_guard": "defined(__cpp_char8_t)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_CHAR8_T)",
}, {
"name": "__cpp_lib_chrono",
"values": { "c++17": 201611 },
"headers": ["chrono"],
}, {
"name": "__cpp_lib_chrono_udls",
"values": { "c++14": 201304 },
"headers": ["chrono"],
}, {
"name": "__cpp_lib_clamp",
"values": { "c++17": 201603 },
"headers": ["algorithm"],
}, {
"name": "__cpp_lib_complex_udls",
"values": { "c++14": 201309 },
"headers": ["complex"],
}, {
"name": "__cpp_lib_concepts",
"values": { "c++20": 202002 },
"headers": ["concepts"],
}, {
"name": "__cpp_lib_constexpr_algorithms",
"values": { "c++20": 201806 },
"headers": ["algorithm"],
}, {
"name": "__cpp_lib_constexpr_complex",
"values": { "c++20": 201711 },
"headers": ["complex"],
"unimplemented": True,
}, {
"name": "__cpp_lib_constexpr_dynamic_alloc",
"values": { "c++20": 201907 },
"headers": ["memory"],
}, {
"name": "__cpp_lib_constexpr_functional",
"values": { "c++20": 201907 },
"headers": ["functional"],
}, {
"name": "__cpp_lib_constexpr_iterator",
"values": { "c++20": 201811 },
"headers": ["iterator"],
}, {
"name": "__cpp_lib_constexpr_memory",
"values": { "c++20": 201811 },
"headers": ["memory"],
}, {
"name": "__cpp_lib_constexpr_numeric",
"values": { "c++20": 201911 },
"headers": ["numeric"],
}, {
"name": "__cpp_lib_constexpr_string",
"values": { "c++20": 201811 }, # because P1032R1 is implemented; but should become 201907 after P0980R1
"headers": ["string"],
}, {
"name": "__cpp_lib_constexpr_string_view",
"values": { "c++20": 201811 },
"headers": ["string_view"],
}, {
"name": "__cpp_lib_constexpr_tuple",
"values": { "c++20": 201811 },
"headers": ["tuple"],
}, {
"name": "__cpp_lib_constexpr_utility",
"values": { "c++20": 201811 },
"headers": ["utility"],
}, {
"name": "__cpp_lib_constexpr_vector",
"values": { "c++20": 201907 },
"headers": ["vector"],
"unimplemented": True,
}, {
"name": "__cpp_lib_coroutine",
"values": { "c++20": 201902 },
"headers": ["coroutine"],
"unimplemented": True,
}, {
"name": "__cpp_lib_destroying_delete",
"values": { "c++20": 201806 },
"headers": ["new"],
"test_suite_guard": "TEST_STD_VER > 17 && defined(__cpp_impl_destroying_delete) && __cpp_impl_destroying_delete >= 201806L",
"libcxx_guard": "_LIBCPP_STD_VER > 17 && defined(__cpp_impl_destroying_delete) && __cpp_impl_destroying_delete >= 201806L",
}, {
"name": "__cpp_lib_enable_shared_from_this",
"values": { "c++17": 201603 },
"headers": ["memory"],
}, {
"name": "__cpp_lib_endian",
"values": { "c++20": 201907 },
"headers": ["bit"],
}, {
"name": "__cpp_lib_erase_if",
"values": { "c++20": 202002 },
"headers": ["deque", "forward_list", "list", "map", "set", "string", "unordered_map", "unordered_set", "vector"],
}, {
"name": "__cpp_lib_exchange_function",
"values": { "c++14": 201304 },
"headers": ["utility"],
}, {
"name": "__cpp_lib_execution",
"values": { "c++17": 201603, "c++20": 201902 },
"headers": ["execution"],
"unimplemented": True,
}, {
"name": "__cpp_lib_filesystem",
"values": { "c++17": 201703 },
"headers": ["filesystem"],
"test_suite_guard": "!defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_filesystem)",
"libcxx_guard": "!defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_filesystem)"
}, {
"name": "__cpp_lib_format",
"values": { "c++20": 201907 },
"headers": ["format"],
"unimplemented": True,
}, {
"name": "__cpp_lib_gcd_lcm",
"values": { "c++17": 201606 },
"headers": ["numeric"],
}, {
"name": "__cpp_lib_generic_associative_lookup",
"values": { "c++14": 201304 },
"headers": ["map", "set"],
}, {
"name": "__cpp_lib_generic_unordered_lookup",
"values": { "c++20": 201811 },
"headers": ["unordered_map", "unordered_set"],
}, {
"name": "__cpp_lib_hardware_interference_size",
"values": { "c++17": 201703 },
"headers": ["new"],
"unimplemented": True,
}, {
"name": "__cpp_lib_has_unique_object_representations",
"values": { "c++17": 201606 },
"headers": ["type_traits"],
"test_suite_guard": "TEST_HAS_BUILTIN_IDENTIFIER(__has_unique_object_representations) || TEST_GCC_VER >= 700",
"libcxx_guard": "defined(_LIBCPP_HAS_UNIQUE_OBJECT_REPRESENTATIONS)",
}, {
"name": "__cpp_lib_hypot",
"values": { "c++17": 201603 },
"headers": ["cmath"],
}, {
"name": "__cpp_lib_incomplete_container_elements",
"values": { "c++17": 201505 },
"headers": ["forward_list", "list", "vector"],
}, {
"name": "__cpp_lib_int_pow2",
"values": { "c++20": 202002 },
"headers": ["bit"],
}, {
"name": "__cpp_lib_integer_comparison_functions",
"values": { "c++20": 202002 },
"headers": ["utility"],
"test_suite_guard": "defined(__cpp_concepts) && __cpp_concepts >= 201907L",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_CONCEPTS)",
}, {
"name": "__cpp_lib_integer_sequence",
"values": { "c++14": 201304 },
"headers": ["utility"],
}, {
"name": "__cpp_lib_integral_constant_callable",
"values": { "c++14": 201304 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_interpolate",
"values": { "c++20": 201902 },
"headers": ["cmath", "numeric"],
}, {
"name": "__cpp_lib_invoke",
"values": { "c++17": 201411 },
"headers": ["functional"],
}, {
"name": "__cpp_lib_is_aggregate",
"values": { "c++17": 201703 },
"headers": ["type_traits"],
"test_suite_guard": "TEST_HAS_BUILTIN_IDENTIFIER(__is_aggregate) || TEST_GCC_VER_NEW >= 7001",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_IS_AGGREGATE)",
}, {
"name": "__cpp_lib_is_constant_evaluated",
"values": { "c++20": 201811 },
"headers": ["type_traits"],
"test_suite_guard": "TEST_HAS_BUILTIN(__builtin_is_constant_evaluated) || TEST_GCC_VER >= 900",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_BUILTIN_IS_CONSTANT_EVALUATED)",
}, {
"name": "__cpp_lib_is_final",
"values": { "c++14": 201402 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_is_invocable",
"values": { "c++17": 201703 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_is_layout_compatible",
"values": { "c++20": 201907 },
"headers": ["type_traits"],
"unimplemented": True,
}, {
"name": "__cpp_lib_is_nothrow_convertible",
"values": { "c++20": 201806 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_is_null_pointer",
"values": { "c++14": 201309 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_is_pointer_interconvertible",
"values": { "c++20": 201907 },
"headers": ["type_traits"],
"unimplemented": True,
}, {
"name": "__cpp_lib_is_scoped_enum",
"values": { "c++2b": 202011 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_is_swappable",
"values": { "c++17": 201603 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_jthread",
"values": { "c++20": 201911 },
"headers": ["stop_token", "thread"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS)",
"unimplemented": True,
}, {
"name": "__cpp_lib_latch",
"values": { "c++20": 201907 },
"headers": ["latch"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_latch)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_latch)",
}, {
"name": "__cpp_lib_launder",
"values": { "c++17": 201606 },
"headers": ["new"],
}, {
"name": "__cpp_lib_list_remove_return_type",
"values": { "c++20": 201806 },
"headers": ["forward_list", "list"],
}, {
"name": "__cpp_lib_logical_traits",
"values": { "c++17": 201510 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_make_from_tuple",
"values": { "c++17": 201606 },
"headers": ["tuple"],
}, {
"name": "__cpp_lib_make_reverse_iterator",
"values": { "c++14": 201402 },
"headers": ["iterator"],
}, {
"name": "__cpp_lib_make_unique",
"values": { "c++14": 201304 },
"headers": ["memory"],
}, {
"name": "__cpp_lib_map_try_emplace",
"values": { "c++17": 201411 },
"headers": ["map"],
}, {
"name": "__cpp_lib_math_constants",
"values": { "c++20": 201907 },
"headers": ["numbers"],
"test_suite_guard": "defined(__cpp_concepts) && __cpp_concepts >= 201907L",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_CONCEPTS)",
}, {
"name": "__cpp_lib_math_special_functions",
"values": { "c++17": 201603 },
"headers": ["cmath"],
"unimplemented": True,
}, {
"name": "__cpp_lib_memory_resource",
"values": { "c++17": 201603 },
"headers": ["memory_resource"],
"unimplemented": True,
}, {
"name": "__cpp_lib_node_extract",
"values": { "c++17": 201606 },
"headers": ["map", "set", "unordered_map", "unordered_set"],
}, {
"name": "__cpp_lib_nonmember_container_access",
"values": { "c++17": 201411 },
"headers": ["array", "deque", "forward_list", "iterator", "list", "map", "regex", "set", "string", "unordered_map", "unordered_set", "vector"],
}, {
"name": "__cpp_lib_not_fn",
"values": { "c++17": 201603 },
"headers": ["functional"],
}, {
"name": "__cpp_lib_null_iterators",
"values": { "c++14": 201304 },
"headers": ["iterator"],
}, {
"name": "__cpp_lib_optional",
"values": { "c++17": 201606 },
"headers": ["optional"],
}, {
"name": "__cpp_lib_parallel_algorithm",
"values": { "c++17": 201603 },
"headers": ["algorithm", "numeric"],
"unimplemented": True,
}, {
"name": "__cpp_lib_polymorphic_allocator",
"values": { "c++20": 201902 },
"headers": ["memory"],
"unimplemented": True,
}, {
"name": "__cpp_lib_quoted_string_io",
"values": { "c++14": 201304 },
"headers": ["iomanip"],
}, {
"name": "__cpp_lib_ranges",
"values": { "c++20": 201811 },
"headers": ["algorithm", "functional", "iterator", "memory", "ranges"],
"unimplemented": True,
}, {
"name": "__cpp_lib_raw_memory_algorithms",
"values": { "c++17": 201606 },
"headers": ["memory"],
}, {
"name": "__cpp_lib_remove_cvref",
"values": { "c++20": 201711 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_result_of_sfinae",
"values": { "c++14": 201210 },
"headers": ["functional", "type_traits"],
}, {
"name": "__cpp_lib_robust_nonmodifying_seq_ops",
"values": { "c++14": 201304 },
"headers": ["algorithm"],
}, {
"name": "__cpp_lib_sample",
"values": { "c++17": 201603 },
"headers": ["algorithm"],
}, {
"name": "__cpp_lib_scoped_lock",
"values": { "c++17": 201703 },
"headers": ["mutex"],
}, {
"name": "__cpp_lib_semaphore",
"values": { "c++20": 201907 },
"headers": ["semaphore"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_semaphore)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_semaphore)",
}, {
"name": "__cpp_lib_shared_mutex",
"values": { "c++17": 201505 },
"headers": ["shared_mutex"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex)",
}, {
"name": "__cpp_lib_shared_ptr_arrays",
"values": { "c++17": 201611 },
"headers": ["memory"],
}, {
"name": "__cpp_lib_shared_ptr_weak_type",
"values": { "c++17": 201606 },
"headers": ["memory"],
}, {
"name": "__cpp_lib_shared_timed_mutex",
"values": { "c++14": 201402 },
"headers": ["shared_mutex"],
"test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_timed_mutex)",
"libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_timed_mutex)",
}, {
"name": "__cpp_lib_shift",
"values": { "c++20": 201806 },
"headers": ["algorithm"],
}, {
"name": "__cpp_lib_smart_ptr_for_overwrite",
"values": { "c++20": 202002 },
"headers": ["memory"],
"unimplemented": True,
}, {
"name": "__cpp_lib_source_location",
"values": { "c++20": 201907 },
"headers": ["source_location"],
"unimplemented": True,
}, {
"name": "__cpp_lib_span",
"values": { "c++20": 202002 },
"headers": ["span"],
}, {
"name": "__cpp_lib_ssize",
"values": { "c++20": 201902 },
"headers": ["iterator"],
}, {
"name": "__cpp_lib_stacktrace",
"values": { "c++2b": 202011 },
"headers": ["stacktrace"],
"unimplemented": True,
}, {
"name": "__cpp_lib_starts_ends_with",
"values": { "c++20": 201711 },
"headers": ["string", "string_view"],
}, {
"name": "__cpp_lib_stdatomic_h",
"values": { "c++2b": 202011 },
"headers": ["stdatomic.h"],
"unimplemented": True,
}, {
"name": "__cpp_lib_string_contains",
"values": { "c++2b": 202011 },
"headers": ["string", "string_view"],
}, {
"name": "__cpp_lib_string_udls",
"values": { "c++14": 201304 },
"headers": ["string"],
}, {
"name": "__cpp_lib_string_view",
"values": { "c++17": 201606, "c++20": 201803 },
"headers": ["string", "string_view"],
}, {
"name": "__cpp_lib_syncbuf",
"values": { "c++20": 201803 },
"headers": ["syncstream"],
"unimplemented": True,
}, {
"name": "__cpp_lib_three_way_comparison",
"values": { "c++20": 201907 },
"headers": ["compare"],
"unimplemented": True,
}, {
"name": "__cpp_lib_to_address",
"values": { "c++20": 201711 },
"headers": ["memory"],
}, {
"name": "__cpp_lib_to_array",
"values": { "c++20": 201907 },
"headers": ["array"],
}, {
"name": "__cpp_lib_to_chars",
"values": { "c++17": 201611 },
"headers": ["utility"],
"unimplemented": True,
}, {
"name": "__cpp_lib_to_underlying",
"values": { "c++2b": 202102 },
"headers": ["utility"],
}, {
"name": "__cpp_lib_transformation_trait_aliases",
"values": { "c++14": 201304 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_transparent_operators",
"values": { "c++14": 201210, "c++17": 201510 },
"headers": ["functional", "memory"],
}, {
"name": "__cpp_lib_tuple_element_t",
"values": { "c++14": 201402 },
"headers": ["tuple"],
}, {
"name": "__cpp_lib_tuples_by_type",
"values": { "c++14": 201304 },
"headers": ["tuple", "utility"],
}, {
"name": "__cpp_lib_type_trait_variable_templates",
"values": { "c++17": 201510 },
"headers": ["type_traits"],
}, {
"name": "__cpp_lib_uncaught_exceptions",
"values": { "c++17": 201411 },
"headers": ["exception"],
}, {
"name": "__cpp_lib_unordered_map_try_emplace",
"values": { "c++17": 201411 },
"headers": ["unordered_map"],
}, {
"name": "__cpp_lib_unwrap_ref",
"values": { "c++20": 201811 },
"headers": ["functional"],
}, {
"name": "__cpp_lib_variant",
"values": { "c++17": 202102 },
"headers": ["variant"],
}, {
"name": "__cpp_lib_void_t",
"values": { "c++17": 201411 },
"headers": ["type_traits"],
}
]]
assert feature_test_macros == sorted(feature_test_macros, key=lambda tc: tc["name"])
assert all(tc["headers"] == sorted(tc["headers"]) for tc in feature_test_macros)
assert all(("libcxx_guard" in tc) == ("test_suite_guard" in tc) for tc in feature_test_macros)
assert all(all(key in ["name", "values", "headers", "libcxx_guard", "test_suite_guard", "unimplemented"] for key in tc.keys()) for tc in feature_test_macros)
# Map from each header to the Lit annotations that should be used for
# tests that include that header.
#
# For example, when threads are not supported, any feature-test-macro test
# that includes <thread> should be marked as UNSUPPORTED, because including
# <thread> is a hard error in that case.
lit_markup = {
"atomic": ["UNSUPPORTED: libcpp-has-no-threads"],
"barrier": ["UNSUPPORTED: libcpp-has-no-threads"],
"filesystem": ["UNSUPPORTED: libcpp-has-no-filesystem-library"],
"iomanip": ["UNSUPPORTED: libcpp-has-no-localization"],
"istream": ["UNSUPPORTED: libcpp-has-no-localization"],
"latch": ["UNSUPPORTED: libcpp-has-no-threads"],
"locale": ["UNSUPPORTED: libcpp-has-no-localization"],
"ostream": ["UNSUPPORTED: libcpp-has-no-localization"],
"regex": ["UNSUPPORTED: libcpp-has-no-localization"],
"semaphore": ["UNSUPPORTED: libcpp-has-no-threads"],
"shared_mutex": ["UNSUPPORTED: libcpp-has-no-threads"],
"thread": ["UNSUPPORTED: libcpp-has-no-threads"],
}
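# For instance, a generated <thread> test carries the single markup line
# "// UNSUPPORTED: libcpp-has-no-threads" near the top of the file (see
# produce_tests() below, which joins each tag with a "// " prefix).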
def get_std_dialects():
std_dialects = ['c++14', 'c++17', 'c++20', 'c++2b']
return list(std_dialects)
def get_first_std(d):
for s in get_std_dialects():
if s in d.keys():
return s
return None
def get_last_std(d):
rev_dialects = get_std_dialects()
rev_dialects.reverse()
for s in rev_dialects:
if s in d.keys():
return s
return None
def get_std_before(d, std):
std_dialects = get_std_dialects()
candidates = std_dialects[0:std_dialects.index(std)]
candidates.reverse()
for cand in candidates:
if cand in d.keys():
return cand
return None
def get_value_before(d, std):
new_std = get_std_before(d, std)
if new_std is None:
return None
return d[new_std]
def get_for_std(d, std):
# This catches the C++11 case for which there should be no defined feature
# test macros.
std_dialects = get_std_dialects()
if std not in std_dialects:
return None
# Find the value for the newest C++ dialect between C++14 and std
std_list = list(std_dialects[0:std_dialects.index(std)+1])
std_list.reverse()
for s in std_list:
if s in d.keys():
return d[s]
return None
def get_std_number(std):
return std.replace('c++', '')
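# Illustrative behaviour of the helpers above, e.g. with
# d = {"c++14": 201304, "c++17": 201510}:
#   get_first_std(d)             -> "c++14"
#   get_last_std(d)              -> "c++17"
#   get_value_before(d, "c++17") -> 201304
#   get_for_std(d, "c++20")      -> 201510  (newest dialect at or before c++20)
#   get_for_std(d, "c++11")      -> None    (no feature-test macros before C++14)
#   get_std_number("c++2b")      -> "2b"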
"""
Functions to produce the <version> header
"""
def produce_macros_definition_for_std(std):
result = ""
indent = 55
for tc in feature_test_macros:
if std not in tc["values"]:
continue
inner_indent = 1
if 'test_suite_guard' in tc.keys():
result += "# if %s\n" % tc["libcxx_guard"]
inner_indent += 2
if get_value_before(tc["values"], std) is not None:
assert 'test_suite_guard' not in tc.keys()
result += "# undef %s\n" % tc["name"]
line = "#%sdefine %s" % ((" " * inner_indent), tc["name"])
line += " " * (indent - len(line))
line += " %sL" % tc["values"][std]
if 'unimplemented' in tc.keys():
line = "// " + line
result += line
result += "\n"
if 'test_suite_guard' in tc.keys():
result += "# endif\n"
return result.strip()
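# As a rough sketch of the output for std == "c++17": an unguarded macro such as
# __cpp_lib_launder becomes a single padded line,
#   # define __cpp_lib_launder                      201606L
# while a guarded one such as __cpp_lib_shared_mutex is wrapped in its guard,
#   # if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex)
#   #   define __cpp_lib_shared_mutex               201505L
#   # endif
# (padding shown approximately; names are padded out to column 55).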
def produce_macros_definitions():
macro_definition_template = """#if _LIBCPP_STD_VER > {previous_std_number}
{macro_definition}
#endif"""
macros_definitions = []
previous_std_number = '11'
for std in get_std_dialects():
macros_definitions.append(
macro_definition_template.format(previous_std_number=previous_std_number,
macro_definition=produce_macros_definition_for_std(std)))
previous_std_number = get_std_number(std)
return '\n\n'.join(macros_definitions)
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
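# For example, list(chunks([1, 2, 3, 4, 5], 2)) gives [[1, 2], [3, 4], [5]].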
def produce_version_synopsis():
indent = 56
header_indent = 56 + len("20XXYYL ")
result = ""
def indent_to(s, val):
if len(s) >= val:
return s
s += " " * (val - len(s))
return s
line = indent_to("Macro name", indent) + "Value"
line = indent_to(line, header_indent) + "Headers"
result += line + "\n"
for tc in feature_test_macros:
prev_defined_std = get_last_std(tc["values"])
line = "{name: <{indent}}{value}L ".format(name=tc['name'], indent=indent,
value=tc["values"][prev_defined_std])
headers = list(tc["headers"])
headers.remove("version")
for chunk in chunks(headers, 3):
line = indent_to(line, header_indent)
chunk = ['<%s>' % header for header in chunk]
line += ' '.join(chunk)
result += line
result += "\n"
line = ""
while True:
prev_defined_std = get_std_before(tc["values"], prev_defined_std)
if prev_defined_std is None:
break
result += "%s%sL // %s\n" % (indent_to("", indent), tc["values"][prev_defined_std],
prev_defined_std.replace("c++", "C++"))
return result
def produce_version_header():
template="""// -*- C++ -*-
//===--------------------------- version ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCPP_VERSIONH
#define _LIBCPP_VERSIONH
/*
version synopsis
{synopsis}
*/
#include <__config>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
// clang-format off
{cxx_macros}
// clang-format on
#endif // _LIBCPP_VERSIONH
"""
version_str = template.format(
synopsis=produce_version_synopsis().strip(),
cxx_macros=produce_macros_definitions())
version_header_path = os.path.join(include_path, 'version')
with open(version_header_path, 'w', newline='\n') as f:
f.write(version_str)
"""
Functions to produce test files
"""
test_types = {
"undefined": """
# ifdef {name}
# error "{name} should not be defined before {std_first}"
# endif
""",
"test_suite_guard": """
# if {test_suite_guard}
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
# else
# ifdef {name}
# error "{name} should not be defined when {test_suite_guard} is not defined!"
# endif
# endif
""",
"unimplemented": """
# if !defined(_LIBCPP_VERSION)
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
# else // _LIBCPP_VERSION
# ifdef {name}
# error "{name} should not be defined because it is unimplemented in libc++!"
# endif
# endif
""",
"defined": """
# ifndef {name}
# error "{name} should be defined in {std}"
# endif
# if {name} != {value}
# error "{name} should have the value {value} in {std}"
# endif
"""
}
def generate_std_test(test_list, std):
result = ""
for tc in test_list:
val = get_for_std(tc["values"], std)
if val is not None:
val = "%sL" % val
if val is None:
result += test_types["undefined"].format(name=tc["name"], std_first=get_first_std(tc["values"]))
elif 'unimplemented' in tc.keys():
result += test_types["unimplemented"].format(name=tc["name"], value=val, std=std)
elif "test_suite_guard" in tc.keys():
result += test_types["test_suite_guard"].format(name=tc["name"], value=val, std=std, test_suite_guard=tc["test_suite_guard"])
else:
result += test_types["defined"].format(name=tc["name"], value=val, std=std)
return result.strip()
def generate_std_tests(test_list):
std_tests_template = """#if TEST_STD_VER < {first_std_number}
{pre_std_test}
{other_std_tests}
#elif TEST_STD_VER > {penultimate_std_number}
{last_std_test}
#endif // TEST_STD_VER > {penultimate_std_number}"""
std_dialects = get_std_dialects()
assert not get_std_number(std_dialects[-1]).isnumeric()
other_std_tests = []
for std in std_dialects[:-1]:
other_std_tests.append('#elif TEST_STD_VER == ' + get_std_number(std))
other_std_tests.append(generate_std_test(test_list, std))
std_tests = std_tests_template.format(first_std_number=get_std_number(std_dialects[0]),
pre_std_test=generate_std_test(test_list, 'c++11'),
other_std_tests='\n\n'.join(other_std_tests),
penultimate_std_number=get_std_number(std_dialects[-2]),
last_std_test=generate_std_test(test_list, std_dialects[-1]))
return std_tests
def generate_synopsis(test_list):
max_name_len = max([len(tc["name"]) for tc in test_list])
indent = max_name_len + 8
def mk_line(prefix, suffix):
        return "{prefix: <{max_len}}{suffix}\n".format(prefix=prefix, suffix=suffix,
                                                       max_len=indent)
result = ""
result += mk_line("/* Constant", "Value")
for tc in test_list:
prefix = " %s" % tc["name"]
for std in [s for s in get_std_dialects() if s in tc["values"].keys()]:
result += mk_line(prefix, "%sL [%s]" % (tc["values"][std], std.replace("c++", "C++")))
prefix = ""
result += "*/"
return result
def produce_tests():
headers = set([h for tc in feature_test_macros for h in tc["headers"]])
for h in headers:
test_list = [tc for tc in feature_test_macros if h in tc["headers"]]
if not has_header(h):
for tc in test_list:
assert 'unimplemented' in tc.keys()
continue
markup = '\n'.join('// ' + tag for tag in lit_markup.get(h, []))
test_body = \
"""//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// WARNING: This test was generated by {script_name}
// and should not be edited manually.
//
// clang-format off
{markup}
// <{header}>
// Test the feature test macros defined by <{header}>
{synopsis}
#include <{header}>
#include "test_macros.h"
{cxx_tests}
int main(int, char**) {{ return 0; }}
""".format(script_name=script_name,
header=h,
markup=('\n{}\n'.format(markup) if markup else ''),
synopsis=generate_synopsis(test_list),
cxx_tests=generate_std_tests(test_list))
test_name = "{header}.version.pass.cpp".format(header=h)
out_path = os.path.join(macro_test_path, test_name)
with open(out_path, 'w', newline='\n') as f:
f.write(test_body)
"""
Produce documentation for the feature test macros
"""
def make_widths(grid):
widths = []
for i in range(0, len(grid[0])):
cell_width = 2 + max(reduce(lambda x,y: x+y, [[len(row[i])] for row in grid], []))
widths += [cell_width]
return widths
def create_table(grid, indent):
indent_str = ' '*indent
col_widths = make_widths(grid)
result = [indent_str + add_divider(col_widths, 2)]
header_flag = 2
for row_i in range(0, len(grid)):
row = grid[row_i]
line = indent_str + ' '.join([pad_cell(row[i], col_widths[i]) for i in range(0, len(row))])
result.append(line.rstrip())
is_cxx_header = row[0].startswith('**')
if row_i == len(grid) - 1:
header_flag = 2
separator = indent_str + add_divider(col_widths, 1 if is_cxx_header else header_flag)
result.append(separator.rstrip())
header_flag = 0
return '\n'.join(result)
def add_divider(widths, header_flag):
if header_flag == 2:
return ' '.join(['='*w for w in widths])
if header_flag == 1:
return '-'.join(['-'*w for w in widths])
else:
return ' '.join(['-'*w for w in widths])
def pad_cell(s, length, left_align=True):
padding = ((length - len(s)) * ' ')
return s + padding
def get_status_table():
table = [["Macro Name", "Value"]]
for std in get_std_dialects():
table += [["**" + std.replace("c++", "C++ ") + "**", ""]]
for tc in feature_test_macros:
if std not in tc["values"].keys():
continue
value = "``%sL``" % tc["values"][std]
if 'unimplemented' in tc.keys():
value = '*unimplemented*'
table += [["``%s``" % tc["name"], value]]
return table
def produce_docs():
doc_str = """.. _FeatureTestMacroTable:
==========================
Feature Test Macro Support
==========================
.. contents::
:local:
Overview
========
This file documents the feature test macros currently supported by libc++.
.. _feature-status:
Status
======
.. table:: Current Status
:name: feature-status-table
:widths: auto
{status_tables}
""".format(status_tables=create_table(get_status_table(), 4))
table_doc_path = os.path.join(docs_path, 'FeatureTestMacroTable.rst')
with open(table_doc_path, 'w', newline='\n') as f:
f.write(doc_str)
def main():
produce_version_header()
produce_tests()
produce_docs()
if __name__ == '__main__':
main()
| 33.046305 | 157 | 0.600711 |
b21ff581e97c70c0597fce0531d0c489d93d54b7
| 13 |
py
|
Python
|
semiempy/integrals/__init__.py
|
amandadumi/semiempy
|
2f8ed7e9fb033772f2c34381de3c3faa3a05e3ab
|
[
"BSD-3-Clause"
] | 6 |
2019-01-14T15:50:10.000Z
|
2019-03-26T07:08:33.000Z
|
semiempy/integrals/__init__.py
|
amandadumi/semiempy
|
2f8ed7e9fb033772f2c34381de3c3faa3a05e3ab
|
[
"BSD-3-Clause"
] | 4 |
2019-01-14T15:45:08.000Z
|
2019-03-04T15:26:15.000Z
|
semiempy/integrals/__init__.py
|
amandadumi/semiempy
|
2f8ed7e9fb033772f2c34381de3c3faa3a05e3ab
|
[
"BSD-3-Clause"
] | 2 |
2019-01-14T15:23:44.000Z
|
2019-01-14T15:51:07.000Z
|
# Integrals
| 4.333333 | 11 | 0.692308 |
d251fe1da26e9b2d6bfd5387933a05b68272f3cc
| 924 |
py
|
Python
|
src/reformat-axiomata.py
|
mac389/sudo
|
470dc2506bfed51c166ba8c13c818261ece32d56
|
[
"MIT"
] | null | null | null |
src/reformat-axiomata.py
|
mac389/sudo
|
470dc2506bfed51c166ba8c13c818261ece32d56
|
[
"MIT"
] | null | null | null |
src/reformat-axiomata.py
|
mac389/sudo
|
470dc2506bfed51c166ba8c13c818261ece32d56
|
[
"MIT"
] | null | null | null |
import csv, os
from tqdm import tqdm
from pprint import pprint
filename = os.path.join('..','jamia-axiomata.csv')
contents = list(csv.DictReader(open(filename,'r'),delimiter='\t',skipinitialspace=True))
PATH = os.path.join('..','problog','could-create-mln-file')
translation_table = {item['original']:item['converted'] for item in contents}
amalgamated_axiomata = []
for name in tqdm(os.listdir(PATH),'Formatting axiomata'):
if os.path.isdir(os.path.join(PATH,name)):
axioms = open(os.path.join(PATH,name,'abstract.logic'),'r').read()
        for string in translation_table:
            if string in axioms:
                axioms = axioms.replace(string, translation_table[string])
with open(os.path.join(PATH,name,'formatted.logic'), 'w') as out:
out.write(axioms)
amalgamated_axiomata += [axioms]
with open(os.path.join('..','amalgamated.axiomata'),'w') as out:
out.write(''.join(amalgamated_axiomata))
| 36.96 | 88 | 0.718615 |
8f3b186461d7c89f3b467d6376307b510711df82
| 1,854 |
py
|
Python
|
WOTS.py
|
lothar1998/XMSS-tree
|
3e491d6d4b82de1934f7a9b405730ec1ac7d2e54
|
[
"MIT"
] | 1 |
2021-08-13T08:24:23.000Z
|
2021-08-13T08:24:23.000Z
|
WOTS.py
|
lothar1998/XMSS-tree
|
3e491d6d4b82de1934f7a9b405730ec1ac7d2e54
|
[
"MIT"
] | null | null | null |
WOTS.py
|
lothar1998/XMSS-tree
|
3e491d6d4b82de1934f7a9b405730ec1ac7d2e54
|
[
"MIT"
] | null | null | null |
from utils import *
def WOTS_genSK(length, n):
secret_key = [bytes()] * length
for i in range(length):
SEED = generate_random_value(length)
secret_key[i] = pseudorandom_function(SEED, n)
return secret_key
def WOTS_genPK(private_key: [bytes], length: int, w: int in {4, 16}, SEED, address):
public_key = [bytes()] * length
for i in range(length):
address.setChainAddress(i)
public_key[i] = chain(private_key[i], 0, w - 1, SEED, address, w)
return public_key
def WOTS_sign(message: bytes, private_key: [bytes], w: int in {4, 16}, SEED, address):
checksum = 0
n = len(message) // 2
len_1, len_2, len_all = compute_lengths(n, w)
msg = base_w(message, w, len_1)
for i in range(0, len_1):
checksum += w - 1 - msg[i]
checksum = checksum << int(8 - ((len_2 * log2(w)) % 8))
len_2_bytes = compute_needed_bytes(checksum)
msg.extend(base_w(to_byte(checksum, len_2_bytes), w, len_2))
signature = [bytes()] * len_all
for i in range(0, len_all):
address.setChainAddress(i)
signature[i] = chain(private_key[i], 0, msg[i], SEED, address, w)
return signature
def WOTS_pkFromSig(message: bytes, signature: [bytes], w: int in {4, 16}, address, SEED):
checksum = 0
n = len(message) // 2
len_1, len_2, len_all = compute_lengths(n, w)
msg = base_w(message, w, len_1)
for i in range(0, len_1):
checksum += w - 1 - msg[i]
checksum = checksum << int(8 - ((len_2 * log2(w)) % 8))
len_2_bytes = compute_needed_bytes(checksum)
msg.extend(base_w(to_byte(checksum, len_2_bytes), w, len_2))
tmp_pk = [bytes()] * len_all
for i in range(0, len_all):
address.setChainAddress(i)
tmp_pk[i] = chain(signature[i], msg[i], w - 1 - msg[i], SEED, address, w)
return tmp_pk
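# A minimal, self-contained sketch of the Winternitz chaining idea used above,
# with plain SHA-256 standing in for the project's `chain`/address machinery
# (illustrative only; the names below are hypothetical and not part of utils).
if __name__ == "__main__":
    import hashlib
    import os
    w = 16                         # Winternitz parameter
    sk = os.urandom(32)            # one secret chain element
    def toy_chain(x: bytes, steps: int) -> bytes:
        # Apply the hash function `steps` times: F^steps(x).
        for _ in range(steps):
            x = hashlib.sha256(x).digest()
        return x
    pk = toy_chain(sk, w - 1)      # public value is the end of the chain
    digit = 5                      # one base-w digit of the message
    sig = toy_chain(sk, digit)     # signing walks `digit` steps from the secret
    # Verification walks the remaining steps and compares with the public value.
    assert toy_chain(sig, w - 1 - digit) == pk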
| 25.054054 | 89 | 0.618123 |
542bfdb8541c35e42a35caff1ee698791a97cc51
| 2,215 |
py
|
Python
|
cpurger.py
|
updatedenis5300/compact-purger
|
311daae1037e2bf3786f2e882732919586799e55
|
[
"MIT"
] | null | null | null |
cpurger.py
|
updatedenis5300/compact-purger
|
311daae1037e2bf3786f2e882732919586799e55
|
[
"MIT"
] | null | null | null |
cpurger.py
|
updatedenis5300/compact-purger
|
311daae1037e2bf3786f2e882732919586799e55
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import argparse
import os
import glob
# Defualt path
dir_path = os.path.dirname(os.path.realpath(__file__))
def create_parser ():
parser = argparse.ArgumentParser(description='Tool for revoming useless files')
parser.add_argument('file_types', metavar='T', type=str, nargs='+', help='Types of files for the removing')
parser.add_argument ('-s', '--show', action='store_true', help='Show information')
parser.add_argument ('-r', '--recursion', action='store_true', help='Enable recursion')
parser.add_argument ('-p', '--path', nargs='?', default=dir_path, help='Path to search')
parser.add_argument ('-f', '--force', action='store_true', help='Disable confirmation')
return parser
def get_files(types, recurse, dir_path):
files = []
try:
if recurse:
for type_search in types:
files.extend(glob.glob(dir_path+'/**/*.'+type_search, recursive=True))
else:
for type_search in types:
files.extend(glob.glob(dir_path+'/*.'+type_search))
except Exception as e:
raise
return files
def smart_purge(file_types, show=False, recursion=False, path=dir_path, force=True):
files=get_files(file_types, recursion, path)
if len(files) == 0:
print("Not found")
return
if show:
print("Found files in", dir_path)
        for file_type in file_types:
files_filtered = [x for x in files if (x.endswith('.'+file_type))]
print('\t' + file_type + ':', len(files_filtered))
if force:
for file in files:
os.remove(file)
print('Successful', len(files), 'was deleted')
else:
if input('Deletion '+str(len(files))+' files. Are you sure? [Y/N] ').upper() != 'N':
for file in files:
os.remove(file)
print('Successful', len(files), 'files was deleted')
else:
print('Operation was canceled')
def smart_purge_muted(file_types, path=dir_path, recursion=False):
files=get_files(file_types, recursion, path)
if len(files) == 0:
return
for file in files:
os.remove(file)
if __name__ == "__main__":
parser = create_parser()
namespace = parser.parse_args(sys.argv[1:])
smart_purge(namespace.file_types, namespace.show, namespace.recursion, namespace.path, namespace.force)
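# Example invocations (illustrative), assuming the script is saved as cpurger.py:
#   python cpurger.py pyc log -r -s        # recursively remove *.pyc and *.log and print a summary
#   python cpurger.py tmp -p /var/tmp -f   # remove *.tmp under /var/tmp without asking for confirmation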
| 30.342466 | 111 | 0.695711 |
741097e99387db61805ca827d093de1d0e28b8f6
| 119 |
py
|
Python
|
gitlean/urls.py
|
franck-roland/GitPlan
|
76260186d2d78bbe5ba3b2c6222a437d6d535d7e
|
[
"Apache-2.0"
] | null | null | null |
gitlean/urls.py
|
franck-roland/GitPlan
|
76260186d2d78bbe5ba3b2c6222a437d6d535d7e
|
[
"Apache-2.0"
] | null | null | null |
gitlean/urls.py
|
franck-roland/GitPlan
|
76260186d2d78bbe5ba3b2c6222a437d6d535d7e
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.githook, name='githook'),
]
| 17 | 46 | 0.672269 |
4b366472b0401abeaa53135e4e8e7222e70f5dcc
| 785 |
py
|
Python
|
src/pyxltab/pyxltab.py
|
blakeNaccarato/pyxltab
|
55963598cf70cebe3c5939e6f227f4e487839012
|
[
"MIT"
] | null | null | null |
src/pyxltab/pyxltab.py
|
blakeNaccarato/pyxltab
|
55963598cf70cebe3c5939e6f227f4e487839012
|
[
"MIT"
] | null | null | null |
src/pyxltab/pyxltab.py
|
blakeNaccarato/pyxltab
|
55963598cf70cebe3c5939e6f227f4e487839012
|
[
"MIT"
] | null | null | null |
"""
Extends `openpyxl` classes for easier operation on Excel tables.
"""
__all__ = ["attach", "get_tables"]
from typing import Dict, Union
from pyxltab import classes
def attach(openpyxl_book: classes.openpyxl_Workbook) -> classes.Book:
"""
Attach to an `openpyxl` workbook, allowing other operations to be performed.
"""
book = classes.Book(openpyxl_book)
return book
def get_tables(
book: Union[classes.Book, classes.openpyxl_Workbook]
) -> Dict[str, classes.Table]:
"""
Get all tables in the workbook.
"""
if isinstance(book, classes.openpyxl_Workbook):
book = attach(book)
tables = {
table_name: table
for sheet in book.values()
for (table_name, table) in sheet.items()
}
return tables
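# Minimal usage sketch, assuming a workbook "report.xlsx" (hypothetical name)
# that contains Excel tables:
#   import openpyxl
#   book = attach(openpyxl.load_workbook("report.xlsx"))
#   for name, table in get_tables(book).items():
#       print(name, table)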
| 20.657895 | 80 | 0.66242 |
ae5a7020d43f3c0f2ab84412f5f95e076a73ae58
| 11,028 |
py
|
Python
|
third_party/pybind11/tests/test_numpy_dtypes.py
|
gautamkmr/caffe2
|
cde7f21d1e34ec714bc08dbfab945a1ad30e92ff
|
[
"MIT"
] | 29 |
2019-11-27T00:43:07.000Z
|
2020-02-25T14:35:54.000Z
|
third_party/pybind11/tests/test_numpy_dtypes.py
|
gautamkmr/caffe2
|
cde7f21d1e34ec714bc08dbfab945a1ad30e92ff
|
[
"MIT"
] | 7 |
2019-07-22T21:10:03.000Z
|
2021-12-14T16:53:27.000Z
|
third_party/pybind11/tests/test_numpy_dtypes.py
|
gautamkmr/caffe2
|
cde7f21d1e34ec714bc08dbfab945a1ad30e92ff
|
[
"MIT"
] | 13 |
2019-07-31T09:16:58.000Z
|
2022-03-29T15:12:40.000Z
|
import re
import pytest
from pybind11_tests import numpy_dtypes as m
pytestmark = pytest.requires_numpy
with pytest.suppress(ImportError):
import numpy as np
@pytest.fixture(scope='module')
def simple_dtype():
ld = np.dtype('longdouble')
return np.dtype({'names': ['bool_', 'uint_', 'float_', 'ldbl_'],
'formats': ['?', 'u4', 'f4', 'f{}'.format(ld.itemsize)],
'offsets': [0, 4, 8, (16 if ld.alignment > 4 else 12)]})
@pytest.fixture(scope='module')
def packed_dtype():
return np.dtype([('bool_', '?'), ('uint_', 'u4'), ('float_', 'f4'), ('ldbl_', 'g')])
def dt_fmt():
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
return ("{{'names':['bool_','uint_','float_','ldbl_'],"
" 'formats':['?','" + e + "u4','" + e + "f4','" + e + "f{}'],"
" 'offsets':[0,4,8,{}], 'itemsize':{}}}")
def simple_dtype_fmt():
ld = np.dtype('longdouble')
simple_ld_off = 12 + 4 * (ld.alignment > 4)
return dt_fmt().format(ld.itemsize, simple_ld_off, simple_ld_off + ld.itemsize)
def packed_dtype_fmt():
from sys import byteorder
return "[('bool_', '?'), ('uint_', '{e}u4'), ('float_', '{e}f4'), ('ldbl_', '{e}f{}')]".format(
np.dtype('longdouble').itemsize, e='<' if byteorder == 'little' else '>')
def partial_ld_offset():
return 12 + 4 * (np.dtype('uint64').alignment > 4) + 8 + 8 * (
np.dtype('longdouble').alignment > 8)
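# On a typical x86-64 Linux build (uint64 alignment 8, 80-bit long double with
# alignment 16) this evaluates to 12 + 4 + 8 + 8 = 32; platforms where long
# double is an alias for double get smaller offsets.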
def partial_dtype_fmt():
ld = np.dtype('longdouble')
partial_ld_off = partial_ld_offset()
return dt_fmt().format(ld.itemsize, partial_ld_off, partial_ld_off + ld.itemsize)
def partial_nested_fmt():
ld = np.dtype('longdouble')
partial_nested_off = 8 + 8 * (ld.alignment > 8)
partial_ld_off = partial_ld_offset()
partial_nested_size = partial_nested_off * 2 + partial_ld_off + ld.itemsize
return "{{'names':['a'], 'formats':[{}], 'offsets':[{}], 'itemsize':{}}}".format(
partial_dtype_fmt(), partial_nested_off, partial_nested_size)
def assert_equal(actual, expected_data, expected_dtype):
np.testing.assert_equal(actual, np.array(expected_data, dtype=expected_dtype))
def test_format_descriptors():
with pytest.raises(RuntimeError) as excinfo:
m.get_format_unbound()
assert re.match('^NumPy type info missing for .*UnboundStruct.*$', str(excinfo.value))
ld = np.dtype('longdouble')
ldbl_fmt = ('4x' if ld.alignment > 4 else '') + ld.char
ss_fmt = "^T{?:bool_:3xI:uint_:f:float_:" + ldbl_fmt + ":ldbl_:}"
dbl = np.dtype('double')
partial_fmt = ("^T{?:bool_:3xI:uint_:f:float_:" +
str(4 * (dbl.alignment > 4) + dbl.itemsize + 8 * (ld.alignment > 8)) +
"xg:ldbl_:}")
nested_extra = str(max(8, ld.alignment))
assert m.print_format_descriptors() == [
ss_fmt,
"^T{?:bool_:I:uint_:f:float_:g:ldbl_:}",
"^T{" + ss_fmt + ":a:^T{?:bool_:I:uint_:f:float_:g:ldbl_:}:b:}",
partial_fmt,
"^T{" + nested_extra + "x" + partial_fmt + ":a:" + nested_extra + "x}",
"^T{3s:a:3s:b:}",
"^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}",
'^T{q:e1:B:e2:}',
'^T{Zf:cflt:Zd:cdbl:}'
]
def test_dtype(simple_dtype):
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
assert m.print_dtypes() == [
simple_dtype_fmt(),
packed_dtype_fmt(),
"[('a', {}), ('b', {})]".format(simple_dtype_fmt(), packed_dtype_fmt()),
partial_dtype_fmt(),
partial_nested_fmt(),
"[('a', 'S3'), ('b', 'S3')]",
("{{'names':['a','b','c','d'], " +
"'formats':[('S4', (3,)),('<i4', (2,)),('u1', (3,)),('<f4', (4, 2))], " +
"'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e),
"[('e1', '" + e + "i8'), ('e2', 'u1')]",
"[('x', 'i1'), ('y', '" + e + "u8')]",
"[('cflt', '" + e + "c8'), ('cdbl', '" + e + "c16')]"
]
d1 = np.dtype({'names': ['a', 'b'], 'formats': ['int32', 'float64'],
'offsets': [1, 10], 'itemsize': 20})
d2 = np.dtype([('a', 'i4'), ('b', 'f4')])
assert m.test_dtype_ctors() == [np.dtype('int32'), np.dtype('float64'),
np.dtype('bool'), d1, d1, np.dtype('uint32'), d2]
assert m.test_dtype_methods() == [np.dtype('int32'), simple_dtype, False, True,
np.dtype('int32').itemsize, simple_dtype.itemsize]
assert m.trailing_padding_dtype() == m.buffer_to_dtype(np.zeros(1, m.trailing_padding_dtype()))
def test_recarray(simple_dtype, packed_dtype):
elements = [(False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)]
for func, dtype in [(m.create_rec_simple, simple_dtype), (m.create_rec_packed, packed_dtype)]:
arr = func(0)
assert arr.dtype == dtype
assert_equal(arr, [], simple_dtype)
assert_equal(arr, [], packed_dtype)
arr = func(3)
assert arr.dtype == dtype
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
if dtype == simple_dtype:
assert m.print_rec_simple(arr) == [
"s:0,0,0,-0",
"s:1,1,1.5,-2.5",
"s:0,2,3,-5"
]
else:
assert m.print_rec_packed(arr) == [
"p:0,0,0,-0",
"p:1,1,1.5,-2.5",
"p:0,2,3,-5"
]
nested_dtype = np.dtype([('a', simple_dtype), ('b', packed_dtype)])
arr = m.create_rec_nested(0)
assert arr.dtype == nested_dtype
assert_equal(arr, [], nested_dtype)
arr = m.create_rec_nested(3)
assert arr.dtype == nested_dtype
assert_equal(arr, [((False, 0, 0.0, -0.0), (True, 1, 1.5, -2.5)),
((True, 1, 1.5, -2.5), (False, 2, 3.0, -5.0)),
((False, 2, 3.0, -5.0), (True, 3, 4.5, -7.5))], nested_dtype)
assert m.print_rec_nested(arr) == [
"n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5",
"n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5",
"n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5"
]
arr = m.create_rec_partial(3)
assert str(arr.dtype) == partial_dtype_fmt()
partial_dtype = arr.dtype
assert '' not in arr.dtype.fields
assert partial_dtype.itemsize > simple_dtype.itemsize
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
arr = m.create_rec_partial_nested(3)
assert str(arr.dtype) == partial_nested_fmt()
assert '' not in arr.dtype.fields
assert '' not in arr.dtype.fields['a'][0].fields
assert arr.dtype.itemsize > partial_dtype.itemsize
np.testing.assert_equal(arr['a'], m.create_rec_partial(3))
def test_array_constructors():
data = np.arange(1, 7, dtype='int32')
for i in range(8):
np.testing.assert_array_equal(m.test_array_ctors(10 + i), data.reshape((3, 2)))
np.testing.assert_array_equal(m.test_array_ctors(20 + i), data.reshape((3, 2)))
for i in range(5):
np.testing.assert_array_equal(m.test_array_ctors(30 + i), data)
np.testing.assert_array_equal(m.test_array_ctors(40 + i), data)
def test_string_array():
arr = m.create_string_array(True)
assert str(arr.dtype) == "[('a', 'S3'), ('b', 'S3')]"
assert m.print_string_array(arr) == [
"a='',b=''",
"a='a',b='a'",
"a='ab',b='ab'",
"a='abc',b='abc'"
]
dtype = arr.dtype
assert arr['a'].tolist() == [b'', b'a', b'ab', b'abc']
assert arr['b'].tolist() == [b'', b'a', b'ab', b'abc']
arr = m.create_string_array(False)
assert dtype == arr.dtype
def test_array_array():
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
arr = m.create_array_array(3)
assert str(arr.dtype) == (
"{{'names':['a','b','c','d'], " +
"'formats':[('S4', (3,)),('<i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], " +
"'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e)
assert m.print_array_array(arr) == [
"a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1}," +
"c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}",
"a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001}," +
"c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}",
"a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001}," +
"c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}",
]
assert arr['a'].tolist() == [[b'ABCD', b'KLMN', b'UVWX'],
[b'WXYZ', b'GHIJ', b'QRST'],
[b'STUV', b'CDEF', b'MNOP']]
assert arr['b'].tolist() == [[0, 1], [1000, 1001], [2000, 2001]]
assert m.create_array_array(0).dtype == arr.dtype
def test_enum_array():
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
arr = m.create_enum_array(3)
dtype = arr.dtype
assert dtype == np.dtype([('e1', e + 'i8'), ('e2', 'u1')])
assert m.print_enum_array(arr) == [
"e1=A,e2=X",
"e1=B,e2=Y",
"e1=A,e2=X"
]
assert arr['e1'].tolist() == [-1, 1, -1]
assert arr['e2'].tolist() == [1, 2, 1]
assert m.create_enum_array(0).dtype == dtype
def test_complex_array():
from sys import byteorder
e = '<' if byteorder == 'little' else '>'
arr = m.create_complex_array(3)
dtype = arr.dtype
assert dtype == np.dtype([('cflt', e + 'c8'), ('cdbl', e + 'c16')])
assert m.print_complex_array(arr) == [
"c:(0,0.25),(0.5,0.75)",
"c:(1,1.25),(1.5,1.75)",
"c:(2,2.25),(2.5,2.75)"
]
assert arr['cflt'].tolist() == [0.0 + 0.25j, 1.0 + 1.25j, 2.0 + 2.25j]
assert arr['cdbl'].tolist() == [0.5 + 0.75j, 1.5 + 1.75j, 2.5 + 2.75j]
assert m.create_complex_array(0).dtype == dtype
def test_signature(doc):
assert doc(m.create_rec_nested) == \
"create_rec_nested(arg0: int) -> numpy.ndarray[NestedStruct]"
def test_scalar_conversion():
n = 3
arrays = [m.create_rec_simple(n), m.create_rec_packed(n),
m.create_rec_nested(n), m.create_enum_array(n)]
funcs = [m.f_simple, m.f_packed, m.f_nested]
for i, func in enumerate(funcs):
for j, arr in enumerate(arrays):
if i == j and i < 2:
assert [func(arr[k]) for k in range(n)] == [k * 10 for k in range(n)]
else:
with pytest.raises(TypeError) as excinfo:
func(arr[0])
assert 'incompatible function arguments' in str(excinfo.value)
def test_register_dtype():
with pytest.raises(RuntimeError) as excinfo:
m.register_dtype()
assert 'dtype is already registered' in str(excinfo.value)
@pytest.unsupported_on_pypy
def test_str_leak():
from sys import getrefcount
fmt = "f4"
pytest.gc_collect()
start = getrefcount(fmt)
d = m.dtype_wrapper(fmt)
assert d is np.dtype("f4")
del d
pytest.gc_collect()
assert getrefcount(fmt) == start
def test_compare_buffer_info():
assert all(m.compare_buffer_info())
| 35.459807 | 99 | 0.547515 |
a3e37f377f172410daab48a533fc8797664db7ab
| 549 |
py
|
Python
|
Caller.py
|
Jemeni11/Lyric-Translator_
|
622e1e49e1af46d2d8bf6ecf3063a63dabddfcf1
|
[
"MIT"
] | null | null | null |
Caller.py
|
Jemeni11/Lyric-Translator_
|
622e1e49e1af46d2d8bf6ecf3063a63dabddfcf1
|
[
"MIT"
] | null | null | null |
Caller.py
|
Jemeni11/Lyric-Translator_
|
622e1e49e1af46d2d8bf6ecf3063a63dabddfcf1
|
[
"MIT"
] | null | null | null |
from translathor import translator
from apicalls import *
def caller():
intention = input(
"To get songs by an artist use (F)\nTo get music lyrics use(G)\nUse (H) to get lyrics and translate -->")
    if intention.lower() not in ['f', 'g', 'h']:
exit("Lmao, get serious abeg")
elif intention.lower() == "g":
for text in get_lyrics():
print(text)
elif intention.lower() == "f":
get_songsby()
else:
print(translator(get_lyrics()))
if __name__ == "__main__":
print(caller())
| 26.142857 | 113 | 0.593807 |
a79bed0c1856db5840981f989df3afe66885e31a
| 521 |
py
|
Python
|
pagetools/widgets/tests/test_commands.py
|
theithec/pagetools
|
f5fba7213864555275bddcc1882122f3be843f19
|
[
"MIT"
] | null | null | null |
pagetools/widgets/tests/test_commands.py
|
theithec/pagetools
|
f5fba7213864555275bddcc1882122f3be843f19
|
[
"MIT"
] | null | null | null |
pagetools/widgets/tests/test_commands.py
|
theithec/pagetools
|
f5fba7213864555275bddcc1882122f3be843f19
|
[
"MIT"
] | null | null | null |
from django.core.management import call_command
from django.test import TestCase
from pagetools.widgets.models import TemplateTagWidget
from pagetools.widgets.settings import TEMPLATETAG_WIDGETS
class CommandsTestCase(TestCase):
def test_mycommand(self):
"Test my custom command."
args = []
opts = {}
call_command("mk_templatetagwidgets", *args, **opts)
widgetobjects = TemplateTagWidget.objects.all()
self.assertEqual(len(TEMPLATETAG_WIDGETS), len(widgetobjects))
| 30.647059 | 70 | 0.735125 |
d4c3ddd4fa3aa1f990f31ec2b2c50d9e5288b3a2
| 37,320 |
py
|
Python
|
tensorflow_tts/models/tacotron2.py
|
krisk84/TensorFlowTTS
|
4fe289cd0d6aca79bf2501271f048800a5071cd0
|
[
"Apache-2.0"
] | 3 |
2020-12-09T11:29:22.000Z
|
2021-07-23T07:53:56.000Z
|
tensorflow_tts/models/tacotron2.py
|
krisk84/TensorFlowTTS
|
4fe289cd0d6aca79bf2501271f048800a5071cd0
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_tts/models/tacotron2.py
|
krisk84/TensorFlowTTS
|
4fe289cd0d6aca79bf2501271f048800a5071cd0
|
[
"Apache-2.0"
] | 1 |
2021-01-27T08:54:48.000Z
|
2021-01-27T08:54:48.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 The Tacotron-2 Authors, Minh Nguyen (@dathudeptrai), Eren Gölge (@erogol) and Jae Yoo (@jaeyoo)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tacotron-2 Modules."""
import collections
import numpy as np
import tensorflow as tf
# TODO: once https://github.com/tensorflow/addons/pull/1964 is fixed,
# uncomment this line.
# from tensorflow_addons.seq2seq import dynamic_decode
from tensorflow_addons.seq2seq import BahdanauAttention, Decoder, Sampler
from tensorflow_tts.utils import dynamic_decode
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
def gelu(x):
"""Gaussian Error Linear unit."""
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
return x * cdf
def gelu_new(x):
"""Smoother gaussian Error Linear Unit."""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def swish(x):
"""Swish activation function."""
return tf.nn.swish(x)
def mish(x):
return x * tf.math.tanh(tf.math.softplus(x))
ACT2FN = {
"identity": tf.keras.layers.Activation("linear"),
"tanh": tf.keras.layers.Activation("tanh"),
"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish),
"gelu_new": tf.keras.layers.Activation(gelu_new),
"mish": tf.keras.layers.Activation(mish),
}
class TFEmbedding(tf.keras.layers.Embedding):
"""Faster version of embedding."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def call(self, inputs):
inputs = tf.cast(tf.expand_dims(inputs, -1), tf.int32)
outputs = tf.gather_nd(self.embeddings, inputs)
return outputs
class TFTacotronConvBatchNorm(tf.keras.layers.Layer):
"""Tacotron-2 Convolutional Batchnorm module."""
def __init__(
self, filters, kernel_size, dropout_rate, activation=None, name_idx=None
):
super().__init__()
self.conv1d = tf.keras.layers.Conv1D(
filters,
kernel_size,
kernel_initializer=get_initializer(0.02),
padding="same",
name="conv_._{}".format(name_idx),
)
self.norm = tf.keras.layers.experimental.SyncBatchNormalization(
axis=-1, name="batch_norm_._{}".format(name_idx)
)
self.dropout = tf.keras.layers.Dropout(
rate=dropout_rate, name="dropout_._{}".format(name_idx)
)
self.act = ACT2FN[activation]
def call(self, inputs, training=False):
outputs = self.conv1d(inputs)
outputs = self.norm(outputs, training=training)
outputs = self.act(outputs)
outputs = self.dropout(outputs, training=training)
return outputs
class TFTacotronEmbeddings(tf.keras.layers.Layer):
"""Construct character/phoneme/positional/speaker embeddings."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_hidden_size = config.embedding_hidden_size
self.initializer_range = config.initializer_range
self.config = config
if config.n_speakers > 1:
self.speaker_embeddings = TFEmbedding(
config.n_speakers,
config.embedding_hidden_size,
embeddings_initializer=get_initializer(self.initializer_range),
name="speaker_embeddings",
)
self.speaker_fc = tf.keras.layers.Dense(
units=config.embedding_hidden_size, name="speaker_fc"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="LayerNorm"
)
self.dropout = tf.keras.layers.Dropout(config.embedding_dropout_prob)
def build(self, input_shape):
"""Build shared character/phoneme embedding layers."""
with tf.name_scope("character_embeddings"):
self.character_embeddings = self.add_weight(
"weight",
shape=[self.vocab_size, self.embedding_hidden_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(self, inputs, training=False):
"""Get character embeddings of inputs.
Args:
1. character, Tensor (int32) shape [batch_size, length].
2. speaker_id, Tensor (int32) shape [batch_size]
Returns:
Tensor (float32) shape [batch_size, length, embedding_size].
"""
return self._embedding(inputs, training=training)
def _embedding(self, inputs, training=False):
"""Applies embedding based on inputs tensor."""
input_ids, speaker_ids = inputs
# create embeddings
inputs_embeds = tf.gather(self.character_embeddings, input_ids)
embeddings = inputs_embeds
if self.config.n_speakers > 1:
speaker_embeddings = self.speaker_embeddings(speaker_ids)
speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
# extended speaker embeddings
extended_speaker_features = speaker_features[:, tf.newaxis, :]
# sum all embedding
embeddings += extended_speaker_features
# apply layer-norm and dropout for embeddings.
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings, training=training)
return embeddings
class TFTacotronEncoderConvs(tf.keras.layers.Layer):
"""Tacotron-2 Encoder Convolutional Batchnorm module."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv_batch_norm = []
for i in range(config.n_conv_encoder):
conv = TFTacotronConvBatchNorm(
filters=config.encoder_conv_filters,
kernel_size=config.encoder_conv_kernel_sizes,
activation=config.encoder_conv_activation,
dropout_rate=config.encoder_conv_dropout_rate,
name_idx=i,
)
self.conv_batch_norm.append(conv)
def call(self, inputs, training=False):
"""Call logic."""
outputs = inputs
for conv in self.conv_batch_norm:
outputs = conv(outputs, training=training)
return outputs
class TFTacotronEncoder(tf.keras.layers.Layer):
"""Tacotron-2 Encoder."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.embeddings = TFTacotronEmbeddings(config, name="embeddings")
self.convbn = TFTacotronEncoderConvs(config, name="conv_batch_norm")
self.bilstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(
units=config.encoder_lstm_units, return_sequences=True
),
name="bilstm",
)
if config.n_speakers > 1:
self.encoder_speaker_embeddings = TFEmbedding(
config.n_speakers,
config.embedding_hidden_size,
embeddings_initializer=get_initializer(config.initializer_range),
name="encoder_speaker_embeddings",
)
self.encoder_speaker_fc = tf.keras.layers.Dense(
units=config.encoder_lstm_units * 2, name="encoder_speaker_fc"
)
self.config = config
def call(self, inputs, training=False):
"""Call logic."""
input_ids, speaker_ids, input_mask = inputs
# create embedding and mask them since we sum
# speaker embedding to all character embedding.
input_embeddings = self.embeddings([input_ids, speaker_ids], training=training)
# pass embeddings to convolution batch norm
conv_outputs = self.convbn(input_embeddings, training=training)
# bi-lstm.
outputs = self.bilstm(conv_outputs, mask=input_mask)
if self.config.n_speakers > 1:
encoder_speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)
encoder_speaker_features = tf.math.softplus(
self.encoder_speaker_fc(encoder_speaker_embeddings)
)
            # extended encoder speaker embeddings
extended_encoder_speaker_features = encoder_speaker_features[
:, tf.newaxis, :
]
# sum to encoder outputs
outputs += extended_encoder_speaker_features
return outputs
class TrainingSampler(Sampler):
"""Training sampler for Seq2Seq training."""
def __init__(
self, config,
):
super().__init__()
self.config = config
# create schedule factor.
        # the input of the next decoder cell is calculated by the formula:
# next_inputs = ratio * prev_groundtruth_outputs + (1.0 - ratio) * prev_predicted_outputs.
self._ratio = tf.constant(1.0, dtype=tf.float32)
self._reduction_factor = self.config.reduction_factor
def setup_target(self, targets, mel_lengths):
"""Setup ground-truth mel outputs for decoder."""
self.mel_lengths = mel_lengths
self.set_batch_size(tf.shape(targets)[0])
self.targets = targets[
:, self._reduction_factor - 1 :: self._reduction_factor, :
]
self.max_lengths = tf.tile([tf.shape(self.targets)[1]], [self._batch_size])
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tf.TensorShape([])
@property
def sample_ids_dtype(self):
return tf.int32
@property
def reduction_factor(self):
return self._reduction_factor
def initialize(self):
"""Return (Finished, next_inputs)."""
return (
tf.tile([False], [self._batch_size]),
tf.tile([[0.0]], [self._batch_size, self.config.n_mels]),
)
def sample(self, time, outputs, state):
return tf.tile([0], [self._batch_size])
def next_inputs(self, time, outputs, state, sample_ids, **kwargs):
finished = time + 1 >= self.max_lengths
next_inputs = (
self._ratio * self.targets[:, time, :]
+ (1.0 - self._ratio) * outputs[:, -self.config.n_mels :]
)
next_state = state
return (finished, next_inputs, next_state)
def set_batch_size(self, batch_size):
self._batch_size = batch_size
class TestingSampler(TrainingSampler):
"""Testing sampler for Seq2Seq training."""
def __init__(
self, config,
):
super().__init__(config)
def next_inputs(self, time, outputs, state, sample_ids, **kwargs):
stop_token_prediction = kwargs.get("stop_token_prediction")
stop_token_prediction = tf.nn.sigmoid(stop_token_prediction)
finished = tf.cast(tf.round(stop_token_prediction), tf.bool)
finished = tf.reduce_all(finished)
next_inputs = outputs[:, -self.config.n_mels :]
next_state = state
return (finished, next_inputs, next_state)
class TFTacotronLocationSensitiveAttention(BahdanauAttention):
"""Tacotron-2 Location Sensitive Attention module."""
def __init__(
self,
config,
memory,
mask_encoder=True,
memory_sequence_length=None,
is_cumulate=True,
):
"""Init variables."""
memory_length = memory_sequence_length if (mask_encoder is True) else None
super().__init__(
units=config.attention_dim,
memory=memory,
memory_sequence_length=memory_length,
probability_fn="softmax",
name="LocationSensitiveAttention",
)
self.location_convolution = tf.keras.layers.Conv1D(
filters=config.attention_filters,
kernel_size=config.attention_kernel,
padding="same",
use_bias=False,
name="location_conv",
)
self.location_layer = tf.keras.layers.Dense(
units=config.attention_dim, use_bias=False, name="location_layer"
)
self.v = tf.keras.layers.Dense(1, use_bias=True, name="scores_attention")
self.config = config
self.is_cumulate = is_cumulate
self.use_window = False
def setup_window(self, win_front=2, win_back=4):
self.win_front = tf.constant(win_front, tf.int32)
self.win_back = tf.constant(win_back, tf.int32)
self._indices = tf.expand_dims(tf.range(tf.shape(self.keys)[1]), 0)
self._indices = tf.tile(
self._indices, [tf.shape(self.keys)[0], 1]
) # [batch_size, max_time]
self.use_window = True
def _compute_window_mask(self, max_alignments):
"""Compute window mask for inference.
Args:
max_alignments (int): [batch_size]
"""
expanded_max_alignments = tf.expand_dims(max_alignments, 1) # [batch_size, 1]
low = expanded_max_alignments - self.win_front
high = expanded_max_alignments + self.win_back
mlow = tf.cast((self._indices < low), tf.float32)
mhigh = tf.cast((self._indices > high), tf.float32)
mask = mlow + mhigh
return mask # [batch_size, max_length]
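        # For example, with win_front=2, win_back=4 and a previous max alignment
        # at index 10, every encoder position outside [8, 14] gets mask value 1.0;
        # the caller adds mask * -1e20 to the energies, keeping attention local.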
def __call__(self, inputs, training=False):
query, state, prev_max_alignments = inputs
processed_query = self.query_layer(query) if self.query_layer else query
processed_query = tf.expand_dims(processed_query, 1)
expanded_alignments = tf.expand_dims(state, axis=2)
f = self.location_convolution(expanded_alignments)
processed_location_features = self.location_layer(f)
energy = self._location_sensitive_score(
processed_query, processed_location_features, self.keys
)
# mask energy on inference steps.
if self.use_window is True:
window_mask = self._compute_window_mask(prev_max_alignments)
energy = energy + window_mask * -1e20
alignments = self.probability_fn(energy, state)
if self.is_cumulate:
state = alignments + state
else:
state = alignments
expanded_alignments = tf.expand_dims(alignments, 2)
context = tf.reduce_sum(expanded_alignments * self.values, 1)
return context, alignments, state
def _location_sensitive_score(self, W_query, W_fil, W_keys):
"""Calculate location sensitive energy."""
return tf.squeeze(self.v(tf.nn.tanh(W_keys + W_query + W_fil)), -1)
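        # i.e. energy e_{t,j} = v^T tanh(W_k k_j + W_q q_t + W_f f_{t,j}): the
        # additive (Bahdanau-style) score extended with location features f
        # derived from the cumulative alignments.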
def get_initial_state(self, batch_size, size):
"""Get initial alignments."""
return tf.zeros(shape=[batch_size, size], dtype=tf.float32)
def get_initial_context(self, batch_size):
"""Get initial attention."""
return tf.zeros(
shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32
)
class TFTacotronPrenet(tf.keras.layers.Layer):
"""Tacotron-2 prenet."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.prenet_dense = [
tf.keras.layers.Dense(
units=config.prenet_units,
activation=ACT2FN[config.prenet_activation],
name="dense_._{}".format(i),
)
for i in range(config.n_prenet_layers)
]
self.dropout = tf.keras.layers.Dropout(
rate=config.prenet_dropout_rate, name="dropout"
)
def call(self, inputs, training=False):
"""Call logic."""
outputs = inputs
for layer in self.prenet_dense:
outputs = layer(outputs)
outputs = self.dropout(outputs, training=True)
return outputs
class TFTacotronPostnet(tf.keras.layers.Layer):
"""Tacotron-2 postnet."""
def __init__(self, config, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.conv_batch_norm = []
for i in range(config.n_conv_postnet):
conv = TFTacotronConvBatchNorm(
filters=config.postnet_conv_filters,
kernel_size=config.postnet_conv_kernel_sizes,
dropout_rate=config.postnet_dropout_rate,
activation="identity" if i + 1 == config.n_conv_postnet else "tanh",
name_idx=i,
)
self.conv_batch_norm.append(conv)
def call(self, inputs, training=False):
"""Call logic."""
outputs = inputs
for _, conv in enumerate(self.conv_batch_norm):
outputs = conv(outputs, training=training)
return outputs
TFTacotronDecoderCellState = collections.namedtuple(
"TFTacotronDecoderCellState",
[
"attention_lstm_state",
"decoder_lstms_state",
"context",
"time",
"state",
"alignment_history",
"max_alignments",
],
)
TFDecoderOutput = collections.namedtuple(
"TFDecoderOutput", ("mel_output", "token_output", "sample_id")
)
class TFTacotronDecoderCell(tf.keras.layers.AbstractRNNCell):
"""Tacotron-2 custom decoder cell."""
def __init__(self, config, training, enable_tflite_convertible=False, **kwargs):
"""Init variables."""
super().__init__(**kwargs)
self.training = training
self.enable_tflite_convertible = enable_tflite_convertible
self.prenet = TFTacotronPrenet(config, name="prenet")
# define lstm cell on decoder.
# TODO(@dathudeptrai) switch to zone-out lstm.
self.attention_lstm = tf.keras.layers.LSTMCell(
units=config.decoder_lstm_units, name="attention_lstm_cell"
)
lstm_cells = []
for i in range(config.n_lstm_decoder):
lstm_cell = tf.keras.layers.LSTMCell(
units=config.decoder_lstm_units, name="lstm_cell_._{}".format(i)
)
lstm_cells.append(lstm_cell)
self.decoder_lstms = tf.keras.layers.StackedRNNCells(
lstm_cells, name="decoder_lstms"
)
# define attention layer.
if config.attention_type == "lsa":
# create location-sensitive attention.
self.attention_layer = TFTacotronLocationSensitiveAttention(
config,
memory=None,
mask_encoder=True,
memory_sequence_length=None,
is_cumulate=True,
)
else:
raise ValueError("Only lsa (location-sensitive attention) is supported")
# frame, stop projection layer.
self.frame_projection = tf.keras.layers.Dense(
units=config.n_mels * config.reduction_factor, name="frame_projection"
)
self.stop_projection = tf.keras.layers.Dense(
units=config.reduction_factor, name="stop_projection"
)
self.config = config
def set_alignment_size(self, alignment_size):
self.alignment_size = alignment_size
@property
def output_size(self):
"""Return output (mel) size."""
return self.frame_projection.units
@property
def state_size(self):
"""Return hidden state size."""
return TFTacotronDecoderCellState(
attention_lstm_state=self.attention_lstm.state_size,
decoder_lstms_state=self.decoder_lstms.state_size,
time=tf.TensorShape([]),
            context=self.config.encoder_lstm_units * 2,
state=self.alignment_size,
alignment_history=(),
max_alignments=tf.TensorShape([1]),
)
def get_initial_state(self, batch_size):
"""Get initial states."""
initial_attention_lstm_cell_states = self.attention_lstm.get_initial_state(
None, batch_size, dtype=tf.float32
)
initial_decoder_lstms_cell_states = self.decoder_lstms.get_initial_state(
None, batch_size, dtype=tf.float32
)
initial_context = tf.zeros(
shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32
)
initial_state = self.attention_layer.get_initial_state(
batch_size, size=self.alignment_size
)
if self.enable_tflite_convertible:
initial_alignment_history = ()
else:
initial_alignment_history = tf.TensorArray(
dtype=tf.float32, size=0, dynamic_size=True
)
return TFTacotronDecoderCellState(
attention_lstm_state=initial_attention_lstm_cell_states,
decoder_lstms_state=initial_decoder_lstms_cell_states,
time=tf.zeros([], dtype=tf.int32),
context=initial_context,
state=initial_state,
alignment_history=initial_alignment_history,
max_alignments=tf.zeros([batch_size], dtype=tf.int32),
)
def call(self, inputs, states):
"""Call logic."""
decoder_input = inputs
# 1. apply prenet for decoder_input.
prenet_out = self.prenet(
decoder_input, training=self.training
) # [batch_size, dim]
# 2. concat prenet_out and prev context vector
# then use it as input of attention lstm layer.
attention_lstm_input = tf.concat([prenet_out, states.context], axis=-1)
attention_lstm_output, next_attention_lstm_state = self.attention_lstm(
attention_lstm_input, states.attention_lstm_state
)
# 3. compute context, alignment and cumulative alignment.
prev_state = states.state
if not self.enable_tflite_convertible:
prev_alignment_history = states.alignment_history
prev_max_alignments = states.max_alignments
context, alignments, state = self.attention_layer(
[attention_lstm_output, prev_state, prev_max_alignments],
training=self.training,
)
# 4. run decoder lstm(s)
decoder_lstms_input = tf.concat([attention_lstm_output, context], axis=-1)
decoder_lstms_output, next_decoder_lstms_state = self.decoder_lstms(
decoder_lstms_input, states.decoder_lstms_state
)
# 5. compute frame feature and stop token.
projection_inputs = tf.concat([decoder_lstms_output, context], axis=-1)
decoder_outputs = self.frame_projection(projection_inputs)
stop_inputs = tf.concat([decoder_lstms_output, decoder_outputs], axis=-1)
stop_tokens = self.stop_projection(stop_inputs)
# 6. save alignment history to visualize.
if self.enable_tflite_convertible:
alignment_history = ()
else:
alignment_history = prev_alignment_history.write(states.time, alignments)
# 7. return new states.
new_states = TFTacotronDecoderCellState(
attention_lstm_state=next_attention_lstm_state,
decoder_lstms_state=next_decoder_lstms_state,
time=states.time + 1,
context=context,
state=state,
alignment_history=alignment_history,
max_alignments=tf.argmax(alignments, -1, output_type=tf.int32),
)
return (decoder_outputs, stop_tokens), new_states
class TFTacotronDecoder(Decoder):
"""Tacotron-2 Decoder."""
def __init__(
self,
decoder_cell,
decoder_sampler,
output_layer=None,
enable_tflite_convertible=False,
):
"""Initial variables."""
self.cell = decoder_cell
self.sampler = decoder_sampler
self.output_layer = output_layer
self.enable_tflite_convertible = enable_tflite_convertible
def setup_decoder_init_state(self, decoder_init_state):
self.initial_state = decoder_init_state
def initialize(self, **kwargs):
return self.sampler.initialize() + (self.initial_state,)
@property
def output_size(self):
return TFDecoderOutput(
mel_output=tf.nest.map_structure(
lambda shape: tf.TensorShape(shape), self.cell.output_size
),
token_output=tf.TensorShape(self.sampler.reduction_factor),
sample_id=tf.TensorShape([1])
if self.enable_tflite_convertible
else self.sampler.sample_ids_shape, # tf.TensorShape([])
)
@property
def output_dtype(self):
return TFDecoderOutput(tf.float32, tf.float32, self.sampler.sample_ids_dtype)
@property
def batch_size(self):
return self.sampler._batch_size
def step(self, time, inputs, state, training=False):
(mel_outputs, stop_tokens), cell_state = self.cell(
inputs, state, training=training
)
if self.output_layer is not None:
mel_outputs = self.output_layer(mel_outputs)
sample_ids = self.sampler.sample(
time=time, outputs=mel_outputs, state=cell_state
)
(finished, next_inputs, next_state) = self.sampler.next_inputs(
time=time,
outputs=mel_outputs,
state=cell_state,
sample_ids=sample_ids,
stop_token_prediction=stop_tokens,
)
outputs = TFDecoderOutput(mel_outputs, stop_tokens, sample_ids)
return (outputs, next_state, next_inputs, finished)
class TFTacotron2(tf.keras.Model):
"""Tensorflow tacotron-2 model."""
def __init__(self, config, **kwargs):
"""Initalize tacotron-2 layers."""
training = kwargs.pop("training", False)
enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
super().__init__(self, **kwargs)
self.encoder = TFTacotronEncoder(config, name="encoder")
self.decoder_cell = TFTacotronDecoderCell(
config,
training=training,
name="decoder_cell",
enable_tflite_convertible=enable_tflite_convertible,
)
self.decoder = TFTacotronDecoder(
self.decoder_cell,
TrainingSampler(config) if training is True else TestingSampler(config),
enable_tflite_convertible=enable_tflite_convertible,
)
self.postnet = TFTacotronPostnet(config, name="post_net")
self.post_projection = tf.keras.layers.Dense(
units=config.n_mels, name="residual_projection"
)
self.config = config
self.use_window_mask = False
self.maximum_iterations = 4000
self.enable_tflite_convertible = enable_tflite_convertible
def setup_window(self, win_front, win_back):
"""Call only for inference."""
self.use_window_mask = True
self.win_front = win_front
self.win_back = win_back
def setup_maximum_iterations(self, maximum_iterations):
"""Call only for inference."""
self.maximum_iterations = maximum_iterations
def _build(self):
input_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
input_lengths = np.array([9])
speaker_ids = np.array([0])
mel_outputs = np.random.normal(size=(1, 50, 80)).astype(np.float32)
mel_lengths = np.array([50])
self(
input_ids,
input_lengths,
speaker_ids,
mel_outputs,
mel_lengths,
10,
training=True,
)
def call(
self,
input_ids,
input_lengths,
speaker_ids,
mel_gts,
mel_lengths,
maximum_iterations=2000,
use_window_mask=False,
win_front=2,
win_back=3,
training=False,
**kwargs,
):
"""Call logic."""
# create input-mask based on input_lengths
input_mask = tf.sequence_mask(
input_lengths,
maxlen=tf.reduce_max(input_lengths),
name="input_sequence_masks",
)
# Encoder Step.
encoder_hidden_states = self.encoder(
[input_ids, speaker_ids, input_mask], training=training
)
batch_size = tf.shape(encoder_hidden_states)[0]
alignment_size = tf.shape(encoder_hidden_states)[1]
# Setup some initial placeholders for decoder step. Include:
# 1. mel_gts, mel_lengths for teacher forcing mode.
# 2. alignment_size for attention size.
# 3. initial state for decoder cell.
# 4. memory (encoder hidden state) for attention mechanism.
self.decoder.sampler.setup_target(targets=mel_gts, mel_lengths=mel_lengths)
self.decoder.cell.set_alignment_size(alignment_size)
self.decoder.setup_decoder_init_state(
self.decoder.cell.get_initial_state(batch_size)
)
self.decoder.cell.attention_layer.setup_memory(
memory=encoder_hidden_states,
memory_sequence_length=input_lengths, # use for mask attention.
)
if use_window_mask:
self.decoder.cell.attention_layer.setup_window(
win_front=win_front, win_back=win_back
)
# run decode step.
(
(frames_prediction, stop_token_prediction, _),
final_decoder_state,
_,
) = dynamic_decode(
self.decoder,
maximum_iterations=maximum_iterations,
enable_tflite_convertible=self.enable_tflite_convertible,
)
decoder_outputs = tf.reshape(
frames_prediction, [batch_size, -1, self.config.n_mels]
)
stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])
residual = self.postnet(decoder_outputs, training=training)
residual_projection = self.post_projection(residual)
mel_outputs = decoder_outputs + residual_projection
if self.enable_tflite_convertible:
mask = tf.math.not_equal(
tf.cast(
tf.reduce_sum(tf.abs(decoder_outputs), axis=-1), dtype=tf.int32
),
0,
)
decoder_outputs = tf.expand_dims(
tf.boolean_mask(decoder_outputs, mask), axis=0
)
mel_outputs = tf.expand_dims(tf.boolean_mask(mel_outputs, mask), axis=0)
alignment_history = ()
else:
alignment_history = tf.transpose(
final_decoder_state.alignment_history.stack(), [1, 2, 0]
)
return decoder_outputs, mel_outputs, stop_token_prediction, alignment_history
@tf.function(
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec([None, None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec([None,], dtype=tf.int32, name="input_lengths"),
tf.TensorSpec([None,], dtype=tf.int32, name="speaker_ids"),
],
)
def inference(self, input_ids, input_lengths, speaker_ids, **kwargs):
"""Call logic."""
# create input-mask based on input_lengths
input_mask = tf.sequence_mask(
input_lengths,
maxlen=tf.reduce_max(input_lengths),
name="input_sequence_masks",
)
# Encoder Step.
encoder_hidden_states = self.encoder(
[input_ids, speaker_ids, input_mask], training=False
)
batch_size = tf.shape(encoder_hidden_states)[0]
alignment_size = tf.shape(encoder_hidden_states)[1]
# Setup some initial placeholders for decoder step. Include:
# 1. batch_size for inference.
# 2. alignment_size for attention size.
# 3. initial state for decoder cell.
# 4. memory (encoder hidden state) for attention mechanism.
# 5. window front/back to address synthesis problems with long sentences. (call after setting up memory.)
self.decoder.sampler.set_batch_size(batch_size)
self.decoder.cell.set_alignment_size(alignment_size)
self.decoder.setup_decoder_init_state(
self.decoder.cell.get_initial_state(batch_size)
)
self.decoder.cell.attention_layer.setup_memory(
memory=encoder_hidden_states,
memory_sequence_length=input_lengths, # use for mask attention.
)
if self.use_window_mask:
self.decoder.cell.attention_layer.setup_window(
win_front=self.win_front, win_back=self.win_back
)
# run decode step.
(
(frames_prediction, stop_token_prediction, _),
final_decoder_state,
_,
) = dynamic_decode(self.decoder, maximum_iterations=self.maximum_iterations)
decoder_outputs = tf.reshape(
frames_prediction, [batch_size, -1, self.config.n_mels]
)
stop_token_predictions = tf.reshape(stop_token_prediction, [batch_size, -1])
residual = self.postnet(decoder_outputs, training=False)
residual_projection = self.post_projection(residual)
mel_outputs = decoder_outputs + residual_projection
alignment_historys = tf.transpose(
final_decoder_state.alignment_history.stack(), [1, 2, 0]
)
return decoder_outputs, mel_outputs, stop_token_predictions, alignment_historys
@tf.function(
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec([1, None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec([1,], dtype=tf.int32, name="input_lengths"),
tf.TensorSpec([1,], dtype=tf.int32, name="speaker_ids"),
],
)
def inference_tflite(self, input_ids, input_lengths, speaker_ids, **kwargs):
"""Call logic."""
# create input-mask based on input_lengths
input_mask = tf.sequence_mask(
input_lengths,
maxlen=tf.reduce_max(input_lengths),
name="input_sequence_masks",
)
# Encoder Step.
encoder_hidden_states = self.encoder(
[input_ids, speaker_ids, input_mask], training=False
)
batch_size = tf.shape(encoder_hidden_states)[0]
alignment_size = tf.shape(encoder_hidden_states)[1]
# Setup some initial placeholders for decoder step. Include:
# 1. batch_size for inference.
# 2. alignment_size for attention size.
# 3. initial state for decoder cell.
# 4. memory (encoder hidden state) for attention mechanism.
# 5. window front/back to address synthesis problems with long sentences. (call after setting up memory.)
self.decoder.sampler.set_batch_size(batch_size)
self.decoder.cell.set_alignment_size(alignment_size)
self.decoder.setup_decoder_init_state(
self.decoder.cell.get_initial_state(batch_size)
)
self.decoder.cell.attention_layer.setup_memory(
memory=encoder_hidden_states,
memory_sequence_length=input_lengths, # use for mask attention.
)
if self.use_window_mask:
self.decoder.cell.attention_layer.setup_window(
win_front=self.win_front, win_back=self.win_back
)
# run decode step.
(
(frames_prediction, stop_token_prediction, _),
final_decoder_state,
_,
) = dynamic_decode(
self.decoder,
maximum_iterations=self.maximum_iterations,
enable_tflite_convertible=self.enable_tflite_convertible,
)
decoder_outputs = tf.reshape(
frames_prediction, [batch_size, -1, self.config.n_mels]
)
stop_token_predictions = tf.reshape(stop_token_prediction, [batch_size, -1])
residual = self.postnet(decoder_outputs, training=False)
residual_projection = self.post_projection(residual)
mel_outputs = decoder_outputs + residual_projection
if self.enable_tflite_convertible:
mask = tf.math.not_equal(
tf.cast(
tf.reduce_sum(tf.abs(decoder_outputs), axis=-1), dtype=tf.int32
),
0,
)
decoder_outputs = tf.expand_dims(
tf.boolean_mask(decoder_outputs, mask), axis=0
)
mel_outputs = tf.expand_dims(tf.boolean_mask(mel_outputs, mask), axis=0)
alignment_historys = ()
else:
alignment_historys = tf.transpose(
final_decoder_state.alignment_history.stack(), [1, 2, 0]
)
return decoder_outputs, mel_outputs, stop_token_predictions, alignment_historys
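# Usage sketch (illustrative only, not taken from this file; assumes a config object
# exposing the attributes referenced above, e.g. n_mels, reduction_factor,
# attention_dim, encoder_lstm_units, and int32 tensors for the three inputs):
#
#     model = TFTacotron2(config, training=False)
#     model._build()                               # build weights with dummy inputs
#     model.setup_window(win_front=6, win_back=6)  # optional attention window, inference only
#     model.setup_maximum_iterations(3000)         # optional decode-length cap, inference only
#     decoder_out, mel_out, stop_tokens, alignments = model.inference(
#         input_ids, input_lengths, speaker_ids
#     )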
avg_line_length: 35.884615 | max_line_length: 112 | alphanum_fraction: 0.632262

hexsha: 9f3e7b71d1c571aef430ba87aedd39ff6ad35ce9 | size: 2,614 | ext: py | lang: Python
max_stars:  path: venv/Lib/site-packages/pandas/core/internals/api.py | repo: arnoyu-hub/COMP0016miemie | head: 59af664dcf190eab4f93cefb8471908717415fea | licenses: ["MIT"] | count: null | dates: null
max_issues: path: venv/Lib/site-packages/pandas/core/internals/api.py | repo: arnoyu-hub/COMP0016miemie | head: 59af664dcf190eab4f93cefb8471908717415fea | licenses: ["MIT"] | count: null | dates: null
max_forks:  path: venv/Lib/site-packages/pandas/core/internals/api.py | repo: arnoyu-hub/COMP0016miemie | head: 59af664dcf190eab4f93cefb8471908717415fea | licenses: ["MIT"] | count: null | dates: null

"""
This is a pseudo-public API for downstream libraries. We ask that downstream
authors
1) Try to avoid using internals directly altogether, and failing that,
2) Use only functions exposed here (or in core.internals)
"""
from __future__ import annotations
import numpy as np
from pandas._libs.internals import BlockPlacement
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
pandas_dtype,
)
from pandas.core.arrays import DatetimeArray
from pandas.core.construction import extract_array
from pandas.core.internals.blocks import (
Block,
DatetimeTZBlock,
check_ndim,
ensure_block_shape,
extract_pandas_array,
get_block_type,
maybe_coerce_values,
)
def make_block(
values, placement, klass=None, ndim=None, dtype: Dtype | None = None
) -> Block:
"""
This is a pseudo-public analogue to blocks.new_block.
We ask that downstream libraries use this rather than any fully-internal
APIs, including but not limited to:
- core.internals.blocks.make_block
- Block.make_block
- Block.make_block_same_class
- Block.__init__
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
values, dtype = extract_pandas_array(values, dtype, ndim)
if klass is None:
dtype = dtype or values.dtype
klass = get_block_type(values, dtype)
elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype):
# pyarrow calls get here
values = DatetimeArray._simple_new(values, dtype=dtype)
if not isinstance(placement, BlockPlacement):
placement = BlockPlacement(placement)
ndim = maybe_infer_ndim(values, placement, ndim)
if is_datetime64tz_dtype(values.dtype):
# GH#41168 ensure we can pass 1D dt64tz values
values = extract_array(values, extract_numpy=True)
values = ensure_block_shape(values, ndim)
check_ndim(values, placement, ndim)
values = maybe_coerce_values(values)
return klass(values, ndim=ndim, placement=placement)
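# Example (a minimal sketch; the values and placement below are illustrative only,
# not part of pandas itself):
#
#     blk = make_block(np.arange(6).reshape(2, 3), placement=[0, 1], ndim=2)
#
# With klass=None the Block subclass is inferred from the dtype, and the list
# placement is wrapped in a BlockPlacement before the block is constructed.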
def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:
"""
If `ndim` is not provided, infer it from placement and values.
"""
if ndim is None:
# GH#38134 Block constructor now assumes ndim is not None
if not isinstance(values.dtype, np.dtype):
if len(placement) != 1:
ndim = 1
else:
ndim = 2
else:
ndim = values.ndim
return ndim
avg_line_length: 29.370787 | max_line_length: 82 | alphanum_fraction: 0.67062

hexsha: d86bb1ae754832790703d6430562c7bd421c634e | size: 660 | ext: py | lang: Python
max_stars:  path: langford/langford.py | repo: tlocke/geocache | head: 53add754625d00779f676671ea595dc660fa0e82 | licenses: ["MIT"] | count: null | dates: null
max_issues: path: langford/langford.py | repo: tlocke/geocache | head: 53add754625d00779f676671ea595dc660fa0e82 | licenses: ["MIT"] | count: null | dates: null
max_forks:  path: langford/langford.py | repo: tlocke/geocache | head: 53add754625d00779f676671ea595dc660fa0e82 | licenses: ["MIT"] | count: null | dates: null

sols = []
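# Langford pairing search: each number k in 1..n appears exactly twice, with exactly
# k other numbers between its two occurrences. `sols` collects every sequence found;
# `langford` extends a partial sequence one number at a time and prunes a branch as
# soon as a number's second occurrence lands at the wrong distance from its first.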
def langford(remain, sofar):
try:
c = sofar[-1]
idx = sofar[:-1].index(c)
if len(sofar) - idx - 2 != c:
return
except ValueError:
pass
except IndexError:
pass
if sum(remain.values()) == 0:
sols.append(sofar)
print("found sol", sofar)
else:
for k, v in remain.items():
if v != 0:
remain_i = remain.copy()
remain_i[k] -= 1
langford(remain_i, sofar + (k,))
n = 8
remain = dict((i, 2) for i in range(1, n + 1))
langford(remain, ())
for sol in sols:
print(sol, '\n')
print(len(sols), '\n')
avg_line_length: 20.625 | max_line_length: 48 | alphanum_fraction: 0.477273

hexsha: 89deace523bb9bafdda63049940ddf36279c9bc6 | size: 649 | ext: py | lang: Python
max_stars:  path: migrations/versions/2bc31ea34c8f_.py | repo: bestwishfang/FrameWork | head: 99f5a0316335e9df7b9da7b2af092cc735c7473c | licenses: ["MIT"] | count: null | dates: null
max_issues: path: migrations/versions/2bc31ea34c8f_.py | repo: bestwishfang/FrameWork | head: 99f5a0316335e9df7b9da7b2af092cc735c7473c | licenses: ["MIT"] | count: null | dates: null
max_forks:  path: migrations/versions/2bc31ea34c8f_.py | repo: bestwishfang/FrameWork | head: 99f5a0316335e9df7b9da7b2af092cc735c7473c | licenses: ["MIT"] | count: null | dates: null

"""empty message
Revision ID: 2bc31ea34c8f
Revises: 016b9025f91a
Create Date: 2021-02-05 09:34:20.910591
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2bc31ea34c8f'
down_revision = '016b9025f91a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('c_time', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'c_time')
# ### end Alembic commands ###
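# General note (not a command taken from this repository): a revision like this is
# typically applied with `alembic upgrade head` and reverted with `alembic downgrade -1`.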
avg_line_length: 22.37931 | max_line_length: 76 | alphanum_fraction: 0.688752

hexsha: e27294db4ec2e8ec8d2c7bf0551f007bf3a50bb1 | size: 53,516 | ext: py | lang: Python
max_stars:  path: test/orm/test_events.py | repo: Cito/sqlalchemy | head: c4dede6e7c1420aacc54c9c326bf3a834dff45c7 | licenses: ["MIT"] | count: 1 | dates: 2021-02-22T15:00:47.000Z to 2021-02-22T15:00:47.000Z
max_issues: path: test/orm/test_events.py | repo: xqzhou/sqlalchemy | head: e21cd0d95fb6cdcb4e10ea78abd5626bb92c37c3 | licenses: ["MIT"] | count: null | dates: null
max_forks:  path: test/orm/test_events.py | repo: xqzhou/sqlalchemy | head: e21cd0d95fb6cdcb4e10ea78abd5626bb92c37c3 | licenses: ["MIT"] | count: null | dates: null

from sqlalchemy.testing import assert_raises_message, assert_raises
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, \
create_session, class_mapper, \
Mapper, column_property, \
Session, sessionmaker, attributes
from sqlalchemy.orm.instrumentation import ClassManager
from sqlalchemy.orm import instrumentation, events
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.util import gc_collect
from test.orm import _fixtures
from sqlalchemy import event
from sqlalchemy.testing.mock import Mock, call
class _RemoveListeners(object):
def teardown(self):
events.MapperEvents._clear()
events.InstanceEvents._clear()
events.SessionEvents._clear()
events.InstrumentationEvents._clear()
super(_RemoveListeners, self).teardown()
class MapperEventsTest(_RemoveListeners, _fixtures.FixtureTest):
run_inserts = None
def test_instance_event_listen(self):
"""test listen targets for instance events"""
users, addresses = self.tables.users, self.tables.addresses
canary = []
class A(object):
pass
class B(A):
pass
mapper(A, users)
mapper(B, addresses, inherits=A)
def init_a(target, args, kwargs):
canary.append(('init_a', target))
def init_b(target, args, kwargs):
canary.append(('init_b', target))
def init_c(target, args, kwargs):
canary.append(('init_c', target))
def init_d(target, args, kwargs):
canary.append(('init_d', target))
def init_e(target, args, kwargs):
canary.append(('init_e', target))
event.listen(mapper, 'init', init_a)
event.listen(Mapper, 'init', init_b)
event.listen(class_mapper(A), 'init', init_c)
event.listen(A, 'init', init_d)
event.listen(A, 'init', init_e, propagate=True)
a = A()
eq_(canary, [('init_a', a), ('init_b', a),
('init_c', a), ('init_d', a), ('init_e', a)])
# test propagate flag
canary[:] = []
b = B()
eq_(canary, [('init_a', b), ('init_b', b), ('init_e', b)])
def listen_all(self, mapper, **kw):
canary = []
def evt(meth):
def go(*args, **kwargs):
canary.append(meth)
return go
for meth in [
'init',
'init_failure',
'translate_row',
'create_instance',
'append_result',
'populate_instance',
'load',
'refresh',
'expire',
'before_insert',
'after_insert',
'before_update',
'after_update',
'before_delete',
'after_delete'
]:
event.listen(mapper, meth, evt(meth), **kw)
return canary
def test_listen_doesnt_force_compile(self):
User, users = self.classes.User, self.tables.users
m = mapper(User, users, properties={
'addresses': relationship(lambda: ImNotAClass)
})
event.listen(User, "before_insert", lambda *a, **kw: None)
assert not m.configured
def test_basic(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
canary = self.listen_all(User)
sess = create_session()
u = User(name='u1')
sess.add(u)
sess.flush()
sess.expire(u)
u = sess.query(User).get(u.id)
sess.expunge_all()
u = sess.query(User).get(u.id)
u.name = 'u1 changed'
sess.flush()
sess.delete(u)
sess.flush()
eq_(canary,
['init', 'before_insert',
'after_insert', 'expire', 'translate_row',
'populate_instance', 'refresh',
'append_result', 'translate_row', 'create_instance',
'populate_instance', 'load', 'append_result',
'before_update', 'after_update', 'before_delete',
'after_delete'])
def test_merge(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
canary = []
def load(obj, ctx):
canary.append('load')
event.listen(mapper, 'load', load)
s = Session()
u = User(name='u1')
s.add(u)
s.commit()
s = Session()
u2 = s.merge(u)
s = Session()
u2 = s.merge(User(name='u2'))
s.commit()
s.query(User).first()
eq_(canary, ['load', 'load', 'load'])
def test_inheritance(self):
users, addresses, User = (self.tables.users,
self.tables.addresses,
self.classes.User)
class AdminUser(User):
pass
mapper(User, users)
mapper(AdminUser, addresses, inherits=User)
canary1 = self.listen_all(User, propagate=True)
canary2 = self.listen_all(User)
canary3 = self.listen_all(AdminUser)
sess = create_session()
am = AdminUser(name='au1', email_address='au1@e1')
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = 'au1 changed'
sess.flush()
sess.delete(am)
sess.flush()
eq_(canary1, ['init', 'before_insert', 'after_insert',
'translate_row', 'populate_instance','refresh',
'append_result', 'translate_row', 'create_instance'
, 'populate_instance', 'load', 'append_result',
'before_update', 'after_update', 'before_delete',
'after_delete'])
eq_(canary2, [])
eq_(canary3, ['init', 'before_insert', 'after_insert',
'translate_row', 'populate_instance','refresh',
'append_result', 'translate_row', 'create_instance'
, 'populate_instance', 'load', 'append_result',
'before_update', 'after_update', 'before_delete',
'after_delete'])
def test_inheritance_subclass_deferred(self):
users, addresses, User = (self.tables.users,
self.tables.addresses,
self.classes.User)
mapper(User, users)
canary1 = self.listen_all(User, propagate=True)
canary2 = self.listen_all(User)
class AdminUser(User):
pass
mapper(AdminUser, addresses, inherits=User)
canary3 = self.listen_all(AdminUser)
sess = create_session()
am = AdminUser(name='au1', email_address='au1@e1')
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = 'au1 changed'
sess.flush()
sess.delete(am)
sess.flush()
eq_(canary1, ['init', 'before_insert', 'after_insert',
'translate_row', 'populate_instance','refresh',
'append_result', 'translate_row', 'create_instance'
, 'populate_instance', 'load', 'append_result',
'before_update', 'after_update', 'before_delete',
'after_delete'])
eq_(canary2, [])
eq_(canary3, ['init', 'before_insert', 'after_insert',
'translate_row', 'populate_instance','refresh',
'append_result', 'translate_row', 'create_instance'
, 'populate_instance', 'load', 'append_result',
'before_update', 'after_update', 'before_delete',
'after_delete'])
def test_before_after_only_collection(self):
"""before_update is called on parent for collection modifications,
after_update is called even if no columns were updated.
"""
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Item, items, properties={
'keywords': relationship(Keyword, secondary=item_keywords)})
mapper(Keyword, keywords)
canary1 = self.listen_all(Item)
canary2 = self.listen_all(Keyword)
sess = create_session()
i1 = Item(description="i1")
k1 = Keyword(name="k1")
sess.add(i1)
sess.add(k1)
sess.flush()
eq_(canary1,
['init',
'before_insert', 'after_insert'])
eq_(canary2,
['init',
'before_insert', 'after_insert'])
canary1[:]= []
canary2[:]= []
i1.keywords.append(k1)
sess.flush()
eq_(canary1, ['before_update', 'after_update'])
eq_(canary2, [])
def test_retval(self):
User, users = self.classes.User, self.tables.users
def create_instance(mapper, context, row, class_):
u = User.__new__(User)
u.foo = True
return u
mapper(User, users)
event.listen(User, 'create_instance', create_instance, retval=True)
sess = create_session()
u1 = User()
u1.name = 'ed'
sess.add(u1)
sess.flush()
sess.expunge_all()
u = sess.query(User).first()
assert u.foo
def test_instrument_event(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
canary = []
def instrument_class(mapper, cls):
canary.append(cls)
event.listen(Mapper, 'instrument_class', instrument_class)
mapper(User, users)
eq_(canary, [User])
mapper(Address, addresses)
eq_(canary, [User, Address])
class DeclarativeEventListenTest(_RemoveListeners, fixtures.DeclarativeMappedTest):
run_setup_classes = "each"
run_deletes = None
def test_inheritance_propagate_after_config(self):
# test [ticket:2949]
class A(self.DeclarativeBasic):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
class B(A):
pass
listen = Mock()
event.listen(self.DeclarativeBasic, "load", listen, propagate=True)
class C(B):
pass
m1 = A.__mapper__.class_manager
m2 = B.__mapper__.class_manager
m3 = C.__mapper__.class_manager
a1 = A()
b1 = B()
c1 = C()
m3.dispatch.load(c1._sa_instance_state, "c")
m2.dispatch.load(b1._sa_instance_state, "b")
m1.dispatch.load(a1._sa_instance_state, "a")
eq_(
listen.mock_calls,
[call(c1, "c"), call(b1, "b"), call(a1, "a")]
)
class DeferredMapperEventsTest(_RemoveListeners, _fixtures.FixtureTest):
""""test event listeners against unmapped classes.
This incurs special logic. Note if we ever do the "remove" case,
it has to get all of these, too.
"""
run_inserts = None
def test_deferred_map_event(self):
"""
1. mapper event listen on class
2. map class
3. event fire should receive event
"""
users, User = (self.tables.users,
self.classes.User)
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, raw=True)
m = mapper(User, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [5])
def test_deferred_map_event_subclass_propagate(self):
"""
1. mapper event listen on class, w propagate
2. map only subclass of class
3. event fire should receive event
"""
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
class SubSubUser(SubUser):
pass
canary = Mock()
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", canary, propagate=True, raw=True)
m = mapper(SubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary.mock_calls,
[call(5, 6, 7)])
m2 = mapper(SubSubUser, users)
m2.dispatch.before_insert(8, 9, 10)
eq_(canary.mock_calls,
[call(5, 6, 7), call(8, 9, 10)])
def test_deferred_map_event_subclass_no_propagate(self):
"""
1. mapper event listen on class, w/o propagate
2. map only subclass of class
3. event fire should not receive event
"""
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, propagate=False)
m = mapper(SubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [])
def test_deferred_map_event_subclass_post_mapping_propagate(self):
"""
1. map only subclass of class
2. mapper event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
m = mapper(SubUser, users)
canary = []
def evt(x, y, z):
canary.append(x)
event.listen(User, "before_insert", evt, propagate=True, raw=True)
m.dispatch.before_insert(5, 6, 7)
eq_(canary, [5])
def test_deferred_map_event_subclass_post_mapping_propagate_two(self):
"""
1. map only subclass of class
2. mapper event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
class SubSubUser(SubUser):
pass
m = mapper(SubUser, users)
canary = Mock()
event.listen(User, "before_insert", canary, propagate=True, raw=True)
m2 = mapper(SubSubUser, users)
m.dispatch.before_insert(5, 6, 7)
eq_(canary.mock_calls, [call(5, 6, 7)])
m2.dispatch.before_insert(8, 9, 10)
eq_(canary.mock_calls, [call(5, 6, 7), call(8, 9, 10)])
def test_deferred_instance_event_subclass_post_mapping_propagate(self):
"""
1. map only subclass of class
2. instance event listen on class, w propagate
3. event fire should receive event
"""
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
m = mapper(SubUser, users)
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=True, raw=True)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
def test_deferred_instance_event_plain(self):
"""
1. instance event listen on class, w/o propagate
2. map class
3. event fire should receive event
"""
users, User = (self.tables.users,
self.classes.User)
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, raw=True)
m = mapper(User, users)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
def test_deferred_instance_event_subclass_propagate_subclass_only(self):
"""
1. instance event listen on class, w propagate
2. map two subclasses of class
3. event fire on each class should receive one and only one event
"""
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
class SubUser2(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=True, raw=True)
m = mapper(SubUser, users)
m2 = mapper(SubUser2, users)
m.class_manager.dispatch.load(5)
eq_(canary, [5])
m2.class_manager.dispatch.load(5)
eq_(canary, [5, 5])
def test_deferred_instance_event_subclass_propagate_baseclass(self):
"""
1. instance event listen on class, w propagate
2. map one subclass of class, map base class, leave 2nd subclass unmapped
3. event fire on sub should receive one and only one event
4. event fire on base should receive one and only one event
5. map 2nd subclass
6. event fire on 2nd subclass should receive one and only one event
"""
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
class SubUser2(User):
pass
canary = Mock()
event.listen(User, "load", canary, propagate=True, raw=False)
# reversing these fixes....
m = mapper(SubUser, users)
m2 = mapper(User, users)
instance = Mock()
m.class_manager.dispatch.load(instance)
eq_(canary.mock_calls, [call(instance.obj())])
m2.class_manager.dispatch.load(instance)
eq_(canary.mock_calls, [call(instance.obj()), call(instance.obj())])
m3 = mapper(SubUser2, users)
m3.class_manager.dispatch.load(instance)
eq_(canary.mock_calls, [call(instance.obj()),
call(instance.obj()), call(instance.obj())])
def test_deferred_instance_event_subclass_no_propagate(self):
"""
1. instance event listen on class, w/o propagate
2. map subclass
3. event fire on subclass should not receive event
"""
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "load", evt, propagate=False)
m = mapper(SubUser, users)
m.class_manager.dispatch.load(5)
eq_(canary, [])
def test_deferred_instrument_event(self):
users, User = (self.tables.users,
self.classes.User)
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(User)
eq_(canary, [User])
def test_isolation_instrument_event(self):
users, User = (self.tables.users,
self.classes.User)
class Bar(object):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(Bar, "attribute_instrument", evt)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(User)
eq_(canary, [])
@testing.requires.predictable_gc
def test_instrument_event_auto_remove(self):
class Bar(object):
pass
dispatch = instrumentation._instrumentation_factory.dispatch
assert not dispatch.attribute_instrument
event.listen(Bar, "attribute_instrument", lambda: None)
eq_(len(dispatch.attribute_instrument), 1)
del Bar
gc_collect()
assert not dispatch.attribute_instrument
def test_deferred_instrument_event_subclass_propagate(self):
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt, propagate=True)
instrumentation._instrumentation_factory.dispatch.\
attribute_instrument(SubUser)
eq_(canary, [SubUser])
def test_deferred_instrument_event_subclass_no_propagate(self):
users, User = (self.tables.users,
self.classes.User)
class SubUser(User):
pass
canary = []
def evt(x):
canary.append(x)
event.listen(User, "attribute_instrument", evt, propagate=False)
mapper(SubUser, users)
instrumentation._instrumentation_factory.dispatch.attribute_instrument(5)
eq_(canary, [])
class LoadTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
mapper(User, users)
def _fixture(self):
User = self.classes.User
canary = []
def load(target, ctx):
canary.append("load")
def refresh(target, ctx, attrs):
canary.append(("refresh", attrs))
event.listen(User, "load", load)
event.listen(User, "refresh", refresh)
return canary
def test_just_loaded(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.commit()
sess.close()
sess.query(User).first()
eq_(canary, ['load'])
def test_repeated_rows(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.commit()
sess.close()
sess.query(User).union_all(sess.query(User)).all()
eq_(canary, ['load'])
class RemovalTest(_fixtures.FixtureTest):
run_inserts = None
def test_attr_propagated(self):
User = self.classes.User
users, addresses, User = (self.tables.users,
self.tables.addresses,
self.classes.User)
class AdminUser(User):
pass
mapper(User, users)
mapper(AdminUser, addresses, inherits=User)
fn = Mock()
event.listen(User.name, "set", fn, propagate=True)
au = AdminUser()
au.name = 'ed'
eq_(fn.call_count, 1)
event.remove(User.name, "set", fn)
au.name = 'jack'
eq_(fn.call_count, 1)
def test_unmapped_listen(self):
users = self.tables.users
class Foo(object):
pass
fn = Mock()
event.listen(Foo, "before_insert", fn, propagate=True)
class User(Foo):
pass
m = mapper(User, users)
u1 = User()
m.dispatch.before_insert(m, None, attributes.instance_state(u1))
eq_(fn.call_count, 1)
event.remove(Foo, "before_insert", fn)
# existing event is removed
m.dispatch.before_insert(m, None, attributes.instance_state(u1))
eq_(fn.call_count, 1)
# the _HoldEvents is also cleaned out
class Bar(Foo):
pass
m = mapper(Bar, users)
b1 = Bar()
m.dispatch.before_insert(m, None, attributes.instance_state(b1))
eq_(fn.call_count, 1)
def test_instance_event_listen_on_cls_before_map(self):
users = self.tables.users
fn = Mock()
class User(object):
pass
event.listen(User, "load", fn)
m = mapper(User, users)
u1 = User()
m.class_manager.dispatch.load(u1._sa_instance_state, "u1")
event.remove(User, "load", fn)
m.class_manager.dispatch.load(u1._sa_instance_state, "u2")
eq_(fn.mock_calls, [call(u1, "u1")])
class RefreshTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
mapper(User, users)
def _fixture(self):
User = self.classes.User
canary = []
def load(target, ctx):
canary.append("load")
def refresh(target, ctx, attrs):
canary.append(("refresh", attrs))
event.listen(User, "load", load)
event.listen(User, "refresh", refresh)
return canary
def test_already_present(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.flush()
sess.query(User).first()
eq_(canary, [])
def test_repeated_rows(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.commit()
sess.query(User).union_all(sess.query(User)).all()
eq_(canary, [('refresh', set(['id','name']))])
def test_via_refresh_state(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.commit()
u1.name
eq_(canary, [('refresh', set(['id','name']))])
def test_was_expired(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.flush()
sess.expire(u1)
sess.query(User).first()
eq_(canary, [('refresh', set(['id','name']))])
def test_was_expired_via_commit(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.commit()
sess.query(User).first()
eq_(canary, [('refresh', set(['id','name']))])
def test_was_expired_attrs(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.flush()
sess.expire(u1, ['name'])
sess.query(User).first()
eq_(canary, [('refresh', set(['name']))])
def test_populate_existing(self):
User = self.classes.User
canary = self._fixture()
sess = Session()
u1 = User(name='u1')
sess.add(u1)
sess.commit()
sess.query(User).populate_existing().first()
eq_(canary, [('refresh', None)])
class SessionEventsTest(_RemoveListeners, _fixtures.FixtureTest):
run_inserts = None
def test_class_listen(self):
def my_listener(*arg, **kw):
pass
event.listen(Session, 'before_flush', my_listener)
s = Session()
assert my_listener in s.dispatch.before_flush
def test_sessionmaker_listen(self):
"""test that listen can be applied to individual
scoped_session() classes."""
def my_listener_one(*arg, **kw):
pass
def my_listener_two(*arg, **kw):
pass
S1 = sessionmaker()
S2 = sessionmaker()
event.listen(Session, 'before_flush', my_listener_one)
event.listen(S1, 'before_flush', my_listener_two)
s1 = S1()
assert my_listener_one in s1.dispatch.before_flush
assert my_listener_two in s1.dispatch.before_flush
s2 = S2()
assert my_listener_one in s2.dispatch.before_flush
assert my_listener_two not in s2.dispatch.before_flush
def test_scoped_session_invalid_callable(self):
from sqlalchemy.orm import scoped_session
def my_listener_one(*arg, **kw):
pass
scope = scoped_session(lambda:Session())
assert_raises_message(
sa.exc.ArgumentError,
"Session event listen on a scoped_session requires that its "
"creation callable is associated with the Session class.",
event.listen, scope, "before_flush", my_listener_one
)
def test_scoped_session_invalid_class(self):
from sqlalchemy.orm import scoped_session
def my_listener_one(*arg, **kw):
pass
class NotASession(object):
def __call__(self):
return Session()
scope = scoped_session(NotASession)
assert_raises_message(
sa.exc.ArgumentError,
"Session event listen on a scoped_session requires that its "
"creation callable is associated with the Session class.",
event.listen, scope, "before_flush", my_listener_one
)
def test_scoped_session_listen(self):
from sqlalchemy.orm import scoped_session
def my_listener_one(*arg, **kw):
pass
scope = scoped_session(sessionmaker())
event.listen(scope, "before_flush", my_listener_one)
assert my_listener_one in scope().dispatch.before_flush
def _listener_fixture(self, **kw):
canary = []
def listener(name):
def go(*arg, **kw):
canary.append(name)
return go
sess = Session(**kw)
for evt in [
'after_transaction_create',
'after_transaction_end',
'before_commit',
'after_commit',
'after_rollback',
'after_soft_rollback',
'before_flush',
'after_flush',
'after_flush_postexec',
'after_begin',
'before_attach',
'after_attach',
'after_bulk_update',
'after_bulk_delete'
]:
event.listen(sess, evt, listener(evt))
return sess, canary
def test_flush_autocommit_hook(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess, canary = self._listener_fixture(autoflush=False,
autocommit=True, expire_on_commit=False)
u = User(name='u1')
sess.add(u)
sess.flush()
eq_(
canary,
[ 'before_attach', 'after_attach', 'before_flush',
'after_transaction_create', 'after_begin',
'after_flush', 'after_flush_postexec',
'before_commit', 'after_commit','after_transaction_end']
)
def test_rollback_hook(self):
User, users = self.classes.User, self.tables.users
sess, canary = self._listener_fixture()
mapper(User, users)
u = User(name='u1', id=1)
sess.add(u)
sess.commit()
u2 = User(name='u1', id=1)
sess.add(u2)
assert_raises(
sa.orm.exc.FlushError,
sess.commit
)
sess.rollback()
eq_(canary,
['before_attach', 'after_attach', 'before_commit', 'before_flush',
'after_transaction_create', 'after_begin', 'after_flush',
'after_flush_postexec', 'after_transaction_end', 'after_commit',
'after_transaction_end', 'after_transaction_create',
'before_attach', 'after_attach', 'before_commit',
'before_flush', 'after_transaction_create', 'after_begin', 'after_rollback',
'after_transaction_end',
'after_soft_rollback', 'after_transaction_end','after_transaction_create',
'after_soft_rollback'])
def test_can_use_session_in_outer_rollback_hook(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = Session()
assertions = []
@event.listens_for(sess, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
assertions.append('name' not in u.__dict__)
assertions.append(u.name == 'u1')
u = User(name='u1', id=1)
sess.add(u)
sess.commit()
u2 = User(name='u1', id=1)
sess.add(u2)
assert_raises(
sa.orm.exc.FlushError,
sess.commit
)
sess.rollback()
eq_(assertions, [True, True])
def test_flush_noautocommit_hook(self):
User, users = self.classes.User, self.tables.users
sess, canary = self._listener_fixture()
mapper(User, users)
u = User(name='u1')
sess.add(u)
sess.flush()
eq_(canary, ['before_attach', 'after_attach', 'before_flush',
'after_transaction_create', 'after_begin',
'after_flush', 'after_flush_postexec',
'after_transaction_end'])
def test_flush_in_commit_hook(self):
User, users = self.classes.User, self.tables.users
sess, canary = self._listener_fixture()
mapper(User, users)
u = User(name='u1')
sess.add(u)
sess.flush()
canary[:] = []
u.name = 'ed'
sess.commit()
eq_(canary, ['before_commit', 'before_flush', 'after_transaction_create', 'after_flush',
'after_flush_postexec',
'after_transaction_end',
'after_commit',
'after_transaction_end', 'after_transaction_create',])
def test_state_before_attach(self):
User, users = self.classes.User, self.tables.users
sess = Session()
@event.listens_for(sess, "before_attach")
def listener(session, inst):
state = attributes.instance_state(inst)
if state.key:
assert state.key not in session.identity_map
else:
assert inst not in session.new
mapper(User, users)
u= User(name='u1')
sess.add(u)
sess.flush()
sess.expunge(u)
sess.add(u)
def test_state_after_attach(self):
User, users = self.classes.User, self.tables.users
sess = Session()
@event.listens_for(sess, "after_attach")
def listener(session, inst):
state = attributes.instance_state(inst)
if state.key:
assert session.identity_map[state.key] is inst
else:
assert inst in session.new
mapper(User, users)
u= User(name='u1')
sess.add(u)
sess.flush()
sess.expunge(u)
sess.add(u)
def test_standalone_on_commit_hook(self):
sess, canary = self._listener_fixture()
sess.commit()
eq_(canary, ['before_commit', 'after_commit',
'after_transaction_end',
'after_transaction_create'])
def test_on_bulk_update_hook(self):
User, users = self.classes.User, self.tables.users
sess = Session()
canary = Mock()
event.listen(sess, "after_begin", canary.after_begin)
event.listen(sess, "after_bulk_update", canary.after_bulk_update)
def legacy(ses, qry, ctx, res):
canary.after_bulk_update_legacy(ses, qry, ctx, res)
event.listen(sess, "after_bulk_update", legacy)
mapper(User, users)
sess.query(User).update({'name': 'foo'})
eq_(
canary.after_begin.call_count,
1
)
eq_(
canary.after_bulk_update.call_count,
1
)
upd = canary.after_bulk_update.mock_calls[0][1][0]
eq_(
upd.session,
sess
)
eq_(
canary.after_bulk_update_legacy.mock_calls,
[call(sess, upd.query, upd.context, upd.result)]
)
def test_on_bulk_delete_hook(self):
User, users = self.classes.User, self.tables.users
sess = Session()
canary = Mock()
event.listen(sess, "after_begin", canary.after_begin)
event.listen(sess, "after_bulk_delete", canary.after_bulk_delete)
def legacy(ses, qry, ctx, res):
canary.after_bulk_delete_legacy(ses, qry, ctx, res)
event.listen(sess, "after_bulk_delete", legacy)
mapper(User, users)
sess.query(User).delete()
eq_(
canary.after_begin.call_count,
1
)
eq_(
canary.after_bulk_delete.call_count,
1
)
upd = canary.after_bulk_delete.mock_calls[0][1][0]
eq_(
upd.session,
sess
)
eq_(
canary.after_bulk_delete_legacy.mock_calls,
[call(sess, upd.query, upd.context, upd.result)]
)
def test_connection_emits_after_begin(self):
sess, canary = self._listener_fixture(bind=testing.db)
conn = sess.connection()
eq_(canary, ['after_begin'])
def test_reentrant_flush(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
def before_flush(session, flush_context, objects):
session.flush()
sess = Session()
event.listen(sess, 'before_flush', before_flush)
sess.add(User(name='foo'))
assert_raises_message(sa.exc.InvalidRequestError,
'already flushing', sess.flush)
def test_before_flush_affects_flush_plan(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
def before_flush(session, flush_context, objects):
for obj in list(session.new) + list(session.dirty):
if isinstance(obj, User):
session.add(User(name='another %s' % obj.name))
for obj in list(session.deleted):
if isinstance(obj, User):
x = session.query(User).filter(User.name
== 'another %s' % obj.name).one()
session.delete(x)
sess = Session()
event.listen(sess, 'before_flush', before_flush)
u = User(name='u1')
sess.add(u)
sess.flush()
eq_(sess.query(User).order_by(User.name).all(),
[
User(name='another u1'),
User(name='u1')
]
)
sess.flush()
eq_(sess.query(User).order_by(User.name).all(),
[
User(name='another u1'),
User(name='u1')
]
)
u.name='u2'
sess.flush()
eq_(sess.query(User).order_by(User.name).all(),
[
User(name='another u1'),
User(name='another u2'),
User(name='u2')
]
)
sess.delete(u)
sess.flush()
eq_(sess.query(User).order_by(User.name).all(),
[
User(name='another u1'),
]
)
def test_before_flush_affects_dirty(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
def before_flush(session, flush_context, objects):
for obj in list(session.identity_map.values()):
obj.name += " modified"
sess = Session(autoflush=True)
event.listen(sess, 'before_flush', before_flush)
u = User(name='u1')
sess.add(u)
sess.flush()
eq_(sess.query(User).order_by(User.name).all(),
[User(name='u1')]
)
sess.add(User(name='u2'))
sess.flush()
sess.expunge_all()
eq_(sess.query(User).order_by(User.name).all(),
[
User(name='u1 modified'),
User(name='u2')
]
)
class MapperExtensionTest(_fixtures.FixtureTest):
"""Superseded by MapperEventsTest - test backwards
compatibility of MapperExtension."""
run_inserts = None
def extension(self):
methods = []
class Ext(sa.orm.MapperExtension):
def instrument_class(self, mapper, cls):
methods.append('instrument_class')
return sa.orm.EXT_CONTINUE
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
methods.append('init_instance')
return sa.orm.EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
methods.append('init_failed')
return sa.orm.EXT_CONTINUE
def translate_row(self, mapper, context, row):
methods.append('translate_row')
return sa.orm.EXT_CONTINUE
def create_instance(self, mapper, selectcontext, row, class_):
methods.append('create_instance')
return sa.orm.EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
methods.append('reconstruct_instance')
return sa.orm.EXT_CONTINUE
def append_result(self, mapper, selectcontext, row, instance, result, **flags):
methods.append('append_result')
return sa.orm.EXT_CONTINUE
def populate_instance(self, mapper, selectcontext, row, instance, **flags):
methods.append('populate_instance')
return sa.orm.EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
methods.append('before_insert')
return sa.orm.EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
methods.append('after_insert')
return sa.orm.EXT_CONTINUE
def before_update(self, mapper, connection, instance):
methods.append('before_update')
return sa.orm.EXT_CONTINUE
def after_update(self, mapper, connection, instance):
methods.append('after_update')
return sa.orm.EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
methods.append('before_delete')
return sa.orm.EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
methods.append('after_delete')
return sa.orm.EXT_CONTINUE
return Ext, methods
def test_basic(self):
"""test that common user-defined methods get called."""
User, users = self.classes.User, self.tables.users
Ext, methods = self.extension()
mapper(User, users, extension=Ext())
sess = create_session()
u = User(name='u1')
sess.add(u)
sess.flush()
u = sess.query(User).populate_existing().get(u.id)
sess.expunge_all()
u = sess.query(User).get(u.id)
u.name = 'u1 changed'
sess.flush()
sess.delete(u)
sess.flush()
eq_(methods,
['instrument_class', 'init_instance', 'before_insert',
'after_insert', 'translate_row', 'populate_instance',
'append_result', 'translate_row', 'create_instance',
'populate_instance', 'reconstruct_instance', 'append_result',
'before_update', 'after_update', 'before_delete', 'after_delete'])
def test_inheritance(self):
users, addresses, User = (self.tables.users,
self.tables.addresses,
self.classes.User)
Ext, methods = self.extension()
class AdminUser(User):
pass
mapper(User, users, extension=Ext())
mapper(AdminUser, addresses, inherits=User)
sess = create_session()
am = AdminUser(name='au1', email_address='au1@e1')
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = 'au1 changed'
sess.flush()
sess.delete(am)
sess.flush()
eq_(methods,
['instrument_class', 'instrument_class', 'init_instance',
'before_insert', 'after_insert', 'translate_row',
'populate_instance', 'append_result', 'translate_row',
'create_instance', 'populate_instance', 'reconstruct_instance',
'append_result', 'before_update', 'after_update', 'before_delete',
'after_delete'])
def test_before_after_only_collection(self):
"""before_update is called on parent for collection modifications,
after_update is called even if no columns were updated.
"""
keywords, items, item_keywords, Keyword, Item = (self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
Ext1, methods1 = self.extension()
Ext2, methods2 = self.extension()
mapper(Item, items, extension=Ext1() , properties={
'keywords': relationship(Keyword, secondary=item_keywords)})
mapper(Keyword, keywords, extension=Ext2())
sess = create_session()
i1 = Item(description="i1")
k1 = Keyword(name="k1")
sess.add(i1)
sess.add(k1)
sess.flush()
eq_(methods1,
['instrument_class', 'init_instance',
'before_insert', 'after_insert'])
eq_(methods2,
['instrument_class', 'init_instance',
'before_insert', 'after_insert'])
del methods1[:]
del methods2[:]
i1.keywords.append(k1)
sess.flush()
eq_(methods1, ['before_update', 'after_update'])
eq_(methods2, [])
def test_inheritance_with_dupes(self):
"""Inheritance with the same extension instance on both mappers."""
users, addresses, User = (self.tables.users,
self.tables.addresses,
self.classes.User)
Ext, methods = self.extension()
class AdminUser(User):
pass
ext = Ext()
mapper(User, users, extension=ext)
mapper(AdminUser, addresses, inherits=User, extension=ext)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = 'au1 changed'
sess.flush()
sess.delete(am)
sess.flush()
eq_(methods,
['instrument_class', 'instrument_class', 'init_instance',
'before_insert', 'after_insert', 'translate_row',
'populate_instance', 'append_result', 'translate_row',
'create_instance', 'populate_instance', 'reconstruct_instance',
'append_result', 'before_update', 'after_update', 'before_delete',
'after_delete'])
def test_create_instance(self):
User, users = self.classes.User, self.tables.users
class CreateUserExt(sa.orm.MapperExtension):
def create_instance(self, mapper, selectcontext, row, class_):
return User.__new__(User)
mapper(User, users, extension=CreateUserExt())
sess = create_session()
u1 = User()
u1.name = 'ed'
sess.add(u1)
sess.flush()
sess.expunge_all()
assert sess.query(User).first()
def test_unnecessary_methods_not_evented(self):
users = self.tables.users
class MyExtension(sa.orm.MapperExtension):
def before_insert(self, mapper, connection, instance):
pass
class Foo(object):
pass
m = mapper(Foo, users, extension=MyExtension())
assert not m.class_manager.dispatch.load
assert not m.dispatch.before_update
assert len(m.dispatch.before_insert) == 1
class AttributeExtensionTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('t1',
metadata,
Column('id', Integer, primary_key=True),
Column('type', String(40)),
Column('data', String(50))
)
def test_cascading_extensions(self):
t1 = self.tables.t1
ext_msg = []
class Ex1(sa.orm.AttributeExtension):
def set(self, state, value, oldvalue, initiator):
ext_msg.append("Ex1 %r" % value)
return "ex1" + value
class Ex2(sa.orm.AttributeExtension):
def set(self, state, value, oldvalue, initiator):
ext_msg.append("Ex2 %r" % value)
return "ex2" + value
class A(fixtures.BasicEntity):
pass
class B(A):
pass
class C(B):
pass
mapper(A, t1, polymorphic_on=t1.c.type, polymorphic_identity='a', properties={
'data':column_property(t1.c.data, extension=Ex1())
})
mapper(B, polymorphic_identity='b', inherits=A)
mc = mapper(C, polymorphic_identity='c', inherits=B, properties={
'data':column_property(t1.c.data, extension=Ex2())
})
a1 = A(data='a1')
b1 = B(data='b1')
c1 = C(data='c1')
eq_(a1.data, 'ex1a1')
eq_(b1.data, 'ex1b1')
eq_(c1.data, 'ex2c1')
a1.data = 'a2'
b1.data='b2'
c1.data = 'c2'
eq_(a1.data, 'ex1a2')
eq_(b1.data, 'ex1b2')
eq_(c1.data, 'ex2c2')
eq_(ext_msg, ["Ex1 'a1'", "Ex1 'b1'", "Ex2 'c1'",
"Ex1 'a2'", "Ex1 'b2'", "Ex2 'c2'"])
class SessionExtensionTest(_fixtures.FixtureTest):
run_inserts = None
def test_extension(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
log = []
class MyExt(sa.orm.session.SessionExtension):
def before_commit(self, session):
log.append('before_commit')
def after_commit(self, session):
log.append('after_commit')
def after_rollback(self, session):
log.append('after_rollback')
def before_flush(self, session, flush_context, objects):
log.append('before_flush')
def after_flush(self, session, flush_context):
log.append('after_flush')
def after_flush_postexec(self, session, flush_context):
log.append('after_flush_postexec')
def after_begin(self, session, transaction, connection):
log.append('after_begin')
def after_attach(self, session, instance):
log.append('after_attach')
def after_bulk_update(
self,
session, query, query_context, result
):
log.append('after_bulk_update')
def after_bulk_delete(
self,
session, query, query_context, result
):
log.append('after_bulk_delete')
sess = create_session(extension = MyExt())
u = User(name='u1')
sess.add(u)
sess.flush()
assert log == [
'after_attach',
'before_flush',
'after_begin',
'after_flush',
'after_flush_postexec',
'before_commit',
'after_commit',
]
log = []
sess = create_session(autocommit=False, extension=MyExt())
u = User(name='u1')
sess.add(u)
sess.flush()
assert log == ['after_attach', 'before_flush', 'after_begin',
'after_flush', 'after_flush_postexec']
log = []
u.name = 'ed'
sess.commit()
assert log == ['before_commit', 'before_flush', 'after_flush',
'after_flush_postexec', 'after_commit']
log = []
sess.commit()
assert log == ['before_commit', 'after_commit']
log = []
sess.query(User).delete()
assert log == ['after_begin', 'after_bulk_delete']
log = []
sess.query(User).update({'name': 'foo'})
assert log == ['after_bulk_update']
log = []
sess = create_session(autocommit=False, extension=MyExt(),
bind=testing.db)
conn = sess.connection()
assert log == ['after_begin']
def test_multiple_extensions(self):
User, users = self.classes.User, self.tables.users
log = []
class MyExt1(sa.orm.session.SessionExtension):
def before_commit(self, session):
log.append('before_commit_one')
class MyExt2(sa.orm.session.SessionExtension):
def before_commit(self, session):
log.append('before_commit_two')
mapper(User, users)
sess = create_session(extension = [MyExt1(), MyExt2()])
u = User(name='u1')
sess.add(u)
sess.flush()
assert log == [
'before_commit_one',
'before_commit_two',
]
def test_unnecessary_methods_not_evented(self):
class MyExtension(sa.orm.session.SessionExtension):
def before_commit(self, session):
pass
s = Session(extension=MyExtension())
assert not s.dispatch.after_commit
assert len(s.dispatch.before_commit) == 1
avg_line_length: 29.615938 | max_line_length: 96 | alphanum_fraction: 0.560655

hexsha: 336d5f39291c302e648faafd72ed5d30ed611cae | size: 2,391 | ext: py | lang: Python
max_stars:  path: data_tools/train_test_split.py | repo: bsm8734/formula-image-latex-recognition | head: 86d5070e8f907571a47967d64facaee246d92a35 | licenses: ["MIT"] | count: 13 | dates: 2021-06-20T18:11:23.000Z to 2021-12-07T18:06:42.000Z
max_issues: path: data_tools/train_test_split.py | repo: bsm8734/formula-image-latex-recognition | head: 86d5070e8f907571a47967d64facaee246d92a35 | licenses: ["MIT"] | count: 9 | dates: 2021-06-16T14:55:07.000Z to 2021-06-23T14:45:36.000Z
max_forks:  path: data_tools/train_test_split.py | repo: bsm8734/formula-image-latex-recognition | head: 86d5070e8f907571a47967d64facaee246d92a35 | licenses: ["MIT"] | count: 6 | dates: 2021-06-17T15:16:50.000Z to 2021-07-05T20:41:26.000Z

import argparse
import csv
import os
import random
test_percent = 0.2
output_dir = "gt-split"
# Split the ground truth into train, test sets
def split_gt(groundtruth, test_percent=0.2, data_num=None):
"""Split the ground truth into train, test sets
Args:
groundtruth (text file) : ground truth file
test_percent (float) : represent the proportion of the dataset to include in the test split. Defaults to 0.2.
data_num (int) : represents the absolute number of test samples. Defaults to None.
Returns:
train dataset
test dataset
"""
with open(groundtruth, "r") as fd:
data = fd.read()
data = data.split('\n')
data = [x.split('\t') for x in data]
random.shuffle(data)
if data_num:
assert sum(data_num) < len(data)
return data[:data_num[0]], data[data_num[0]:data_num[0] + data_num[1]]
test_len = round(len(data) * test_percent)
return data[test_len:], data[:test_len] # train, test
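# Example (file name is illustrative): an 80/20 split, or fixed-size splits of
# 1000 train / 200 test samples (data_num overrides test_percent when given):
#
#     train, test = split_gt("gt.txt", test_percent=0.2)
#     train, test = split_gt("gt.txt", data_num=[1000, 200])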
def write_tsv(data, path):
with open(path, "w") as fd:
writer = csv.writer(fd, delimiter="\t")
writer.writerows(data)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--test-percent",
dest="test_percent",
default=test_percent,
type=float,
help="Percent of data to use for test [Default: {}]".format(test_percent)
)
parser.add_argument(
"-n",
"--data_num",
nargs=2,
type=int,
help="Number of train data and test data",
)
parser.add_argument(
"-i",
"--input",
dest="input",
required=True,
type=str,
help="Path to input ground truth file",
)
parser.add_argument(
"-o",
"--output-dir",
dest="output_dir",
default=output_dir,
type=str,
help="Directory to save the split ground truth files",
)
return parser.parse_args()
if __name__ == "__main__":
options = parse_args()
train_gt, test_gt = split_gt(options.input, options.test_percent, options.data_num)
if not os.path.exists(options.output_dir):
os.makedirs(options.output_dir)
write_tsv(train_gt, os.path.join(options.output_dir, "train.txt"))
write_tsv(test_gt, os.path.join(options.output_dir, "test.txt"))
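# Hypothetical invocation (the input path is a placeholder):
#
#     python train_test_split.py -i gt.txt -o gt-split -p 0.2
#
# writes the tab-separated splits to gt-split/train.txt and gt-split/test.txt.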
| 28.464286 | 117 | 0.609787 |

5284a605bbfd1c8189766fc2dbe6944f6f59ea34 | 875 | py | Python | Desafios/desafio94.py | gustavodoamaral/115_Desafios_Python | 8baa1c0353a40f7a63f442293bc0f6852fd94da0 | ["MIT"] | 1 | 2022-02-07T01:12:19.000Z | 2022-02-07T01:12:19.000Z | Desafios/desafio94.py | gustavodoamaral/desafios_python_gustavo_guanabara | 8baa1c0353a40f7a63f442293bc0f6852fd94da0 | ["MIT"] | null | null | null | Desafios/desafio94.py | gustavodoamaral/desafios_python_gustavo_guanabara | 8baa1c0353a40f7a63f442293bc0f6852fd94da0 | ["MIT"] | null | null | null |
pessoaslist = []
dicionario = {}
pessoastotal = 0
soma = media = 0
mulheres = []
acimamedia = []
while True:
    dicionario['Nome'] = str(input("Name: "))
    dicionario['Sexo'] = str(input("Sex: [M/F] "))
    dicionario['Idade'] = int(input("Age: "))
    resp = str(input("Continue? [Y/N] "))
pessoaslist.append(dicionario.copy())
pessoastotal += 1
dicionario.clear()
if resp == "N":
break
for i, v in enumerate(pessoaslist):
soma += pessoaslist[i]['Idade']
media = soma / pessoastotal
if v['Sexo'] == "F":
mulheres.append(v['Nome'])
print(f'- The group has {pessoastotal} people. \n- The average age is {media:.2f} years. \n- The women registered were {mulheres}')
for v in pessoaslist:
if v['Idade'] > media:
acimamedia.append(v)
print(f"- List of people above the average age: \n{acimamedia}")
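# Illustrative run (made-up input): for three people aged 20, 30 and 40 the
# script reports 3 people, an average age of 30.00, and only the person aged
# 40 appears in the above-average list (the comparison is strictly greater than).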
| 33.653846 | 136 | 0.616 |

be2e5e45f9e44ebb2aae9217dd4bd949f6b5369f | 6,484 | py | Python | train_nerf.py | matajoh/fourier_feature_nets | 784140f01464e34a0dd4b813c50d20c4c15a8a59 | ["MIT"] | 88 | 2021-11-24T09:22:43.000Z | 2022-03-28T20:34:51.000Z | train_nerf.py | matajoh/fourier_feature_nets | 784140f01464e34a0dd4b813c50d20c4c15a8a59 | ["MIT"] | 1 | 2022-01-04T18:13:18.000Z | 2022-01-25T09:36:52.000Z | train_nerf.py | matajoh/fourier_feature_nets | 784140f01464e34a0dd4b813c50d20c4c15a8a59 | ["MIT"] | 5 | 2021-11-27T13:48:22.000Z | 2022-03-28T20:35:24.000Z |
"""Script to train a full NeRF model."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import json
import os
import fourier_feature_nets as ffn
import numpy as np
import torch
def _parse_args():
parser = ArgumentParser("NeRF Training script",
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("data_path", help="Path to the data NPZ")
parser.add_argument("results_dir", help="Path to output results")
parser.add_argument("--mode",
choices=["rgba", "rgb", "dilate"],
default="rgba", help="Ray sampling mode.")
parser.add_argument("--opacity-model",
help="Path to the optional opacity model")
parser.add_argument("--num-samples", type=int, default=128,
help="Number of samples to take")
parser.add_argument("--resolution", type=int, default=400,
help="Ray sampling resolution")
parser.add_argument("--num-cameras", type=int, default=100,
help="Number of cameras")
parser.add_argument("--batch-size", type=int, default=1024)
parser.add_argument("--num-layers", type=int, default=8)
parser.add_argument("--learning-rate", type=float, default=5e-4)
parser.add_argument("--num-channels", type=int, default=256,
help="Number of channels in the MLP")
parser.add_argument("--pos-freq", type=int, default=10,
help="Number of frequencies used for encoding")
parser.add_argument("--pos-max-log-scale", type=float, default=9,
help="Value of sigma for the positional model")
parser.add_argument("--view-freq", type=int, default=4,
help="Number of frequencies used for encoding")
parser.add_argument("--view-max-log-scale", type=float, default=3,
help="Value of sigma for the positional model")
parser.add_argument("--num-steps", type=int, default=50000,
help="Number of steps to use for training.")
parser.add_argument("--report-interval", type=int, default=1000,
help="Interval for progress reports")
parser.add_argument("--image-interval", type=int, default=2000,
help="Image rendering interval")
parser.add_argument("--crop-steps", type=int, default=1000,
help="Number of steps to train on center crops")
parser.add_argument("--seed", type=int, default=20080524,
help="Manual seed for the RNG")
parser.add_argument("--omit-inputs", action="store_true",
help="Whether to omit inputs from the input vector")
parser.add_argument("--decay-rate", type=float, default=0.1,
help="Rate at which the learning rate decays")
parser.add_argument("--decay-steps", type=int, default=250000,
help="Interval over which the learning rate decays.")
parser.add_argument("--weight-decay", type=float, default=0,
help="Regularizer term for the weights.")
parser.add_argument("--make-video", action="store_true",
help="Whether to render frames for a training video.")
parser.add_argument("--color-space", choices=["YCrCb", "RGB"],
default="RGB",
help="Color space to use during training.")
parser.add_argument("--num-frames", type=int, default=200,
help="Number of frames in the training video orbit.")
parser.add_argument("--device", default="cuda",
help="Pytorch compute device")
return parser.parse_args()
def _main():
args = _parse_args()
torch.manual_seed(args.seed)
model = ffn.NeRF(args.num_layers, args.num_channels,
args.pos_max_log_scale, args.pos_freq,
args.view_max_log_scale, args.view_freq,
[4], not args.omit_inputs)
if args.opacity_model:
opacity_model = ffn.load_model(args.opacity_model)
if opacity_model is None:
return 1
opacity_model = opacity_model.to(args.device)
else:
opacity_model = None
include_alpha = args.mode == "rgba"
train_dataset = ffn.RayDataset.load(args.data_path, "train",
args.num_samples, include_alpha,
True, opacity_model,
args.batch_size, args.color_space)
val_dataset = ffn.RayDataset.load(args.data_path, "val",
args.num_samples, include_alpha,
False, opacity_model,
args.batch_size, args.color_space)
if train_dataset is None:
return 1
if args.make_video:
cameras = ffn.orbit(np.array([0, 1, 0]), np.array([0, 0, -1]),
args.num_frames, 40,
train_dataset.resolution.square(), 4)
bounds = np.eye(4, dtype=np.float32) * 2
video_sampler = ffn.RaySampler(bounds, cameras, args.num_samples)
image_interval = args.num_steps // args.num_frames
else:
video_sampler = None
image_interval = args.image_interval
if args.mode == "dilate":
train_dataset.mode = ffn.RayDataset.Mode.Dilate
raycaster = ffn.Raycaster(model)
raycaster.to(args.device)
log = raycaster.fit(train_dataset, val_dataset, args.results_dir,
args.batch_size, args.learning_rate,
args.num_steps, image_interval,
args.crop_steps, args.report_interval,
args.decay_rate, args.decay_steps,
args.weight_decay, video_sampler)
model.save(os.path.join(args.results_dir, "nerf.pt"))
with open(os.path.join(args.results_dir, "log.txt"), "w") as file:
json.dump(vars(args), file)
file.write("\n\n")
file.write("\t".join(["step", "timestamp", "psnr_train", "psnr_val"]))
        file.write("\n")  # end the header row before writing the data rows
for line in log:
file.write("\t".join([str(val) for val in line]) + "\n")
sp_path = os.path.join(args.results_dir, "nerf.html")
raycaster.to_scenepic(val_dataset).save_as_html(sp_path)
if __name__ == "__main__":
_main()
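# Hypothetical invocation (data path and results directory are placeholders):
#
#     python train_nerf.py nerf_dataset.npz results/ --num-samples 128 \
#         --num-steps 50000 --batch-size 1024 --make-video
#
# Every flag shown is defined in _parse_args(); omitted options fall back to
# their defaults.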
| 45.661972 | 78 | 0.588834 |

8f21dd63c6cf72af25249fe99e952b46bddcdbbe | 128,199 | py | Python | lrs/tests/test_Statement.py | dblacksmith/ADL_LRS | 4732f62a53fecf76844a8b5fb420db0f5aed4647 | ["Apache-2.0"] | null | null | null | lrs/tests/test_Statement.py | dblacksmith/ADL_LRS | 4732f62a53fecf76844a8b5fb420db0f5aed4647 | ["Apache-2.0"] | null | null | null | lrs/tests/test_Statement.py | dblacksmith/ADL_LRS | 4732f62a53fecf76844a8b5fb420db0f5aed4647 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import json
import base64
import uuid
import urllib
import hashlib
from datetime import datetime, timedelta
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.timezone import utc
from django.conf import settings
from django.test.utils import override_settings
from ..models import Statement, Activity, Agent, Verb, SubStatement
from ..utils import retrieve_statement
from adl_lrs.views import register
class StatementTests(TestCase):
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
super(StatementTests, cls).setUpClass()
def setUp(self):
self.username = "tester1"
self.email = "test1@tester.com"
self.password = "test"
self.auth = "Basic %s" % base64.b64encode(
"%s:%s" % (self.username, self.password))
form = {"username": self.username, "email": self.email,
"password": self.password, "password2": self.password}
self.client.post(reverse(register), form,
X_Experience_API_Version=settings.XAPI_VERSION)
self.username2 = "tester2"
self.email2 = "test2@tester.com"
self.password2 = "test2"
self.auth2 = "Basic %s" % base64.b64encode(
"%s:%s" % (self.username2, self.password2))
form2 = {"username": self.username2, "email": self.email2,
"password": self.password2, "password2": self.password2}
self.client.post(reverse(register), form2,
X_Experience_API_Version=settings.XAPI_VERSION)
self.firstTime = str(datetime.utcnow().replace(tzinfo=utc).isoformat())
self.guid1 = uuid.uuid4()
def bunchostmts(self):
self.guid2 = uuid.uuid4()
self.guid3 = uuid.uuid4()
self.guid4 = uuid.uuid4()
self.guid5 = uuid.uuid4()
self.guid6 = uuid.uuid4()
self.guid7 = uuid.uuid4()
self.guid8 = uuid.uuid4()
self.guid9 = uuid.uuid4()
self.guid10 = str(uuid.uuid4())
self.cguid1 = str(uuid.uuid4())
self.cguid2 = str(uuid.uuid4())
self.cguid3 = str(uuid.uuid4())
self.cguid4 = str(uuid.uuid4())
self.cguid5 = str(uuid.uuid4())
self.cguid6 = str(uuid.uuid4())
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "object": {"id": "act:activity"},
"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"authority": {"objectType": "Agent", "name": "tester1", "mbox": "mailto:test1@tester.com"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
stmt_id = uuid.UUID(json.loads(response.content)[0])
self.existStmt = Statement.objects.get(statement_id=stmt_id)
self.exist_stmt_id = self.existStmt.statement_id
self.existStmt1 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
"description": {"en-US": "testdesc2", "en-GB": "altdesc"}, "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
"result": {"score": {"scaled": .85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1", "ext:key2": "value2"}},
"context": {"registration": self.cguid1, "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}})
self.existStmt2 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@t.com"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname3", "en-GB": "altname"},
"description": {"en-US": "testdesc3", "en-GB": "altdesc"}, "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answers"],
"extensions": {"ext:key11": "value11", "ext:key22": "value22", "ext:key33": "value33"}}},
"result": {"score": {"scaled": .75}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:dkey1": "dvalue1", "ext:dkey2": "dvalue2"}},
"context": {"registration": self.cguid2, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey11": "cval11",
"ext:ckey22": "cval22"}}})
self.existStmt3 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogals",
"definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answers"],
"extensions": {"ext:key111": "value111", "ext:key222": "value222", "ext:key333": "value333"}}},
"result": {"score": {"scaled": .79}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:dkey1": "dvalue1", "ext:dkey2": "dvalue2"}},
"context": {"registration": self.cguid3, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform": "bard", "language": "en-US",
"instructor": {"objectType": "Agent", "name": "bob", "mbox": "mailto:bob@bob.com"},
"extensions": {"ext:ckey111": "cval111", "ext:ckey222": "cval222"}}})
self.existStmt4 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogal",
"definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answers"],
"extensions": {"ext:key111": "value111", "ext:key222": "value222", "ext:key333": "value333"}}},
"result": {"score": {"scaled": .79}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:dkey1": "dvalue1", "ext:dkey2": "dvalue2"}},
"context": {"registration": self.cguid4, "contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "platform": "bard", "language": "en-US", "instructor": {"name": "bill", "mbox": "mailto:bill@bill.com"},
"extensions": {"ext:ckey111": "cval111", "ext:ckey222": "cval222"}}})
self.existStmt5 = json.dumps({"object": {"objectType": "Agent", "name": "jon", "mbox": "mailto:jon@jon.com"},
"verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}})
self.existStmt6 = json.dumps({"actor": {"objectType": "Agent", "name": "max", "mbox": "mailto:max@max.com"},
"object": {"id": "act:test_activity"}, "verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}})
self.existStmt7 = json.dumps({"object": {"objectType": "Agent", "name": "max", "mbox": "mailto:max@max.com"},
"verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}})
self.existStmt8 = json.dumps({"object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"},
"verb": {"id": "http://example.com/verbs/missed", "display": {"en-US": "missed"}},
"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}})
self.existStmt9 = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:sub@sub.com"},
"verb": {"id": "http://example.com/verbs/missed"}, "object": {"objectType": "SubStatement",
"actor": {"objectType": "Agent", "mbox": "mailto:ss@ss.com"}, "verb": {"id": "verb:verb/url/nested"},
"object": {"objectType": "Activity", "id": "act:testex.com"}, "result": {"completion": True, "success": True,
"response": "kicked"}, "context": {"registration": self.cguid5,
"contextActivities": {"other": {"id": "act:NewActivityID"}}, "revision": "foo", "platform": "bar",
"language": "en-US", "extensions": {"ext:k1": "v1", "ext:k2": "v2"}}}})
self.existStmt10 = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:ref@ref.com"},
"verb": {"id": "http://example.com/verbs/missed"}, "object": {"objectType": "StatementRef",
"id": str(self.exist_stmt_id)}})
# Put statements
param = {"statementId": str(self.guid1)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt1
self.putresponse1 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse1.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=2)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid1).update(stored=time)
param = {"statementId": str(self.guid3)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt3
self.putresponse3 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse3.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=3)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid3).update(stored=time)
param = {"statementId": str(self.guid4)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt4
self.putresponse4 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse4.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=4)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid4).update(stored=time)
self.secondTime = str(
(datetime.utcnow() + timedelta(seconds=4)).replace(tzinfo=utc).isoformat())
param = {"statementId": str(self.guid2)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt2
self.putresponse2 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse2.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=6)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid2).update(stored=time)
param = {"statementId": str(self.guid5)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt5
self.putresponse5 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse5.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=7)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid5).update(stored=time)
param = {"statementId": str(self.guid6)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt6
self.putresponse6 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse6.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=8)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid6).update(stored=time)
param = {"statementId": str(self.guid7)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt7
self.putresponse7 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse7.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=9)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid7).update(stored=time)
param = {"statementId": str(self.guid8)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt8
self.putresponse8 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse8.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid8).update(stored=time)
param = {"statementId": str(self.guid9)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt9
self.putresponse9 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse9.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=11)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid9).update(stored=time)
param = {"statementId": str(self.guid10)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt_payload = self.existStmt10
self.putresponse10 = self.client.put(path, stmt_payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(self.putresponse10.status_code, 204)
time = retrieve_statement.convert_to_datetime_object(
str((datetime.utcnow() + timedelta(seconds=11)).replace(tzinfo=utc).isoformat()))
stmt = Statement.objects.filter(
statement_id=self.guid10).update(stored=time)
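    # For orientation only (not used by any test): the smallest statement shape
    # the fixtures above send is actor + verb + object, serialized to JSON and
    # PUT/POSTed with an X-Experience-API-Version header, e.g.
    #
    #     json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
    #                 "verb": {"id": "http://example.com/verbs/created"},
    #                 "object": {"id": "act:activity"}})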
def test_invalid_result_fields(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogie"},
"result": {"bad": "fields", "foo": "bar", "score": {"scaled": .85}, "completion": True, "success": True,
"response": "kicked", "duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1",
"ext:key2": "value2"}}})
resp = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 400)
self.assertEqual(
resp.content, 'Invalid field(s) found in Result - bad, foo')
def test_invalid_context_fields(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogals",
"definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answers"],
"extensions": {"ext:key111": "value111", "ext:key222": "value222", "ext:key333": "value333"}}},
"result": {"score": {"scaled": .79}, "completion": True, "success": True, "response": "shouted",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:dkey1": "dvalue1", "ext:dkey2": "dvalue2"}},
"context": {"contextActivities": {"other": {"id": "act:NewActivityID22"}},
"revision": "food", "bad": "foo", "platform": "bard", "language": "en-US",
"instructor": {"objectType": "Agent", "name": "bob", "mbox": "mailto:bob@bob.com"},
"extensions": {"ext:ckey111": "cval111", "ext:ckey222": "cval222"}}})
resp = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 400)
self.assertEqual(
resp.content, 'Invalid field(s) found in Context - bad')
def test_post_with_no_valid_params(self):
# Error will be thrown in statements class
resp = self.client.post(reverse('lrs:statements'), {"feet": "yes", "hands": {"id": "http://example.com/test_post"}},
content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp.status_code, 400)
def test_post(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:t@t.com", "name": "bob"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_post"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
act = Activity.objects.get(activity_id="act:test_post")
self.assertEqual(act.activity_id, "act:test_post")
agent = Agent.objects.get(mbox="mailto:t@t.com")
self.assertEqual(agent.name, "bob")
def test_post_wrong_crp_type(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/created"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
"description": {"en-US": "testdesc2", "en-GB": "altdesc"}, "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": "wrong"}},
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content, 'Activity definition correctResponsesPattern is not a properly formatted array')
def test_post_wrong_choice_type(self):
stmt = json.dumps(
{"verb": {"id": "http://example.com/verbs/created"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
"description": {"en-US": "testdesc2", "en-GB": "altdesc"},
"type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "choice", "correctResponsesPattern": ["a1[,]a3[,]a6[,]a7"],
"choices": "wrong"}},
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content, 'Activity definition choices is not a properly formatted array')
def test_openid(self):
stmt = json.dumps({'object': {'objectType': 'Agent', 'name': 'lulu', 'openid': 'id:luluid'},
'verb': {"id": "verb:verb/url"}, 'actor': {'objectType': 'Agent', 'mbox': 'mailto:t@t.com'}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
agent = Agent.objects.get(name='lulu')
self.assertEqual(agent.openid, 'id:luluid')
def test_invalid_actor_fields(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:t@t.com", "name": "bob", "bad": "blah",
"foo": "bar"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_post"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'Invalid field(s) found in Agent/Group - bad, foo')
def test_invalid_activity_fields(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:t@t.com", "name": "bob"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_post", "bad": "foo", "foo": "bar"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
"Invalid field(s) found in Activity - bad, foo")
def test_invalid_activity_def_fields(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:t@t.com", "name": "bob"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {'objectType': 'Activity', 'id': 'act:food',
'definition': {'bad': 'foo', 'name': {'en-FR': 'testname2', 'en-US': 'testnameEN'}, 'description': {'en-CH': 'testdesc2',
'en-GB': 'testdescGB'}, 'type': 'type:course', 'interactionType': 'intType2', 'extensions': {'ext:key1': 'value1',
'ext:key2': 'value2', 'ext:key3': 'value3'}}}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'Invalid field(s) found in Activity definition - bad')
def test_post_wrong_duration(self):
stmt = json.dumps({"actor": {'name': 'jon',
'mbox': 'mailto:jon@example.com'}, 'verb': {"id": "verb:verb/url"}, "object": {'id': 'act:activity13'},
"result": {'completion': True, 'success': True, 'response': 'yes', 'duration': 'wrongduration',
'extensions': {'ext:key1': 'value1', 'ext:key2': 'value2'}}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.content, "Error with result duration - Unable to parse duration string u'wrongduration'")
def test_post_stmt_ref_no_existing_stmt(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:ref@ref.com"},
"verb": {"id": "http://example.com/verbs/missed"}, "object": {"objectType": "StatementRef",
"id": "12345678-1234-5678-1234-567812345678"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
def test_post_with_actor(self):
stmt = json.dumps({"actor": {"mbox": "mailto:mr.t@example.com"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
Agent.objects.get(mbox="mailto:mr.t@example.com")
def test_list_post(self):
stmts = json.dumps([{"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_list_post"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}},
{"verb": {"id": "http://example.com/verbs/failed", "display": {"en-GB": "failed"}},
"object": {"id": "act:test_list_post1"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}}])
response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
activity1 = Activity.objects.get(activity_id="act:test_list_post")
activity2 = Activity.objects.get(activity_id="act:test_list_post1")
stmt1 = Statement.objects.get(object_activity=activity1)
stmt2 = Statement.objects.get(object_activity=activity2)
verb1 = Verb.objects.get(id=stmt1.verb.id)
verb2 = Verb.objects.get(id=stmt2.verb.id)
lang_map1 = verb1.canonical_data['display']
lang_map2 = verb2.canonical_data['display']
self.assertEqual(response.status_code, 200)
self.assertEqual(stmt1.verb.verb_id, "http://example.com/verbs/passed")
self.assertEqual(stmt2.verb.verb_id, "http://example.com/verbs/failed")
self.assertEqual(lang_map1.keys()[0], "en-US")
self.assertEqual(lang_map1.values()[0], "passed")
self.assertEqual(lang_map2.keys()[0], "en-GB")
self.assertEqual(lang_map2.values()[0], "failed")
def test_put(self):
guid = uuid.uuid4()
param = {"statementId": str(guid)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 204)
stmt = Statement.objects.get(statement_id=guid)
act = Activity.objects.get(activity_id="act:test_put")
self.assertEqual(act.activity_id, "act:test_put")
self.assertEqual(stmt.actor.mbox, "mailto:t@t.com")
self.assertEqual(stmt.authority.name, "tester1")
self.assertEqual(stmt.authority.mbox, "mailto:test1@tester.com")
self.assertEqual(stmt.version, settings.XAPI_VERSION)
self.assertEqual(stmt.verb.verb_id, "http://example.com/verbs/passed")
def test_put_1_0_0(self):
guid = uuid.uuid4()
param = {"statementId": str(guid)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.0")
self.assertEqual(putResponse.status_code, 204)
stmt = Statement.objects.get(statement_id=guid)
act = Activity.objects.get(activity_id="act:test_put")
self.assertEqual(act.activity_id, "act:test_put")
self.assertEqual(stmt.actor.mbox, "mailto:t@t.com")
self.assertEqual(stmt.authority.name, "tester1")
self.assertEqual(stmt.authority.mbox, "mailto:test1@tester.com")
self.assertEqual(stmt.version, "1.0.0")
self.assertEqual(stmt.verb.verb_id, "http://example.com/verbs/passed")
def test_put_id_in_stmt(self):
guid = uuid.uuid4()
stmt = json.dumps({"id": str(guid), "verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 400)
def test_put_id_in_both_same(self):
guid = uuid.uuid4()
param = {"statementId": str(guid)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({"id": str(guid), "verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 204)
stmt = Statement.objects.get(statement_id=guid)
act = Activity.objects.get(activity_id="act:test_put")
self.assertEqual(act.activity_id, "act:test_put")
self.assertEqual(stmt.actor.mbox, "mailto:t@t.com")
self.assertEqual(stmt.authority.name, "tester1")
self.assertEqual(stmt.authority.mbox, "mailto:test1@tester.com")
self.assertEqual(stmt.version, settings.XAPI_VERSION)
self.assertEqual(stmt.verb.verb_id, "http://example.com/verbs/passed")
def test_put_id_in_both_different(self):
guid1 = str(uuid.uuid4())
guid2 = str(uuid.uuid4())
param = {"statementId": guid1}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({"id": guid2, "verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 400)
self.assertEqual(
putResponse.content, "Error -- statements - method = PUT, param and body ID both given, but do not match")
def test_put_with_substatement(self):
con_guid = str(uuid.uuid4())
st_guid = str(uuid.uuid4())
param = {"statementId": st_guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:sass@sass.com"},
"verb": {"id": "verb:verb/url/tested"}, "object": {"objectType": "SubStatement",
"actor": {"objectType": "Agent", "mbox": "mailto:ss@ss.com"}, "verb": {"id": "verb:verb/url/nested"},
"object": {"objectType": "Activity", "id": "act:testex.com"}, "result": {"completion": True, "success": True,
"response": "kicked"}, "context": {"registration": con_guid,
"contextActivities": {"other": {"id": "act:NewActivityID"}}, "revision": "foo", "platform": "bar",
"language": "en-US", "extensions": {"ext:k1": "v1", "ext:k2": "v2"}}}})
response = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
get_response = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(get_response.status_code, 200)
rsp = get_response.content
self.assertIn("objectType", rsp)
self.assertIn("SubStatement", rsp)
self.assertIn("actor", rsp)
self.assertIn("ss@ss.com", rsp)
self.assertIn("verb", rsp)
self.assertIn("verb:verb/url/nested", rsp)
self.assertIn("Activity", rsp)
self.assertIn("act:testex.com", rsp)
self.assertIn("result", rsp)
self.assertIn("completion", rsp)
self.assertIn("success", rsp)
self.assertIn("response", rsp)
self.assertIn("kicked", rsp)
self.assertIn("context", rsp)
self.assertIn(con_guid, rsp)
self.assertIn("contextActivities", rsp)
self.assertIn("other", rsp)
self.assertIn("revision", rsp)
self.assertIn("foo", rsp)
self.assertIn("platform", rsp)
self.assertIn("bar", rsp)
self.assertIn("language", rsp)
self.assertIn("en-US", rsp)
self.assertIn("extensions", rsp)
self.assertIn("ext:k1", rsp)
self.assertIn("v1", rsp)
self.assertIn("ext:k2", rsp)
self.assertIn("v2", rsp)
def test_no_content_put(self):
guid = str(uuid.uuid4())
param = {"statementId": guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 400)
def test_existing_stmtID_put(self):
guid = str(uuid.uuid4())
exist_stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:activity"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode({"statementId": guid}))
response = self.client.put(path, exist_stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 204)
param = {"statementId": guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_existing_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
putResponse = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putResponse.status_code, 409)
def test_missing_stmtID_put(self):
stmt = json.dumps({"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_put"}, "actor": {"objectType": "Agent", "mbox": "mailto:t@t.com"}})
response = self.client.put(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn(
response.content, "Error -- statements - method = PUT, but no statementId parameter or ID given in statement")
def test_get(self):
self.bunchostmts()
param = {"statementId": str(self.guid1)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
getResponse = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
rsp = getResponse.content
self.assertIn(str(self.guid1), rsp)
self.assertIn('content-length', getResponse._headers)
def test_get_no_params(self):
self.bunchostmts()
getResponse = self.client.get(reverse('lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION,
Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
self.assertIn('content-length', getResponse._headers)
rsp = json.loads(getResponse.content)
self.assertEqual(len(rsp['statements']), 11)
def test_head(self):
self.bunchostmts()
param = {"statementId": str(self.guid1)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
head_resp = self.client.head(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(head_resp.status_code, 200)
self.assertEqual(head_resp.content, '')
self.assertIn('content-length', head_resp._headers)
def test_get_no_existing_ID(self):
param = {"statementId": "aaaaaa"}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
getResponse = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 404)
def test_get_no_statementid(self):
self.bunchostmts()
getResponse = self.client.get(reverse(
'lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(getResponse.status_code, 200)
jsn = json.loads(getResponse.content)
self.assertEqual(len(jsn["statements"]), 11)
self.assertIn('content-length', getResponse._headers)
def test_head_no_statementid(self):
self.bunchostmts()
head_resp = self.client.head(reverse(
'lrs:statements'), X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
self.assertEqual(head_resp.status_code, 200)
self.assertEqual(head_resp.content, '')
self.assertIn('content-length', head_resp._headers)
    # Six activities come from the statement objects above - contextActivities create 3 more
def test_number_of_activities(self):
self.bunchostmts()
acts = len(Activity.objects.all())
self.assertEqual(9, acts)
def test_timeout_snafu(self):
stmt = json.dumps({
"timestamp": "2013-11-05T07:33:49.512119+00:00",
"object": {
"definition": {
"name": {
"en-US": "news.google.com",
"ja": "news.google.com"
},
"description": {
"en-US": "",
"ja": ""
}
},
"id": "http://garewelswe.com/",
"objectType": "Activity"
},
"authority": {
"mbox": "mailto:kazutaka_kamiya@test.local",
"name": "adllrs",
"objectType": "Agent"
},
"verb": {
"id": "http://example.com/verbs/experienced",
"display": {
"en-US": "experienced"
}
},
"actor": {
"openid": "http://test.local/PEab76617d1d21d725d358a7ad5231bd6e",
"name": "dev2-001",
"objectType": "Agent"
},
"id": "9cb78e42-45ec-11e3-b8dc-0af904863508"
})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
stmt = json.dumps({
"timestamp": "2013-11-08T08:41:55.985064+00:00",
"object": {
"definition": {
"interactionType": "fill-in",
"correctResponsesPattern": [],
"type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"name": {
"ja": "SCORM20110721_12"
},
"description": {
"ja": ""
}
},
"id": "http://garewelswe.com/",
"objectType": "Activity"
},
"actor": {
"openid": "http://test.local/EAGLE/PEab76617d1d21d725d358a7ad5231bd6e",
"name": "dev2-001",
"objectType": "Agent"
},
"verb": {
"id": "http://example.com/verbs/answered",
"display": {
"en-US": "answered"
}
},
"result": {
"response": "TEST0",
"success": True
},
"context": {
"contextActivities": {
"parent": [
{
"id": "http://garewelswe.com/"
}
],
"grouping": [
{
"id": "http://garewelswe.com/"
}
]
}
},
"id": "9faf143c-4851-11e3-b1a1-000c29bfba11",
"authority": {
"mbox": "mailto:kazutaka_kamiya@test.local",
"name": "adllrs",
"objectType": "Agent"
}
})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
def test_amsterdam_snafu(self):
stmt = json.dumps({
"timestamp": "2013-05-23T10:46:39+02:00",
"verb": {"id": "http://www.adlnet.gov/expapi/verbs/experienced"},
"context": {
"contextActivities": {
"parent": {
"id": "http://localhost:8080/portal/site/~88a4933d-99d2-4a35-8906-993fdcdf2259"
}
}
},
"object": {
"id": "http://localhost:8080/portal/web/~88a4933d-99d2-4a35-8906-993fdcdf2259/id/c50bf034-0f3e-4055-a1e7-8d1cf92be353/url/%2Flibrary%2Fcontent%2Fmyworkspace_info.html",
"definition": {
"type": "http://adlnet.gov/expapi/activities/view-web-content"
},
"objectType": "Activity"
},
"actor": {
"name": "Alan Tester",
"objectType": "Agent",
"mbox": "mailto:tester@dev.nl"
}
})
post_response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post_response.status_code, 200)
def test_update_activity_wrong_auth(self):
existStmt1 = json.dumps({"verb": {"id": "http://example.com/verbs/created",
"display": {"en-US": "created"}}, "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname2", "en-GB": "altname"}, "description": {"en-US": "testdesc2", "en-GB": "altdesc"},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
"result": {"score": {"scaled": .85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1", "ext:key2": "value2"}},
"context": {"registration": str(uuid.uuid4()), "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}})
param = {"statementId": str(self.guid1)}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
putresponse1 = self.client.put(path, existStmt1, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(putresponse1.status_code, 204)
wrong_username = "tester2"
wrong_email = "test2@tester.com"
wrong_password = "test2"
wrong_auth = "Basic %s" % base64.b64encode(
"%s:%s" % (wrong_username, wrong_password))
form = {"username": wrong_username, "email": wrong_email, "password": wrong_password,
"password2": wrong_password}
self.client.post(reverse(register), form,
X_Experience_API_Version=settings.XAPI_VERSION)
stmt = json.dumps({"verb": {"id": "verb:verb/iri/attempted"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
"result": {"score": {"scaled": .85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1", "ext:key2": "value2"}},
"context": {"registration": str(uuid.uuid4()), "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}, "authority": {"objectType": "Agent", "name": "auth", "mbox": "mailto:auth@example.com"}})
post_response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=wrong_auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post_response.status_code, 200)
acts = Activity.objects.filter(activity_id="act:foogie").count()
self.assertEqual(acts, 1)
def test_update_activity_correct_auth(self):
self.bunchostmts()
stmt = json.dumps({"verb": {"id": "verb:verb/url/changed-act"}, "actor": {"objectType": "Agent", "mbox": "mailto:l@l.com"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname3"}, "description": {"en-US": "testdesc3"},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction", "interactionType": "fill-in", "correctResponsesPattern": ["answer"],
"extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
"result": {"score": {"scaled": .85}, "completion": True, "success": True, "response": "kicked",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:key1": "value1", "ext:key2": "value2"}},
"context": {"registration": self.cguid6, "contextActivities": {"other": {"id": "act:NewActivityID2"}},
"revision": "food", "platform": "bard", "language": "en-US", "extensions": {"ext:ckey1": "cval1",
"ext:ckey2": "cval2"}}, "authority": {"objectType": "Agent", "name": "auth", "mbox": "mailto:auth@example.com"}})
post_response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(post_response.status_code, 200)
act = Activity.objects.get(activity_id="act:foogie")
name_set = act.canonical_data['definition']['name']
desc_set = act.canonical_data['definition']['description']
self.assertEqual(name_set.keys()[1], "en-US")
self.assertEqual(name_set.values()[1], "testname3")
self.assertEqual(name_set.keys()[0], "en-GB")
self.assertEqual(name_set.values()[0], "altname")
self.assertEqual(desc_set.keys()[1], "en-US")
self.assertEqual(desc_set.values()[1], "testdesc3")
self.assertEqual(desc_set.keys()[0], "en-GB")
self.assertEqual(desc_set.values()[0], "altdesc")
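    # The CORS tests below exercise xAPI's alternate request syntax: the real
    # method is tunneled through a ?method=PUT query parameter while the
    # statement, Authorization header and version header travel in a
    # form-encoded POST body. Sketch of the request shape (values abridged
    # from the tests, shown only for illustration):
    #
    #     body = "statementId=<uuid>&content=<json>&Authorization=<basic>" \
    #            "&Content-Type=application/json&X-Experience-API-Version=1.0.1"
    #     client.post("/statements?method=PUT", body,
    #                 content_type="application/x-www-form-urlencoded")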
def test_cors_post_put(self):
content = {"verb": {"id": "verb:verb/url"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"id": "act:test_cors_post_put"}}
bdy = "statementId=886313e1-3b8a-5372-9b90-0c9aee199e5d&content=%s&Authorization=%s&Content-Type=application/json&X-Experience-API-Version=%s" % (
content, self.auth, settings.XAPI_VERSION)
path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode({"method": "PUT"}))
response = self.client.post(
path, bdy, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 204)
act = Activity.objects.get(activity_id="act:test_cors_post_put")
self.assertEqual(act.activity_id, "act:test_cors_post_put")
agent = Agent.objects.get(mbox="mailto:test1@tester.com")
self.assertEqual(agent.name, "tester1")
self.assertEqual(agent.mbox, "mailto:test1@tester.com")
def test_cors_post_put_1_0_0(self):
content = {"verb": {"id": "verb:verb/url"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"id": "act:test_cors_post_put"}}
bdy = "statementId=886313e1-3b8a-5372-9b90-0c9aee199e5d&content=%s&Authorization=%s&Content-Type=application/json&X-Experience-API-Version=1.0.0" % (
content, self.auth)
path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode({"method": "PUT"}))
response = self.client.post(
path, bdy, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 204)
act = Activity.objects.get(activity_id="act:test_cors_post_put")
self.assertEqual(act.activity_id, "act:test_cors_post_put")
agent = Agent.objects.get(mbox="mailto:test1@tester.com")
self.assertEqual(agent.name, "tester1")
self.assertEqual(agent.mbox, "mailto:test1@tester.com")
def test_cors_post_put_wrong_version(self):
content = {"verb": {"id": "verb:verb/url"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"id": "act:test_cors_post_put"}}
bdy = "statementId=886313e1-3b8a-5372-9b90-0c9aee199e5b&content=%s&Authorization=%s&X-Experience-API-Version=1.0.33&Content-Type=application/json" % (
content, self.auth)
path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode({"method": "PUT"}))
response = self.client.post(
path, bdy, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
"X-Experience-API-Version is not supported")
def test_cors_post_put_correct_version(self):
content = {"verb": {"id": "verb:verb/url"}, "actor": {"objectType": "Agent", "mbox": "mailto:r@r.com"},
"object": {"id": "act:test_cors_post_put"}}
bdy = "statementId=886313e1-3b8a-5372-9b90-0c9aee199e5a&content=%s&Authorization=%s&X-Experience-API-Version=1.0.1&Content-Type=application/json" % (
content, self.auth)
path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode({"method": "PUT"}))
response = self.client.post(
path, bdy, content_type="application/x-www-form-urlencoded")
self.assertEqual(response.status_code, 204)
def test_issue_put(self):
stmt_id = "33f60b35-e1b2-4ddc-9c6f-7b3f65244430"
stmt = json.dumps({"verb": {"id": "verb:verb/iri"}, "object": {"id": "act:scorm.com/JsTetris_TCAPI", "definition": {"type": "type:media",
"name": {"en-US": "Js Tetris - Tin Can Prototype"}, "description": {"en-US": "A game of tetris."}}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "6b1091be-2833-4886-b4a6-59e5e0b3c3f4"},
"actor": {"mbox": "mailto:tom.creighton.ctr@adlnet.gov", "name": "Tom Creighton"}})
path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
def test_post_with_group(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
stmt = json.dumps({"actor": {"objectType": ot, "name": name, "mbox": mbox, "member": [{"name": "agentA", "mbox": "mailto:agentA@example.com"}, {"name": "agentB", "mbox": "mailto:agentB@example.com"}]}, "verb": {"id": "http://verb/iri/created", "display": {"en-US": "created"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
g = Agent.objects.get(mbox="mailto:the.groupST@example.com")
self.assertEquals(g.name, name)
self.assertEquals(g.mbox, mbox)
mems = g.member.values_list("name", flat=True)
self.assertEquals(len(mems), 2)
self.assertIn("agentA", mems)
self.assertIn("agentB", mems)
def test_post_with_group_no_members_listed(self):
ot = "Group"
name = "the group ML"
mbox = "mailto:the.groupML@example.com"
stmt = json.dumps({"actor": {"objectType": ot, "name": name, "mbox": mbox}, "verb": {"id": "http://verb/iri/created", "display": {"en-US": "created"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
g = Agent.objects.get(mbox="mailto:the.groupML@example.com")
        self.assertEqual(g.name, name)
        self.assertEqual(g.mbox, mbox)
        mems = g.member.values_list("name", flat=True)
        self.assertEqual(len(mems), 0)
def test_post_with_group_member_not_array(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
members = "wrong"
stmt = json.dumps({"actor": {"objectType": ot, "name": name, "mbox": mbox, "member": members}, "verb": {"id": "http://verb/iri/created", "display": {"en-US": "created"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
'Members is not a properly formatted array')
def test_post_with_group_member_empty_array(self):
ot = "Group"
name = "the group ST"
mbox = "mailto:the.groupST@example.com"
members = []
stmt = json.dumps({"actor": {"objectType": ot, "name": name, "mbox": mbox, "member": members}, "verb": {"id": "http://verb/iri/created", "display": {"en-US": "created"}},
"object": {"id": "act:i.pity.the.fool"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
"Member property must contain agents")
def test_issue_put_no_version_header(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244431'
stmt = json.dumps({"verb": "verb:completed", "object": {"id": "act:scorm.com/JsTetris_TCAPI/level2",
"definition": {"type": "media", "name": {"en-US": "Js Tetris Level2"},
"description": {"en-US": "Starting at 1, the higher the level, the harder the game."}}},
"result": {"extensions": {"ext:time": 104, "ext:apm": 229, "ext:lines": 5}, "score": {"raw": 9911, "min": 0}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor": {"name": "tom creighton", "mbox": "mailto:tom@example.com"}})
path = '%s?%s' % (reverse('lrs:statements'),
urllib.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(
path, stmt, content_type="application/json", Authorization=self.auth)
self.assertEqual(put_stmt.status_code, 400)
def test_issue_put_wrong_version_header(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432'
stmt = json.dumps({"verb": {"id": "verb:completed"}, "object": {"id": "act:scorm.com/JsTetris_TCAPI/level2",
"definition": {"type": "media", "name": {"en-US": "Js Tetris Level2"},
"description": {"en-US": "Starting at 1, the higher the level, the harder the game."}}},
"result": {"extensions": {"ext:time": 104, "ext:apm": 229, "ext:lines": 5}, "score": {"raw": 9911, "min": 0}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor": {"name": "tom creighton", "mbox": "mailto:tom@example.com"}})
path = '%s?%s' % (reverse('lrs:statements'),
urllib.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="0.90")
self.assertEqual(put_stmt.status_code, 400)
def test_issue_put_wrong_version_header_again(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432'
stmt = json.dumps({"verb": {"id": "verb:completed"}, "object": {"id": "act:scorm.com/JsTetris_TCAPI/level2",
"definition": {"type": "media", "name": {"en-US": "Js Tetris Level2"},
"description": {"en-US": "Starting at 1, the higher the level, the harder the game."}}},
"result": {"extensions": {"ext:time": 104, "ext:apm": 229, "ext:lines": 5}, "score": {"raw": 9911, "min": 0}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor": {"name": "tom creighton", "mbox": "mailto:tom@example.com"}})
path = '%s?%s' % (reverse('lrs:statements'),
urllib.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.0.")
self.assertEqual(put_stmt.status_code, 400)
def test_issue_put_wrong_version_header_1_1(self):
stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432'
stmt = json.dumps({"verb": {"id": "verb:completed"}, "object": {"id": "act:scorm.com/JsTetris_TCAPI/level2",
"definition": {"type": "media", "name": {"en-US": "Js Tetris Level2"},
"description": {"en-US": "Starting at 1, the higher the level, the harder the game."}}},
"result": {"extensions": {"ext:time": 104, "ext:apm": 229, "ext:lines": 5}, "score": {"raw": 9911, "min": 0}},
"context": {"contextActivities": {"grouping": {"id": "act:scorm.com/JsTetris_TCAPI"}},
"registration": "b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"},
"actor": {"name": "tom creighton", "mbox": "mailto:tom@example.com"}})
path = '%s?%s' % (reverse('lrs:statements'),
urllib.urlencode({"statementId": stmt_id}))
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version="1.1.")
self.assertEqual(put_stmt.status_code, 400)
# Use this test to make sure stmts are being returned correctly with all
# data - doesn't check timestamp and stored fields
def test_all_fields_activity_as_object(self):
self.bunchostmts()
nested_st_id = str(uuid.uuid4())
nest_param = {"statementId": nested_st_id}
nest_path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincan@adlnet.gov"},
"verb": {"id": "http://example.com/verbs/assess", "display": {"en-US": "assessed"}},
"object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid4())
context_id = str(uuid.uuid4())
param = {"statementId": stmt_id}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford", "account": {"homePage": "http://example.com", "name": "uniqueName"}},
"verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created", "en-GB": "made"}},
"object": {"objectType": "Activity", "id": "http:adlnet.gov/my/Activity/URL",
"definition": {"name": {"en-US": "actName", "en-GB": "anotherActName"},
"description": {"en-US": "This is my activity description.", "en-GB": "This is another activity description."},
"type": "http://adlnet.gov/expapi/activities/cmi.interaction",
"moreInfo": "http://some/activity/url",
"interactionType": "choice",
"correctResponsesPattern": ["golf", "tetris"],
"choices": [{"id": "golf", "description": {"en-US": "Golf Example", "en-GB": "GOLF"}},
{"id": "tetris", "description": {
"en-US": "Tetris Example", "en-GB": "TETRIS"}},
{"id": "facebook", "description": {
"en-US": "Facebook App", "en-GB": "FACEBOOK"}},
{"id": "scrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
"extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
"result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100}, "completion": True, "success": False, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
"context": {"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"},
"grouping": {"id": "http://groupingID"}},
"revision": "Spelling error in choices.", "platform": "Platform is web browser.", "language": "en-US",
"statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
"extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
"timestamp": self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
param = {"statementId": stmt_id}
get_response = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['version'], settings.XAPI_VERSION)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor'][
'account']['name'], 'uniqueName')
self.assertEqual(the_returned['actor']['account'][
'homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'],
'http://example.com/verbs/created')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'made')
self.assertEqual(the_returned['verb']['display']['en-US'], 'created')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result'][
'duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions'][
'ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions'][
'ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], False)
self.assertEqual(the_returned['context']['contextActivities']['other'][0][
'id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions'][
'ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions'][
'ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context'][
'platform'], 'Platform is web browser.')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context'][
'revision'], 'Spelling error in choices.')
self.assertEqual(the_returned['context']['statement'][
'id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement'][
'objectType'], 'StatementRef')
self.assertEqual(the_returned['authority']['objectType'], 'Agent')
self.assertEqual(the_returned['authority']['name'], 'tester1')
self.assertEqual(the_returned['authority'][
'mbox'], 'mailto:test1@tester.com')
self.assertEqual(the_returned['object'][
'id'], 'http:adlnet.gov/my/Activity/URL')
self.assertEqual(the_returned['object']['objectType'], 'Activity')
self.assertEqual(the_returned['object']['definition']['description'][
'en-US'], 'This is my activity description.')
self.assertEqual(the_returned['object']['definition']['description'][
'en-GB'], 'This is another activity description.')
self.assertEqual(the_returned['object']['definition'][
'interactionType'], 'choice')
self.assertEqual(the_returned['object']['definition'][
'name']['en-US'], 'actName')
self.assertEqual(the_returned['object']['definition'][
'name']['en-GB'], 'anotherActName')
self.assertEqual(the_returned['object']['definition'][
'type'], 'http://adlnet.gov/expapi/activities/cmi.interaction')
self.assertEqual(the_returned['object']['definition'][
'moreInfo'], 'http://some/activity/url')
self.assertEqual(the_returned['object']['definition'][
'extensions']['ext:key1'], 'value1')
self.assertEqual(the_returned['object']['definition'][
'extensions']['ext:key2'], 'value2')
self.assertEqual(the_returned['object']['definition'][
'extensions']['ext:key3'], 'value3')
# arrays.. testing slightly differently
choices_str = json.dumps(the_returned['object'][
'definition']['choices'])
self.assertIn('description', choices_str)
self.assertIn('id', choices_str)
self.assertIn('GOLF', choices_str)
self.assertIn('Golf Example', choices_str)
self.assertIn('golf', choices_str)
self.assertIn('TETRIS', choices_str)
self.assertIn('Tetris Example', choices_str)
self.assertIn('tetris', choices_str)
self.assertIn('FACEBOOK', choices_str)
self.assertIn('Facebook App', choices_str)
        self.assertIn('facebook', choices_str)
self.assertIn('SCRABBLE', choices_str)
self.assertIn('Scrabble Example', choices_str)
self.assertIn('scrabble', choices_str)
crp_str = json.dumps(the_returned['object']['definition'][
'correctResponsesPattern'])
self.assertIn('golf', crp_str)
self.assertIn('tetris', crp_str)
# Use this test to make sure stmts are being returned correctly with all
# data - doesn't check timestamp, stored fields
def test_all_fields_agent_as_object(self):
nested_st_id = str(uuid.uuid4())
nest_param = {"statementId": nested_st_id}
nest_path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincan@adlnet.gov"},
"verb": {"id": "http://example.com/verbs/assess", "display": {"en-US": "assessed"}},
"object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid4())
context_id = str(uuid.uuid4())
param = {"statementId": stmt_id}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
msha = hashlib.sha1("mailto:tom@example.com").hexdigest()
stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford", "account": {"homePage": "http://example.com", "name": "louUniqueName"}},
"verb": {"id": "http://example.com/verbs/helped", "display": {"en-US": "helped", "en-GB": "assisted"}},
"object": {"objectType": "Agent", "name": "Tom Creighton", "mbox_sha1sum": msha},
"result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
"context": {"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
"language": "en-US",
"statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
"extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
"timestamp": self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
param = {"statementId": stmt_id}
get_response = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['version'], settings.XAPI_VERSION)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account'][
'name'], 'louUniqueName')
self.assertEqual(the_returned['actor']['account'][
'homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'],
'http://example.com/verbs/helped')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'assisted')
self.assertEqual(the_returned['verb']['display']['en-US'], 'helped')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result'][
'duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions'][
'ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions'][
'ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0][
'id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions'][
'ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions'][
'ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['statement'][
'id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement'][
'objectType'], 'StatementRef')
self.assertEqual(the_returned['authority']['objectType'], 'Agent')
self.assertEqual(the_returned['authority']['name'], 'tester1')
self.assertEqual(the_returned['authority'][
'mbox'], 'mailto:test1@tester.com')
self.assertEqual(the_returned['object']['objectType'], 'Agent')
self.assertEqual(the_returned['object']['name'], 'Tom Creighton')
self.assertEqual(the_returned['object']['mbox_sha1sum'], msha)
# Use this test to make sure stmts are being returned correctly with all
# data - doesn't check timestamps or stored fields
def test_all_fields_substatement_as_object(self):
nested_st_id = str(uuid.uuid4())
nest_param = {"statementId": nested_st_id}
nest_path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode(nest_param))
nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincannest@adlnet.gov"},
"verb": {"id": "http://example.com/verbs/assess", "display": {"en-US": "assessed", "en-GB": "graded"}},
"object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_sub_stmt.status_code, 204)
nested_sub_st_id = str(uuid.uuid4())
nest_sub_param = {"statementId": nested_sub_st_id}
nest_sub_path = "%s?%s" % (
reverse('lrs:statements'), urllib.urlencode(nest_sub_param))
nested_sub_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincannestsub@adlnet.gov"},
"verb": {"id": "http://example.com/verbs/verb", "display": {"en-US": "verb", "en-GB": "altVerb"}},
"object": {"id": "http://example.adlnet.gov/tincan/example/simplenestedsubstatement"}})
put_nest_sub_stmt = self.client.put(nest_sub_path, nested_sub_stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_nest_sub_stmt.status_code, 204)
stmt_id = str(uuid.uuid4())
context_id = str(uuid.uuid4())
sub_context_id = str(uuid.uuid4())
param = {"statementId": stmt_id}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford", "account": {"homePage": "http://example.com", "name": "louUniqueName"}},
"verb": {"id": "http://example.com/verbs/said", "display": {"en-US": "said", "en-GB": "talked"}},
"object": {"objectType": "SubStatement", "actor": {"objectType": "Agent", "name": "Tom Creighton", "mbox": "mailto:tom@adlnet.gov"},
"verb": {"id": "http://example.com/verbs/assess", "display": {"en-US": "assessed", "en-GB": "Graded"}},
"object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement",
'definition': {'name': {'en-US': 'SubStatement name'},
'description': {'en-US': 'SubStatement description'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction', 'interactionType': 'matching',
'correctResponsesPattern': ['lou.3,tom.2,andy.1'], 'source': [{'id': 'lou',
'description': {'en-US': 'Lou', 'it': 'Luigi'}}, {'id': 'tom', 'description': {'en-US': 'Tom', 'it': 'Tim'}},
{'id': 'andy', 'description': {'en-US': 'Andy'}}], 'target': [{'id': '1',
'description': {'en-US': 'ADL LRS'}}, {'id': '2', 'description': {'en-US': 'lrs'}},
{'id': '3', 'description': {'en-US': 'the adl lrs', 'en-CH': 'the lrs'}}]}},
"result": {"score": {"scaled": .50, "raw": 50, "min": 1, "max": 51}, "completion": True,
"success": True, "response": "Poorly done",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:resultKey11": "resultValue11", "ext:resultKey22": "resultValue22"}},
"context": {"registration": sub_context_id,
"contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test/nest"}},
"revision": "Spelling error in target.", "platform": "Ipad.", "language": "en-US",
"statement": {"objectType": "StatementRef", "id": str(nested_sub_st_id)},
"extensions": {"ext:contextKey11": "contextVal11", "ext:contextKey22": "contextVal22"}}},
"result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100}, "completion": True, "success": True, "response": "Well done",
"duration": "P3Y6M4DT12H30M5S", "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
"context": {"registration": context_id, "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
"language": "en-US",
"statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
"extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
"timestamp": self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)
param = {"statementId": stmt_id}
get_response = self.client.get(
path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['version'], settings.XAPI_VERSION)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account'][
'name'], 'louUniqueName')
self.assertEqual(the_returned['actor']['account'][
'homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'],
'http://example.com/verbs/said')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'talked')
self.assertEqual(the_returned['verb']['display']['en-US'], 'said')
self.assertEqual(the_returned['object'][
'actor']['objectType'], 'Agent')
self.assertEqual(the_returned['object']['actor'][
'name'], 'Tom Creighton')
self.assertEqual(the_returned['object']['actor'][
'mbox'], 'mailto:tom@adlnet.gov')
self.assertEqual(the_returned['object']['context'][
'registration'], sub_context_id)
self.assertEqual(the_returned['object'][
'context']['language'], 'en-US')
self.assertEqual(the_returned['object'][
'context']['platform'], 'Ipad.')
self.assertEqual(the_returned['object']['context'][
'revision'], 'Spelling error in target.')
self.assertEqual(the_returned['object']['context'][
'statement']['id'], str(nested_sub_st_id))
self.assertEqual(the_returned['object']['context'][
'statement']['objectType'], 'StatementRef')
self.assertEqual(the_returned['object']['context']['contextActivities']['other'][
0]['id'], 'http://example.adlnet.gov/tincan/example/test/nest')
self.assertEqual(the_returned['object']['context']['extensions'][
'ext:contextKey11'], 'contextVal11')
self.assertEqual(the_returned['object']['context']['extensions'][
'ext:contextKey22'], 'contextVal22')
self.assertEqual(the_returned['object']['object'][
'id'], 'http://example.adlnet.gov/tincan/example/simplestatement')
self.assertEqual(the_returned['object']['object']['definition'][
'type'], 'http://adlnet.gov/expapi/activities/cmi.interaction')
self.assertEqual(the_returned['object']['object']['definition'][
'description']['en-US'], 'SubStatement description')
self.assertEqual(the_returned['object']['object'][
'definition']['interactionType'], 'matching')
self.assertEqual(the_returned['object']['object']['definition'][
'name']['en-US'], 'SubStatement name')
# arrays.. testing slightly differently
source_str = json.dumps(the_returned['object']['object'][
'definition']['source'])
self.assertIn('description', source_str)
self.assertIn('id', source_str)
self.assertIn('Lou', source_str)
self.assertIn('Luigi', source_str)
self.assertIn('lou', source_str)
self.assertIn('Tom', source_str)
self.assertIn('Tim', source_str)
self.assertIn('tom', source_str)
self.assertIn('Andy', source_str)
self.assertIn('andy', source_str)
target_str = json.dumps(the_returned['object']['object'][
'definition']['target'])
self.assertIn('description', target_str)
self.assertIn('id', target_str)
self.assertIn('ADL LRS', target_str)
self.assertIn('1', target_str)
self.assertIn('lrs', target_str)
self.assertIn('2', target_str)
self.assertIn('the lrs', target_str)
self.assertIn('the adl lrs', target_str)
self.assertIn('3', target_str)
self.assertEqual(the_returned['object']['objectType'], 'SubStatement')
self.assertEqual(the_returned['object']['result']['completion'], True)
self.assertEqual(the_returned['object']['result'][
'duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['object']['result']['extensions'][
'ext:resultKey11'], 'resultValue11')
self.assertEqual(the_returned['object']['result']['extensions'][
'ext:resultKey22'], 'resultValue22')
self.assertEqual(the_returned['object']['result'][
'response'], 'Poorly done')
self.assertEqual(the_returned['object']['result']['score']['max'], 51)
self.assertEqual(the_returned['object']['result']['score']['min'], 1)
self.assertEqual(the_returned['object']['result']['score']['raw'], 50)
self.assertEqual(the_returned['object']['result'][
'score']['scaled'], 0.5)
self.assertEqual(the_returned['object']['result']['success'], True)
self.assertEqual(the_returned['object']['verb'][
'id'], 'http://example.com/verbs/assess')
self.assertEqual(the_returned['object']['verb'][
'display']['en-GB'], 'Graded')
self.assertEqual(the_returned['object']['verb'][
'display']['en-US'], 'assessed')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result'][
'duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions'][
'ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions'][
'ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0][
'id'], 'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions'][
'ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions'][
'ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['statement'][
'id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement'][
'objectType'], 'StatementRef')
self.assertEqual(the_returned['authority']['objectType'], 'Agent')
self.assertEqual(the_returned['authority']['name'], 'tester1')
self.assertEqual(the_returned['authority'][
'mbox'], 'mailto:test1@tester.com')
# Third stmt in list is missing actor - should throw error and perform
# cascading delete on first three statements
def test_post_list_rollback(self):
self.bunchostmts()
cguid1 = str(uuid.uuid4())
stmts = json.dumps([
{"verb": {"id": "http://example.com/verbs/wrong-failed", "display": {"en-US": "wrong-failed"}},
"object": {"id": "act:test_wrong_list_post2"}, "actor": {"objectType": "Agent",
"mbox": "mailto:wrong-t@t.com"}, "result": {"score": {"scaled": .99}, "completion": True, "success": True,
"response": "wrong", "extensions": {"ext:resultwrongkey1": "value1", "ext:resultwrongkey2": "value2"}}},
{"verb": {"id": "http://example.com/verbs/wrong-kicked", "display": {"en-US": "wrong-kicked"}},
"object": {"objectType": "Activity", "id": "act:test_wrong_list_post",
"definition": {"name": {"en-US": "wrongactName", "en-GB": "anotherActName"},
"description": {"en-US": "This is my activity description.", "en-GB": "This is another activity description."},
"type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "choice",
"correctResponsesPattern": ["wronggolf", "wrongtetris"],
"choices":[{"id": "wronggolf", "description": {"en-US": "Golf Example", "en-GB": "GOLF"}},
{"id": "wrongtetris", "description": {
"en-US": "Tetris Example", "en-GB": "TETRIS"}},
{"id": "wrongfacebook", "description": {
"en-US": "Facebook App", "en-GB": "FACEBOOK"}},
{"id": "wrongscrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
"extensions": {"ext:wrongkey1": "wrongvalue1", "ext:wrongkey2": "wrongvalue2", "ext:wrongkey3": "wrongvalue3"}}},
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}},
{"verb": {"id": "http://example.com/verbs/wrong-passed", "display": {"en-US": "wrong-passed"}}, "object": {"id": "act:test_wrong_list_post1"},
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}, "context": {"registration": cguid1, "contextActivities": {"other": {"id": "act:wrongActivityID2"}},
"revision": "wrong", "platform": "wrong", "language": "en-US", "extensions": {"ext:wrongkey1": "wrongval1",
"ext:wrongkey2": "wrongval2"}}},
{"verb": {"id": "http://example.com/verbs/wrong-kicked", "display": {
"en-US": "wrong-kicked"}}, "object": {"id": "act:test_wrong_list_post2"}},
{"verb": {"id": "http://example.com/verbs/wrong-kicked", "display": {"en-US": "wrong-kicked"}}, "object": {"id": "act:test_wrong_list_post4"}, "actor": {"objectType": "Agent", "mbox": "wrong-t@t.com"}}])
response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
verbs = Verb.objects.filter(verb_id__contains='wrong')
activities = Activity.objects.filter(
activity_id__contains='test_wrong_list_post')
stmts = Statement.objects.all()
# 11 statements from setup
self.assertEqual(len(stmts), 11)
self.assertEqual(len(verbs), 0)
self.assertEqual(len(activities), 0)
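    # The rollback tests in this block depend on list POSTs being atomic: when
    # any statement in the batch fails validation, every object created for
    # the earlier statements must be removed again. A minimal sketch of
    # asserting that invariant (hypothetical bad_batch payload):
    #
    #   before = Statement.objects.count()
    #   resp = self.client.post(reverse('lrs:statements'), bad_batch,
    #                           content_type="application/json",
    #                           Authorization=self.auth,
    #                           X_Experience_API_Version=settings.XAPI_VERSION)
    #   self.assertEqual(resp.status_code, 400)
    #   self.assertEqual(Statement.objects.count(), before)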
def test_post_list_rollback_part_2(self):
self.bunchostmts()
stmts = json.dumps([{"object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"},
"verb": {"id": "http://example.com/verbs/wrong", "display": {"en-US": "wrong"}},
"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}},
{"verb": {"id": "http://example.com/verbs/created"},
"object": {"objectType": "Activity", "id": "act:foogie",
"definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
"description": {"en-US": "testdesc2", "en-GB": "altdesc"}, "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
"interactionType": "fill-in", "correctResponsesPattern": ["answer"]}},
"actor":{"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}},
{"verb": {"id": "http://example.com/verbs/wrong-kicked"}, "object": {"id": "act:test_wrong_list_post2"}}])
response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
created_verbs = Verb.objects.filter(
verb_id__contains='http://example.com/verbs/created')
wrong_verbs = Verb.objects.filter(
verb_id__contains='http://example.com/verbs/wrong')
activities = Activity.objects.filter(activity_id='act:foogie')
stmts = Statement.objects.all()
wrong_agent = Agent.objects.filter(mbox='mailto:wrong-t@t.com')
john_agent = Agent.objects.filter(mbox='mailto:john@john.com')
s_agent = Agent.objects.filter(mbox='mailto:s@s.com')
auth_agent = Agent.objects.filter(mbox='mailto:test1@tester.com')
self.assertEqual(len(created_verbs), 1)
self.assertEqual(len(wrong_verbs), 0)
self.assertEqual(len(activities), 1)
self.assertEqual(len(stmts), 11)
self.assertEqual(len(wrong_agent), 0)
self.assertEqual(len(john_agent), 1)
self.assertEqual(len(s_agent), 1)
self.assertEqual(len(auth_agent), 1)
def test_post_list_rollback_with_void(self):
self.bunchostmts()
stmts = json.dumps([{"actor": {"objectType": "Agent", "mbox": "mailto:only-s@s.com"},
"object": {"objectType": "StatementRef", "id": str(self.exist_stmt_id)},
"verb": {"id": "http://adlnet.gov/expapi/verbs/voided", "display": {"en-US": "voided"}}},
{"verb": {"id": "http://example.com/verbs/wrong-kicked"}, "object": {"id": "act:test_wrong_list_post2"}}])
response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
voided_st = Statement.objects.get(statement_id=self.exist_stmt_id)
voided_verb = Verb.objects.filter(verb_id__contains='voided')
only_actor = Agent.objects.filter(mbox="mailto:only-s@s.com")
stmts = Statement.objects.all()
self.assertEqual(len(stmts), 11)
self.assertEqual(voided_st.voided, False)
self.assertEqual(len(voided_verb), 0)
self.assertEqual(len(only_actor), 0)
def test_post_list_rollback_with_subs(self):
self.bunchostmts()
sub_context_id = str(uuid.uuid4())
stmts = json.dumps([{"actor": {"objectType": "Agent", "mbox": "mailto:wrong-s@s.com"},
"verb": {"id": "http://example.com/verbs/wrong", "display": {"en-US": "wrong"}},
"object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"}},
{"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"verb": {"id": "http://example.com/verbs/wrong-next", "display": {"en-US": "wrong-next"}},
"object": {"objectType": "SubStatement",
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-ss@ss.com"}, "verb": {"id": "http://example.com/verbs/wrong-sub"},
"object": {"objectType": "Activity", "id": "act:wrong-testex.com"}, "result": {"completion": True, "success": True,
"response": "sub-wrong-kicked"}, "context": {"registration": sub_context_id,
"contextActivities": {"other": {"id": "act:sub-wrong-ActivityID"}}, "revision": "foo", "platform": "bar",
"language": "en-US", "extensions": {"ext:wrong-k1": "v1", "ext:wrong-k2": "v2"}}}},
{"verb": {"id": "http://example.com/verbs/wrong-kicked"}, "object": {"id": "act:test_wrong_list_post2"}}])
response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
s_agent = Agent.objects.filter(mbox="mailto:wrong-s@s.com")
ss_agent = Agent.objects.filter(mbox="mailto:wrong-ss@ss.com")
john_agent = Agent.objects.filter(mbox="mailto:john@john.com")
subs = SubStatement.objects.all()
wrong_verb = Verb.objects.filter(verb_id__contains="wrong")
activities = Activity.objects.filter(activity_id__contains="wrong")
stmts = Statement.objects.all()
self.assertEqual(len(stmts), 11)
self.assertEqual(len(s_agent), 0)
self.assertEqual(len(ss_agent), 0)
self.assertEqual(len(john_agent), 1)
# Only 1 sub from setup
self.assertEqual(len(subs), 1)
self.assertEqual(len(wrong_verb), 0)
self.assertEqual(len(activities), 0)
def test_post_list_rollback_context_activities(self):
self.bunchostmts()
sub_context_id = str(uuid.uuid4())
# Will throw error and need to rollback b/c last stmt is missing actor
stmts = json.dumps([{
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-s@s.com"},
"verb": {"id": "http://example.com/verbs/wrong", "display": {"en-US": "wrong"}},
"object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"}},
{
"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"verb": {"id": "http://example.com/verbs/wrong-next", "display": {"en-US": "wrong-next"}},
"object": {
"objectType": "SubStatement",
"actor": {"objectType": "Agent", "mbox": "mailto:wrong-ss@ss.com"},
"verb": {"id": "http://example.com/verbs/wrong-sub"},
"object": {"objectType": "Activity", "id": "act:wrong-testex.com"},
"result": {"completion": True, "success": True, "response": "sub-wrong-kicked"},
"context": {
"registration": sub_context_id,
"contextActivities": {
"other": [{"id": "act:subwrongActivityID"}, {"id": "act:foogie"}]},
"revision": "foo", "platform": "bar", "language": "en-US",
"extensions": {"ext:wrong-k1": "v1", "ext:wrong-k2": "v2"}}
}
},
{
"verb": {"id": "http://example.com/verbs/wrong-kicked"},
"object": {"id": "act:test_wrong_list_post2"}}])
response = self.client.post(reverse('lrs:statements'), stmts, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('actor is missing in Statement', response.content)
s_agent = Agent.objects.filter(mbox="mailto:wrong-s@s.com")
ss_agent = Agent.objects.filter(mbox="mailto:wrong-ss@ss.com")
john_agent = Agent.objects.filter(mbox="mailto:john@john.com")
subs = SubStatement.objects.all()
wrong_verb = Verb.objects.filter(verb_id__contains="wrong")
wrong_activities = Activity.objects.filter(
activity_id__contains="wrong")
foogie_activities = Activity.objects.filter(
activity_id__exact="act:foogie")
stmts = Statement.objects.all()
self.assertEqual(len(stmts), 11)
self.assertEqual(len(s_agent), 0)
self.assertEqual(len(ss_agent), 0)
self.assertEqual(len(john_agent), 1)
# Only 1 sub from setup
self.assertEqual(len(subs), 1)
self.assertEqual(len(wrong_verb), 0)
self.assertEqual(len(wrong_activities), 0)
self.assertEqual(len(foogie_activities), 1)
def test_unique_actor_authority(self):
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:timmay@timmay.com", "name": "timmay"},
"verb": {"id": "http://example.com/verbs/passed", "display": {"en-US": "passed"}},
"object": {"id": "act:test_post"}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
response2 = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth2, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response2.status_code, 200)
acts = Activity.objects.filter(activity_id='act:test_post').count()
self.assertEqual(acts, 1)
def test_stmts_w_same_regid(self):
stmt1_guid = str(uuid.uuid4())
stmt2_guid = str(uuid.uuid4())
reg_guid = str(uuid.uuid4())
stmt1 = json.dumps({"actor": {"mbox": "mailto:tom@example.com"},
"verb": {"id": "http:adlnet.gov/expapi/verbs/tested",
"display": {"en-US": "tested"}},
"object": {"id": "test:same.regid"},
"context": {"registration": reg_guid}
})
stmt2 = json.dumps({"actor": {"mbox": "mailto:tom@example.com"},
"verb": {"id": "http:adlnet.gov/expapi/verbs/tested",
"display": {"en-US": "tested"}},
"object": {"id": "test:same.regid.again"},
"context": {"registration": reg_guid}
})
param1 = {"statementId": stmt1_guid}
path1 = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param1))
stmt_payload1 = stmt1
resp1 = self.client.put(path1, stmt_payload1, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp1.status_code, 204)
param2 = {"statementId": stmt2_guid}
path2 = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param2))
stmt_payload2 = stmt2
resp2 = self.client.put(path2, stmt_payload2, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(resp2.status_code, 204)
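    # CELERY_ALWAYS_EAGER below forces any Celery task triggered by statement
    # processing to run synchronously inside the test process, so the
    # assertions can read fully processed data without a separate worker.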
@override_settings(CELERY_ALWAYS_EAGER=True,
TEST_RUNNER='djcelery.contrib.test_runner.CeleryTestSuiteRunner')
def test_void(self):
stmt_guid = str(uuid.uuid4())
stmt = {"actor": {"mbox": "mailto:tinytom@example.com"},
"verb": {"id": "http://tommy.com/my-testverbs/danced",
"display": {"en-US": "danced"}},
"object": {"id": "act:the-macarena"}}
param = {"statementId": stmt_guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
payload = json.dumps(stmt)
r = self.client.put(path, payload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 204)
r = self.client.get(reverse('lrs:statements'), Authorization=self.auth,
X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
obj = json.loads(r.content)
self.assertEqual(len(obj['statements']), 1)
obj = obj['statements'][0]
self.assertEqual(obj['id'], stmt_guid)
self.assertEqual(obj['actor']['mbox'], stmt['actor']['mbox'])
self.assertEqual(obj['verb'], stmt['verb'])
self.assertEqual(obj['object']['id'], stmt['object']['id'])
stmt2_guid = str(uuid.uuid4())
stmt2 = {"actor": {"mbox": "mailto:louo@example.com"},
"verb": {"id": "http://tommy.com/my-testverbs/laughed",
"display": {"en-US": "laughed at"}},
"object": {"objectType": "StatementRef", "id": stmt_guid}}
param = {"statementId": stmt2_guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(param))
payload2 = json.dumps(stmt2)
r = self.client.put(path, payload2, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 204)
r = self.client.get(reverse('lrs:statements'), Authorization=self.auth,
X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
obj = json.loads(r.content)
self.assertEqual(len(obj['statements']), 2)
objs = obj['statements']
for o in objs:
if o['id'] == stmt_guid:
self.assertEqual(o['actor']['mbox'], stmt['actor']['mbox'])
self.assertEqual(o['verb']['id'], stmt['verb']['id'])
self.assertEqual(o['object']['id'], stmt['object']['id'])
else:
self.assertEqual(o['actor']['mbox'], stmt2['actor']['mbox'])
self.assertEqual(o['verb']['id'], stmt2['verb']['id'])
self.assertEqual(o['object']['id'], stmt2['object']['id'])
stmtv = {"actor": {"mbox": "mailto:hulk@example.com"},
"verb": {"id": "http://adlnet.gov/expapi/verbs/voided"},
"object": {"objectType": "StatementRef",
"id": "%s" % stmt_guid}}
v_guid = str(uuid.uuid4())
paramv = {"statementId": v_guid}
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(paramv))
vpayload = json.dumps(stmtv)
r = self.client.put(path, vpayload, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 204)
r = self.client.get(reverse('lrs:statements'), Authorization=self.auth,
X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
obj = json.loads(r.content)
self.assertEqual(len(obj['statements']), 2)
objs = obj['statements']
for o in objs:
if o['id'] == v_guid:
self.assertEqual(o['actor']['mbox'], stmtv['actor']['mbox'])
self.assertEqual(o['verb']['id'], stmtv['verb']['id'])
self.assertEqual(o['object']['id'], stmtv['object']['id'])
else:
self.assertEqual(o['actor']['mbox'], stmt2['actor']['mbox'])
self.assertEqual(o['verb']['id'], stmt2['verb']['id'])
self.assertEqual(o['object']['id'], stmt2['object']['id'])
# get voided statement via voidedStatementId
path = "%s?%s" % (reverse('lrs:statements'), urllib.urlencode(
{"voidedStatementId": stmt_guid}))
r = self.client.get(path, Authorization=self.auth,
X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 200)
obj = json.loads(r.content)
self.assertEqual(obj['id'], stmt_guid)
self.assertEqual(obj['actor']['mbox'], stmt['actor']['mbox'])
self.assertEqual(obj['verb']['id'], stmt['verb']['id'])
self.assertEqual(obj['object']['id'], stmt['object']['id'])
# make sure voided statement returns a 404 on get w/ statementId req
path = "%s?%s" % (reverse('lrs:statements'),
urllib.urlencode({"statementId": stmt_guid}))
r = self.client.get(path, Authorization=self.auth,
X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(r.status_code, 404)
def test_act_id_iri(self):
act_id = "act:Flügel"
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
"object": {"id": act_id}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
stmt_db = Statement.objects.get(
statement_id=uuid.UUID(json.loads(response.content)[0]))
act = Activity.objects.get(id=stmt_db.object_activity.id)
self.assertEqual(act.activity_id.encode('utf-8'), act_id)
def test_invalid_act_id_iri(self):
act_id = "Flügel"
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
"object": {"id": act_id}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
self.assertIn('not a valid IRI', response.content)
def test_tag_act_id_uri(self):
act_id = "tag:adlnet.gov,2013:expapi:0.9:activities"
stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
"verb": {"id": "http://example.com/verbs/created", "display": {"en-US": "created"}},
"object": {"id": act_id}})
response = self.client.post(reverse('lrs:statements'), stmt, content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
stmt_db = Statement.objects.get(
statement_id=uuid.UUID(json.loads(response.content)[0]))
act = Activity.objects.get(id=stmt_db.object_activity.id)
self.assertEqual(act.activity_id, act_id)
@override_settings(CELERY_ALWAYS_EAGER=True,
TEST_RUNNER='djcelery.contrib.test_runner.CeleryTestSuiteRunner')
def test_large_batch(self):
import random
post_payload = []
acts = ["http://tom.com/act/1/foo", "http://adlnet.gov/act/arrgs/2",
"http://google.com/activity/eats/ants", "http://tom.com/act/3/boo"]
ctxs = ["http://ctx.com/one", "http://ctx.com/two"]
for x in range(1, 500):
s = {"verb": {"id": "http://example.com/verbs/passed"}, "object": {"id": ""}, "actor": {"mbox": "mailto:t@t.com"},
"context": {"contextActivities": {"grouping": [{"id": ""}]}}}
            s['object']['id'] = random.choice(acts)
            s['context']['contextActivities']['grouping'][0]['id'] = random.choice(ctxs)
post_payload.append(s)
response = self.client.post(reverse('lrs:statements'), json.dumps(post_payload), content_type="application/json",
Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
| 64.812437 | 306 | 0.536946 |
078aebba0281776544583a695d22daa9f51176e7 | 770 | py | Python | classroom/api/urls.py | anish1997bendarkar/pytest | de7bf758e75b7c4f921e89b2695aca14bf48ee05 | ["MIT"] | 16 | 2020-02-26T09:52:43.000Z | 2021-12-21T07:03:10.000Z | classroom/api/urls.py | anish1997bendarkar/pytest | de7bf758e75b7c4f921e89b2695aca14bf48ee05 | ["MIT"] | 4 | 2021-03-18T23:31:32.000Z | 2021-06-10T18:25:30.000Z | classroom/api/urls.py | anish1997bendarkar/pytest | de7bf758e75b7c4f921e89b2695aca14bf48ee05 | ["MIT"] | 11 | 2020-06-05T00:06:11.000Z | 2022-02-14T07:50:31.000Z |
from django.contrib import admin
from django.urls import path
from .views import (
StudentListAPIView,
StudentCreateAPIView,
StudentDeleteAPIView,
StudentDetailAPIView,
ClassroomNumberAPIView,
)
urlpatterns = [
path("student/list/", StudentListAPIView.as_view(), name="student_list_api"),
path("student/create/", StudentCreateAPIView.as_view(), name="student_create_api"),
path(
"student/<int:pk>/", StudentDetailAPIView.as_view(), name="student_detail_api"
),
path(
"student/<int:pk>/delete/",
StudentDeleteAPIView.as_view(),
name="student_delete_api",
),
path(
"classroom/<int:student_capacity>/",
ClassroomNumberAPIView.as_view(),
name="class_qs_api",
),
]
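# A small usage sketch (not part of this module): the route names above can be
# reversed in tests or views; the resulting path depends on where this URLconf
# is included.
#
#   from django.urls import reverse
#   reverse("student_detail_api", kwargs={"pk": 1})  # e.g. ".../student/1/"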
| 26.551724 | 87 | 0.671429 |
aa8cdd7fde92cc507f56b0b25c5e7a6c822ceb32 | 17,630 | py | Python | peeringdb/http.py | adamgent/peering-manager | 46858766cf131da2378010189d13485dec98332f | ["Apache-2.0"] | null | null | null | peeringdb/http.py | adamgent/peering-manager | 46858766cf131da2378010189d13485dec98332f | ["Apache-2.0"] | null | null | null | peeringdb/http.py | adamgent/peering-manager | 46858766cf131da2378010189d13485dec98332f | ["Apache-2.0"] | null | null | null |
import json
import logging
import requests
from django.db import transaction
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.utils import timezone
from .models import Network, NetworkIXLAN, PeerRecord, Prefix, Synchronization
NAMESPACES = {
"facility": "fac",
"internet_exchange": "ix",
"internet_exchange_facility": "ixfac",
"internet_exchange_lan": "ixlan",
"internet_exchange_prefix": "ixpfx",
"network": "net",
"network_facility": "netfac",
"network_internet_exchange_lan": "netixlan",
"organization": "org",
"network_contact": "poc",
}
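# Note: the dict values are the PeeringDB endpoint names, so e.g.
# NAMESPACES["network"] makes lookup() request "<PEERINGDB_API>/net"; the keys
# are only local, human-readable aliases.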
class Object(object):
"""
    Lightweight helper that loads JSON-compatible data into instance
    attributes for easier access.
"""
def __init__(self, data):
self.__dict__ = json.loads(json.dumps(data))
def __str__(self):
return str(self.__dict__)
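# A minimal usage sketch for Object (illustrative values): the JSON round trip
# normalises the input, and every top-level key becomes an attribute.
#
#   net = Object({"asn": 64496, "name": "Example Net"})
#   net.asn   # -> 64496
#   net.name  # -> "Example Net"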
class PeeringDB(object):
"""
Class used to interact with the PeeringDB API.
"""
logger = logging.getLogger("peering.manager.peeringdb")
def lookup(self, namespace, search):
"""
Sends a get request to the API given a namespace and some parameters.
"""
# Enforce trailing slash and add namespace
api_url = settings.PEERINGDB_API.strip("/") + "/" + namespace
# Check if the depth param is provided, add it if not
if "depth" not in search:
search["depth"] = 1
# Make the request
self.logger.debug("calling api: %s | %s", api_url, search)
response = requests.get(api_url, params=search)
return response.json() if response.status_code == 200 else None
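    # A minimal usage sketch (hypothetical ASN), assuming PEERINGDB_API points
    # at a PeeringDB-compatible endpoint: lookup() returns the decoded JSON
    # document, or None for any non-200 response.
    #
    #   api = PeeringDB()
    #   result = api.lookup(NAMESPACES["network"], {"asn": 64496})
    #   networks = result["data"] if result else []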
def record_last_sync(self, time, objects_changes):
"""
Save the last synchronization details (number of objects and time) for
later use (and logs).
"""
last_sync = None
number_of_changes = (
objects_changes["added"]
+ objects_changes["updated"]
+ objects_changes["deleted"]
)
# Save the last sync time only if objects were retrieved
if number_of_changes > 0:
values = {
"time": time,
"added": objects_changes["added"],
"updated": objects_changes["updated"],
"deleted": objects_changes["deleted"],
}
last_sync = Synchronization(**values)
last_sync.save()
self.logger.debug(
"synchronizated %s objects at %s", number_of_changes, last_sync.time
)
return last_sync
def get_last_synchronization(self):
"""
Return the last synchronization.
"""
try:
return Synchronization.objects.latest("time")
except Synchronization.DoesNotExist:
pass
return None
def get_last_sync_time(self):
"""
Return the last time of synchronization based on the latest record.
The time is returned as an integer UNIX timestamp.
"""
# Assume first sync
last_sync_time = 0
last_sync = self.get_last_synchronization()
if last_sync:
last_sync_time = last_sync.time.timestamp()
return int(last_sync_time)
def synchronize_objects(self, last_sync, namespace, model):
"""
Synchronizes all the objects of a namespace of the PeeringDB to the
local database. This function is meant to be run regularly to update
the local database with the latest changes.
If the object already exists locally it will be updated and no new
entry will be created.
If the object is marked as deleted in the PeeringDB, it will be locally
deleted.
This function returns the number of objects that have been successfully
synchronized to the local database.
"""
objects_added = 0
objects_updated = 0
objects_deleted = 0
# Get all network changes since the last sync
search = {"since": last_sync, "depth": 0}
result = self.lookup(namespace, search)
if not result:
return None
for data in result["data"]:
peeringdb_object = Object(data)
marked_as_deleted = peeringdb_object.status == "deleted"
marked_as_new = False
try:
# Get the local object by its ID
local_object = model.objects.get(pk=peeringdb_object.id)
# Object marked as deleted so remove it locally too
if marked_as_deleted:
local_object.delete()
objects_deleted += 1
self.logger.debug(
"deleted %s #%s from local database",
model._meta.verbose_name.lower(),
peeringdb_object.id,
)
continue
except model.DoesNotExist:
# Local object does not exist so create it
local_object = model()
marked_as_new = True
# Set the value for each field
for model_field in model._meta.get_fields():
field_name = model_field.name
# Do not try to follow foreign keys
if model_field.get_internal_type() == "ForeignKey":
continue
value = getattr(peeringdb_object, field_name)
try:
field = local_object._meta.get_field(field_name)
except FieldDoesNotExist:
field = None
self.logger.error(
"bug found? field: %s for model: %s",
field_name,
model._meta.verbose_name.lower(),
)
if field:
setattr(local_object, field_name, value)
try:
local_object.full_clean()
except ValidationError:
self.logger.error(
"bug found? error while validating id: %s for model: %s",
peeringdb_object.id,
model._meta.verbose_name.lower(),
)
continue
# Save the local object
local_object.save()
# Update counters
if marked_as_new:
objects_added += 1
self.logger.debug(
"created %s #%s from peeringdb",
model._meta.verbose_name.lower(),
local_object.id,
)
else:
objects_updated += 1
self.logger.debug(
"updated %s #%s from peeringdb",
model._meta.verbose_name.lower(),
local_object.id,
)
return (objects_added, objects_updated, objects_deleted)
def update_local_database(self, last_sync):
"""
        Update the local database by synchronizing all of the PeeringDB API
        namespaces that we actually care about.
"""
# Set time of sync
time_of_sync = timezone.now()
objects_to_sync = [
(NAMESPACES["network"], Network),
(NAMESPACES["network_internet_exchange_lan"], NetworkIXLAN),
(NAMESPACES["internet_exchange_prefix"], Prefix),
]
list_of_changes = []
# Make a single transaction, avoid too much database commits (poor
# speed) and fail the whole synchronization if something goes wrong
with transaction.atomic():
# Try to sync objects
for (namespace, object_type) in objects_to_sync:
changes = self.synchronize_objects(last_sync, namespace, object_type)
list_of_changes.append(changes)
objects_changes = {
"added": sum(added for added, _, _ in list_of_changes),
"updated": sum(updated for _, updated, _ in list_of_changes),
"deleted": sum(deleted for _, _, deleted in list_of_changes),
}
# Save the last sync time
return self.record_last_sync(time_of_sync, objects_changes)
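    # A typical synchronization run (sketch, not part of this class): feed the
    # timestamp of the previous run back in and apply every change since then.
    #
    #   api = PeeringDB()
    #   last_sync = api.update_local_database(api.get_last_sync_time())
    #   if last_sync:
    #       print(last_sync.added, last_sync.updated, last_sync.deleted)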
def clear_local_database(self):
"""
Delete all data related to the local database. This can be used to get a
fresh start.
"""
for model in [Network, NetworkIXLAN, PeerRecord, Synchronization]:
model.objects.all().delete()
def force_peer_records_discovery(self):
"""
Force the peer records cache to be [re]built. This function can be used
if this cache appears to be out of sync or inconsistent.
"""
indexed = 0
with transaction.atomic():
# First of all, delete all existing peer records
PeerRecord.objects.all().delete()
# Build the cache
for network_ixlan in NetworkIXLAN.objects.all():
# Ignore if we have no IPv6 and no IPv4 to peer with
if not network_ixlan.ipaddr6 and not network_ixlan.ipaddr4:
self.logger.debug(
"network ixlan with as%s and ixlan id %s"
" ignored, no ipv6 and no ipv4",
network_ixlan.asn,
network_ixlan.ixlan_id,
)
continue
network = None
try:
network = Network.objects.get(asn=network_ixlan.asn)
except Network.DoesNotExist:
self.logger.debug("unable to find network as%s", network_ixlan.asn)
if network:
PeerRecord.objects.create(
network=network, network_ixlan=network_ixlan
)
self.logger.debug(
"peer record with network as%s and ixlan" "id %s created",
network_ixlan.asn,
network_ixlan.ixlan_id,
)
indexed += 1
else:
self.logger.debug(
"network ixlan with as%s and ixlan id %s" " ignored",
network_ixlan.asn,
network_ixlan.ixlan_id,
)
return indexed
def get_autonomous_system(self, asn):
"""
Return an AS (and its details) given its ASN. The result can come from
the local database (cache built with the peeringdb_sync command). If
the AS details are not found in the local database, they will be
fetched online which will take more time.
"""
try:
# Try to get from cached data
network = Network.objects.get(asn=asn)
except Network.DoesNotExist:
# If no cached data found, query the API
search = {"asn": asn}
result = self.lookup(NAMESPACES["network"], search)
if not result or not result["data"]:
return None
network = Object(result["data"][0])
return network
def get_ix_network(self, ix_network_id):
"""
        Return an IX network (and its details) given its ID. The result
can come from the local database (cache built with the peeringdb_sync
command). If the IX network is not found in the local database, it will
be fetched online which will take more time.
"""
try:
# Try to get from cached data
network_ixlan = NetworkIXLAN.objects.get(id=ix_network_id)
except NetworkIXLAN.DoesNotExist:
# If no cached data found, query the API
search = {"id": ix_network_id}
result = self.lookup(NAMESPACES["network_internet_exchange_lan"], search)
if not result or not result["data"]:
return None
network_ixlan = Object(result["data"][0])
return network_ixlan
def get_ix_network_by_ip_address(self, ipv6_address=None, ipv4_address=None):
"""
        Return an IX network (and its details) given an IP address. The result can
come from the local database (cache built with the peeringdb_sync
command). If the IX network is not found in the local database, it will
be fetched online which will take more time.
"""
if not ipv6_address and not ipv4_address:
return None
search = {}
if ipv6_address:
search.update({"ipaddr6": ipv6_address})
if ipv4_address:
search.update({"ipaddr4": ipv4_address})
try:
# Try to get from cached data
network_ixlan = NetworkIXLAN.objects.get(**search)
except NetworkIXLAN.DoesNotExist:
# If no cached data found, query the API
result = self.lookup(NAMESPACES["network_internet_exchange_lan"], search)
if not result or not result["data"]:
return None
network_ixlan = Object(result["data"][0])
return network_ixlan
def get_ix_networks_for_asn(self, asn):
"""
Returns a list of all IX networks an AS is connected to.
"""
# Try to get from cached data
network_ixlans = NetworkIXLAN.objects.filter(asn=asn)
# If nothing found in cache, try to fetch data online
if not network_ixlans:
search = {"asn": asn}
result = self.lookup(NAMESPACES["network_internet_exchange_lan"], search)
if not result or not result["data"]:
return None
network_ixlans = []
for ix_network in result["data"]:
network_ixlans.append(Object(ix_network))
return network_ixlans
def get_common_ix_networks_for_asns(self, asn1, asn2):
"""
        Returns a list of all common IX networks to which both ASNs are
        connected. The list contains tuples of NetworkIXLAN objects. The
first element of each tuple is the IX LAN network of asn1, the second
element is the IX LAN network of asn2 matching the one of asn1.
"""
common_network_ixlans = []
# Grab IX LANs for both ASNs
asn1_network_ixlans = self.get_ix_networks_for_asn(asn1)
asn2_network_ixlans = self.get_ix_networks_for_asn(asn2)
        # If the IX networks for one of the ASes cannot be found, return the empty list
if not asn1_network_ixlans or not asn2_network_ixlans:
return common_network_ixlans
# Find IX LAN networks matching
for asn1_network_ixlan in asn1_network_ixlans:
for asn2_network_ixlan in asn2_network_ixlans:
if asn1_network_ixlan.ixlan_id == asn2_network_ixlan.ixlan_id:
# Keep track of the two IX LAN networks
common_network_ixlans.append(
(asn1_network_ixlan, asn2_network_ixlan)
)
return common_network_ixlans
def get_prefixes_for_ix_network(self, ix_network_id):
"""
Returns a list of all prefixes used by an IX network.
"""
prefixes = []
# Get the NetworkIXLAN object using its ID
network_ixlan = self.get_ix_network(ix_network_id)
if network_ixlan:
# Try to get prefixes from cache
ix_prefixes = Prefix.objects.filter(ixlan_id=network_ixlan.ixlan_id)
# If not cached data, try to fetch online
if not ix_prefixes:
search = {"ixlan_id": network_ixlan.ixlan_id}
result = self.lookup(NAMESPACES["internet_exchange_prefix"], search)
if not result or not result["data"]:
return prefixes
ix_prefixes = []
for ix_prefix in result["data"]:
ix_prefixes.append(Object(ix_prefix))
            # Build the list of prefixes
for ix_prefix in ix_prefixes:
prefixes.append(ix_prefix.prefix)
return prefixes
def get_peers_for_ix(self, ix_id):
"""
        Returns a list of dicts with details about peers on the IX
        corresponding to the given ID. This function tries to leverage the
        local database cache. If the cache is not built, it can take some time
        to execute because the number of peers on the IX increases the number
        of API calls that have to be made.
"""
# Try to get from cached data
network_ixlans = NetworkIXLAN.objects.filter(ix_id=ix_id)
# If nothing found in cache, try to fetch data online
if not network_ixlans:
search = {"ix_id": ix_id}
result = self.lookup(NAMESPACES["network_internet_exchange_lan"], search)
if not result or not result["data"]:
return None
network_ixlans = []
for data in result["data"]:
network_ixlans.append(Object(data))
# List potential peers
peers = []
for network_ixlan in network_ixlans:
# Ignore our own ASN
if network_ixlan.asn == settings.MY_ASN:
continue
# Get more details about the current network
network = self.get_autonomous_system(network_ixlan.asn)
# Package all gathered details
peers.append({"network": network, "network_ixlan": network_ixlan})
return peers
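

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how the lookup
# helpers above can be combined to list the common IXPs shared with another
# network. It assumes the manager class defined above is named `PeeringDB`;
# adjust the name if the class is called differently. The ASN below comes
# from the documentation range and is a placeholder.
def _example_print_common_ixps(other_asn=64511):
    api = PeeringDB()  # assumption: the API manager class defined above
    for ours, theirs in api.get_common_ix_networks_for_asns(
        settings.MY_ASN, other_asn
    ):
        # Each tuple holds our NetworkIXLAN first, then the other ASN's one
        print(ours.ixlan_id, theirs.ipaddr6 or theirs.ipaddr4)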
| 35.26 | 87 | 0.57544 |
bd6eba807c280f3006e20fe7adcfa8cbeff7fa9e
| 1,183 |
py
|
Python
|
opencanary/modules/sip.py
|
amorrowbellarmine/opencanary
|
a25209888faa3fbaa15db09e419d03b42897a7d5
|
[
"BSD-3-Clause"
] | 1,412 |
2015-08-06T03:54:37.000Z
|
2022-03-29T07:30:54.000Z
|
opencanary/modules/sip.py
|
amorrowbellarmine/opencanary
|
a25209888faa3fbaa15db09e419d03b42897a7d5
|
[
"BSD-3-Clause"
] | 170 |
2015-08-11T00:33:06.000Z
|
2022-03-30T05:11:38.000Z
|
opencanary/modules/sip.py
|
amorrowbellarmine/opencanary
|
a25209888faa3fbaa15db09e419d03b42897a7d5
|
[
"BSD-3-Clause"
] | 297 |
2015-08-17T09:25:41.000Z
|
2022-03-31T08:25:41.000Z
|
from opencanary.modules import CanaryService
from twisted.application import internet
from twisted.protocols.sip import Base
from twisted.internet.address import IPv4Address
"""
A log-only SIP server. It won't respond, but it will log any
SIP requests sent its way.
"""
class SIPServer(Base):
def handle_request(self, request, addr):
try:
            logdata = {'HEADERS': request.headers}
self.transport.getPeer = lambda: IPv4Address('UDP', addr[0], addr[1])
self.factory.log(logdata=logdata, transport=self.transport)
except Exception as e:
self.factory.log(logdata={'ERROR': e}, transport=self.transport)
class CanarySIP(CanaryService):
NAME = 'SIP'
def __init__(self, config=None, logger=None):
CanaryService.__init__(self, config=config, logger=logger)
self.port = int(config.getVal('sip.port', default=5060))
        self.logtype = self.logger.LOG_SIP_REQUEST
self.listen_addr = config.getVal('device.listen_addr', default='')
def getService(self):
f = SIPServer()
f.factory = self
return internet.UDPServer(self.port, f, interface=self.listen_addr)
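

# ---------------------------------------------------------------------------
# Hedged test sketch (not part of the module): one way to poke the canary and
# confirm that requests get logged. It sends a minimal, not fully
# RFC-compliant SIP OPTIONS request over UDP to the configured port (5060 by
# default above). Host and port values are placeholders.
def _example_send_sip_options(host='127.0.0.1', port=5060):
    import socket
    request = (
        'OPTIONS sip:probe@example.invalid SIP/2.0\r\n'
        'Via: SIP/2.0/UDP 127.0.0.1:5061\r\n'
        'From: <sip:probe@example.invalid>\r\n'
        'To: <sip:probe@example.invalid>\r\n'
        'Call-ID: example-call-id\r\n'
        'CSeq: 1 OPTIONS\r\n'
        'Content-Length: 0\r\n\r\n'
    )
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # The canary never replies, so a single datagram is all that is needed
        sock.sendto(request.encode('ascii'), (host, port))
    finally:
        sock.close()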
| 33.8 | 81 | 0.682164 |
b811a63acad21c85cc38b3b6ac1e1026db9940c3
| 7,507 |
py
|
Python
|
pytorch_optimizers/adam.py
|
madsbk/pytorch_optimizers
|
10eefd83277237d3aa2788a2a5a47ba3294a6e50
|
[
"Apache-2.0"
] | null | null | null |
pytorch_optimizers/adam.py
|
madsbk/pytorch_optimizers
|
10eefd83277237d3aa2788a2a5a47ba3294a6e50
|
[
"Apache-2.0"
] | null | null | null |
pytorch_optimizers/adam.py
|
madsbk/pytorch_optimizers
|
10eefd83277237d3aa2788a2a5a47ba3294a6e50
|
[
"Apache-2.0"
] | null | null | null |
import math
import os
from distutils.util import strtobool
import torch
from torch.optim.optimizer import Optimizer
from torch.hub import _check_module_exists
NUMBA_CUDA_EXIST = False
NUMBA_CUDA_THREAD_PER_BLOCK = 512
if not strtobool(os.environ.get('NO_NUMBA', 'n')) and _check_module_exists("numba.cuda"):
import numba.cuda
NUMBA_CUDA_EXIST = numba.cuda.is_available()
@numba.cuda.jit()
def numba_cuda_kernel(param, grad, exp_avg, exp_avg_sq, beta1,
beta2, step_size, bias_correction2, eps,
weight_decay):
i = numba.cuda.grid(1)
if i >= param.size:
return
if weight_decay != 0:
grad[i] += weight_decay * param[i]
exp_avg[i] = exp_avg[i] * beta1 + (1 - beta1) * grad[i]
exp_avg_sq[i] = exp_avg_sq[i] * beta2 + (1 - beta2) * grad[i] * grad[i]
denom = math.sqrt(exp_avg_sq[i]) / bias_correction2 + eps
param[i] = param[i] + (-step_size) * (exp_avg[i] / denom)
class Adam(Optimizer):
r"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
def __setstate__(self, state):
super(Adam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
# In order to reduce Numba overhead, we save the device arrays
# between calls to `step()` in `_nbstate`.
self._nbstate = getattr(self, '_nbstate', {})
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for param in group['params']:
if param.grad is None:
continue
# Perform optimization step
grad = param.grad.data
p = param.data
if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, '
                                       'please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[param]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p)
elif NUMBA_CUDA_EXIST and numba.cuda.is_cuda_array(p.data):
self._nbstate[param] = {
'param': numba.cuda.as_cuda_array(p.data.flatten()),
'grad': numba.cuda.as_cuda_array(grad.flatten()),
'exp_avg': numba.cuda.as_cuda_array(state['exp_avg'].data.flatten()),
'exp_avg_sq': numba.cuda.as_cuda_array(state['exp_avg_sq'].
data.flatten()),
'blockspergrid': math.ceil(p.data.numel() / NUMBA_CUDA_THREAD_PER_BLOCK)
}
weight_decay = group['weight_decay']
eps = group['eps']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = math.sqrt(1 - beta2 ** state['step'])
step_size = group['lr'] / bias_correction1
if param in self._nbstate:
s = self._nbstate[param]
numba_cuda_kernel[s['blockspergrid'],
NUMBA_CUDA_THREAD_PER_BLOCK](s['param'],
s['grad'],
s['exp_avg'],
s['exp_avg_sq'],
beta1, beta2,
step_size,
bias_correction2,
eps, weight_decay)
else:
if weight_decay != 0:
grad.add_(weight_decay, p.data)
exp_avg = state['exp_avg'].data
exp_avg_sq = state['exp_avg_sq'].data
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / bias_correction2).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / bias_correction2).add_(eps)
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
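

# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module): a minimal training step
# showing how this Adam variant plugs into a standard PyTorch workflow. The
# model, data and hyper-parameters are illustrative only; on a CUDA tensor the
# update may be dispatched to the Numba kernel above.
def _example_training_step():
    model = torch.nn.Linear(4, 1)
    optimizer = Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
    inputs = torch.randn(8, 4)
    targets = torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()  # one Adam update of the model parameters
    return loss.item()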
| 45.49697 | 100 | 0.515785 |
77a0c2c02d3262294696213f2e32732df83ea738
| 525 |
py
|
Python
|
class4/exercise5.py
|
agonzo777/pynet
|
8b2c2bbd71ea001ba0dc2acb20a4d46c7ddeaa12
|
[
"Apache-2.0"
] | null | null | null |
class4/exercise5.py
|
agonzo777/pynet
|
8b2c2bbd71ea001ba0dc2acb20a4d46c7ddeaa12
|
[
"Apache-2.0"
] | null | null | null |
class4/exercise5.py
|
agonzo777/pynet
|
8b2c2bbd71ea001ba0dc2acb20a4d46c7ddeaa12
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import pexpect
import time
import netmiko
from netmiko import ConnectHandler
password = '88newclass'
pynet1 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': password,
}
pynet2 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': password,
'port': 8022,
}
rc = ConnectHandler(**pynet2)
rc.config_mode()
output = rc.find_prompt()
print(output)
output = rc.check_config_mode()
print(output)
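
# Hedged continuation (not part of the original exercise): netmiko connections
# expose exit_config_mode() and disconnect() to leave configuration mode and
# close the SSH session cleanly.
rc.exit_config_mode()
rc.disconnect()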
| 15.909091 | 34 | 0.653333 |
0c65afa180db47038d20348fd747c92c3b44f8ef
| 7,344 |
py
|
Python
|
commands/views.py
|
guptaharsh13/notifIEEE
|
359ba33410ba3cd0b16fdbdfe6bfb45117067ece
|
[
"MIT"
] | 1 |
2022-01-08T19:02:22.000Z
|
2022-01-08T19:02:22.000Z
|
commands/views.py
|
guptaharsh13/notifIEEE
|
359ba33410ba3cd0b16fdbdfe6bfb45117067ece
|
[
"MIT"
] | null | null | null |
commands/views.py
|
guptaharsh13/notifIEEE
|
359ba33410ba3cd0b16fdbdfe6bfb45117067ece
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.conf import settings
import slack
import re
from utils.scheduler import scheduleMeet
from pprint import pprint
SLACK_VERIFICATION_TOKEN = getattr(settings, "SLACK_VERIFICATION_TOKEN", None)
SLACK_BOT_USER_TOKEN = getattr(settings, "SLACK_BOT_USER_TOKEN", None)
client = slack.WebClient(SLACK_BOT_USER_TOKEN)
BOT_ID = client.api_call("auth.test")["user_id"]
class PlanCommand(APIView):
def post(self, request):
payload = request.data
if not payload.get("token", None) == SLACK_VERIFICATION_TOKEN:
return Response(status=status.HTTP_403_FORBIDDEN)
team_id = payload.get("team_id", None)
meet_info = payload.get("text", None)
if not (team_id and meet_info):
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. Please try again.",
},
status=status.HTTP_400_BAD_REQUEST,
)
members = client.users_list(team_id=team_id)["members"]
members = list(
filter(lambda member: member["is_email_confirmed"], members))
emails = list(
map(
lambda member: {"email": member["profile"]["email"]},
members,
)
)
if not emails:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. No Team mates found !!",
},
status=status.HTTP_200_OK,
)
meet_link = re.findall(r"<((?:http|https).+)>", meet_info)
meet_link = list(
filter(lambda link: link.startswith(
"https://meet.google.com"), meet_link)
)
if not meet_link:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. INVALID G-meet link !!",
},
status=status.HTTP_200_OK,
)
if len(meet_link) > 1:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. You cannot add multiple G-meet links as of now !!",
},
status=status.HTTP_200_OK,
)
meet_link = meet_link[0]
s_datetime = re.findall(
r"(\d{1,2}(?:\.|/|-)\d{1,2}(?:\.|/|-)(?:\d{2}|\d{4})/\d{0,2}:\d{0,2})", meet_info
)
if not s_datetime:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. You did not mention start time !!",
},
status=status.HTTP_200_OK,
)
if len(s_datetime) > 1:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. You cannot add multiple start times as of now !!",
},
status=status.HTTP_200_OK,
)
s_datetime = s_datetime[0].split("/")
s_date = s_datetime[0]
if "." in s_date:
s_date = s_date.split(".")
elif "-" in s_date:
s_date = s_date.split("-")
else:
s_date = s_datetime[:3]
s_time = s_datetime[-1].split(":")
hour = s_time[0]
minute = s_time[1]
if not hour:
hour = 0
if not minute:
minute = 0
hour = int(hour)
minute = int(minute)
year = s_date[-1]
if len(year) == 2:
year = f"20{year}"
year = int(year)
month = int(s_date[1])
day = int(s_date[0])
duration = re.findall(r"duration:(\d{1})", meet_info)
if not duration:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. You did not enter your meet duration !!",
},
status=status.HTTP_200_OK,
)
if len(duration) > 1:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. You cannot add multilpe meet durations as of now !!",
},
status=status.HTTP_200_OK,
)
duration = int(duration[0])
meet_info = meet_info.split(",")
meet_info = list(map(lambda info: info.strip(), meet_info))
if not meet_info:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. You did not enter your meet name !!",
},
status=status.HTTP_200_OK,
)
meet_name = meet_info[0]
try:
meet_datetime = datetime(year=year, month=month,
day=day, hour=hour, minute=minute, second=0)
except:
return Response(
{
"response_type": "ephemeral",
"text": "Sorry, slash commando, that didn't work. Your datetime format was incorrect !!",
},
status=status.HTTP_200_OK,
)
try:
print("\n\n")
print(meet_name)
print(meet_link)
print(meet_datetime)
print(duration)
print(emails)
print("\n")
meet = scheduleMeet(meet_name=meet_name, meet_link=meet_link,
s_datetime=meet_datetime, duration=duration, emails=emails)
pprint(meet)
print("\n\n")
except Exception as e:
print(e)
return Response(
{"text": "Sorry, slash commando, that didn't work. We could not schedule your meeting !!"},
status=status.HTTP_200_OK,
)
return Response(
{"text": "Thank You !! Your meeting has been scheduled."},
status=status.HTTP_200_OK,
)
class Events(APIView):
def post(self, request):
slack_message = request.data
if slack_message.get("token", None) != SLACK_VERIFICATION_TOKEN:
return Response(status=status.HTTP_403_FORBIDDEN)
if slack_message.get("type", None) == "url_verification":
return Response(data=slack_message, status=status.HTTP_200_OK)
event_message = slack_message.get("event", None)
if event_message:
channel = event_message.get("channel", None)
text = event_message.get("text", "")
if not event_message.get("user", None) == BOT_ID:
# client.chat_postMessage(channel=channel, text=text)
print(f"Message found: {text} in channel: {channel}")
return Response(status=status.HTTP_200_OK)
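

# ---------------------------------------------------------------------------
# Hedged illustration (not used by the views above): based on the regexes in
# PlanCommand.post, the /plan text is expected to hold the meet name before
# the first comma, a Google Meet link wrapped in <>, a start time written as
# dd.mm.yy/HH:MM (dots, dashes or slashes in the date part) and a
# duration:<hours> token. All sample values below are made up.
def _example_plan_text():
    # Parses to meet_name="Weekly sync",
    # meet_link="https://meet.google.com/abc-defg-hij",
    # start datetime(2021, 12, 25, 18, 30) and duration=1.
    return (
        "Weekly sync, <https://meet.google.com/abc-defg-hij>, "
        "25.12.21/18:30, duration:1"
    )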
| 33.688073 | 123 | 0.513617 |
ec47428f90d74efe86c02bfc80e62ef3e5e23521
| 4,283 |
py
|
Python
|
jesse/store/__init__.py
|
slipperlobster/flipper
|
8482edd77604fcec2ea08913f1748c21be80dac7
|
[
"MIT"
] | 3 |
2021-09-26T15:55:00.000Z
|
2022-01-17T08:04:21.000Z
|
jesse/store/__init__.py
|
slipperlobster/flipper
|
8482edd77604fcec2ea08913f1748c21be80dac7
|
[
"MIT"
] | 26 |
2021-10-31T07:04:04.000Z
|
2022-03-24T04:24:21.000Z
|
jesse/store/__init__.py
|
slipperlobster/flipper
|
8482edd77604fcec2ea08913f1748c21be80dac7
|
[
"MIT"
] | null | null | null |
import jesse.helpers as jh
from jesse.config import config
from jesse.exceptions import InvalidRoutes
from jesse.routes import router
from .state_app import AppState
from .state_candles import CandlesState
from .state_completed_trades import CompletedTrades
from .state_exchanges import ExchangesState
from .state_logs import LogsState
from .state_orderbook import OrderbookState
from .state_orders import OrdersState
from .state_positions import PositionsState
from .state_tickers import TickersState
from .state_trades import TradesState
def install_routes() -> None:
considering_candles = set()
# when importing market data, considering_candles is all we need
if jh.is_collecting_data():
for r in router.market_data:
considering_candles.add((r.exchange, r.symbol))
config['app']['considering_candles'] = tuple(considering_candles)
return
# validate routes for duplicates:
# each exchange-symbol pair can be traded only once.
for r in router.routes:
considering_candles.add((r.exchange, r.symbol))
exchange = r.exchange
symbol = r.symbol
count = sum(
ro.exchange == exchange and ro.symbol == symbol
for ro in router.routes
)
if count != 1:
raise InvalidRoutes(
'each exchange-symbol pair can be traded only once. \nMore info: https://docs.jesse.trade/docs/routes.html#trading-multiple-routes')
    # When trading more than one route, make sure they all have the same quote
    # currency, because otherwise we cannot calculate correct performance metrics
first_routes_quote = jh.quote_asset(router.routes[0].symbol)
for r in router.routes:
if jh.quote_asset(r.symbol) != first_routes_quote:
raise InvalidRoutes('All trading routes must have the same quote asset.')
trading_exchanges = set()
trading_timeframes = set()
trading_symbols = set()
for r in router.routes:
trading_exchanges.add(r.exchange)
trading_timeframes.add(r.timeframe)
trading_symbols.add(r.symbol)
considering_exchanges = trading_exchanges.copy()
considering_timeframes = trading_timeframes.copy()
considering_symbols = trading_symbols.copy()
for e in router.extra_candles:
considering_candles.add((e['exchange'], e['symbol']))
considering_exchanges.add(e['exchange'])
considering_symbols.add(e['symbol'])
considering_timeframes.add(e['timeframe'])
# 1m must be present at all times
considering_timeframes.add('1m')
config['app']['considering_candles'] = tuple(considering_candles)
config['app']['considering_exchanges'] = tuple(considering_exchanges)
config['app']['considering_symbols'] = tuple(considering_symbols)
config['app']['considering_timeframes'] = tuple(considering_timeframes)
config['app']['trading_exchanges'] = tuple(trading_exchanges)
config['app']['trading_symbols'] = tuple(trading_symbols)
config['app']['trading_timeframes'] = tuple(trading_timeframes)
class StoreClass:
app = AppState()
orders = OrdersState()
completed_trades = CompletedTrades()
logs = LogsState()
exchanges = ExchangesState()
candles = CandlesState()
positions = PositionsState()
tickers = TickersState()
trades = TradesState()
orderbooks = OrderbookState()
def __init__(self) -> None:
self.vars = {}
def reset(self, force_install_routes: bool = False) -> None:
"""
Resets all the states within the store
Keyword Arguments:
force_install_routes {bool} -- used for unit_testing (default: {False})
"""
if not jh.is_unit_testing() or force_install_routes:
install_routes()
self.app = AppState()
self.orders = OrdersState()
self.completed_trades = CompletedTrades()
self.logs = LogsState()
self.exchanges = ExchangesState()
self.candles = CandlesState()
self.positions = PositionsState()
self.tickers = TickersState()
self.trades = TradesState()
self.orderbooks = OrderbookState()
# if not jh.is_unit_testing():
# install_routes()
store = StoreClass()
# store.reset()
| 34.264 | 148 | 0.68947 |
676a0da4bb76c8c3b76afd904abca4e5573bedaf
| 1,677 |
py
|
Python
|
tests/time_integration/chemistry_abc_explicit/rebless.py
|
sandialabs/Spitfire
|
65670e3ba5d1ccb4ac72524b77957706345c5bf6
|
[
"Apache-2.0"
] | 11 |
2020-03-20T02:10:17.000Z
|
2021-12-14T10:08:09.000Z
|
tests/time_integration/chemistry_abc_explicit/rebless.py
|
sandialabs/Spitfire
|
65670e3ba5d1ccb4ac72524b77957706345c5bf6
|
[
"Apache-2.0"
] | 18 |
2020-03-18T18:58:56.000Z
|
2021-12-21T02:35:35.000Z
|
tests/time_integration/chemistry_abc_explicit/rebless.py
|
sandialabs/Spitfire
|
65670e3ba5d1ccb4ac72524b77957706345c5bf6
|
[
"Apache-2.0"
] | 2 |
2021-05-31T17:24:56.000Z
|
2021-06-20T05:27:41.000Z
|
import pickle
def run():
from spitfire.time.integrator import odesolve
from spitfire.time.methods import RK4ClassicalS4P4
import numpy as np
def right_hand_side(c, k_ab, k_bc):
"""
Computes the right-hand side function for the ODE system.
Note that time integration requires a function that takes (t, y) as arguments.
        To accommodate this, we will write a lambda after defining the rate constants,
which passes the appropriate y value and rate constant to this function (and ignores the time).
:param c: current concentration vector
:param k_ab: the rate constant of the reaction A -> B
:param k_bc: the rate constant of the reaction A + B -> 2C
:return: right-hand side of the ODE system
"""
c_a = c[0]
c_b = c[1]
c_c = c[2]
q_1 = k_ab * c_a
q_2 = k_bc * c_a * c_b
return np.array([-q_1 - q_2, q_1 - q_2, 2. * q_2])
c0 = np.array([1., 0., 0.]) # initial condition
k_ab = 1. # A -> B rate constant
k_bc = 0.2 # A + B -> 2C rate constant
final_time = 10. # final time to integrate to
time_step_size = 0.1 # size of the time step used
t, sol = odesolve(lambda t, y: right_hand_side(y, k_ab, k_bc),
c0,
stop_at_time=final_time,
step_size=time_step_size,
method=RK4ClassicalS4P4(),
save_each_step=True)
return dict({'t': t.copy(), 'sol': sol.copy()})
if __name__ == '__main__':
output = run()
with open('gold.pkl', 'wb') as file_output:
pickle.dump(output, file_output)
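
# Hedged note (not part of the original script): written out, the system that
# right_hand_side encodes for A -> B (rate k_ab) and A + B -> 2C (rate k_bc) is
#   d[A]/dt = -k_ab*[A] - k_bc*[A]*[B]
#   d[B]/dt =  k_ab*[A] - k_bc*[A]*[B]
#   d[C]/dt =  2*k_bc*[A]*[B]
# which matches the vector returned above with q_1 = k_ab*[A], q_2 = k_bc*[A]*[B].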
| 34.22449 | 103 | 0.588551 |
89ac80c2e0d6ab41956717b8048cc0f0b6bc574f
| 389 |
py
|
Python
|
src/algoritmia/problems/fourier/interfaces.py
|
DavidLlorens/algoritmia
|
40ca0a89ea6de9b633fa5f697f0a28cae70816a2
|
[
"MIT"
] | 6 |
2018-09-15T15:09:10.000Z
|
2022-02-27T01:23:11.000Z
|
src/algoritmia/problems/fourier/interfaces.py
|
JeromeIllgner/algoritmia
|
406afe7206f2411557859bf03480c16db7dcce0d
|
[
"MIT"
] | null | null | null |
src/algoritmia/problems/fourier/interfaces.py
|
JeromeIllgner/algoritmia
|
406afe7206f2411557859bf03480c16db7dcce0d
|
[
"MIT"
] | 5 |
2018-07-10T20:19:55.000Z
|
2021-03-31T03:32:22.000Z
|
from abc import ABCMeta, abstractmethod
class IDiscreteFourierTransform(metaclass=ABCMeta): #[full
@abstractmethod
def transform(self, x: "IList<complex>") -> "IList<complex>": pass
def inverse_transform(self, x: "IList<complex>") -> "IList<complex>":
X = self.transform(x)
N = len(x)
for i in range(N): X[i] /= N
return X #]full
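

# Hedged example (not part of the interface module): a naive O(n^2) concrete
# subclass sketching one way `transform` could be implemented. It uses the
# usual forward-DFT convention exp(-2*pi*i*u*k/N); whether this is exactly the
# convention the rest of the library expects is an assumption here.
from cmath import exp, pi

class NaiveDFT(IDiscreteFourierTransform):
    def transform(self, x: "IList<complex>") -> "IList<complex>":
        N = len(x)
        return [sum(x[k] * exp(-2j * pi * u * k / N) for k in range(N))
                for u in range(N)]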
| 35.363636 | 74 | 0.611825 |
2e9562fe77f5ef4f7901bee933291de2728d023c
| 2,796 |
py
|
Python
|
huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/delete_public_ip_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1 |
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/delete_public_ip_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-iec/huaweicloudsdkiec/v1/model/delete_public_ip_request.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class DeletePublicIpRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'publicip_id': 'str'
}
attribute_map = {
'publicip_id': 'publicip_id'
}
def __init__(self, publicip_id=None):
"""DeletePublicIpRequest - a model defined in huaweicloud sdk"""
self._publicip_id = None
self.discriminator = None
self.publicip_id = publicip_id
@property
def publicip_id(self):
"""Gets the publicip_id of this DeletePublicIpRequest.
        Elastic public IP (EIP) ID
:return: The publicip_id of this DeletePublicIpRequest.
:rtype: str
"""
return self._publicip_id
@publicip_id.setter
def publicip_id(self, publicip_id):
"""Sets the publicip_id of this DeletePublicIpRequest.
        Elastic public IP (EIP) ID
:param publicip_id: The publicip_id of this DeletePublicIpRequest.
:type: str
"""
self._publicip_id = publicip_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeletePublicIpRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
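

# Hedged usage sketch (not part of the generated SDK file): constructing the
# request model and serializing it; the ID below is a placeholder.
def _example_build_request():
    request = DeletePublicIpRequest(publicip_id="example-publicip-id")
    return request.to_dict()  # -> {'publicip_id': 'example-publicip-id'}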
| 25.418182 | 74 | 0.54578 |
fa7c85a8eb4adae14ac06fcd91d0f50748a282b4
| 35,539 |
py
|
Python
|
coremltools/test/neural_network/test_quantization.py
|
tonybove-apple/coremltools
|
22a8877beec7bad136ba5612d5aacd8e323ecdfc
|
[
"BSD-3-Clause"
] | 2,740 |
2017-10-03T23:19:01.000Z
|
2022-03-30T15:16:39.000Z
|
coremltools/test/neural_network/test_quantization.py
|
tonybove-apple/coremltools
|
22a8877beec7bad136ba5612d5aacd8e323ecdfc
|
[
"BSD-3-Clause"
] | 1,057 |
2017-10-05T22:47:01.000Z
|
2022-03-31T23:51:15.000Z
|
coremltools/test/neural_network/test_quantization.py
|
tonybove-apple/coremltools
|
22a8877beec7bad136ba5612d5aacd8e323ecdfc
|
[
"BSD-3-Clause"
] | 510 |
2017-10-04T19:22:28.000Z
|
2022-03-31T12:16:52.000Z
|
# Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""Module containing unit tests for verifying various quantization."""
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import neural_network
import coremltools.models.neural_network.quantization_utils as quantization_utils
from coremltools.models.neural_network.quantization_utils import (
activate_int8_int8_matrix_multiplications,
MatrixMultiplyLayerSelector,
_quantize_spec_weights,
)
from coremltools._deps import _HAS_KERAS2_TF
from coremltools.models import (
_MLMODEL_FULL_PRECISION,
_QUANTIZATION_MODE_LINEAR_QUANTIZATION,
_QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS,
_QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE,
)
@unittest.skipIf(
not coremltools.utils._is_macos() or coremltools.utils._macos_version() < (10, 14),
"Missing macOS 10.14+. Skipping tests.",
)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class QuantizationNumericalCorrectnessTests(unittest.TestCase):
def runTest(self):
pass
def setUp(self):
self.qbits = 8 # n-bit quantization for tests
self.qmode = _QUANTIZATION_MODE_LINEAR_QUANTIZATION
self.custom_lut = None
from .test_keras2_numeric import KerasBasicNumericCorrectnessTest
self.keras_tester = KerasBasicNumericCorrectnessTest()
self.keras_tester._test_model = self._test_model
def _run_quantized_test(self, input_, full_precision_model, quantized_model, delta):
# Output from both models should be the same
full_output = full_precision_model.predict(input_)
quantized_output = quantized_model.predict(input_)
self.assertEqual(full_output.keys(), quantized_output.keys())
for key in full_output.keys():
full_output_flatten = full_output[key].flatten()
quantized_output_flatten = quantized_output[key].flatten()
self.assertTrue(len(full_output_flatten) == len(quantized_output_flatten))
norm_factor = np.maximum(full_output_flatten, quantized_output_flatten)
norm_factor = np.maximum(norm_factor, 1.0)
f_out = full_output_flatten / norm_factor
q_out = quantized_output_flatten / norm_factor
for idx, full_value in enumerate(f_out):
quantized_value = q_out[idx]
self.assertAlmostEqual(full_value, quantized_value, delta=delta)
def _test_model(
self,
model,
num_samples=1,
mode="random",
delta=1e-2,
model_dir=None,
transpose_keras_result=True,
one_dim_seq_flags=None,
model_precision=_MLMODEL_FULL_PRECISION,
):
# Get the model path
use_tmp_folder = False
if model_dir is None:
use_tmp_folder = True
model_dir = tempfile.mkdtemp()
# Get converted coreml model and sample input
(
input_names,
output_names,
_,
coreml_input,
) = self.keras_tester._get_coreml_model_params_and_test_input(
model, mode, one_dim_seq_flags
)
from .test_keras2_numeric import _get_coreml_model
coreml_model = _get_coreml_model(
model, input_names, output_names, model_precision=model_precision
)
# Now we quantize the model and dequantize it. We then use this model
# as our full precision model since quantizing this model again will
# result in 0 quantization error.
coreml_spec = coreml_model.get_spec()
quantization_utils._quantize_spec_weights(
spec=coreml_spec,
nbits=self.qbits,
quantization_mode=self.qmode,
lut_function=self.custom_lut,
)
# De-quantize model
quantization_utils._dequantize_nn_spec(spec=coreml_spec.neuralNetwork)
full_precision_model_spec = coreml_spec
# Quantize model from another copy
quantized_model_spec = quantization_utils._quantize_spec_weights(
spec=coreml_model.get_spec(),
nbits=self.qbits,
quantization_mode=self.qmode,
lut_function=self.custom_lut,
)
full_precision_model = coremltools.models.MLModel(full_precision_model_spec)
quantized_model = coremltools.models.MLModel(quantized_model_spec)
self._run_quantized_test(
coreml_input, full_precision_model, quantized_model, delta
)
# Clean up after ourselves
if use_tmp_folder and os.path.exists(model_dir):
shutil.rmtree(model_dir)
def test_quantized_tiny_inner_product(self):
self.keras_tester.test_tiny_inner_product()
def test_quantized_conv_batchnorm_random(self):
self.keras_tester.test_conv_batchnorm_random()
def test_quantized_conv_batchnorm_no_gamma_no_beta(self):
self.keras_tester.test_conv_batchnorm_no_gamma_no_beta()
def test_quantized_tiny_deconv_random(self):
self.keras_tester.test_tiny_deconv_random()
def test_quantized_tiny_deconv_random_same_padding(self):
self.keras_tester.test_tiny_deconv_random_same_padding()
def test_quantized_tiny_depthwise_conv_valid_pad(self):
self.keras_tester.test_tiny_depthwise_conv_valid_pad()
def test_quantized_tiny_separable_conv_valid_depth_multiplier(self):
self.keras_tester.test_tiny_separable_conv_valid_depth_multiplier()
def test_quantized_max_pooling_no_overlap(self):
self.keras_tester.test_max_pooling_no_overlap()
def test_quantized_dense_softmax(self):
self.keras_tester.test_dense_softmax()
def test_quantized_housenet_random(self):
self.keras_tester.test_housenet_random()
def test_quantized_large_input_length_conv1d_same_random(self):
self.keras_tester.test_large_input_length_conv1d_same_random()
def test_quantized_conv_dense(self):
self.keras_tester.test_conv_dense()
def test_quantized_tiny_conv_crop_1d_random(self):
self.keras_tester.test_tiny_conv_crop_1d_random()
def test_quantized_embedding(self):
self.keras_tester.test_embedding()
def test_quantized_tiny_conv_elu_random(self):
self.keras_tester.test_tiny_conv_elu_random()
def test_quantized_tiny_concat_random(self):
self.keras_tester.test_tiny_concat_random()
def test_quantized_tiny_dense_tanh_fused_random(self):
self.keras_tester.test_tiny_dense_tanh_fused_random()
def test_quantized_conv1d_flatten(self):
# Softmax after quantization appears to have a bigger error margin
self.keras_tester.test_conv1d_flatten(delta=2e-2)
def test_quantized_tiny_conv_dropout_random(self):
self.keras_tester.test_tiny_conv_dropout_random()
def test_quantized_tiny_mul_random(self):
self.keras_tester.test_tiny_mul_random()
def test_quantized_tiny_conv_thresholded_relu_random(self):
self.keras_tester.test_tiny_conv_thresholded_relu_random()
def test_quantized_tiny_seq2seq_rnn_random(self):
self.keras_tester.test_tiny_seq2seq_rnn_random()
def test_quantized_rnn_seq(self):
self.keras_tester.test_rnn_seq()
def test_quantized_medium_no_sequence_simple_rnn_random(self):
self.keras_tester.test_medium_no_sequence_simple_rnn_random()
def test_quantized_tiny_no_sequence_lstm_zeros(self):
self.keras_tester.test_tiny_no_sequence_lstm_zeros()
def test_quantized_tiny_no_sequence_lstm_ones(self):
self.keras_tester.test_tiny_no_sequence_lstm_ones()
def test_quantized_lstm_seq(self):
self.keras_tester.test_lstm_seq()
def test_quantized_medium_no_sequence_lstm_random(self):
self.keras_tester.test_medium_no_sequence_lstm_random()
def test_quantized_tiny_no_sequence_gru_random(self):
self.keras_tester.test_tiny_no_sequence_gru_random()
def test_quantized_gru_seq_backwards(self):
self.keras_tester.test_gru_seq_backwards()
def test_quantized_tiny_no_sequence_bidir_random(self):
self.keras_tester.test_tiny_no_sequence_bidir_random()
def test_quantized_tiny_no_sequence_bidir_random_gpu(self):
self.keras_tester.test_tiny_no_sequence_bidir_random_gpu()
def test_quantized_small_no_sequence_bidir_random(self):
self.keras_tester.test_small_no_sequence_bidir_random()
def test_quantized_medium_no_sequence_bidir_random(self):
self.keras_tester.test_medium_no_sequence_bidir_random()
def test_quantized_medium_bidir_random_return_seq_false(self):
self.keras_tester.test_medium_bidir_random_return_seq_false()
def test_quantized_tiny_sequence_lstm(self):
self.keras_tester.test_tiny_sequence_lstm()
def test_quantized__lstm_td(self):
self.keras_tester.test_lstm_td()
def test_quantized_large_channel_gpu(self):
self.keras_tester.test_large_channel_gpu()
def test_quantized_tiny_seq2seq_rnn_random(self):
self.keras_tester.test_tiny_seq2seq_rnn_random()
def test_quantized_lstm_seq_backwards(self):
self.keras_tester.test_lstm_seq_backwards()
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class SevenBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(SevenBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 7
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class SixBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(SixBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 6
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class FiveBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(FiveBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 5
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class FourBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(FourBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 4
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class ThreeBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(ThreeBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 3
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class TwoBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(TwoBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 2
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class OneBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(OneBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 1
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class LUTQuantizationNumericalCorrectnessTests(QuantizationNumericalCorrectnessTests):
def setUp(self):
super(LUTQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 8
self.qmode = _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS
def test_quantized_custom_lut(self):
pass
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class LUTSevenBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(LUTSevenBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 7
self.qmode = _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class LUTSixBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(LUTSixBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 6
self.qmode = _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class LUTFiveBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(LUTFiveBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 5
self.qmode = _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class LUTFourBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(LUTFourBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 4
self.qmode = _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class LUTThreeBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(LUTThreeBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 3
self.qmode = _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
@pytest.mark.slow
class LUTTwoBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(LUTTwoBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 2
self.qmode = _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class LUTOneBitQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(LUTOneBitQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 1
self.qmode = _QUANTIZATION_MODE_LOOKUP_TABLE_KMEANS
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class LUTCustomQuantizationNumericalCorrectnessTests(
QuantizationNumericalCorrectnessTests
):
def setUp(self):
super(LUTCustomQuantizationNumericalCorrectnessTests, self).setUp()
self.qbits = 8
self.qmode = _QUANTIZATION_MODE_CUSTOM_LOOKUP_TABLE
self.custom_lut = quantization_utils._get_linear_lookup_table_and_weight
from coremltools.converters import keras as keras_converter
@unittest.skipIf(
not coremltools.utils._is_macos() or coremltools.utils._macos_version() < (10, 14),
"Missing macOS 10.14+. Skipping tests.",
)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class AdvancedQuantizationNumericalCorrectnessTests(unittest.TestCase):
""" Quantization tests for advanced settings
"""
def test_8bit_symmetric_and_skips(self):
from keras.models import Sequential
from keras.layers import Conv2D
def stable_rel_error(x, ref):
err = x - ref
denom = np.maximum(np.abs(ref), np.ones_like(ref))
return np.abs(err) / denom
np.random.seed(1988)
input_dim = 16
num_kernels, kernel_height, kernel_width, input_channels = 64, 3, 3, 32
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=(input_dim, input_dim, input_channels),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
weight, bias = model.layers[0].get_weights()
num_filters = weight.shape[-1]
filter_shape = weight.shape[:-1]
new_weight = np.stack(
[4.0 * np.random.rand(*filter_shape) - 2 for i in range(num_filters)],
axis=-1,
)
model.layers[0].set_weights([new_weight, bias])
mlmodel = keras_converter.convert(model, ["data"], ["output_0"])
selector = quantization_utils.AdvancedQuantizedLayerSelector(
skip_layer_types=["batchnorm", "bias", "depthwiseConv"],
minimum_conv_kernel_channels=4,
minimum_conv_weight_count=4096,
)
q_mlmodel = quantization_utils.quantize_weights(mlmodel, 8, selector=selector)
input_shape = (1, 1, input_channels, input_dim, input_dim)
input_val = 2 * np.random.rand(*input_shape) - 1
coreml_input = {"data": input_val}
coreml_output = mlmodel.predict(coreml_input)
q_coreml_output = q_mlmodel.predict(coreml_input)
val = coreml_output["output_0"]
q_val = q_coreml_output["output_0"]
rel_err = stable_rel_error(q_val, val)
max_rel_err, mean_rel_err = np.max(rel_err), np.mean(rel_err)
self.assertTrue(max_rel_err < 0.25)
self.assertTrue(max_rel_err > 0.01)
self.assertTrue(mean_rel_err < 0.02)
@unittest.skipIf(
not coremltools.utils._is_macos() or coremltools.utils._macos_version() < (10, 16),
"Missing macOS 10.16+. Skipping tests.",
)
class DynamicQuantizedInt8Int8MatMul(unittest.TestCase):
"""
Quantization tests for dynamic Int8 - Int8 matrix multiplications
"""
def initialize(self):
np.random.seed(1988)
self.Cout, self.Cin = 16, 32
self.W = np.random.rand(self.Cout, self.Cin) * 20.0 - 10.0
self.b = np.random.rand(self.Cout) * 20.0 - 10.0
self.input_shape = (5, self.Cin)
input_features = [("data", datatypes.Array(*self.input_shape))]
output_features = [("output", None)]
self.builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True
)
self.selector = MatrixMultiplyLayerSelector()
def _test_predictions(
self, np_preds, coreml_preds, SNR=30, PSNR=40,
):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
noise = np_preds - coreml_preds
noise_var = np.sum(noise ** 2) / len(noise) + 1e-7
signal_energy = np.sum(np_preds ** 2) / len(np_preds)
max_signal_energy = np.amax(np_preds ** 2)
snr = 10 * np.log10(signal_energy / noise_var)
psnr = 10 * np.log10(max_signal_energy / noise_var)
self.assertGreaterEqual(snr, SNR)
self.assertGreaterEqual(psnr, PSNR)
def compare(self, specification_modified=True):
x = np.random.rand(*self.input_shape)
def _get_preds(spec):
mlmodel = coremltools.models.MLModel(spec)
return mlmodel.predict({"data": x}, useCPUOnly=True)["output"]
preds = _get_preds(self.builder.spec)
self.assertEqual(self.builder.spec.specificationVersion, 4)
quantized_spec = activate_int8_int8_matrix_multiplications(
self.builder.spec, self.selector
)
layer = self.builder.spec.neuralNetwork.layers[0]
layer_type = layer.WhichOneof("layer")
if layer_type == "innerProduct":
matmul_layer = layer.innerProduct
elif layer_type == "batchedMatmul":
matmul_layer = layer.batchedMatmul
wp = matmul_layer.weights
if specification_modified:
self.assertEqual(self.builder.spec.specificationVersion, 5)
quant_preds = _get_preds(quantized_spec)
self._test_predictions(preds, quant_preds, SNR=40)
self.assertEqual(len(wp.floatValue), 0)
else:
self.assertEqual(self.builder.spec.specificationVersion, 4)
quant_preds = _get_preds(quantized_spec)
np.testing.assert_array_almost_equal(preds, quant_preds)
self.assertGreater(len(wp.floatValue), 0)
def test_single_batched_matmul_no_bias(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.compare()
def test_single_batched_matmul_with_bias(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
bias=self.b,
)
self.compare()
def test_single_inner_product_no_bias(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=None,
has_bias=False,
)
self.compare()
def test_single_inner_product_with_bias(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.compare()
def test_inner_product_min_input_channels_valid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.minimum_input_channels = 31
self.compare()
def test_batched_matmul_min_input_channels_valid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.minimum_input_channels = 32
self.compare()
def test_inner_product_min_input_channels_invalid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.minimum_input_channels = 33
self.compare(specification_modified=False)
def test_batched_matmul_min_input_channels_invalid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.minimum_input_channels = 33
self.compare(specification_modified=False)
def test_batched_matmul_max_input_channels_valid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.maximum_input_channels = 32
self.compare()
def test_inner_product_max_input_channels_valid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.maximum_input_channels = 33
self.compare()
def test_batched_matmul_max_input_channels_invalid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.maximum_input_channels = 31
self.compare(specification_modified=False)
def test_inner_product_max_input_channels_invalid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.maximum_input_channels = 30
self.compare(specification_modified=False)
def test_inner_product_min_output_channels_valid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.minimum_output_channels = 16
self.compare()
def test_batched_matmul_min_output_channels_valid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.minimum_output_channels = 16
self.compare()
def test_inner_product_min_output_channels_invalid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.minimum_output_channels = 17
self.compare(specification_modified=False)
def test_batched_matmul_min_output_channels_invalid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.minimum_output_channels = 17
self.compare(specification_modified=False)
def test_batched_matmul_max_output_channels_valid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.maximum_output_channels = 17
self.compare()
def test_inner_product_max_output_channels_valid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.maximum_output_channels = 16
self.compare()
def test_batched_matmul_max_output_channels_invalid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.maximum_output_channels = 14
self.compare(specification_modified=False)
def test_inner_product_max_output_channels_invalid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.maximum_output_channels = 15
self.compare(specification_modified=False)
def test_inner_product_min_weight_count_valid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.minimum_weight_count = 512
self.compare()
def test_batched_matmul_min_weight_count_invalid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.minimum_weight_count = 513
self.compare(specification_modified=False)
def test_inner_product_layer_names_invalid(self):
self.initialize()
self.builder.add_inner_product(
name="ip",
input_name="data",
output_name="output",
input_channels=self.Cin,
output_channels=self.Cout,
W=self.W,
b=self.b,
has_bias=True,
)
self.selector.include_layers_with_names = ["ip1", "ip2"]
self.compare(specification_modified=False)
def test_batched_matmul_layer_names_valid(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
self.selector.include_layers_with_names = ["bm1", "batched_matmul"]
self.compare()
def test_batched_matmul_8bit_weight_quantized(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
_quantize_spec_weights(
self.builder.spec, 8, _QUANTIZATION_MODE_LINEAR_QUANTIZATION
)
self.compare()
def test_batched_matmul_4bit_weight_quantized(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
_quantize_spec_weights(
self.builder.spec, 4, _QUANTIZATION_MODE_LINEAR_QUANTIZATION
)
self.compare()
def test_batched_matmul_2bit_weight_quantized(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
_quantize_spec_weights(
self.builder.spec, 2, _QUANTIZATION_MODE_LINEAR_QUANTIZATION
)
self.compare()
def test_batched_matmul_1bit_weight_quantized(self):
self.initialize()
self.builder.add_batched_mat_mul(
name="batched_matmul",
input_names=["data"],
output_name="output",
weight_matrix_rows=self.Cin,
weight_matrix_columns=self.Cout,
W=self.W,
)
_quantize_spec_weights(
self.builder.spec, 1, _QUANTIZATION_MODE_LINEAR_QUANTIZATION
)
self.compare()
@unittest.skipIf(
not coremltools.utils._is_macos() or coremltools.utils._macos_version() < (10, 15),
"Missing macOS 10.15+. Skipping tests.",
)
class QuantizeWeightsAPI(unittest.TestCase):
def test_embeddingND_quantize(self):
input_features = [("data", datatypes.Array(10, 1))]
output_features = [("output", None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True
)
builder.add_embedding_nd(
name="embedding_nd",
input_name="data",
output_name="output",
vocab_size=300,
embedding_size=20,
W=np.random.rand(20, 300),
)
spec = builder.spec
model_fp32 = coremltools.models.MLModel(spec)
self.assertEqual(
len(spec.neuralNetwork.layers[0].embeddingND.weights.floatValue), 6000
)
# quantize to FP16
model_fp16 = quantization_utils.quantize_weights(model_fp32, nbits=16)
spec_fp16 = model_fp16.get_spec()
self.assertEqual(
len(spec_fp16.neuralNetwork.layers[0].embeddingND.weights.floatValue), 0
)
self.assertEqual(
len(spec_fp16.neuralNetwork.layers[0].embeddingND.weights.float16Value),
2 * 6000,
)
# quantize to uint8
model_uint8 = quantization_utils.quantize_weights(model_fp32, nbits=8)
spec_uint8 = model_uint8.get_spec()
self.assertEqual(
len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.floatValue), 0
)
self.assertEqual(
len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.float16Value), 0
)
self.assertEqual(
len(spec_uint8.neuralNetwork.layers[0].embeddingND.weights.rawValue), 6000
)
# quantize to uint5
model_uint5 = quantization_utils.quantize_weights(model_fp32, nbits=5)
spec_uint5 = model_uint5.get_spec()
self.assertEqual(
len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.floatValue), 0
)
self.assertEqual(
len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.float16Value), 0
)
self.assertEqual(
len(spec_uint5.neuralNetwork.layers[0].embeddingND.weights.rawValue), 3750
) # 3750 = 5*6000/8
| 33.527358 | 88 | 0.669068 |
e9ebfc09d99518cc92f5425ce0b5c199c61813df
| 3,642 |
py
|
Python
|
qa/rpc-tests/nodehandling.py
|
ripox/ripoffcoinv2
|
c9d34fbf5350ea7b6a7e36465155bd8e195d2575
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/nodehandling.py
|
ripox/ripoffcoinv2
|
c9d34fbf5350ea7b6a7e36465155bd8e195d2575
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/nodehandling.py
|
ripox/ripoffcoinv2
|
c9d34fbf5350ea7b6a7e36465155bd8e195d2575
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class NodeHandlingTest (BitcoinTestFramework):
def run_test(self):
###########################
# setban/listbanned tests #
###########################
assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3) #wait till the nodes are disconnected
assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].setban("127.0.0.0/24", "add")
assert_equal(len(self.nodes[2].listbanned()), 1)
try:
self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
try:
self.nodes[2].setban("127.0.0.1", "remove")
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
##test persisted banlist
self.nodes[2].setban("127.0.0.0/32", "add")
self.nodes[2].setban("127.0.0.0/24", "add")
self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
listBeforeShutdown = self.nodes[2].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
        time.sleep(2) #make 100% sure the ban on 192.168.0.1 has expired
#stop node
stop_node(self.nodes[2], 2)
self.nodes[2] = start_node(2, self.options.tmpdir)
listAfterShutdown = self.nodes[2].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
###########################
# RPC disconnectnode test #
###########################
url = urlparse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
time.sleep(2) #disconnecting a node needs a little bit of time
for node in self.nodes[0].getpeerinfo():
assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
connect_nodes_bi(self.nodes,0,1) #reconnect the node
found = False
for node in self.nodes[0].getpeerinfo():
if node['addr'] == url.hostname+":"+str(p2p_port(1)):
found = True
assert(found)
if __name__ == '__main__':
NodeHandlingTest ().main ()
| 41.862069 | 137 | 0.595003 |
8be1aa48d6dd07eebad7a646b20c39b0b7a61056
| 8,487 |
py
|
Python
|
python_modules/dagster-graphql/dagster_graphql/schema/external.py
|
mitodl/dagster
|
c94cd8d0f5f67722790e8a176228aa4bdcaa0068
|
[
"Apache-2.0"
] | 1 |
2021-04-30T00:19:20.000Z
|
2021-04-30T00:19:20.000Z
|
python_modules/dagster-graphql/dagster_graphql/schema/external.py
|
mitodl/dagster
|
c94cd8d0f5f67722790e8a176228aa4bdcaa0068
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql/schema/external.py
|
mitodl/dagster
|
c94cd8d0f5f67722790e8a176228aa4bdcaa0068
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from dagster import check
from dagster.core.host_representation import (
ExternalRepository,
ExternalRepositoryOrigin,
GrpcServerRepositoryLocationHandle,
ManagedGrpcPythonEnvRepositoryLocationHandle,
RepositoryLocation,
)
from dagster.core.host_representation.grpc_server_state_subscriber import (
LocationStateChangeEventType,
)
from dagster.utils.error import SerializableErrorInfo
from dagster_graphql import dauphin
from dagster_graphql.implementation.fetch_solids import get_solid, get_solids
from dagster_graphql.schema.errors import DauphinPythonError
DauphinLocationStateChangeEventType = dauphin.Enum.from_enum(LocationStateChangeEventType)
class DauphinRepository(dauphin.ObjectType):
class Meta:
name = "Repository"
def __init__(self, repository, repository_location):
self._repository = check.inst_param(repository, "repository", ExternalRepository)
self._repository_location = check.inst_param(
repository_location, "repository_location", RepositoryLocation
)
super(DauphinRepository, self).__init__(name=repository.name)
id = dauphin.NonNull(dauphin.ID)
name = dauphin.NonNull(dauphin.String)
location = dauphin.NonNull("RepositoryLocation")
pipelines = dauphin.non_null_list("Pipeline")
usedSolids = dauphin.Field(dauphin.non_null_list("UsedSolid"))
usedSolid = dauphin.Field("UsedSolid", name=dauphin.NonNull(dauphin.String))
origin = dauphin.NonNull("RepositoryOrigin")
partitionSets = dauphin.non_null_list("PartitionSet")
scheduleDefinitions = dauphin.non_null_list("ScheduleDefinition")
sensors = dauphin.non_null_list("Sensor")
def resolve_id(self, _graphene_info):
return self._repository.get_external_origin_id()
def resolve_origin(self, graphene_info):
origin = self._repository.get_external_origin()
return graphene_info.schema.type_named("RepositoryOrigin")(origin)
def resolve_location(self, graphene_info):
return graphene_info.schema.type_named("RepositoryLocation")(self._repository_location)
def resolve_scheduleDefinitions(self, graphene_info):
schedules = self._repository.get_external_schedules()
return sorted(
[
graphene_info.schema.type_named("ScheduleDefinition")(graphene_info, schedule)
for schedule in schedules
],
key=lambda schedule: schedule.name,
)
def resolve_sensors(self, graphene_info):
sensors = self._repository.get_external_sensors()
return sorted(
[
graphene_info.schema.type_named("Sensor")(graphene_info, sensor)
for sensor in sensors
],
key=lambda sensor: sensor.name,
)
def resolve_pipelines(self, graphene_info):
return sorted(
[
graphene_info.schema.type_named("Pipeline")(pipeline)
for pipeline in self._repository.get_all_external_pipelines()
],
key=lambda pipeline: pipeline.name,
)
def resolve_usedSolid(self, _graphene_info, name):
return get_solid(self._repository, name)
def resolve_usedSolids(self, _graphene_info):
return get_solids(self._repository)
def resolve_partitionSets(self, graphene_info):
return (
graphene_info.schema.type_named("PartitionSet")(self._repository.handle, partition_set)
for partition_set in self._repository.get_external_partition_sets()
)
class DauphinRepositoryOrigin(dauphin.ObjectType):
class Meta:
name = "RepositoryOrigin"
repository_location_name = dauphin.NonNull(dauphin.String)
repository_name = dauphin.NonNull(dauphin.String)
repository_location_metadata = dauphin.non_null_list("RepositoryMetadata")
def __init__(self, origin):
self._origin = check.inst_param(origin, "origin", ExternalRepositoryOrigin)
def resolve_repository_location_name(self, _graphene_info):
return self._origin.repository_location_origin.location_name
def resolve_repository_name(self, _graphene_info):
return self._origin.repository_name
def resolve_repository_location_metadata(self, graphene_info):
metadata = self._origin.repository_location_origin.get_display_metadata()
return [
graphene_info.schema.type_named("RepositoryMetadata")(key=key, value=value)
for key, value in metadata.items()
if value is not None
]
class DauphinRepositoryMetadata(dauphin.ObjectType):
class Meta:
name = "RepositoryMetadata"
key = dauphin.NonNull(dauphin.String)
value = dauphin.NonNull(dauphin.String)
class DauphinRepositoryLocationOrLoadFailure(dauphin.Union):
class Meta:
name = "RepositoryLocationOrLoadFailure"
types = ("RepositoryLocation", "RepositoryLocationLoadFailure")
class DauphinRepositoryLocation(dauphin.ObjectType):
class Meta:
name = "RepositoryLocation"
id = dauphin.NonNull(dauphin.ID)
name = dauphin.NonNull(dauphin.String)
is_reload_supported = dauphin.NonNull(dauphin.Boolean)
environment_path = dauphin.String()
repositories = dauphin.non_null_list("Repository")
server_id = dauphin.String()
def __init__(self, location):
self._location = check.inst_param(location, "location", RepositoryLocation)
environment_path = (
location.location_handle.executable_path
if isinstance(location.location_handle, ManagedGrpcPythonEnvRepositoryLocationHandle)
else None
)
server_id = (
location.location_handle.server_id
if isinstance(location.location_handle, GrpcServerRepositoryLocationHandle)
else None
)
check.invariant(location.name is not None)
super(DauphinRepositoryLocation, self).__init__(
name=location.name,
environment_path=environment_path,
is_reload_supported=location.is_reload_supported,
server_id=server_id,
)
def resolve_id(self, _):
return self.name
def resolve_repositories(self, graphene_info):
return [
graphene_info.schema.type_named("Repository")(repository, self._location)
for repository in self._location.get_repositories().values()
]
class DauphinRepositoryLocationLoadFailure(dauphin.ObjectType):
class Meta:
name = "RepositoryLocationLoadFailure"
id = dauphin.NonNull(dauphin.ID)
name = dauphin.NonNull(dauphin.String)
error = dauphin.NonNull("PythonError")
def __init__(self, name, error):
check.str_param(name, "name")
check.inst_param(error, "error", SerializableErrorInfo)
super(DauphinRepositoryLocationLoadFailure, self).__init__(
name=name, error=DauphinPythonError(error)
)
def resolve_id(self, _):
return self.name
class DauphinRepositoryConnection(dauphin.ObjectType):
class Meta:
name = "RepositoryConnection"
nodes = dauphin.non_null_list("Repository")
class DauphinRepositoryLocationConnection(dauphin.ObjectType):
class Meta:
name = "RepositoryLocationConnection"
nodes = dauphin.non_null_list("RepositoryLocationOrLoadFailure")
class DauphinLocationStateChangeSubscription(dauphin.ObjectType):
class Meta(object):
name = "LocationStateChangeSubscription"
event = dauphin.Field(dauphin.NonNull("LocationStateChangeEvent"))
class DauphinLocationStateChangeEvent(dauphin.ObjectType):
class Meta(object):
name = "LocationStateChangeEvent"
event_type = dauphin.NonNull("LocationStateChangeEventType")
message = dauphin.NonNull(dauphin.String)
location_name = dauphin.NonNull(dauphin.String)
server_id = dauphin.Field(dauphin.String)
def get_location_state_change_observable(graphene_info):
context = graphene_info.context
return context.location_state_events.map(
lambda event: graphene_info.schema.type_named("LocationStateChangeSubscription")(
event=graphene_info.schema.type_named("LocationStateChangeEvent")(
event_type=event.event_type,
location_name=event.location_name,
message=event.message,
server_id=event.server_id,
),
)
)
| 34.782787 | 99 | 0.712148 |
488e9122eeefe520eee11ed20bd429d681a29150
| 831 |
py
|
Python
|
jnpy/experiments/Qt/zetcode_tutorial/C5_Events_and_signals/c1_signal_slot.py
|
jojoquant/jnpy
|
c874060af4b129ae09cee9f8542517b7b2f6573b
|
[
"MIT"
] | 5 |
2020-05-19T07:32:39.000Z
|
2022-03-14T09:09:48.000Z
|
jnpy/experiments/Qt/zetcode_tutorial/C5_Events_and_signals/c1_signal_slot.py
|
jojoquant/jnpy
|
c874060af4b129ae09cee9f8542517b7b2f6573b
|
[
"MIT"
] | null | null | null |
jnpy/experiments/Qt/zetcode_tutorial/C5_Events_and_signals/c1_signal_slot.py
|
jojoquant/jnpy
|
c874060af4b129ae09cee9f8542517b7b2f6573b
|
[
"MIT"
] | 3 |
2020-04-02T08:30:17.000Z
|
2020-05-03T12:12:05.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
'''
created by Fangyang on Time:2019/11/10
'''
__author__ = 'Fangyang'
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QWidget, QLCDNumber, QSlider, QVBoxLayout, QApplication
)
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
lcd = QLCDNumber(self)
sld = QSlider(Qt.Horizontal, self)
vbox = QVBoxLayout()
vbox.addWidget(lcd)
vbox.addWidget(sld)
self.setLayout(vbox)
sld.valueChanged.connect(lcd.display)
self.setGeometry(300, 300, 300, 300)
self.setWindowTitle('Signal and slot')
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| 19.325581 | 59 | 0.622142 |
98db201b0c6af51ea318faf9e8cef5aa23787712
| 2,282 |
py
|
Python
|
data/helpers.py
|
ZXisSpider/attention-is-all-you-need-pytorch
|
431222202dbbb86daa4bca42d9a3bd215c27056c
|
[
"MIT"
] | null | null | null |
data/helpers.py
|
ZXisSpider/attention-is-all-you-need-pytorch
|
431222202dbbb86daa4bca42d9a3bd215c27056c
|
[
"MIT"
] | null | null | null |
data/helpers.py
|
ZXisSpider/attention-is-all-you-need-pytorch
|
431222202dbbb86daa4bca42d9a3bd215c27056c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import os
import re
import time
import unicodedata
def show_plot(points):
plt.figure()
fig, ax = plt.subplots()
# loc = ticker.MultipleLocator(base=0.2) # put ticks at regular intervals
# ax.yaxis.set_major_locator(loc)
plt.plot(points)
plt.show()
fig.show()
def as_minutes(s):
"""
    :param s: total number of seconds
    :return: the elapsed time formatted as minutes and seconds
"""
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
"""
    :param since: start time of the computation
    :param percent: fraction of the work completed so far
    :return: elapsed time and an estimate of the remaining time, as a formatted string
"""
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
# Lowercase, trim, and remove non-letter characters
def normalize_string(s):
"""
    :param s: the input sentence
    :return: the sentence lowercased, with punctuation separated by spaces and other non-letter characters removed
"""
s = unicode_to_ascii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
# Turns a unicode string to plain ASCII (http://stackoverflow.com/a/518232/2809427)
def unicode_to_ascii(s):
"""
    :param s: the input sentence
    :return: the sentence converted from Unicode to plain ASCII (accents stripped)
"""
chars = [c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn']
char_list = ''.join(chars)
return char_list
def validate_language(l):
"""
    :param l: name of the data file (without extension)
    :return: None; exits with an error if the data file does not exist
"""
p = './data/{}.txt'.format(l)
p = os.path.abspath(p)
print(p)
if not os.path.exists(p):
url = 'http://www.manythings.org/anki/'
print("{}.txt does not exist in the data directory. Please go to '{}' and download the data set.".format(l, url))
exit(1)
def validate_language_params(l):
"""
    :param l: language name used in the model parameter file names
    :return: None; exits with an error if the model parameter files do not exist
"""
is_missing = (not os.path.exists('./data/attention_params_{}'.format(l))
or not os.path.exists('./data/decoder_params_{}'.format(l))
or not os.path.exists('./data/encoder_params_{}'.format(l)))
if is_missing:
print("Model params for language '{}' do not exist in the data directory. Please train a new model for this language.".format(l))
exit(1)
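# --- Hedged usage sketch (not part of the original module): exercising the
# text-normalisation and timing helpers defined above.
if __name__ == '__main__':
    print(normalize_string("Je suis déjà parti!"))  # roughly: "je suis deja parti !"
    start = time.time()
    time.sleep(1.0)
    print(time_since(start, 0.25))  # "<elapsed> (- <estimated remaining>)"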
| 24.276596 | 137 | 0.594216 |
986027994312d05be41fe1383635710a3f74206a
| 1,981 |
py
|
Python
|
sourced/ml/tests/test_uast_struct_to_bag.py
|
vmarkovtsev/ml
|
22699b2f44901b84507d15e732003955024e6755
|
[
"Apache-2.0"
] | 122 |
2017-11-15T15:19:19.000Z
|
2022-03-23T13:36:34.000Z
|
sourced/ml/tests/test_uast_struct_to_bag.py
|
vmarkovtsev/ml
|
22699b2f44901b84507d15e732003955024e6755
|
[
"Apache-2.0"
] | 176 |
2017-11-14T18:11:21.000Z
|
2019-05-16T04:12:31.000Z
|
sourced/ml/tests/test_uast_struct_to_bag.py
|
vmarkovtsev/ml
|
22699b2f44901b84507d15e732003955024e6755
|
[
"Apache-2.0"
] | 58 |
2017-11-14T18:07:08.000Z
|
2021-01-28T11:41:21.000Z
|
import unittest
from bblfsh import BblfshClient
from sourced.ml.algorithms import UastRandomWalk2Bag, UastSeq2Bag
from sourced.ml.tests.models import SOURCE_PY
class Uast2RandomWalk2BagTest(unittest.TestCase):
def setUp(self):
self.uast_random_walk2bag = UastRandomWalk2Bag(seq_len=[2, 3])
self.uast = BblfshClient("0.0.0.0:9432").parse(SOURCE_PY).uast
def test_uast_to_bag(self):
bag = self.uast_random_walk2bag(self.uast)
self.assertGreater(len(bag), 0, "Expected size of bag should be > 0")
def test_equivalence_prepare_starting_nodes(self):
starting_nodes_old = self.prepare_starting_nodes(self.uast)
starting_nodes = self.uast_random_walk2bag.uast2walks.prepare_starting_nodes(self.uast)
self.assertEqual(len(starting_nodes_old), len(starting_nodes))
def structure(tree):
from collections import Counter
return set(Counter(len(node.children) for node in tree))
self.assertEqual(structure(starting_nodes_old), structure(starting_nodes))
def prepare_starting_nodes(self, uast):
starting_nodes = []
self._prepare_starting_nodes(uast, None, starting_nodes)
return starting_nodes
def _prepare_starting_nodes(self, root, parent, starting_nodes):
node = self.uast_random_walk2bag.uast2walks._extract_node(node=root, parent=parent)
starting_nodes.append(node)
for ch in root.children:
node.children.append(self._prepare_starting_nodes(
ch, parent=node, starting_nodes=starting_nodes))
class UastSeq2BagTest(unittest.TestCase):
def setUp(self):
self.uast_seq2bag = UastSeq2Bag(seq_len=[2, 3])
self.uast = BblfshClient("0.0.0.0:9432").parse(SOURCE_PY).uast
def test_uast_to_bag(self):
bag = self.uast_seq2bag(self.uast)
self.assertGreater(len(bag), 0, "Expected size of bag should be > 0")
if __name__ == "__main__":
unittest.main()
| 35.375 | 95 | 0.712267 |
4bffb15a753d1ebb80f44fceeb83073734c0d37b
| 5,489 |
py
|
Python
|
low_level_simulation/src/rosbridge_suite/rosbridge_library/test/internal/publishers/test_publisher_consistency_listener.py
|
abiantorres/autonomous-vehicles-system-simulation
|
3f0112036b2b270f5055729c648a1310976df933
|
[
"Apache-2.0"
] | 60 |
2021-09-07T12:42:48.000Z
|
2022-03-12T09:30:36.000Z
|
low_level_simulation/src/rosbridge_suite/rosbridge_library/test/internal/publishers/test_publisher_consistency_listener.py
|
abiantorres/autonomous-vehicles-system-simulation
|
3f0112036b2b270f5055729c648a1310976df933
|
[
"Apache-2.0"
] | 222 |
2021-10-29T22:00:27.000Z
|
2022-03-29T20:56:34.000Z
|
low_level_simulation/src/rosbridge_suite/rosbridge_library/test/internal/publishers/test_publisher_consistency_listener.py
|
abiantorres/autonomous-vehicles-system-simulation
|
3f0112036b2b270f5055729c648a1310976df933
|
[
"Apache-2.0"
] | 1 |
2021-08-13T08:30:25.000Z
|
2021-08-13T08:30:25.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import rospy
import rostest
import unittest
from time import sleep, time
from rosbridge_library.internal.publishers import *
from rosbridge_library.internal import ros_loader
from rosbridge_library.internal.message_conversion import *
from std_msgs.msg import String, Int32
class TestPublisherConsistencyListener(unittest.TestCase):
def setUp(self):
rospy.init_node("test_publisher_consistency_listener")
def test_listener_timeout(self):
""" See whether the listener can correctly time out """
topic = "/test_listener_timeout"
type = String
publisher = rospy.Publisher(topic, type)
listener = PublisherConsistencyListener()
listener.attach(publisher)
self.assertFalse(listener.timed_out())
sleep(listener.timeout / 2.0)
self.assertFalse(listener.timed_out())
sleep(listener.timeout / 2.0 + 0.1)
self.assertTrue(listener.timed_out())
def test_listener_attach_detach(self):
""" See whether the listener actually attaches and detaches itself """
topic = "/test_listener_attach_detach"
type = String
publisher = rospy.Publisher(topic, type)
orig_publish = publisher.publish
listener = PublisherConsistencyListener()
listener_publish = listener.publish_override
self.assertNotEqual(orig_publish, listener_publish)
self.assertNotIn(listener, publisher.impl.subscriber_listeners)
listener.attach(publisher)
self.assertEqual(publisher.publish, listener_publish)
self.assertNotEqual(publisher.publish, orig_publish)
self.assertIn(listener, publisher.impl.subscriber_listeners)
listener.detach()
self.assertEqual(publisher.publish, orig_publish)
self.assertNotEqual(publisher.publish, listener_publish)
self.assertNotIn(listener, publisher.impl.subscriber_listeners)
def test_immediate_publish_fails_without(self):
""" This test makes sure the failure case that the PublisherConsistency
Listener is trying to solve, is indeed a failure case """
topic = "/test_immediate_publish_fails_without"
msg_class = String
msg = String()
string = "why halo thar"
msg.data = string
received = {"msg": None}
def callback(msg):
received["msg"] = msg
rospy.Subscriber(topic, msg_class, callback)
publisher = rospy.Publisher(topic, msg_class)
publisher.publish(msg)
sleep(0.5)
self.assertNotEqual(received["msg"], msg)
self.assertEqual(received["msg"], None)
def test_immediate_publish(self):
""" This test makes sure the PublisherConsistencyListener is working"""
topic = "/test_immediate_publish"
msg_class = String
msg = String()
string = "why halo thar"
msg.data = string
received = {"msg": None}
def callback(msg):
print("Received a msg! ", msg)
received["msg"] = msg
rospy.Subscriber(topic, msg_class, callback)
class temp_listener(rospy.SubscribeListener):
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
print("peer subscribe in temp listener")
listener = PublisherConsistencyListener()
publisher = rospy.Publisher(topic, msg_class, temp_listener())
listener.attach(publisher)
publisher.publish(msg)
sleep(0.5)
self.assertEqual(received["msg"], msg)
def test_immediate_multi_publish_fails_without(self):
""" This test makes sure the failure case that the PublisherConsistency
Listener is trying to solve, is indeed a failure case, even for large
message buffers """
topic = "/test_immediate_multi_publish_fails_without"
msg_class = Int32
msgs = []
for i in range(100):
msg = Int32()
msg.data = i
msgs.append(msg)
received = {"msgs": []}
def callback(msg):
received["msgs"].append(msg)
rospy.Subscriber(topic, msg_class, callback)
publisher = rospy.Publisher(topic, msg_class)
for msg in msgs:
publisher.publish(msg)
sleep(0.5)
self.assertEqual(len(received["msgs"]), 0)
self.assertNotEqual(received["msgs"], msgs)
def test_immediate_multi_publish(self):
""" This test makes sure the PublisherConsistencyListener is working
even with a huge message buffer"""
topic = "/test_immediate_multi_publish"
msg_class = Int32
msgs = []
for i in range(100):
msg = Int32()
msg.data = i
msgs.append(msg)
received = {"msgs": []}
def callback(msg):
received["msgs"].append(msg)
rospy.Subscriber(topic, msg_class, callback)
listener = PublisherConsistencyListener()
publisher = rospy.Publisher(topic, msg_class)
listener.attach(publisher)
for msg in msgs:
publisher.publish(msg)
sleep(0.5)
self.assertEqual(len(received["msgs"]), len(msgs))
self.assertEqual(received["msgs"], msgs)
PKG = 'rosbridge_library'
NAME = 'test_publisher_consistency_listener'
if __name__ == '__main__':
rostest.unitrun(PKG, NAME, TestPublisherConsistencyListener)
| 30.837079 | 79 | 0.65276 |
dbe7a5199aaa83599cdfde26de4df1092a25d66c
| 2,020 |
py
|
Python
|
backend/sentiment-analysis/sentiment-analysis-sample.py
|
North-Seattle-College/ad440-winter2022-tuesday-repo
|
0778958ec6ba3606819a7b24533366380467dbbb
|
[
"Apache-2.0"
] | null | null | null |
backend/sentiment-analysis/sentiment-analysis-sample.py
|
North-Seattle-College/ad440-winter2022-tuesday-repo
|
0778958ec6ba3606819a7b24533366380467dbbb
|
[
"Apache-2.0"
] | 116 |
2022-01-12T03:43:01.000Z
|
2022-03-24T20:56:33.000Z
|
backend/sentiment-analysis/sentiment-analysis-sample.py
|
North-Seattle-College/ad440-winter2022-tuesday-repo
|
0778958ec6ba3606819a7b24533366380467dbbb
|
[
"Apache-2.0"
] | 5 |
2022-01-16T22:40:18.000Z
|
2022-03-09T01:44:11.000Z
|
import string
import nltk
import json
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import matplotlib.pyplot as plt
import logging  # Set up logging to monitor progress
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
import pandas as pd
import boto3
import botocore
# !pip install sagemaker
import sagemaker
from sagemaker import get_execution_role
sagemaker_session = sagemaker.Session()
role = "christopher.navocz1@seattlecolleges.edu"
bucket_name = 'deploy-sagemaker-conversation'
s3_url = 's3://deploy-sagemaker-conversation/floop_data_15k.json'
conn = boto3.client('s3')
contents = conn.list_objects(Bucket = bucket_name)['Contents']
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
# s3_client.list_objects(Bucket = bucket_name)['Contents']
dataset = conn.get_object(Bucket = bucket_name, Key = 'floop_data_15k.json')
s3_client.get_object(Bucket = bucket_name, Key = 'floop_data_15k.json')
path = "floop_data_15k.json"
dataset = pd.read_json(path).values.tolist()
def clean_dataset(dataset):
new_ds = map(lambda x: x[0].lower(),dataset)
return list(new_ds)
new_ds = clean_dataset(dataset)
def sentiment_analysis(sentiment_text):
score = SentimentIntensityAnalyzer().polarity_scores(sentiment_text)
if score['neg'] > score['pos']:
return 'Negative'
elif score['neg'] < score['pos']:
return 'Positive'
else:
return 'Neutral'
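# --- Hedged sanity check (not part of the original sample): spot-check the helper
# above on two hand-written sentences before applying it to the whole dataset.
print(sentiment_analysis("i really enjoyed this lesson, great work!"))  # expected: Positive
print(sentiment_analysis("this was confusing and frustrating."))        # expected: Negative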
sentiments = []
for i in new_ds:
sentiments.append(sentiment_analysis(i))
result = pd.DataFrame({'Original Data':dataset, 'Sentiment Identified':sentiments})
result.to_csv('results.csv')
print(result)
print(result['Sentiment Identified'].value_counts())
plt.pie(result['Sentiment Identified'].value_counts(), labels = result['Sentiment Identified'].value_counts().keys(), autopct='%.1f%%')
plt.show()
| 31.076923 | 135 | 0.751485 |
19f3be539d13bd34de28690516cc289e03d77947
| 5,263 |
py
|
Python
|
configure_pretraining.py
|
trisongz/electra
|
9dbb2b96edf6bb39965003eb126d41ef1e715eb3
|
[
"Apache-2.0"
] | 1 |
2020-08-12T21:08:56.000Z
|
2020-08-12T21:08:56.000Z
|
configure_pretraining.py
|
trisongz/electra
|
9dbb2b96edf6bb39965003eb126d41ef1e715eb3
|
[
"Apache-2.0"
] | null | null | null |
configure_pretraining.py
|
trisongz/electra
|
9dbb2b96edf6bb39965003eb126d41ef1e715eb3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config controlling hyperparameters for pre-training ELECTRA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
class PretrainingConfig(object):
"""Defines pre-training hyperparameters."""
def __init__(self, model_name, data_dir, **kwargs):
self.model_name = model_name
self.debug = False # debug mode for quickly running things
self.do_train = True # pre-train ELECTRA
self.do_eval = False # evaluate generator/discriminator on unlabeled data
# loss functions
self.electra_objective = True # if False, use the BERT objective instead
self.gen_weight = 1.0 # masked language modeling / generator loss
self.disc_weight = 50.0 # discriminator loss
self.mask_prob = 0.15 # percent of input tokens to mask out / replace
# optimization
self.learning_rate = 5e-4
self.lr_decay_power = 1.0 # linear weight decay by default
self.weight_decay_rate = 0.01
self.num_warmup_steps = 10000
# training settings
self.iterations_per_loop = 200
self.save_checkpoints_steps = 1000
self.num_train_steps = 1000000
self.num_eval_steps = 100
# model settings
self.model_size = "small" # one of "small", "base", or "large"
# override the default transformer hparams for the provided model size; see
# modeling.BertConfig for the possible hparams and util.training_utils for
# the defaults
self.model_hparam_overrides = (
kwargs["model_hparam_overrides"]
if "model_hparam_overrides" in kwargs else {})
self.embedding_size = None # bert hidden size by default
self.vocab_size = 30522 # number of tokens in the vocabulary
self.do_lower_case = True # lowercase the input?
# generator settings
self.uniform_generator = False # generator is uniform at random
self.untied_generator_embeddings = False # tie generator/discriminator
# token embeddings?
self.untied_generator = True # tie all generator/discriminator weights?
self.generator_layers = 1.0 # frac of discriminator layers for generator
self.generator_hidden_size = 0.25 # frac of discrim hidden size for gen
self.disallow_correct = False # force the generator to sample incorrect
# tokens (so 15% of tokens are always
# fake)
self.temperature = 1.0 # temperature for sampling from generator
# batch sizes
self.max_seq_length = 128
self.train_batch_size = 128
self.eval_batch_size = 128
# TPU settings
self.use_tpu = False
self.num_tpu_cores = 1
self.tpu_job_name = None
self.tpu_name = None # cloud TPU to use for training
self.tpu_zone = None # GCE zone where the Cloud TPU is located in
self.gcp_project = None # project name for the Cloud TPU-enabled project
# default locations of data files
self.pretrain_tfrecords = os.path.join(
data_dir, "pretrain_tfrecords/pretrain_data.tfrecord*")
self.vocab_file = os.path.join(data_dir, "vocab.txt")
self.model_dir = os.path.join(data_dir, "models", model_name)
results_dir = os.path.join(self.model_dir, "results")
self.results_txt = os.path.join(results_dir, "unsup_results.txt")
self.results_pkl = os.path.join(results_dir, "unsup_results.pkl")
# update defaults with passed-in hyperparameters
self.update(kwargs)
self.max_predictions_per_seq = int((self.mask_prob + 0.005) *
self.max_seq_length)
# debug-mode settings
if self.debug:
self.train_batch_size = 8
self.num_train_steps = 20
self.eval_batch_size = 4
self.iterations_per_loop = 1
self.num_eval_steps = 2
# defaults for different-sized model
if self.model_size == "small":
self.embedding_size = 128
# Here are the hyperparameters we used for larger models; see Table 6 in the
# paper for the full hyperparameters
else:
self.max_seq_length = 512
self.learning_rate = 2e-4
if self.model_size == "base":
self.embedding_size = 768
self.generator_hidden_size = 0.33333
self.train_batch_size = 256
else:
self.embedding_size = 1024
self.mask_prob = 0.25
self.train_batch_size = 2048
# passed-in-arguments override (for example) debug-mode defaults
self.update(kwargs)
def update(self, kwargs):
for k, v in kwargs.items():
if k not in self.__dict__:
raise ValueError("Unknown hparam " + k)
self.__dict__[k] = v
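# --- Hedged usage sketch (not part of the original file): constructing a config with
# a couple of overrides; the model name and data directory below are made-up values.
if __name__ == "__main__":
  demo_config = PretrainingConfig(
      "electra_demo",        # hypothetical model name
      "/tmp/electra_data",   # hypothetical data directory
      debug=True,            # debug mode shrinks batch sizes and step counts
      model_size="small",
  )
  print(demo_config.train_batch_size, demo_config.num_train_steps)  # 8 20 in debug mode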
| 38.416058 | 80 | 0.689341 |
aeac0fabe4570d7ff3e22f2aea10abfbf127b47a
| 9,051 |
py
|
Python
|
othello.py
|
James-QiuHaoran/Othello
|
6725bd0a581fa60e3403d71265f9f5aa174eda97
|
[
"MIT"
] | 10 |
2018-02-24T21:54:27.000Z
|
2021-11-29T02:41:49.000Z
|
othello.py
|
James-QiuHaoran/Othello
|
6725bd0a581fa60e3403d71265f9f5aa174eda97
|
[
"MIT"
] | null | null | null |
othello.py
|
James-QiuHaoran/Othello
|
6725bd0a581fa60e3403d71265f9f5aa174eda97
|
[
"MIT"
] | 4 |
2018-02-24T21:54:29.000Z
|
2021-04-03T23:00:11.000Z
|
import ai
class IllegalMove(Exception):
def __init__(self, message):
self.message = message
class Othello(object):
# 0 - Empty
# 1 - Black (Player 1)
# 2 - White (Player 2)
def __init__(self):
super().__init__()
self.player = 1
self.victory = 0 # 0 - ongoing | 1 - black win | 2 - white win | (-1) - draw
self.whiteTiles = 2
self.blackTiles = 2
self.board = [[0 for x in range(8)] for x in range(8)]
self.board[3][3] = 1
self.board[3][4] = 2
self.board[4][3] = 2
self.board[4][4] = 1
# set useAI = False to disable AI opponent - two-player mode
self.useAI = True
# set up AI - player-computer mode
self.ai = ai.GameAI(self)
self.changed = True
self.AIReadyToMove = False
self.debug = False # True for debugging
def playerMove(self, x, y):
# if the game is over or not player's turn
if self.victory != 0 or (self.useAI and self.player != 1):
return
self.performMove(x, y)
# AI's turn and AI is ready to move
if self.useAI and self.player == 2:
self.AIReadyToMove = True
if self.debug:
print("AI is ready to move!")
def performMove(self, x, y):
if self.debug:
print("Check whether move (" + str(x) + ", " + str(y) + ") is legal or not ...")
# check whether the block has been occupied
if self.board[x][y] != 0:
raise IllegalMove(" - Block has already been occupied!")
else:
# place the piece and flip necessary pieces
numFlipped = self.placePiece(self.board, x, y, self.player, PLAYMODE=True)
if self.debug:
print("Flipped " + str(numFlipped) + " pieces!")
self.changed = True
# check game ending
allTiles = [item for sublist in self.board for item in sublist]
emptyTiles = sum(1 for tile in allTiles if tile == 0)
whiteTiles = sum(1 for tile in allTiles if tile == 2)
blackTiles = sum(1 for tile in allTiles if tile == 1)
print("[Console MSG] Current state - empty: " + str(emptyTiles) + " white: " + str(whiteTiles) + " black: " + str(blackTiles))
if self.debug:
for x in range(0, 8):
for y in range(0, 8):
print(str(self.board[x][y]) + " ", end = '')
print('')
# no moves left to make
if whiteTiles < 1 or blackTiles < 1 or emptyTiles < 1:
self.endGame(whiteTiles, blackTiles)
return
# check available moves of its opponent
movesFound = self.moveCanBeMade(self.board, 3 - self.player)
if not movesFound:
if self.debug:
print("Player " + str(3 - self.player) + " cannot move!")
# opponent cannot move, do not alternate
movesFound = self.moveCanBeMade(self.board, self.player)
if not movesFound:
# this player cannot move either, end game
if self.debug:
print("Player " + str(self.player) + "cannot move either!")
self.endGame(whiteTiles, blackTiles)
return
else:
if self.debug:
print("Player " + str(self.player) + " can move, then move!")
if self.useAI and self.player == 2:
self.ai.performMove()
# this player can move, move
self.changed = True
else:
# opponent can move, alternate between player 1 and 2
self.player = 3 - self.player
self.changed = True
def moveCanBeMade(self, board, playerID):
movesFound = False
for row in range(0, 8):
for col in range(0, 8):
if movesFound:
continue
elif board[row][col] == 0:
numAvailableMoves = self.placePiece(board, row, col, playerID, PLAYMODE=False)
if numAvailableMoves > 0:
movesFound = True
return movesFound
def AIMove(self):
self.ai.performMove()
self.AIReadyToMove = False
def endGame(self, whiteTiles, blackTiles):
if whiteTiles > blackTiles:
self.victory = 2
elif whiteTiles < blackTiles:
self.victory = 1
else:
self.victory = -1
self.changed = True
self.whiteTiles = whiteTiles
self.blackTiles = blackTiles
""" return: the number of flips given that (row, col) will be occupied by player.
param: PLAYMODE:
- True for board flipping after a piece is put by the player
- False for available number of moves checking
"""
def placePiece(self, board, row, col, playerID, PLAYMODE=True):
if PLAYMODE:
board[row][col] = self.player
count = 0 # record number of flips
        # record the pieces along the current row (__column) and column (__row);
        # note the variable names are swapped relative to what they actually hold
__column = board[row]
__row = [board[i][col] for i in range(0,8)]
# check up direction
if playerID in __column[:col]:
changes = []
searchCompleted = False
for i in range(col-1, -1, -1):
if searchCompleted:
continue
piece = __column[i]
if piece == 0:
changes = []
searchCompleted = True
elif piece == playerID:
searchCompleted = True
else:
changes.append(i)
# perform flippings
if searchCompleted:
count += len(changes)
if PLAYMODE:
for i in changes:
board[row][i] = self.player
# check down direction
if playerID in __column[col:]:
changes = []
searchCompleted = False
for i in range(col+1, 8, 1):
if searchCompleted:
continue
piece = __column[i]
if piece == 0:
changes = []
searchCompleted = True
elif piece == playerID:
searchCompleted = True
else:
changes.append(i)
# perform flippings
if searchCompleted:
count += len(changes)
if PLAYMODE:
for i in changes:
board[row][i] = self.player
# check left direction
if playerID in __row[:row]:
changes = []
searchCompleted = False
for i in range(row-1, -1, -1):
if searchCompleted:
continue
piece = __row[i]
if piece == 0:
changes = []
searchCompleted = True
elif piece == playerID:
searchCompleted = True
else:
changes.append(i)
# perform flippings
if searchCompleted:
count += len(changes)
if PLAYMODE:
for i in changes:
board[i][col] = self.player
# check right direction
if playerID in __row[row:]:
changes = []
searchCompleted = False
for i in range(row+1, 8, 1):
if searchCompleted:
continue
piece = __row[i]
if piece == 0:
changes = []
searchCompleted = True
elif piece == playerID:
searchCompleted = True
else:
changes.append(i)
# perform flippings
if searchCompleted:
count += len(changes)
if PLAYMODE:
for i in changes:
board[i][col] = self.player
# check along diagonal directions
# upper-left direction
i = 1
ulDiagonal = []
while row - i >= 0 and col - i >= 0:
ulDiagonal.append(board[row-i][col-i])
i += 1
if playerID in ulDiagonal:
changes = []
searchCompleted = False
for i in range(0, len(ulDiagonal)):
piece = ulDiagonal[i]
if searchCompleted:
continue
if piece == 0:
changes = []
searchCompleted = True
elif piece == playerID:
searchCompleted = True
else:
changes.append((row-(i+1), col-(i+1)))
# perform flippings
if searchCompleted:
count += len(changes)
if PLAYMODE:
for i,j in changes:
board[i][j] = self.player
# upper-right direction
i = 1
urDiagonal = []
while row + i < 8 and col - i >= 0:
urDiagonal.append(board[row+i][col-i])
i += 1
if playerID in urDiagonal:
changes = []
searchCompleted = False
for i in range(0, len(urDiagonal)):
piece = urDiagonal[i]
if searchCompleted:
continue
if piece == 0:
changes = []
searchCompleted = True
elif piece == playerID:
searchCompleted = True
else:
changes.append((row+(i+1), col-(i+1)))
# perform flippings
if searchCompleted:
count += len(changes)
if PLAYMODE:
for i,j in changes:
board[i][j] = self.player
# lower-left direction
i = 1
llDiagonal = []
while row - i >= 0 and col + i < 8:
llDiagonal.append(board[row-i][col+i])
i += 1
if playerID in llDiagonal:
changes = []
searchCompleted = False
for i in range(0, len(llDiagonal)):
piece = llDiagonal[i]
if searchCompleted:
continue
if piece == 0:
changes = []
searchCompleted = True
elif piece == playerID:
searchCompleted = True
else:
changes.append((row-(i+1), col+(i+1)))
# perform flippings
if searchCompleted:
count += len(changes)
if PLAYMODE:
for i,j in changes:
board[i][j] = self.player
# lower-right direction
i = 1
lrDiagonal = []
while row + i < 8 and col + i < 8:
lrDiagonal.append(board[row+i][col+i])
i += 1
if playerID in lrDiagonal:
changes = []
searchCompleted = False
for i in range(0, len(lrDiagonal)):
piece = lrDiagonal[i]
if searchCompleted:
continue
if piece == 0:
changes = []
searchCompleted = True
elif piece == playerID:
searchCompleted = True
else:
changes.append((row+(i+1), col+(i+1)))
# perform flippings
if searchCompleted:
count += len(changes)
if PLAYMODE:
for i,j in changes:
board[i][j] = self.player
if count == 0 and PLAYMODE:
board[row][col] = 0
raise IllegalMove("Placing piece at (" + str(row) + ", " + str(col) + ") does not have any flips!")
return count
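# --- Hedged usage sketch (not part of the original module): a minimal two-player turn.
# It assumes the accompanying `ai` module imports cleanly, since Othello.__init__
# always constructs a GameAI instance even when useAI is later disabled.
if __name__ == '__main__':
    game = Othello()
    game.useAI = False       # switch to two-player mode for this sketch
    game.playerMove(2, 4)    # a legal opening move for black (player 1)
    print("Next to move: player", game.player)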
| 24.865385 | 129 | 0.623025 |
34e7022dda004ed53146afe78a2b502707097585
| 9,413 |
py
|
Python
|
benchs/link_and_code/bench_link_and_code.py
|
ScriptBox99/facebook-faiss
|
04d31fac53c609b6487a4cd6ead1c8b4ad926b0c
|
[
"MIT"
] | 17,006 |
2017-03-01T02:54:26.000Z
|
2022-03-31T19:08:11.000Z
|
benchs/link_and_code/bench_link_and_code.py
|
ScriptBox99/facebook-faiss
|
04d31fac53c609b6487a4cd6ead1c8b4ad926b0c
|
[
"MIT"
] | 2,154 |
2017-03-01T10:30:34.000Z
|
2022-03-31T11:35:40.000Z
|
benchs/link_and_code/bench_link_and_code.py
|
ScriptBox99/facebook-faiss
|
04d31fac53c609b6487a4cd6ead1c8b4ad926b0c
|
[
"MIT"
] | 2,790 |
2017-03-01T14:09:44.000Z
|
2022-03-31T06:49:39.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import os
import sys
import time
import numpy as np
import re
import faiss
from multiprocessing.dummy import Pool as ThreadPool
import pdb
import argparse
import datasets
from datasets import sanitize
import neighbor_codec
######################################################
# Command-line parsing
######################################################
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('dataset options')
aa('--db', default='deep1M', help='dataset')
aa( '--compute_gt', default=False, action='store_true',
help='compute and store the groundtruth')
group = parser.add_argument_group('index construction')
aa('--indexkey', default='HNSW32', help='index_factory type')
aa('--efConstruction', default=200, type=int,
help='HNSW construction factor')
aa('--M0', default=-1, type=int, help='size of base level')
aa('--maxtrain', default=256 * 256, type=int,
help='maximum number of training points')
aa('--indexfile', default='', help='file to read or write index from')
aa('--add_bs', default=-1, type=int,
help='add elements index by batches of this size')
aa('--link_singletons', default=False, action='store_true',
help='do a pass to link in the singletons')
group = parser.add_argument_group(
'searching (reconstruct_from_neighbors options)')
aa('--beta_centroids', default='',
help='file with codebook')
aa('--neigh_recons_codes', default='',
help='file with codes for reconstruction')
aa('--beta_ntrain', default=250000, type=int, help='')
aa('--beta_k', default=256, type=int, help='beta codebook size')
aa('--beta_nsq', default=1, type=int, help='number of beta sub-vectors')
aa('--beta_niter', default=10, type=int, help='')
aa('--k_reorder', default='-1', help='')
group = parser.add_argument_group('searching')
aa('--k', default=100, type=int, help='nb of nearest neighbors')
aa('--exhaustive', default=False, action='store_true',
help='report the exhaustive search topline')
aa('--searchthreads', default=-1, type=int,
help='nb of threads to use at search time')
aa('--efSearch', default='', type=str,
help='comma-separated values of efSearch to try')
args = parser.parse_args()
print("args:", args)
######################################################
# Load dataset
######################################################
xt, xb, xq, gt = datasets.load_data(
dataset=args.db, compute_gt=args.compute_gt)
nq, d = xq.shape
nb, d = xb.shape
######################################################
# Make index
######################################################
if os.path.exists(args.indexfile):
print("reading", args.indexfile)
index = faiss.read_index(args.indexfile)
if isinstance(index, faiss.IndexPreTransform):
index_hnsw = faiss.downcast_index(index.index)
vec_transform = index.chain.at(0).apply_py
else:
index_hnsw = index
vec_transform = lambda x:x
hnsw = index_hnsw.hnsw
hnsw_stats = faiss.cvar.hnsw_stats
else:
print("build index, key=", args.indexkey)
index = faiss.index_factory(d, args.indexkey)
if isinstance(index, faiss.IndexPreTransform):
index_hnsw = faiss.downcast_index(index.index)
vec_transform = index.chain.at(0).apply_py
else:
index_hnsw = index
vec_transform = lambda x:x
hnsw = index_hnsw.hnsw
hnsw.efConstruction = args.efConstruction
hnsw_stats = faiss.cvar.hnsw_stats
index.verbose = True
index_hnsw.verbose = True
index_hnsw.storage.verbose = True
if args.M0 != -1:
print("set level 0 nb of neighbors to", args.M0)
hnsw.set_nb_neighbors(0, args.M0)
xt2 = sanitize(xt[:args.maxtrain])
assert np.all(np.isfinite(xt2))
print("train, size", xt.shape)
t0 = time.time()
index.train(xt2)
print(" train in %.3f s" % (time.time() - t0))
print("adding")
t0 = time.time()
if args.add_bs == -1:
index.add(sanitize(xb))
else:
for i0 in range(0, nb, args.add_bs):
i1 = min(nb, i0 + args.add_bs)
print(" adding %d:%d / %d" % (i0, i1, nb))
index.add(sanitize(xb[i0:i1]))
print(" add in %.3f s" % (time.time() - t0))
print("storing", args.indexfile)
faiss.write_index(index, args.indexfile)
######################################################
# Train beta centroids and encode dataset
######################################################
if args.beta_centroids:
print("reordering links")
index_hnsw.reorder_links()
if os.path.exists(args.beta_centroids):
print("load", args.beta_centroids)
beta_centroids = np.load(args.beta_centroids)
nsq, k, M1 = beta_centroids.shape
assert M1 == hnsw.nb_neighbors(0) + 1
rfn = faiss.ReconstructFromNeighbors(index_hnsw, k, nsq)
else:
print("train beta centroids")
rfn = faiss.ReconstructFromNeighbors(
index_hnsw, args.beta_k, args.beta_nsq)
xb_full = vec_transform(sanitize(xb[:args.beta_ntrain]))
beta_centroids = neighbor_codec.train_beta_codebook(
rfn, xb_full, niter=args.beta_niter)
print(" storing", args.beta_centroids)
np.save(args.beta_centroids, beta_centroids)
faiss.copy_array_to_vector(beta_centroids.ravel(),
rfn.codebook)
index_hnsw.reconstruct_from_neighbors = rfn
if rfn.k == 1:
pass # no codes to take care of
elif os.path.exists(args.neigh_recons_codes):
print("loading neigh codes", args.neigh_recons_codes)
codes = np.load(args.neigh_recons_codes)
assert codes.size == rfn.code_size * index.ntotal
faiss.copy_array_to_vector(codes.astype('uint8'),
rfn.codes)
rfn.ntotal = index.ntotal
else:
print("encoding neigh codes")
t0 = time.time()
bs = 1000000 if args.add_bs == -1 else args.add_bs
for i0 in range(0, nb, bs):
i1 = min(i0 + bs, nb)
print(" encode %d:%d / %d [%.3f s]\r" % (
i0, i1, nb, time.time() - t0), end=' ')
sys.stdout.flush()
xbatch = vec_transform(sanitize(xb[i0:i1]))
rfn.add_codes(i1 - i0, faiss.swig_ptr(xbatch))
print()
print("storing %s" % args.neigh_recons_codes)
codes = faiss.vector_to_array(rfn.codes)
np.save(args.neigh_recons_codes, codes)
######################################################
# Exhaustive evaluation
######################################################
if args.exhaustive:
print("exhaustive evaluation")
xq_tr = vec_transform(sanitize(xq))
index2 = faiss.IndexFlatL2(index_hnsw.d)
accu_recons_error = 0.0
if faiss.get_num_gpus() > 0:
print("do eval on GPU")
co = faiss.GpuMultipleClonerOptions()
co.shard = False
index2 = faiss.index_cpu_to_all_gpus(index2, co)
# process in batches in case the dataset does not fit in RAM
rh = datasets.ResultHeap(xq_tr.shape[0], 100)
t0 = time.time()
bs = 500000
for i0 in range(0, nb, bs):
i1 = min(nb, i0 + bs)
print(' handling batch %d:%d' % (i0, i1))
xb_recons = np.empty(
(i1 - i0, index_hnsw.d), dtype='float32')
rfn.reconstruct_n(i0, i1 - i0, faiss.swig_ptr(xb_recons))
accu_recons_error += (
(vec_transform(sanitize(xb[i0:i1])) -
xb_recons)**2).sum()
index2.reset()
index2.add(xb_recons)
D, I = index2.search(xq_tr, 100)
rh.add_batch_result(D, I, i0)
rh.finalize()
del index2
t1 = time.time()
print("done in %.3f s" % (t1 - t0))
print("total reconstruction error: ", accu_recons_error)
print("eval retrieval:")
datasets.evaluate_DI(rh.D, rh.I, gt)
def get_neighbors(hnsw, i, level):
" list the neighbors for node i at level "
assert i < hnsw.levels.size()
assert level < hnsw.levels.at(i)
be = np.empty(2, 'uint64')
hnsw.neighbor_range(i, level, faiss.swig_ptr(be), faiss.swig_ptr(be[1:]))
return [hnsw.neighbors.at(j) for j in range(be[0], be[1])]
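# --- Hedged illustration (not part of the original benchmark): once the index is
# populated, the helper above can be used to peek at a few level-0 neighbor lists.
for node_id in range(min(3, index.ntotal)):
    print("node", node_id, "level-0 neighbors:", get_neighbors(hnsw, node_id, 0))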
#############################################################
# Index is ready
#############################################################
xq = sanitize(xq)
if args.searchthreads != -1:
print("Setting nb of threads to", args.searchthreads)
faiss.omp_set_num_threads(args.searchthreads)
if gt is None:
print("no valid groundtruth -- exit")
sys.exit()
k_reorders = [int(x) for x in args.k_reorder.split(',')]
efSearchs = [int(x) for x in args.efSearch.split(',')]
for k_reorder in k_reorders:
if index_hnsw.reconstruct_from_neighbors:
print("setting k_reorder=%d" % k_reorder)
index_hnsw.reconstruct_from_neighbors.k_reorder = k_reorder
for efSearch in efSearchs:
print("efSearch=%-4d" % efSearch, end=' ')
hnsw.efSearch = efSearch
hnsw_stats.reset()
datasets.evaluate(xq, gt, index, k=args.k, endl=False)
print("ndis %d nreorder %d" % (hnsw_stats.ndis, hnsw_stats.nreorder))
| 30.963816 | 77 | 0.601509 |
4f8cffa7120fd1753e67a75afd4d1b3672d3d86b
| 347 |
py
|
Python
|
test/test_molecule.py
|
sunhwan/MoleculeX
|
d718870a6f64fd7d6b110e3d6792e9d92c224da8
|
[
"MIT"
] | null | null | null |
test/test_molecule.py
|
sunhwan/MoleculeX
|
d718870a6f64fd7d6b110e3d6792e9d92c224da8
|
[
"MIT"
] | null | null | null |
test/test_molecule.py
|
sunhwan/MoleculeX
|
d718870a6f64fd7d6b110e3d6792e9d92c224da8
|
[
"MIT"
] | null | null | null |
import pytest
import moleculex as mx
def test_add_atom():
m = mx.Molecule()
v = mx.Atom()
m.add_atom(v)
assert len(m.atoms()) == 1
def test_add_bond():
m = mx.Molecule()
v = mx.Atom()
w = mx.Atom()
m.add_atom(v)
m.add_atom(w)
m.add_bond(v, w)
assert len(m.bonds()) == 1
assert len(m.atoms()) == 2
| 18.263158 | 30 | 0.56196 |
e00d9a9cfd84ef1939acaf0096d5178e2513f863
| 13,347 |
py
|
Python
|
lib/exabgp/reactor/protocol.py
|
mewbak/exabgp
|
223ccc685e2f7fa2d891ddcf4ebdcbadae540f1d
|
[
"BSD-3-Clause"
] | null | null | null |
lib/exabgp/reactor/protocol.py
|
mewbak/exabgp
|
223ccc685e2f7fa2d891ddcf4ebdcbadae540f1d
|
[
"BSD-3-Clause"
] | null | null | null |
lib/exabgp/reactor/protocol.py
|
mewbak/exabgp
|
223ccc685e2f7fa2d891ddcf4ebdcbadae540f1d
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
"""
protocol.py
Created by Thomas Mangin on 2009-08-25.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
import os
from exabgp.vendoring import six
import traceback
# ================================================================ Registration
#
from exabgp.util import ordinal
from exabgp.reactor.network.outgoing import Outgoing
# from exabgp.reactor.network.error import NotifyError
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message import Message
from exabgp.bgp.message import NOP
from exabgp.bgp.message import _NOP
from exabgp.bgp.message import Open
from exabgp.bgp.message.open import Version
from exabgp.bgp.message.open.capability import Capabilities
from exabgp.bgp.message.open.capability import Negotiated
from exabgp.bgp.message import Update
from exabgp.bgp.message import EOR
from exabgp.bgp.message import KeepAlive
from exabgp.bgp.message import Notification
from exabgp.bgp.message import Notify
from exabgp.bgp.message import Operational
from exabgp.bgp.message.direction import IN
from exabgp.bgp.message.update.attribute import Attribute
from exabgp.protocol.ip import IP
from exabgp.reactor.api.processes import ProcessError
from exabgp.logger import Logger
from exabgp.logger import FakeLogger
# This is the number of chunked messages we are willing to buffer, not the number of routes
MAX_BACKLOG = 15000
_UPDATE = Update([],b'')
_OPERATIONAL = Operational(0x00)
class Protocol (object):
decode = True
def __init__ (self, peer):
try:
self.logger = Logger()
except RuntimeError:
self.logger = FakeLogger()
self.peer = peer
self.neighbor = peer.neighbor
self.negotiated = Negotiated(self.neighbor)
self.connection = None
if self.neighbor.connect:
self.port = self.neighbor.connect
elif os.environ.get('exabgp.tcp.port','').isdigit():
self.port = int(os.environ.get('exabgp.tcp.port'))
elif os.environ.get('exabgp_tcp_port','').isdigit():
self.port = int(os.environ.get('exabgp_tcp_port'))
else:
self.port = 179
from exabgp.configuration.environment import environment
self.log_routes = peer.neighbor.adj_rib_in or environment.settings().log.routes
def fd (self):
if self.connection is None:
return -1
return self.connection.fd()
# XXX: we use self.peer.neighbor.peer_address when we could use self.neighbor.peer_address
def me (self, message):
return "%s/%s %s" % (self.peer.neighbor.peer_address,self.peer.neighbor.peer_as,message)
def accept (self, incoming):
self.connection = incoming
if self.peer.neighbor.api['neighbor-changes']:
self.peer.reactor.processes.connected(self.peer.neighbor)
# very important - as we use this function on __init__
return self
def connect (self):
        # allows testing the protocol code using a modified StringIO with an extra 'pending' function
if not self.connection:
local = self.neighbor.md5_ip.top() if not self.neighbor.auto_discovery else None
peer = self.neighbor.peer_address.top()
afi = self.neighbor.peer_address.afi
md5 = self.neighbor.md5_password
md5_base64 = self.neighbor.md5_base64
ttl_out = self.neighbor.ttl_out
self.connection = Outgoing(afi,peer,local,self.port,md5,md5_base64,ttl_out)
if not self.connection.init:
yield False
return
if not local:
self.neighbor.local_address = IP.create(self.connection.local)
if self.neighbor.router_id is None and self.neighbor.local_address.afi == AFI.ipv4:
self.neighbor.router_id = self.neighbor.local_address
for connected in self.connection.establish():
if not connected:
yield False
continue
if self.peer.neighbor.api['neighbor-changes']:
self.peer.reactor.processes.connected(self.peer.neighbor)
yield True
return
def close (self, reason='protocol closed, reason unspecified'):
if self.connection:
self.logger.debug(reason,self.connection.session())
            # must be first, otherwise we could have a loop caused by the raise below
self.connection.close()
self.connection = None
self.peer.stats['down'] = self.peer.stats.get('down',0) + 1
try:
if self.peer.neighbor.api['neighbor-changes']:
self.peer.reactor.processes.down(self.peer.neighbor,reason)
except ProcessError:
self.logger.debug('could not send notification of neighbor close to API',self.connection.session())
def _to_api (self,direction,message,raw):
packets = self.neighbor.api['%s-packets' % direction]
parsed = self.neighbor.api['%s-parsed' % direction]
consolidate = self.neighbor.api['%s-consolidate' % direction]
negotiated = self.negotiated if self.neighbor.api['negotiated'] else None
if consolidate:
if packets:
self.peer.reactor.processes.message(self.peer.neighbor,direction,message,negotiated,raw[:19],raw[19:])
else:
self.peer.reactor.processes.message(self.peer.neighbor,direction,message,negotiated,b'',b'')
else:
if packets:
self.peer.reactor.processes.packets(self.peer.neighbor,direction,int(message.ID),negotiated,raw[:19],raw[19:])
if parsed:
self.peer.reactor.processes.message(message.ID,self.peer.neighbor,direction,message,negotiated,b'',b'')
def write (self, message, negotiated=None):
raw = message.message(negotiated)
code = 'send-%s' % Message.CODE.short(message.ID)
self.peer.stats[code] = self.peer.stats.get(code,0) + 1
if self.neighbor.api.get(code,False):
self._to_api('send',message,raw)
for boolean in self.connection.writer(raw):
yield boolean
def send (self, raw):
code = 'send-%s' % Message.CODE.short(ordinal(raw[18]))
self.peer.stats[code] = self.peer.stats.get(code,0) + 1
if self.neighbor.api.get(code,False):
message = Update.unpack_message(raw[19:],self.negotiated)
self._to_api('send',message,raw)
for boolean in self.connection.writer(raw):
yield boolean
# Read from network .......................................................
def read_message (self):
# This will always be defined by the loop but scope leaking upset scrutinizer/pylint
msg_id = None
packets = self.neighbor.api['receive-packets']
consolidate = self.neighbor.api['receive-consolidate']
parsed = self.neighbor.api['receive-parsed']
body,header = b'',b'' # just because pylint/pylama are getting more clever
for length,msg_id,header,body,notify in self.connection.reader():
# internal issue
if notify:
code = 'receive-%s' % Message.CODE.NOTIFICATION.SHORT
if self.neighbor.api.get(code,False):
if consolidate:
self.peer.reactor.processes.notification(self.peer.neighbor,'receive',notify.code,notify.subcode,str(notify),None,header,body)
elif parsed:
self.peer.reactor.processes.notification(self.peer.neighbor,'receive',notify.code,notify.subcode,str(notify),None,b'',b'')
elif packets:
self.peer.reactor.processes.packets(self.peer.neighbor,'receive',msg_id,None,header,body)
# XXX: is notify not already Notify class ?
raise Notify(notify.code,notify.subcode,str(notify))
if not length:
yield _NOP
continue
self.logger.debug('<< message of type %s' % Message.CODE.name(msg_id),self.connection.session())
code = 'receive-%s' % Message.CODE.short(msg_id)
self.peer.stats[code] = self.peer.stats.get(code,0) + 1
for_api = self.neighbor.api.get(code,False)
if for_api and packets and not consolidate:
negotiated = self.negotiated if self.neighbor.api.get('negotiated',False) else None
self.peer.reactor.processes.packets(self.peer.neighbor,'receive',msg_id,negotiated,header,body)
if msg_id == Message.CODE.UPDATE:
if not self.neighbor.adj_rib_in and not (for_api or self.log_routes) and not (parsed or consolidate):
yield _UPDATE
return
try:
message = Message.unpack(msg_id,body,self.negotiated)
except (KeyboardInterrupt,SystemExit,Notify):
raise
except Exception as exc:
self.logger.debug('could not decode message "%d"' % msg_id,self.connection.session())
self.logger.debug('%s' % str(exc),self.connection.session())
self.logger.debug(traceback.format_exc(),self.connection.session())
raise Notify(1,0,'can not decode update message of type "%d"' % msg_id)
# raise Notify(5,0,'unknown message received')
if message.TYPE == Update.TYPE:
if Attribute.CODE.INTERNAL_TREAT_AS_WITHDRAW in message.attributes:
for nlri in message.nlris:
nlri.action = IN.WITHDRAWN
if for_api:
negotiated = self.negotiated if self.neighbor.api.get('negotiated',False) else None
if consolidate:
self.peer.reactor.processes.message(msg_id,self.neighbor,'receive',message,negotiated,header,body)
elif parsed:
self.peer.reactor.processes.message(msg_id,self.neighbor,'receive',message,negotiated,b'',b'')
if message.TYPE == Notification.TYPE:
raise message
if message.TYPE == Update.TYPE and Attribute.CODE.INTERNAL_DISCARD in message.attributes:
yield _NOP
else:
yield message
def validate_open (self):
error = self.negotiated.validate(self.neighbor)
if error is not None:
raise Notify(*error)
if self.neighbor.api['negotiated']:
self.peer.reactor.processes.negotiated(self.peer.neighbor,self.negotiated)
if self.negotiated.mismatch:
self.logger.warning('--------------------------------------------------------------------',self.connection.session())
self.logger.warning('the connection can not carry the following family/families',self.connection.session())
for reason,(afi,safi) in self.negotiated.mismatch:
self.logger.warning(' - %s is not configured for %s/%s' % (reason,afi,safi),self.connection.session())
self.logger.warning('therefore no routes of this kind can be announced on the connection',self.connection.session())
self.logger.warning('--------------------------------------------------------------------',self.connection.session())
def read_open (self, ip):
for received_open in self.read_message():
if received_open.TYPE == NOP.TYPE:
yield received_open
else:
break
if received_open.TYPE != Open.TYPE:
raise Notify(5,1,'The first packet received is not an open message (%s)' % received_open)
self.logger.debug('<< %s' % received_open,self.connection.session())
yield received_open
def read_keepalive (self):
for message in self.read_message():
if message.TYPE == NOP.TYPE:
yield message
else:
break
if message.TYPE != KeepAlive.TYPE:
raise Notify(5,2)
yield message
#
# Sending message to peer
#
def new_open (self):
if self.neighbor.local_as:
local_as = self.neighbor.local_as
elif self.negotiated.received_open:
local_as = self.negotiated.received_open.asn
else:
raise RuntimeError('no ASN available for the OPEN message')
sent_open = Open(
Version(4),
local_as,
self.neighbor.hold_time,
self.neighbor.router_id,
Capabilities().new(self.neighbor,self.peer._restarted)
)
# we intentionally do not buffer the open message
for _ in self.write(sent_open):
yield _NOP
self.logger.debug('>> %s' % sent_open,self.connection.session())
yield sent_open
def new_keepalive (self, comment=''):
keepalive = KeepAlive()
for _ in self.write(keepalive):
yield _NOP
self.logger.debug('>> KEEPALIVE%s' % (' (%s)' % comment if comment else ''),self.connection.session())
yield keepalive
def new_notification (self, notification):
for _ in self.write(notification):
yield _NOP
self.logger.debug('>> NOTIFICATION (%d,%d,"%s")' % (notification.code,notification.subcode,notification.data.decode('utf-8')),self.connection.session())
yield notification
def new_update (self, include_withdraw):
updates = self.neighbor.rib.outgoing.updates(self.neighbor.group_updates)
number = 0
for update in updates:
for message in update.messages(self.negotiated,include_withdraw):
number += 1
for boolean in self.send(message):
# boolean is a transient network error we already announced
yield _NOP
if number:
self.logger.debug('>> %d UPDATE(s)' % number,self.connection.session())
yield _UPDATE
def new_eor (self, afi, safi):
eor = EOR(afi,safi)
for _ in self.write(eor):
yield _NOP
self.logger.debug('>> EOR %s %s' % (afi,safi),self.connection.session())
yield eor
def new_eors (self, afi=AFI.undefined,safi=SAFI.undefined):
# Send EOR to let our peer know it can perform a RIB update
if self.negotiated.families:
families = self.negotiated.families if (afi,safi) == (AFI.undefined,SAFI.undefined) else [(afi,safi),]
for eor_afi,eor_safi in families:
for _ in self.new_eor(eor_afi,eor_safi):
yield _
else:
# If we are not sending an EOR, send a keepalive as soon as we are finished,
# so the other router knows that we have no (more) routes to send ...
# (is that behaviour documented somewhere ??)
for eor in self.new_keepalive('EOR'):
yield _NOP
yield _UPDATE
def new_operational (self, operational, negotiated):
for _ in self.write(operational,negotiated):
yield _NOP
self.logger.debug('>> OPERATIONAL %s' % str(operational),self.connection.session())
yield operational
def new_refresh (self, refresh):
for _ in self.write(refresh,None):
yield _NOP
self.logger.debug('>> REFRESH %s' % str(refresh),self.connection.session())
yield refresh
| 34.57772 | 154 | 0.716865 |
fdb77e7f25072280edd32d6f8d2c507df7e37f8f | 165 | py | Python | uspy/execution/compute_feature.py | jwarndt/uspy | ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a | ["MIT"] | null | null | null | uspy/execution/compute_feature.py | jwarndt/uspy | ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a | ["MIT"] | null | null | null | uspy/execution/compute_feature.py | jwarndt/uspy | ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a | ["MIT"] | null | null | null |
import numpy as np
from nmapy.features import *
def compute_feature(in_array, feature_parameters):
"""
Parameters:
-----------
in_array: an np.ndarray
"""
| 15 | 51 | 0.630303 |
3617de5fb2b1fa49bb51079875687dc8af847c5c | 12696 | py | Python | lib/galaxy/webapps/galaxy/api/cloudauthz.py | maikenp/galaxy | eb3f3c816f1f94bc328d092f30c8966d41a56a0d | ["CC-BY-3.0"] | 1 | 2021-10-08T02:14:24.000Z | 2021-10-08T02:14:24.000Z | lib/galaxy/webapps/galaxy/api/cloudauthz.py | maikenp/galaxy | eb3f3c816f1f94bc328d092f30c8966d41a56a0d | ["CC-BY-3.0"] | null | null | null | lib/galaxy/webapps/galaxy/api/cloudauthz.py | maikenp/galaxy | eb3f3c816f1f94bc328d092f30c8966d41a56a0d | ["CC-BY-3.0"] | null | null | null |
"""
API operations on defining cloud authorizations.
Through means of cloud authorization a user is able to grant a Galaxy server secure access to their
cloud-based resources without sharing their long-lasting credentials.
The user provides a provider-specific configuration, which Galaxy uses to request temporary credentials
from the provider to access the user's resources.
"""
import logging
from galaxy.exceptions import (
ActionInputError,
InternalServerError,
MalformedId,
RequestParameterInvalidException,
RequestParameterMissingException
)
from galaxy.managers import cloudauthzs
from galaxy.structured_app import StructuredApp
from galaxy.util import unicodify
from galaxy.web import (
expose_api
)
from . import BaseGalaxyAPIController
log = logging.getLogger(__name__)
class CloudAuthzController(BaseGalaxyAPIController):
"""
RESTful controller for defining cloud authorizations.
"""
def __init__(self, app: StructuredApp):
super().__init__(app)
self.cloudauthz_manager = cloudauthzs.CloudAuthzManager(app)
self.cloudauthz_serializer = cloudauthzs.CloudAuthzsSerializer(app)
self.cloudauthz_deserializer = cloudauthzs.CloudAuthzsDeserializer(app)
@expose_api
def index(self, trans, **kwargs):
"""
GET /api/cloud/authz
Lists all the cloud authorizations user has defined.
:type trans: galaxy.webapps.base.webapp.GalaxyWebTransaction
:param trans: Galaxy web transaction
:param kwargs: empty dict
:rtype: list of dict
:return: a list of cloud authorizations (each represented in key-value pair format) defined for the user.
"""
rtv = []
for cloudauthz in trans.user.cloudauthz:
rtv.append(self.cloudauthz_serializer.serialize_to_view(
cloudauthz, user=trans.user, trans=trans, **self._parse_serialization_params(kwargs, 'summary')))
return rtv
@expose_api
def create(self, trans, payload, **kwargs):
"""
* POST /api/cloud/authz
Request to store the payload as a cloudauthz (cloud authorization) configuration for a user.
:type trans: galaxy.webapps.base.webapp.GalaxyWebTransaction
:param trans: Galaxy web transaction
:type payload: dict
:param payload: A dictionary structure containing the following keys:
* provider: the cloud-based resource provider to which this configuration belongs to.
* config: a dictionary containing all the configuration required to request temporary credentials
from the provider. See the following page for details:
https://galaxyproject.org/authnz/
* authn_id: the (encoded) ID of a third-party authentication of a user. To have this ID, the user must
have logged in to this Galaxy server using a third-party identity (e.g., Google), or have
associated their Galaxy account with a third-party OIDC-based identity. See this page:
https://galaxyproject.org/authnz/config/
* description: [Optional] a brief description for this configuration.
:param kwargs: empty dict
:rtype: dict
:return: a dictionary with the following kvp:
* status: HTTP response code
* message: A message complementary to the response code.
"""
msg_template = "Rejected user `" + str(trans.user.id) + "`'s request to create cloudauthz config because of {}."
if not isinstance(payload, dict):
raise ActionInputError('Invalid payload data type. The payload is expected to be a dictionary, but '
'received data of type `{}`.'.format(str(type(payload))))
missing_arguments = []
provider = payload.get('provider', None)
if provider is None:
missing_arguments.append('provider')
config = payload.get('config', None)
if config is None:
missing_arguments.append('config')
authn_id = payload.get('authn_id', None)
if authn_id is None and provider.lower() not in ["azure", "gcp"]:
missing_arguments.append('authn_id')
if len(missing_arguments) > 0:
log.debug(msg_template.format(f"missing required config {missing_arguments}"))
raise RequestParameterMissingException('The following required arguments are missing in the payload: '
'{}'.format(missing_arguments))
description = payload.get("description", "")
if not isinstance(config, dict):
log.debug(msg_template.format("invalid config type `{}`, expect `dict`".format(type(config))))
raise RequestParameterInvalidException('Invalid type for the required `config` variable; expect `dict` '
'but received `{}`.'.format(type(config)))
if authn_id:
try:
decoded_authn_id = self.decode_id(authn_id)
except MalformedId as e:
log.debug(msg_template.format(f"cannot decode authz_id `{authn_id}`"))
raise e
try:
trans.app.authnz_manager.can_user_assume_authn(trans, decoded_authn_id)
except Exception as e:
raise e
# No two authorization configuration with
# exact same key/value should exist.
for ca in trans.user.cloudauthzs:
if ca.equals(trans.user.id, provider, authn_id, config):
log.debug("Rejected user `{}`'s request to create cloud authorization because a similar config "
"already exists.".format(trans.user.id))
raise ActionInputError("A similar cloud authorization configuration is already defined.")
try:
new_cloudauthz = self.cloudauthz_manager.create(
user_id=trans.user.id,
provider=provider,
config=config,
authn_id=authn_id,
description=description
)
view = self.cloudauthz_serializer.serialize_to_view(new_cloudauthz, trans=trans, **self._parse_serialization_params(kwargs, 'summary'))
log.debug('Created a new cloudauthz record for the user id `{}` '.format(str(trans.user.id)))
return view
except Exception as e:
log.exception(msg_template.format("exception while creating the new cloudauthz record"))
raise InternalServerError('An unexpected error has occurred while responding to the create request of the '
'cloudauthz API.' + unicodify(e))
@expose_api
def delete(self, trans, encoded_authz_id, **kwargs):
"""
* DELETE /api/cloud/authz/{encoded_authz_id}
Deletes the CloudAuthz record with the given ``encoded_authz_id`` from database.
:type trans: galaxy.webapps.base.webapp.GalaxyWebTransaction
:param trans: Galaxy web transaction
:type encoded_authz_id: string
:param encoded_authz_id: The encoded ID of the CloudAuthz record to be marked deleted.
:rtype JSON
:return The cloudauthz record marked as deleted, serialized as a JSON object.
"""
msg_template = "Rejected user `" + str(trans.user.id) + "`'s request to delete cloudauthz config because of {}."
try:
authz_id = self.decode_id(encoded_authz_id)
except MalformedId as e:
log.debug(msg_template.format(f"cannot decode authz_id `{encoded_authz_id}`"))
raise e
try:
cloudauthz = trans.app.authnz_manager.try_get_authz_config(trans.sa_session, trans.user.id, authz_id)
trans.sa_session.delete(cloudauthz)
trans.sa_session.flush()
log.debug('Deleted a cloudauthz record with id `{}` for the user id `{}` '.format(authz_id, str(trans.user.id)))
view = self.cloudauthz_serializer.serialize_to_view(cloudauthz, trans=trans, **self._parse_serialization_params(kwargs, 'summary'))
trans.response.status = '200'
return view
except Exception as e:
log.exception(msg_template.format("exception while deleting the cloudauthz record with "
"ID: `{}`.".format(encoded_authz_id)))
raise InternalServerError('An unexpected error has occurred while responding to the DELETE request of the '
'cloudauthz API.' + unicodify(e))
@expose_api
def update(self, trans, encoded_authz_id, payload, **kwargs):
"""
PUT /api/cloud/authz/{encoded_authz_id}
Updates the values for the cloudauthz configuration with the given ``encoded_authz_id``.
With this API only the following attributes of a cloudauthz configuration
can be updated: `authn_id`, `provider`, `config`, `deleted`.
:type trans: galaxy.webapps.base.webapp.GalaxyWebTransaction
:param trans: Galaxy web transaction
:type encoded_authz_id: string
:param encoded_authz_id: The encoded ID of the CloudAuthz record to be updated.
:type payload: dict
:param payload: A dictionary structure containing the attributes to modified with their new values.
It can contain any number of the following attributes:
* provider: the cloud-based resource provider
to which this configuration belongs to.
* authn_id: the (encoded) ID of a third-party authentication of a user.
To have this ID, user must have logged-in to this Galaxy server
using third-party identity (e.g., Google), or has associated
their Galaxy account with a third-party OIDC-based identity.
See this page: https://galaxyproject.org/authnz/config/
Note: A user can associate a cloudauthz record with their own
authentications only. If the given authentication with authn_id
belongs to a different user, Galaxy will throw the
ItemAccessibilityException exception.
* config: a dictionary containing all the configuration required to
request temporary credentials from the provider.
See the following page for details:
https://galaxyproject.org/authnz/
* deleted: a boolean type marking the specified cloudauthz as (un)deleted.
"""
msg_template = "Rejected user `" + str(trans.user.id) + "`'s request to delete cloudauthz config because of {}."
try:
authz_id = self.decode_id(encoded_authz_id)
except MalformedId as e:
log.debug(msg_template.format(f"cannot decode authz_id `{encoded_authz_id}`"))
raise e
try:
cloudauthz_to_update = trans.app.authnz_manager.try_get_authz_config(trans.sa_session, trans.user.id, authz_id)
self.cloudauthz_deserializer.deserialize(cloudauthz_to_update, payload, trans=trans)
self.cloudauthz_serializer.serialize_to_view(cloudauthz_to_update, view='summary')
return self.cloudauthz_serializer.serialize_to_view(cloudauthz_to_update, view='summary')
except MalformedId as e:
raise e
except Exception as e:
log.exception(msg_template.format("exception while updating the cloudauthz record with "
"ID: `{}`.".format(encoded_authz_id)))
raise InternalServerError('An unexpected error has occurred while responding to the PUT request of the '
'cloudauthz API.' + unicodify(e))
| 49.209302 | 147 | 0.603497 |
ebea6f15e3db82b709db81df32d647649d6a8a93 | 2889 | py | Python | node_registry/decorators.py | rahulkatiyar19955/node_registry | 42a2ed4ea7271a312223c724543e278ff9c9f54c | ["BSD-3-Clause"] | 2 | 2021-07-05T12:36:29.000Z | 2021-12-24T08:20:13.000Z | node_registry/decorators.py | rahulkatiyar19955/node_registry | 42a2ed4ea7271a312223c724543e278ff9c9f54c | ["BSD-3-Clause"] | null | null | null | node_registry/decorators.py | rahulkatiyar19955/node_registry | 42a2ed4ea7271a312223c724543e278ff9c9f54c | ["BSD-3-Clause"] | 4 | 2021-06-30T04:22:58.000Z | 2021-12-19T06:06:34.000Z |
#!/usr/bin/env python3
# Copyright (c) 2021, DeepX-inc
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# @author Krishneel Chaudhary
"""Python Xnode Decorators."""
from .xnode_builder import node_init
rosnode = node_init()
def register(globals_params: dict = None):
"""
Spin the rosnode on the executor.
Args:
----
globals_params (dict): Dictionary of module globals()
"""
if globals_params is not None:
assert isinstance(
globals_params, dict
), f'Expected {dict} but received {type(globals_params)}'
__keys__: list = ['__name__', 'rosnode']
for key in __keys__:
if key not in globals_params.keys():
raise KeyError(f'Key {key} is required')
name = globals_params['__name__']
if name not in ['__main__']:
print(
'\033[33m__main__ not found in the globals\033[0m'
)
return
if rosnode.node is None:
raise RuntimeError('Please initialize the node')
rosnode.spin()
def register_node(func_or_dict):
"""Registor Node in XNode."""
def _register(func):
assert callable(func)
func()
register(globals_params)
return func
if isinstance(func_or_dict, dict):
globals_params = func_or_dict
return _register
else:
globals_params = None
return _register(func_or_dict)
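# A minimal usage sketch (illustrative, not part of this module): the decorator can be applied
# to a module-level setup function, passing globals() so `register` can check for '__main__'
# and the shared `rosnode` before spinning:
#
# @register_node(globals())
# def setup():
#     ...  # configure the node via `rosnode` here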
| 35.666667 | 77 | 0.695396 |
3245005919943544b4648dea45af507a74c2c488 | 4219 | py | Python | couchjs/scons/scons-local-2.0.1/SCons/Node/Python.py | Gussy/bigcouch | 9e67d3f754186ce8368503509ae041a2847f2b7c | ["Apache-2.0"] | 73 | 2015-03-19T04:04:52.000Z | 2021-08-16T10:45:11.000Z | couchjs/scons/scons-local-2.0.1/SCons/Node/Python.py | Gussy/bigcouch | 9e67d3f754186ce8368503509ae041a2847f2b7c | ["Apache-2.0"] | 5 | 2016-04-26T13:19:25.000Z | 2017-03-11T14:11:22.000Z | couchjs/scons/scons-local-2.0.1/SCons/Node/Python.py | Gussy/bigcouch | 9e67d3f754186ce8368503509ae041a2847f2b7c | ["Apache-2.0"] | 13 | 2015-03-27T05:21:42.000Z | 2017-05-22T11:45:30.000Z |
"""scons.Node.Python
Python nodes.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Python.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Node
class ValueNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
return Value(s)
class ValueBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
class Value(SCons.Node.Node):
"""A class for Python variables, typically passed on the command line
or generated by a script, but not from a file or some other source.
"""
NodeInfo = ValueNodeInfo
BuildInfo = ValueBuildInfo
def __init__(self, value, built_value=None):
SCons.Node.Node.__init__(self)
self.value = value
if built_value is not None:
self.built_value = built_value
def str_for_display(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def make_ready(self):
self.get_csig()
def build(self, **kw):
if not hasattr(self, 'built_value'):
SCons.Node.Node.build(self, **kw)
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Value nodes get built regardless of
# what directory scons was run from. Value nodes
# are outside the filesystem:
return 1
def write(self, built_value):
"""Set the value of the node."""
self.built_value = built_value
def read(self):
"""Return the value. If necessary, the value is built."""
self.build()
if not hasattr(self, 'built_value'):
self.built_value = self.value
return self.built_value
def get_text_contents(self):
"""By the assumption that the node.built_value is a
deterministic product of the sources, the contents of a Value
are the concatenation of all the contents of its sources. As
the value need not be built when get_contents() is called, we
cannot use the actual node.built_value."""
###TODO: something reasonable about universal newlines
contents = str(self.value)
for kid in self.children(None):
contents = contents + kid.get_contents()
return contents
get_contents = get_text_contents ###TODO should return 'bytes' value
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def get_csig(self, calc=None):
"""Because we're a Python value node and don't have a real
timestamp, we get to ignore the calculator and just use the
value contents."""
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
self.get_ninfo().csig = contents
return contents
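# Usage sketch (assumes a standard SCons Environment; illustrative only): Value nodes are
# typically created with env.Value() and used as sources, so a target is rebuilt whenever
# the Python value changes, e.g.
#   version = env.Value('1.2.3')
#   env.Command('version.h', version, generate_version_header)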
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 32.705426 | 95 | 0.677886 |
2d897854d426c9d2fa9bd38ef33f0e71e81a4365 | 3605 | py | Python | linux_proc_extras/datadog_checks/linux_proc_extras/linux_proc_extras.py | Siecje/integrations-core | b2f3ea4145b25394be0b274093d1f0723e8f968d | ["BSD-3-Clause"] | 5 | 2018-04-09T09:53:04.000Z | 2021-07-02T05:37:41.000Z | linux_proc_extras/datadog_checks/linux_proc_extras/linux_proc_extras.py | Siecje/integrations-core | b2f3ea4145b25394be0b274093d1f0723e8f968d | ["BSD-3-Clause"] | 19 | 2018-01-24T15:23:47.000Z | 2020-07-14T14:30:53.000Z | linux_proc_extras/datadog_checks/linux_proc_extras/linux_proc_extras.py | Siecje/integrations-core | b2f3ea4145b25394be0b274093d1f0723e8f968d | ["BSD-3-Clause"] | 6 | 2018-01-09T21:37:20.000Z | 2020-05-26T09:28:09.000Z |
# (C) Cory Watson <cory@stripe.com> 2016
# (C) Datadog, Inc. 2016-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# project
from checks import AgentCheck
from utils.subprocess_output import get_subprocess_output
from collections import defaultdict
PROCESS_STATES = {
'D': 'uninterruptible',
'R': 'runnable',
'S': 'sleeping',
'T': 'stopped',
'W': 'paging',
'X': 'dead',
'Z': 'zombie',
}
PROCESS_PRIOS = {
'<': 'high',
'N': 'low',
'L': 'locked'
}
class MoreUnixCheck(AgentCheck):
def check(self, instance):
self.tags = instance.get('tags', [])
self.set_paths()
self.get_inode_info()
self.get_stat_info()
self.get_entropy_info()
self.get_process_states()
def set_paths(self):
proc_location = self.agentConfig.get('procfs_path', '/proc').rstrip('/')
self.proc_path_map = {
"inode_info": "sys/fs/inode-nr",
"stat_info": "stat",
"entropy_info": "sys/kernel/random/entropy_avail",
}
for key, path in self.proc_path_map.iteritems():
self.proc_path_map[key] = "{procfs}/{path}".format(procfs=proc_location, path=path)
def get_inode_info(self):
with open(self.proc_path_map['inode_info'], 'r') as inode_info:
inode_stats = inode_info.readline().split()
self.gauge('system.inodes.total', float(inode_stats[0]), tags=self.tags)
self.gauge('system.inodes.used', float(inode_stats[1]), tags=self.tags)
def get_stat_info(self):
with open(self.proc_path_map['stat_info'], 'r') as stat_info:
lines = [line.strip() for line in stat_info.readlines()]
for line in lines:
if line.startswith('ctxt'):
ctxt_count = float(line.split(' ')[1])
self.monotonic_count('system.linux.context_switches', ctxt_count, tags=self.tags)
elif line.startswith('processes'):
process_count = int(line.split(' ')[1])
self.monotonic_count('system.linux.processes_created', process_count, tags=self.tags)
elif line.startswith('intr'):
interrupts = int(line.split(' ')[1])
self.monotonic_count('system.linux.interrupts', interrupts, tags=self.tags)
def get_entropy_info(self):
with open(self.proc_path_map['entropy_info'], 'r') as entropy_info:
entropy = entropy_info.readline()
self.gauge('system.entropy.available', float(entropy), tags=self.tags)
def get_process_states(self):
state_counts = defaultdict(int)
prio_counts = defaultdict(int)
ps = get_subprocess_output(['ps', '--no-header', '-eo', 'stat'], self.log)
for state in ps[0]:
# Each process state is a flag in a list of characters. See ps(1) for details.
for flag in list(state):
if state in PROCESS_STATES:
state_counts[PROCESS_STATES[state]] += 1
elif state in PROCESS_PRIOS:
prio_counts[PROCESS_PRIOS[state]] += 1
for state in state_counts:
state_tags = list(self.tags)
state_tags.append("state:" + state)
self.gauge('system.processes.states', float(state_counts[state]), state_tags)
for prio in prio_counts:
prio_tags = list(self.tags)
prio_tags.append("priority:" + prio)
self.gauge('system.processes.priorities', float(prio_counts[prio]), prio_tags)
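# Example of the parsed input (illustrative): `ps --no-header -eo stat` prints one state
# string per process such as "Ss", "R+" or "Z"; characters found in PROCESS_STATES are
# counted per state and characters found in PROCESS_PRIOS per priority, and the counts are
# reported through the system.processes.states and system.processes.priorities gauges above.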
| 37.552083 | 105 | 0.601387 |
21c72c7cd993db13595d4a1e6122419a0a430f9b | 2502 | py | Python | tests/test_referencing.py | chdemko/pandoc-numbering | 19e6dfd6d5c2e213e279bc6aed0b5caae84249c7 | ["BSD-3-Clause"] | 29 | 2016-03-16T10:46:25.000Z | 2021-09-09T19:03:57.000Z | tests/test_referencing.py | chdemko/pandoc-numbering | 19e6dfd6d5c2e213e279bc6aed0b5caae84249c7 | ["BSD-3-Clause"] | 24 | 2016-03-16T10:53:13.000Z | 2021-03-17T23:11:58.000Z | tests/test_referencing.py | chdemko/pandoc-numbering | 19e6dfd6d5c2e213e279bc6aed0b5caae84249c7 | ["BSD-3-Clause"] | 8 | 2017-08-28T21:30:30.000Z | 2020-12-27T21:04:15.000Z |
# This Python file uses the following encoding: utf-8
from unittest import TestCase
from helper import verify_conversion
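# As the expected conversions below illustrate, the link-text placeholders expand roughly as:
# %D/%d the description ("Exercise"/"exercise"), %T/%t the title, %g the global number,
# %s the section number, %n and # the local number, and %c the number of occurrences.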
class ReferencingTest(TestCase):
def test_referencing_standard(self):
verify_conversion(
self,
r"""
Header
======
Section
-------
Exercise (First title) -.+.#exercise:first
Exercise (Second title) -.+.#exercise:second
See [%D %d %T %t %g %s %n # %c](#exercise:first)
See [%D %d %T %t %g %s %n # %c](#exercise:second)
""",
r"""
# Header
## Section
[]{#exercise:header.section.first-title}[**Exercise 1.1** *(First title)*]{#exercise:first .pandoc-numbering-text .exercise}
[]{#exercise:header.section.second-title}[**Exercise 1.2** *(Second title)*]{#exercise:second .pandoc-numbering-text .exercise}
See [Exercise exercise First title first title 1.1.1 1.1 1.1 1.1 2](#exercise:first)
See [Exercise exercise Second title second title 1.1.2 1.1 1.2 1.2 2](#exercise:second)
""",
)
def test_referencing_latex(self):
verify_conversion(
self,
r"""
Title
=====
Exercise -.#first
Exercise (Title) -.#second
See [%D %d %T %t %g %s %n # %c %p](#exercise:first)
See [%D %d %T %t %g %s %n # %c %p](#exercise:second)
""",
r"""
---
header-includes:
- "`\\usepackage{tocloft}`{=tex}"
- "`\\usepackage{etoolbox}`{=tex}"
- "`\\ifdef{\\mainmatter}{\\let\\oldmainmatter\\mainmatter\\renewcommand{\\mainmatter}[0]{\\oldmainmatter}}{}`{=tex}"
---
`\usepackage{tocloft}`{=tex}
`\usepackage{etoolbox}`{=tex}
`\ifdef{\mainmatter}{\let\oldmainmatter\mainmatter\renewcommand{\mainmatter}[0]{\oldmainmatter}}{}`{=tex}
```{=tex}
\ifdef{\mainmatter}{}{}
```
# Title
`\phantomsection\addcontentsline{exercise}{exercise}{\protect\numberline {1.1}{\ignorespaces {Exercise}}}`{=tex}[]{#exercise:title.1}[`\label{exercise:first}`{=tex}`\label{exercise:title.1}`{=tex}**Exercise 1**]{#exercise:first .pandoc-numbering-text .exercise}
`\phantomsection\addcontentsline{exercise}{exercise}{\protect\numberline {1.2}{\ignorespaces {Title}}}`{=tex}[]{#exercise:title.title}[`\label{exercise:second}`{=tex}`\label{exercise:title.title}`{=tex}**Exercise 2** *(Title)*]{#exercise:second .pandoc-numbering-text .exercise}
See [Exercise exercise 1.1 1 1 1 2 `\pageref{exercise:first}`{=tex}](#exercise:first)
See [Exercise exercise Title title 1.2 1 2 2 2 `\pageref{exercise:second}`{=tex}](#exercise:second)
""",
"latex",
)
| 29.093023 | 278 | 0.635891 |
2600d62b53f56277f242f4ca9cdaee54198ba665 | 48641 | py | Python | kolibri/core/tasks/api.py | AtKristijan/kolibri | d9b4864a8e93205923c388b4387efc539ae82ec1 | ["MIT"] | null | null | null | kolibri/core/tasks/api.py | AtKristijan/kolibri | d9b4864a8e93205923c388b4387efc539ae82ec1 | ["MIT"] | null | null | null | kolibri/core/tasks/api.py | AtKristijan/kolibri | d9b4864a8e93205923c388b4387efc539ae82ec1 | ["MIT"] | null | null | null |
import logging
import ntpath
import os
import shutil
from functools import partial
from tempfile import mkstemp
import requests
from django.apps.registry import AppRegistryNotReady
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.files.uploadedfile import UploadedFile
from django.core.management import call_command
from django.core.management.base import CommandError
from django.http.response import Http404
from django.http.response import HttpResponseBadRequest
from django.utils.translation import get_language_from_request
from django.utils.translation import gettext_lazy as _
from morango.models import ScopeDefinition
from morango.sync.controller import MorangoProfileController
from requests.exceptions import HTTPError
from rest_framework import decorators
from rest_framework import serializers
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import BasicAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.exceptions import APIException
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.exceptions import ParseError
from rest_framework.exceptions import PermissionDenied
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from six import string_types
from .permissions import FacilitySyncPermissions
from kolibri.core.auth.constants.morango_sync import PROFILE_FACILITY_DATA
from kolibri.core.auth.constants.morango_sync import State as FacilitySyncState
from kolibri.core.auth.management.utils import get_client_and_server_certs
from kolibri.core.auth.management.utils import get_dataset_id
from kolibri.core.auth.models import Facility
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.permissions import CanExportLogs
from kolibri.core.content.permissions import CanImportUsers
from kolibri.core.content.permissions import CanManageContent
from kolibri.core.content.utils.channels import get_mounted_drive_by_id
from kolibri.core.content.utils.channels import get_mounted_drives_with_channel_info
from kolibri.core.content.utils.channels import read_channel_metadata_from_db_file
from kolibri.core.content.utils.paths import get_channel_lookup_url
from kolibri.core.content.utils.paths import get_content_database_file_path
from kolibri.core.content.utils.upgrade import diff_stats
from kolibri.core.device.permissions import IsSuperuser
from kolibri.core.device.permissions import NotProvisionedCanPost
from kolibri.core.discovery.models import NetworkLocation
from kolibri.core.discovery.utils.network.client import NetworkClient
from kolibri.core.discovery.utils.network.errors import NetworkLocationNotFound
from kolibri.core.discovery.utils.network.errors import URLParseError
from kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES
from kolibri.core.tasks.exceptions import JobNotFound
from kolibri.core.tasks.exceptions import JobNotRestartable
from kolibri.core.tasks.exceptions import UserCancelledError
from kolibri.core.tasks.job import JobRegistry
from kolibri.core.tasks.job import State
from kolibri.core.tasks.main import facility_queue
from kolibri.core.tasks.main import job_storage
from kolibri.core.tasks.main import priority_queue
from kolibri.core.tasks.main import queue
from kolibri.core.tasks.utils import get_current_job
from kolibri.utils import conf
try:
from django.apps import apps
apps.check_apps_ready()
except AppRegistryNotReady:
import django
django.setup()
logger = logging.getLogger(__name__)
NETWORK_ERROR_STRING = _("There was a network error.")
DISK_IO_ERROR_STRING = _("There was a disk access error.")
CATCHALL_SERVER_ERROR_STRING = _("There was an unknown error.")
def get_channel_name(channel_id, require_channel=False):
try:
channel = ChannelMetadata.objects.get(id=channel_id)
channel_name = channel.name
except ChannelMetadata.DoesNotExist:
if require_channel:
raise serializers.ValidationError("This channel does not exist")
channel_name = ""
return channel_name
def validate_content_task(request, task_description, require_channel=False):
try:
channel_id = task_description["channel_id"]
except KeyError:
raise serializers.ValidationError("The channel_id field is required.")
channel_name = task_description.get(
"channel_name", get_channel_name(channel_id, require_channel)
)
node_ids = task_description.get("node_ids", None)
exclude_node_ids = task_description.get("exclude_node_ids", None)
if node_ids and not isinstance(node_ids, list):
raise serializers.ValidationError("node_ids must be a list.")
if exclude_node_ids and not isinstance(exclude_node_ids, list):
raise serializers.ValidationError("exclude_node_ids must be a list.")
return {
"channel_id": channel_id,
"channel_name": channel_name,
"exclude_node_ids": exclude_node_ids,
"node_ids": node_ids,
"started_by": request.user.pk,
"started_by_username": request.user.username,
}
def validate_remote_import_task(request, task_description):
import_task = validate_content_task(request, task_description)
try:
peer_id = task_description["peer_id"]
baseurl = NetworkLocation.objects.values_list("base_url", flat=True).get(
id=peer_id
)
except NetworkLocation.DoesNotExist:
raise serializers.ValidationError(
"Peer with id {} does not exist".format(peer_id)
)
except KeyError:
baseurl = conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"]
peer_id = None
import_task.update({"baseurl": baseurl, "peer_id": peer_id})
return import_task
def _add_drive_info(import_task, task_description):
try:
drive_id = task_description["drive_id"]
except KeyError:
raise serializers.ValidationError("The drive_id field is required.")
try:
drive = get_mounted_drive_by_id(drive_id)
except KeyError:
raise serializers.ValidationError(
"That drive_id was not found in the list of drives."
)
import_task.update({"drive_id": drive_id, "datafolder": drive.datafolder})
return import_task
def validate_local_import_task(request, task_description):
task = validate_content_task(request, task_description)
task = _add_drive_info(task, task_description)
return task
def validate_local_export_task(request, task_description):
task = validate_content_task(request, task_description, require_channel=True)
task = _add_drive_info(task, task_description)
return task
def validate_deletion_task(request, task_description):
task = validate_content_task(request, task_description, require_channel=True)
task["force_delete"] = bool(task_description.get("force_delete"))
return task
class BaseViewSet(viewsets.ViewSet):
queues = []
permission_classes = []
# Adding auth classes explicitly until we find a fix for BasicAuth not
# working on tasks API (in dev settings)
authentication_classes = [SessionAuthentication, BasicAuthentication]
def initial(self, request, *args, **kwargs):
if len(self.permission_classes) == 0:
self.permission_classes = self.default_permission_classes()
if self.permission_classes is None:
self.permission_classes = []
return super(BaseViewSet, self).initial(request, *args, **kwargs)
def default_permission_classes(self):
# For all /api/tasks/ endpoints
return [CanManageContent]
def validate_create_req_data(self, request):
"""
Validates the request data received on POST /api/tasks/.
If `request.user` is authorized to initiate the `task` function, this returns
a list of `request.data` otherwise raises PermissionDenied.
"""
if isinstance(request.data, list):
request_data_list = request.data
else:
request_data_list = [request.data]
for request_data in request_data_list:
if "task" not in request_data:
raise serializers.ValidationError("The 'task' field is required.")
if not isinstance(request_data["task"], string_types):
raise serializers.ValidationError("The 'task' value must be a string.")
funcstr = request_data.get("task")
# Make sure the task is registered
try:
registered_job = JobRegistry.REGISTERED_JOBS[funcstr]
except KeyError:
raise serializers.ValidationError(
"'{funcstr}' is not registered.".format(funcstr=funcstr)
)
# Check permissions the DRF way
for permission in registered_job.permissions:
if not permission.has_permission(request, self):
self.permission_denied(request)
return request_data_list
def list(self, request):
jobs_response = [
_job_to_response(j) for _queue in self.queues for j in _queue.jobs
]
return Response(jobs_response)
def create(self, request):
"""
Enqueue a task for async processing.
API endpoint:
POST /api/tasks/
Request payload parameters:
- `task` (required): a string representing the dotted path to task function.
- all other key-value pairs are passed to the validator if the
task function has one; otherwise they are passed to the task function itself
as keyword args.
Keep in mind:
If a task function has a validator then the dict returned by the validator
is passed to the task function as keyword args.
The validator can add `extra_metadata` in the returned dict to set `extra_metadata`
in the enqueued task.
"""
request_data_list = self.validate_create_req_data(request)
enqueued_jobs_response = []
# Once we have validated all the tasks, we are good to go!
for request_data in request_data_list:
funcstr = request_data.pop("task")
registered_job = JobRegistry.REGISTERED_JOBS[funcstr]
# Run validator with request and request_data as its argument
if registered_job.validator is not None:
try:
validator_result = registered_job.validator(request, request_data)
except Exception as e:
raise e
if not isinstance(validator_result, dict):
raise serializers.ValidationError("Validator must return a dict.")
extra_metadata = validator_result.get("extra_metadata")
if extra_metadata is not None and not isinstance(extra_metadata, dict):
raise serializers.ValidationError(
"In the dict returned by validator, 'extra_metadata' must be a dict."
)
request_data = validator_result
job_id = registered_job.enqueue(**request_data)
enqueued_jobs_response.append(_job_to_response(job_storage.get_job(job_id)))
if len(enqueued_jobs_response) == 1:
enqueued_jobs_response = enqueued_jobs_response[0]
return Response(enqueued_jobs_response)
def retrieve(self, request, pk=None):
for _queue in self.queues:
try:
task = _job_to_response(_queue.fetch_job(pk))
break
except JobNotFound:
continue
else:
raise Http404("Task with {pk} not found".format(pk=pk))
return Response(task)
@decorators.action(methods=["post"], detail=False)
def restarttask(self, request):
"""
Restart a task with its task id given in the task_id parameter.
"""
if "task_id" not in request.data:
raise serializers.ValidationError("The 'task_id' field is required.")
if not isinstance(request.data["task_id"], string_types):
raise serializers.ValidationError("The 'task_id' should be a string.")
resp = {}
for _queue in self.queues:
try:
task_id = _queue.restart_job(request.data["task_id"])
resp = _job_to_response(_queue.fetch_job(task_id))
break
except JobNotFound:
continue
except JobNotRestartable as e:
raise serializers.ValidationError(str(e))
return Response(resp)
def destroy(self, request, pk=None):
# unimplemented for now.
pass
@decorators.action(methods=["post"], detail=False)
def canceltask(self, request):
"""
Cancel a task with its task id given in the task_id parameter.
"""
if "task_id" not in request.data:
raise serializers.ValidationError("The 'task_id' field is required.")
if not isinstance(request.data["task_id"], string_types):
raise serializers.ValidationError("The 'task_id' should be a string.")
for _queue in self.queues:
try:
_queue.cancel(request.data["task_id"])
break
except JobNotFound:
continue
return Response({})
@decorators.action(methods=["post"], detail=False)
def cleartasks(self, request):
"""
Cancels all running tasks.
"""
for _queue in self.queues:
_queue.empty()
return Response({})
@decorators.action(methods=["post"], detail=False)
def cleartask(self, request):
# Given a single task ID, clear it from the queue
task_id = request.data.get("task_id")
if not task_id:
return Response({})
for _queue in self.queues:
_queue.clear_job(task_id)
return Response({"task_id": task_id})
@decorators.action(methods=["post"], detail=False)
def deletefinishedtasks(self, request):
"""
Delete all tasks that have succeeded, failed, or been cancelled.
"""
task_id = request.data.get("task_id")
if task_id:
for _queue in self.queues:
_queue.clear_job(task_id)
else:
for _queue in self.queues:
_queue.clear()
return Response({})
class TasksViewSet(BaseViewSet):
@property
def queues(self):
return [queue, priority_queue]
def default_permission_classes(self):
if self.action in ["list", "deletefinishedtasks"]:
return [CanManageContent | CanExportLogs]
elif self.action == "startexportlogcsv":
return [CanExportLogs]
elif self.action in ["importusersfromcsv", "exportuserstocsv"]:
return [CanImportUsers]
# For all other tasks
return [CanManageContent]
@decorators.action(methods=["post"], detail=False)
def startchannelupdate(self, request):
sourcetype = request.data.get("sourcetype", None)
new_version = request.data.get("new_version", None)
if sourcetype == "remote":
task = validate_remote_import_task(request, request.data)
task.update({"type": "UPDATECHANNEL", "new_version": new_version})
job_id = queue.enqueue(
_remoteimport,
task["channel_id"],
task["baseurl"],
peer_id=task["peer_id"],
node_ids=task["node_ids"],
is_updating=True,
extra_metadata=task,
track_progress=True,
cancellable=True,
)
elif sourcetype == "local":
task = validate_local_import_task(request, request.data)
task.update({"type": "UPDATECHANNEL", "new_version": new_version})
job_id = queue.enqueue(
_diskimport,
task["channel_id"],
task["datafolder"],
drive_id=task["drive_id"],
node_ids=task["node_ids"],
is_updating=True,
extra_metadata=task,
track_progress=True,
cancellable=True,
)
else:
raise serializers.ValidationError("sourcetype must be 'remote' or 'local'")
resp = _job_to_response(queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startremotebulkimport(self, request):
if not isinstance(request.data, list):
raise serializers.ValidationError(
"POST data must be a list of task descriptions"
)
tasks = map(partial(validate_remote_import_task, request), request.data)
job_ids = []
for task in tasks:
task.update({"type": "REMOTEIMPORT", "database_ready": False})
import_job_id = queue.enqueue(
_remoteimport,
task["channel_id"],
task["baseurl"],
peer_id=task["peer_id"],
extra_metadata=task,
cancellable=True,
track_progress=True,
)
job_ids.append(import_job_id)
resp = [_job_to_response(queue.fetch_job(job_id)) for job_id in job_ids]
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startremotechannelimport(self, request):
task = validate_remote_import_task(request, request.data)
task.update({"type": "REMOTECHANNELIMPORT"})
job_id = priority_queue.enqueue(
call_command,
"importchannel",
"network",
task["channel_id"],
baseurl=task["baseurl"],
peer_id=task["peer_id"],
extra_metadata=task,
cancellable=True,
)
resp = _job_to_response(priority_queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startremotecontentimport(self, request):
task = validate_remote_import_task(request, request.data)
task.update({"type": "REMOTECONTENTIMPORT"})
job_id = queue.enqueue(
call_command,
"importcontent",
"network",
task["channel_id"],
baseurl=task["baseurl"],
peer_id=task["peer_id"],
node_ids=task["node_ids"],
exclude_node_ids=task["exclude_node_ids"],
extra_metadata=task,
track_progress=True,
cancellable=True,
)
resp = _job_to_response(queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startdiskbulkimport(self, request):
if not isinstance(request.data, list):
raise serializers.ValidationError(
"POST data must be a list of task descriptions"
)
tasks = map(partial(validate_local_import_task, request), request.data)
job_ids = []
for task in tasks:
task.update({"type": "DISKIMPORT", "database_ready": False})
import_job_id = queue.enqueue(
_diskimport,
task["channel_id"],
task["datafolder"],
drive_id=task["drive_id"],
extra_metadata=task,
track_progress=True,
cancellable=True,
)
job_ids.append(import_job_id)
resp = [_job_to_response(queue.fetch_job(job_id)) for job_id in job_ids]
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startdiskchannelimport(self, request):
task = validate_local_import_task(request, request.data)
task.update({"type": "DISKCHANNELIMPORT"})
job_id = priority_queue.enqueue(
call_command,
"importchannel",
"disk",
task["channel_id"],
task["datafolder"],
drive_id=task["drive_id"],
extra_metadata=task,
cancellable=True,
)
resp = _job_to_response(priority_queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startdiskcontentimport(self, request):
task = validate_local_import_task(request, request.data)
task.update({"type": "DISKCONTENTIMPORT"})
job_id = queue.enqueue(
call_command,
"importcontent",
"disk",
task["channel_id"],
task["datafolder"],
drive_id=task["drive_id"],
node_ids=task["node_ids"],
exclude_node_ids=task["exclude_node_ids"],
extra_metadata=task,
track_progress=True,
cancellable=True,
)
resp = _job_to_response(queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startbulkdelete(self, request):
if not isinstance(request.data, list):
raise serializers.ValidationError(
"POST data must be a list of task descriptions"
)
tasks = map(partial(validate_deletion_task, request), request.data)
job_ids = []
for task in tasks:
task.update({"type": "DELETECHANNEL"})
if task["node_ids"] or task["exclude_node_ids"]:
task["file_size"] = None
task["total_resources"] = None
delete_job_id = queue.enqueue(
call_command,
"deletecontent",
task["channel_id"],
track_progress=True,
extra_metadata=task,
)
job_ids.append(delete_job_id)
resp = [_job_to_response(queue.fetch_job(job_id)) for job_id in job_ids]
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startdeletechannel(self, request):
"""
Delete a channel and all its associated content from the server
"""
task = validate_deletion_task(request, request.data)
task.update({"type": "DELETECONTENT"})
if task["node_ids"] or task["exclude_node_ids"]:
task["file_size"] = None
task["total_resources"] = None
task_id = queue.enqueue(
call_command,
"deletecontent",
task["channel_id"],
node_ids=task["node_ids"],
exclude_node_ids=task["exclude_node_ids"],
force_delete=task["force_delete"],
track_progress=True,
extra_metadata=task,
)
# attempt to get the created Task, otherwise return pending status
resp = _job_to_response(queue.fetch_job(task_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startdiskbulkexport(self, request):
if not isinstance(request.data, list):
raise serializers.ValidationError(
"POST data must be a list of task descriptions"
)
tasks = map(partial(validate_local_export_task, request), request.data)
job_ids = []
for task in tasks:
task.update({"type": "DISKEXPORT"})
export_job_id = queue.enqueue(
_localexport,
task["channel_id"],
task["drive_id"],
track_progress=True,
cancellable=True,
extra_metadata=task,
)
job_ids.append(export_job_id)
resp = [_job_to_response(queue.fetch_job(job_id)) for job_id in job_ids]
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startdiskexport(self, request):
"""
Export a channel to a local drive, and copy content to the drive.
"""
task = validate_local_export_task(request, request.data)
task.update({"type": "DISKCONTENTEXPORT"})
task_id = queue.enqueue(
_localexport,
task["channel_id"],
task["drive_id"],
track_progress=True,
cancellable=True,
node_ids=task["node_ids"],
exclude_node_ids=task["exclude_node_ids"],
extra_metadata=task,
)
# attempt to get the created Task, otherwise return pending status
resp = _job_to_response(queue.fetch_job(task_id))
return Response(resp)
@decorators.action(methods=["get"], detail=False)
def localdrive(self, request):
drives = get_mounted_drives_with_channel_info()
# make sure everything is a dict, before converting to JSON
if not isinstance(drives, dict):
raise AssertionError
out = [mountdata._asdict() for mountdata in drives.values()]
return Response(out)
@decorators.action(methods=["post"], detail=False)
def importusersfromcsv(self, request):
"""
Import users, classes, roles and role assignments from a csv file.
:param: FILE: file dictionary with the file object
:param: csvfile: filename of the file stored in kolibri temp folder
:param: dryrun: validate the data but don't modify the database
:param: delete: Users not in the csv will be deleted from the facility, and classes cleared
:returns: An object with the job information
"""
def manage_fileobject(request, temp_dir):
upload = UploadedFile(request.FILES["csvfile"])
# Django uses InMemoryUploadedFile for files less than 2.5Mb
# and TemporaryUploadedFile for bigger files:
if type(upload.file) == InMemoryUploadedFile:
_, filepath = mkstemp(dir=temp_dir, suffix=".upload")
with open(filepath, "w+b") as dest:
filepath = dest.name
for chunk in upload.file.chunks():
dest.write(chunk)
else:
tmpfile = upload.file.temporary_file_path()
filename = ntpath.basename(tmpfile)
filepath = os.path.join(temp_dir, filename)
shutil.copy(tmpfile, filepath)
return filepath
temp_dir = os.path.join(conf.KOLIBRI_HOME, "temp")
if not os.path.isdir(temp_dir):
os.mkdir(temp_dir)
locale = get_language_from_request(request)
# the request must contain either an object file
# or the filename of the csv stored in Kolibri temp folder
# Validation will provide the file object, while
# Importing will provide the filename, previously validated
if not request.FILES:
filename = request.data.get("csvfile", None)
if filename:
filepath = os.path.join(temp_dir, filename)
else:
return HttpResponseBadRequest("The request must contain a file object")
else:
if "csvfile" not in request.FILES:
return HttpResponseBadRequest("Wrong file object")
filepath = manage_fileobject(request, temp_dir)
delete = request.data.get("delete", None)
dryrun = request.data.get("dryrun", None)
userid = request.user.pk
facility_id = request.data.get("facility_id", None)
job_type = "IMPORTUSERSFROMCSV"
job_metadata = {"type": job_type, "started_by": userid, "facility": facility_id}
job_args = ["bulkimportusers"]
if dryrun:
job_args.append("--dryrun")
if delete:
job_args.append("--delete")
job_args.append(filepath)
job_kwd_args = {
"facility": facility_id,
"userid": userid,
"locale": locale,
"extra_metadata": job_metadata,
"track_progress": True,
}
job_id = priority_queue.enqueue(call_command, *job_args, **job_kwd_args)
resp = _job_to_response(priority_queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def exportuserstocsv(self, request):
"""
Export users, classes, roles and role assignments to a csv file.
:param: facility_id
:returns: An object with the job information
"""
facility_id = request.data.get("facility_id", None)
try:
if facility_id:
facility = Facility.objects.get(pk=facility_id).id
else:
facility = request.user.facility
except Facility.DoesNotExist:
raise serializers.ValidationError(
"Facility with ID {} does not exist".format(facility_id)
)
job_type = "EXPORTUSERSTOCSV"
job_metadata = {
"type": job_type,
"started_by": request.user.pk,
"facility": facility,
}
locale = get_language_from_request(request)
job_id = priority_queue.enqueue(
call_command,
"bulkexportusers",
facility=facility,
locale=locale,
overwrite="true",
extra_metadata=job_metadata,
track_progress=True,
)
resp = _job_to_response(priority_queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def startexportlogcsv(self, request):
"""
Dumps the required logs in csv format.
By default it will dump contentsummarylog.
:param: logtype: Kind of log to dump, summary or session
:param: facility
:returns: An object with the job information
"""
facility_id = request.data.get("facility", None)
if facility_id:
facility = Facility.objects.get(pk=facility_id)
else:
facility = request.user.facility
log_type = request.data.get("logtype", "summary")
if log_type in CSV_EXPORT_FILENAMES.keys():
logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
filepath = os.path.join(
logs_dir,
CSV_EXPORT_FILENAMES[log_type].format(facility.name, facility_id[:4]),
)
else:
raise Http404(
"Impossible to create a csv export file for {}".format(log_type)
)
if not os.path.isdir(logs_dir):
os.mkdir(logs_dir)
job_type = (
"EXPORTSUMMARYLOGCSV" if log_type == "summary" else "EXPORTSESSIONLOGCSV"
)
job_metadata = {
"type": job_type,
"started_by": request.user.pk,
"facility": facility.id,
}
job_id = priority_queue.enqueue(
call_command,
"exportlogs",
log_type=log_type,
output_file=filepath,
facility=facility.id,
overwrite="true",
extra_metadata=job_metadata,
track_progress=True,
)
resp = _job_to_response(priority_queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False)
def channeldiffstats(self, request):
job_metadata = {}
channel_id = request.data.get("channel_id")
method = request.data.get("method")
drive_id = request.data.get("drive_id")
baseurl = request.data.get("baseurl")
# request validation and job metadata info
if not channel_id:
raise serializers.ValidationError("The channel_id field is required.")
if not method:
raise serializers.ValidationError("The method field is required.")
if method == "network":
baseurl = baseurl or conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"]
job_metadata["baseurl"] = baseurl
# get channel version metadata
url = get_channel_lookup_url(baseurl=baseurl, identifier=channel_id)
resp = requests.get(url)
channel_metadata = resp.json()
job_metadata["new_channel_version"] = channel_metadata[0]["version"]
elif method == "disk":
if not drive_id:
raise serializers.ValidationError(
"The drive_id field is required when using 'disk' method."
)
job_metadata = _add_drive_info(job_metadata, request.data)
# get channel version metadata
drive = get_mounted_drive_by_id(drive_id)
channel_metadata = read_channel_metadata_from_db_file(
get_content_database_file_path(channel_id, drive.datafolder)
)
job_metadata["new_channel_version"] = channel_metadata["version"]
else:
raise serializers.ValidationError(
"'method' field should either be 'network' or 'disk'."
)
job_metadata.update(
{
"type": "CHANNELDIFFSTATS",
"started_by": request.user.pk,
"channel_id": channel_id,
}
)
job_id = priority_queue.enqueue(
diff_stats,
channel_id,
method,
drive_id=drive_id,
baseurl=baseurl,
extra_metadata=job_metadata,
track_progress=False,
cancellable=True,
)
resp = _job_to_response(priority_queue.fetch_job(job_id))
return Response(resp)
class FacilityTasksViewSet(BaseViewSet):
@property
def queues(self):
return [facility_queue]
def default_permission_classes(self):
if self.action in ["list", "retrieve"]:
return [FacilitySyncPermissions]
@decorators.action(
methods=["post"], detail=False, permission_classes=[FacilitySyncPermissions]
)
def startdataportalsync(self, request):
"""
Initiate a PUSH sync with Kolibri Data Portal.
"""
facility_id = validate_facility(request)
sync_args = validate_sync_task(request)
job_data = prepare_sync_job(
facility=facility_id,
extra_metadata=prepare_sync_task(*sync_args, type="SYNCDATAPORTAL"),
)
job_id = facility_queue.enqueue(call_command, "sync", **job_data)
resp = _job_to_response(facility_queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False, permission_classes=[IsSuperuser])
def startdataportalbulksync(self, request):
"""
Initiate a PUSH sync with Kolibri Data Portal for ALL registered facilities.
"""
responses = []
facilities = Facility.objects.filter(dataset__registered=True).values_list(
"id", "name"
)
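        # Reuse the single-facility endpoint for every registered facility, updating request.data in place for each one.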
for id, name in facilities:
request.data.update(facility=id, facility_name=name)
responses.append(self.startdataportalsync(request).data)
return Response(responses)
# Method needs to be available in Setup Wizard as well
@decorators.action(
methods=["post"],
detail=False,
permission_classes=[IsSuperuser | NotProvisionedCanPost],
)
def startpeerfacilityimport(self, request):
"""
Initiate a PULL of a specific facility from another device.
"""
baseurl, facility_id, username, password = validate_peer_sync_job(request)
validate_and_create_sync_credentials(baseurl, facility_id, username, password)
sync_args = validate_sync_task(request)
job_data = prepare_peer_sync_job(
baseurl,
facility_id,
no_push=True,
no_provision=True,
extra_metadata=prepare_sync_task(*sync_args, type="SYNCPEER/PULL"),
)
job_id = facility_queue.enqueue(call_command, "sync", **job_data)
resp = _job_to_response(facility_queue.fetch_job(job_id))
return Response(resp)
@decorators.action(
methods=["post"], detail=False, permission_classes=[FacilitySyncPermissions]
)
def startpeerfacilitysync(self, request):
"""
Initiate a SYNC (PULL + PUSH) of a specific facility from another device.
"""
baseurl, facility_id, username, password = validate_peer_sync_job(request)
validate_and_create_sync_credentials(baseurl, facility_id, username, password)
sync_args = validate_sync_task(request)
job_data = prepare_peer_sync_job(
baseurl,
facility_id,
extra_metadata=prepare_sync_task(*sync_args, type="SYNCPEER/FULL"),
)
job_id = facility_queue.enqueue(call_command, "sync", **job_data)
resp = _job_to_response(facility_queue.fetch_job(job_id))
return Response(resp)
@decorators.action(methods=["post"], detail=False, permission_classes=[IsSuperuser])
def startdeletefacility(self, request):
"""
Initiate a task to delete a facility
"""
try:
facility_id = request.data.get("facility")
if not facility_id:
raise KeyError()
except KeyError:
raise ParseError(
dict(code="INVALID_FACILITY", message="Missing `facility` parameter")
)
if not Facility.objects.filter(id=facility_id).exists():
raise ValidationError(
dict(code="INVALID_FACILITY", message="Facility doesn't exist")
)
if not Facility.objects.exclude(id=facility_id).exists():
raise ValidationError(
dict(
code="SOLE_FACILITY",
message="Cannot delete the sole facility on the device",
)
)
if request.user.is_facility_user and request.user.facility_id == facility_id:
raise ValidationError(
dict(code="FACILITY_MEMBER", message="User is member of facility")
)
facility_name = Facility.objects.get(id=facility_id).name
job_id = facility_queue.enqueue(
call_command,
"deletefacility",
facility=facility_id,
track_progress=True,
noninteractive=True,
cancellable=False,
extra_metadata=dict(
facility=facility_id,
facility_name=facility_name,
started_by=request.user.pk,
started_by_username=request.user.username,
type="DELETEFACILITY",
),
)
resp = _job_to_response(facility_queue.fetch_job(job_id))
return Response(resp)
class ResourceGoneError(APIException):
"""
API error for when a peer no longer is online
"""
status_code = status.HTTP_410_GONE
default_detail = "Unable to connect"
def prepare_sync_task(
facility_id,
user_id,
username,
facility_name,
device_name,
device_id,
baseurl,
**kwargs
):
task_data = dict(
facility=facility_id,
started_by=user_id,
started_by_username=username,
sync_state=FacilitySyncState.PENDING,
bytes_sent=0,
bytes_received=0,
)
task_type = kwargs.get("type")
if task_type in ["SYNCPEER/PULL", "SYNCPEER/FULL"]:
# Extra metadata that can be passed from the client
extra_task_data = dict(
facility_name=facility_name,
device_name=device_name,
device_id=device_id,
baseurl=baseurl,
)
task_data.update(extra_task_data)
elif task_type == "SYNCDATAPORTAL":
# Extra metadata that can be passed from the client
extra_task_data = dict(facility_name=facility_name)
task_data.update(extra_task_data)
task_data.update(kwargs)
return task_data
def validate_facility(request):
# ensure we have the facility
try:
facility_id = request.data.get("facility")
if not facility_id:
raise KeyError()
except KeyError:
raise ParseError("Missing `facility` parameter")
return facility_id
def validate_sync_task(request):
facility_id = validate_facility(request)
user_id = request.user.pk
username = request.user.username
facility_name = request.data.get("facility_name", "")
device_name = request.data.get("device_name", "")
device_id = request.data.get("device_id", "")
baseurl = request.data.get("baseurl", "")
return (
facility_id,
user_id,
username,
facility_name,
device_name,
device_id,
baseurl,
)
def prepare_sync_job(**kwargs):
job_data = dict(
chunk_size=200,
noninteractive=True,
extra_metadata={},
track_progress=True,
cancellable=False,
)
job_data.update(kwargs)
return job_data
def validate_peer_sync_job(request):
# validate the baseurl
try:
address = request.data.get("baseurl")
if not address:
raise KeyError()
baseurl = NetworkClient(address=address).base_url
except KeyError:
raise ParseError("Missing `baseurl` parameter")
except URLParseError:
raise ParseError("Invalid URL")
except NetworkLocationNotFound:
raise ResourceGoneError()
facility_id = validate_facility(request)
username = request.data.get("username", None)
password = request.data.get("password", None)
return (baseurl, facility_id, username, password)
def validate_and_create_sync_credentials(
baseurl, facility_id, username, password, user_id=None
):
"""
Validates user credentials for syncing by performing certificate verification, which will also
save any certificates after successful authentication
:param user_id: Optional user ID for SoUD use case
"""
# call this in case user directly syncs without migrating database
if not ScopeDefinition.objects.filter():
call_command("loaddata", "scopedefinitions")
controller = MorangoProfileController(PROFILE_FACILITY_DATA)
network_connection = controller.create_network_connection(baseurl)
# try to get the certificate, which will save it if successful
try:
# make sure we get the dataset ID
dataset_id = get_dataset_id(
baseurl, identifier=facility_id, noninteractive=True
)
# username and password are not required for this to succeed unless there is no cert
get_client_and_server_certs(
username,
password,
dataset_id,
network_connection,
user_id=user_id,
facility_id=facility_id,
noninteractive=True,
)
except (CommandError, HTTPError) as e:
if not username and not password:
raise PermissionDenied()
else:
raise AuthenticationFailed(e)
def prepare_peer_sync_job(baseurl, facility_id, **kwargs):
"""
Initializes and validates connection to peer with username and password for the sync command. If
already initialized, the username and password do not need to be supplied
"""
return prepare_sync_job(facility=facility_id, baseurl=baseurl, **kwargs)
def prepare_soud_sync_job(baseurl, facility_id, user_id, **kwargs):
"""
A SoUD sync requires that the device is already "registered" with the server, so there
shouldn't be a need for username/password and the verification of those. This eliminates the
validation to keep overhead low for automated single-user syncing. To initialize with a peer
for a SoUD, use `prepare_peer_sync_job` with `user` keyword argument
"""
return prepare_sync_job(
baseurl=baseurl, facility=facility_id, user=user_id, **kwargs
)
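# Illustrative usage only (variable names are hypothetical): a single-user device sync could be queued as
#   job_data = prepare_soud_sync_job(baseurl, facility_id, learner_user_id, extra_metadata={})
#   facility_queue.enqueue(call_command, "sync", **job_data)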
def prepare_soud_resume_sync_job(baseurl, sync_session_id, user_id, **kwargs):
"""
Resuming a SoUD sync requires that a normal sync has occurred and the `SyncSession` is still
active
"""
return prepare_sync_job(baseurl=baseurl, id=sync_session_id, user=user_id, **kwargs)
def _remoteimport(
channel_id,
baseurl,
peer_id=None,
update_progress=None,
check_for_cancel=None,
node_ids=None,
is_updating=False,
exclude_node_ids=None,
extra_metadata=None,
):
call_command(
"importchannel",
"network",
channel_id,
baseurl=baseurl,
update_progress=update_progress,
check_for_cancel=check_for_cancel,
)
# Make some real-time updates to the metadata
job = get_current_job()
# Signal to UI that the DB-downloading step is done so it knows to display
# progress correctly
job.update_progress(0, 1.0)
job.extra_metadata["database_ready"] = True
# Add the channel name if it wasn't added initially
if job and job.extra_metadata.get("channel_name", "") == "":
job.extra_metadata["channel_name"] = get_channel_name(channel_id)
job.save_meta()
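    # Second stage: fetch the actual content files over the network now that the channel database is in place.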
call_command(
"importcontent",
"network",
channel_id,
baseurl=baseurl,
peer_id=peer_id,
node_ids=node_ids,
exclude_node_ids=exclude_node_ids,
import_updates=is_updating,
update_progress=update_progress,
check_for_cancel=check_for_cancel,
)
def _diskimport(
channel_id,
directory,
drive_id=None,
update_progress=None,
check_for_cancel=None,
node_ids=None,
is_updating=False,
exclude_node_ids=None,
extra_metadata=None,
):
call_command(
"importchannel",
"disk",
channel_id,
directory,
update_progress=update_progress,
check_for_cancel=check_for_cancel,
)
# Make some real-time updates to the metadata
job = get_current_job()
# Signal to UI that the DB-downloading step is done so it knows to display
# progress correctly
job.update_progress(0, 1.0)
job.extra_metadata["database_ready"] = True
# Add the channel name if it wasn't added initially
if job and job.extra_metadata.get("channel_name", "") == "":
job.extra_metadata["channel_name"] = get_channel_name(channel_id)
job.save_meta()
# Skip importcontent step if updating and no nodes have changed
if is_updating and (node_ids is not None) and len(node_ids) == 0:
pass
else:
call_command(
"importcontent",
"disk",
channel_id,
directory,
drive_id=drive_id,
node_ids=node_ids,
exclude_node_ids=exclude_node_ids,
update_progress=update_progress,
check_for_cancel=check_for_cancel,
)
def _localexport(
channel_id,
drive_id,
update_progress=None,
check_for_cancel=None,
node_ids=None,
exclude_node_ids=None,
extra_metadata=None,
):
drive = get_mounted_drive_by_id(drive_id)
call_command(
"exportchannel",
channel_id,
drive.datafolder,
update_progress=update_progress,
check_for_cancel=check_for_cancel,
)
try:
call_command(
"exportcontent",
channel_id,
drive.datafolder,
node_ids=node_ids,
exclude_node_ids=exclude_node_ids,
update_progress=update_progress,
check_for_cancel=check_for_cancel,
)
except UserCancelledError:
try:
os.remove(
get_content_database_file_path(channel_id, datafolder=drive.datafolder)
)
except OSError:
pass
raise
def _job_to_response(job):
if not job:
return {
"type": None,
"started_by": None,
"status": State.SCHEDULED,
"percentage": 0,
"progress": [],
"id": None,
"cancellable": False,
"clearable": False,
}
output = {
"status": job.state,
"exception": str(job.exception),
"traceback": str(job.traceback),
"percentage": job.percentage_progress,
"id": job.job_id,
"cancellable": job.cancellable,
"clearable": job.state in [State.FAILED, State.CANCELED, State.COMPLETED],
}
output.update(job.extra_metadata)
return output
| 33.615066 | 100 | 0.632985 |
ede987a47b1789a599cfaa7e3ad1e65412ed3d95 | 11,967 | py | Python | app/main/routes.py | justinsitarz/flask-toolio | f1ab22b2f9469a439af1c8436a378923a8c782ee | ["MIT"] | null | null | null | app/main/routes.py | justinsitarz/flask-toolio | f1ab22b2f9469a439af1c8436a378923a8c782ee | ["MIT"] | null | null | null | app/main/routes.py | justinsitarz/flask-toolio | f1ab22b2f9469a439af1c8436a378923a8c782ee | ["MIT"] | null | null | null |
from datetime import datetime
from flask import render_template, flash, redirect, url_for, request, g, \
jsonify, current_app
from flask_login import current_user, login_required
from flask_babel import _, get_locale
from guess_language import guess_language
from app import db
from app.main.forms import EditProfileForm, EmptyForm, PostForm, SearchForm, \
MessageForm, AddToolForm, EditToolForm
from app.models import User, Post, Message, Notification, Tool
from app.translate import translate
from app.main import bp
import os
import boto3, botocore
from werkzeug.utils import secure_filename
import uuid
@bp.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
g.search_form = SearchForm()
g.locale = str(get_locale())
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
@login_required
def index():
form = PostForm()
if form.validate_on_submit():
language = guess_language(form.post.data)
if language == 'UNKNOWN' or len(language) > 5:
language = ''
post = Post(body=form.post.data, author=current_user,
language=language)
db.session.add(post)
db.session.commit()
flash(_('Your post is now live!'))
return redirect(url_for('main.index'))
page = request.args.get('page', 1, type=int)
posts = current_user.followed_posts().paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
next_url = url_for('main.index', page=posts.next_num) \
if posts.has_next else None
prev_url = url_for('main.index', page=posts.prev_num) \
if posts.has_prev else None
return render_template('index.html', title=_('Home'), form=form,
posts=posts.items, next_url=next_url,
prev_url=prev_url)
@bp.route('/explore')
@login_required
def explore():
page = request.args.get('page', 1, type=int)
tools = Tool.query.filter(Tool.user_id != current_user.id).order_by(Tool.timestamp.desc()).paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
print(tools.items)
next_url = url_for('main.explore', page=tools.next_num) \
if tools.has_next else None
prev_url = url_for('main.explore', page=tools.prev_num) \
if tools.has_prev else None
return render_template('explore.html', title=_('Explore'),
tools=tools.items, next_url=next_url,
prev_url=prev_url)
@bp.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
page = request.args.get('page', 1, type=int)
posts = user.posts.order_by(Post.timestamp.desc()).paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
next_url = url_for('main.user', username=user.username,
page=posts.next_num) if posts.has_next else None
prev_url = url_for('main.user', username=user.username,
page=posts.prev_num) if posts.has_prev else None
form = EmptyForm()
return render_template('user.html', user=user, posts=posts.items,
next_url=next_url, prev_url=prev_url, form=form)
@bp.route('/user/<username>/popup')
@login_required
def user_popup(username):
user = User.query.filter_by(username=username).first_or_404()
form = EmptyForm()
return render_template('user_popup.html', user=user, form=form)
@bp.route('/tool/<int:tool_id>/popup')
@login_required
def tool_popup(tool_id):
tool = Tool.query.get_or_404(tool_id)
form = EmptyForm()
return render_template('tool_popup.html', tool=tool, form=form)
@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
current_user.username = form.username.data
current_user.about_me = form.about_me.data
db.session.commit()
flash(_('Your changes have been saved.'))
return redirect(url_for('main.edit_profile'))
elif request.method == 'GET':
form.username.data = current_user.username
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', title=_('Edit Profile'),
form=form)
@bp.route('/add_tool', methods=['GET', 'POST'])
@login_required
def add_tool():
form = AddToolForm()
if form.validate_on_submit():
f = form.image.data
url = upload_file(f)
tool = Tool(name=form.name.data, owner=current_user,
description=form.description.data,
image_path=url)
db.session.add(tool)
db.session.commit()
flash(_('Your tool has been listed!'))
return redirect(url_for('main.tools'))
return render_template('add_tool.html', title='Add a tool', form=form)
def upload_file(file):
if file:
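        # Replace the client-supplied filename with a UUID so uploads cannot collide in the bucket.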
file.filename = str(uuid.uuid1())
print(file.filename)
output = upload_file_to_s3(file, current_app.config["S3_BUCKET"])
return str(output)
else:
return redirect("/tools")
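# Streams the file object to S3 with credentials from the Flask config and returns the object's public URL on success.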
def upload_file_to_s3(file, bucket_name, acl="public-read"):
s3 = boto3.client("s3", aws_access_key_id=current_app.config['S3_KEY'],
aws_secret_access_key=current_app.config['S3_SECRET'])
try:
s3.upload_fileobj(file, bucket_name, file.filename, ExtraArgs={"ACL": acl,
"ContentType": file.content_type})
except Exception as e:
print("Something Happened: ", e)
return e
return "{}{}".format(current_app.config["S3_LOCATION"], file.filename)
@bp.route('/tool/delete/<int:tool_id>', methods=['POST'])
@login_required
def delete_tool(tool_id):
tool = Tool.query.get_or_404(tool_id)
db.session.delete(tool)
db.session.commit()
flash('Item deleted.')
return redirect(url_for('main.tools'))
@bp.route('/edit_tool', methods=['GET', 'POST'])
@login_required
def edit_tool():
tool_id = 1
tool = Tool.query.filter_by(id=tool_id).first()
form = EditToolForm()
if form.validate_on_submit():
f = form.image.data
url = upload_file(f)
tool.name = form.name.data
tool.description = form.description.data
tool.image_path = url
db.session.commit()
flash(_('Your changes have been saved.'))
return redirect(url_for('main.tools'))
elif request.method == 'GET':
form.name.data = tool.name
form.description.data = tool.description
return render_template('edit_tool.html', title=_('Edit Tool'),
form=form)
@bp.route('/follow/<username>', methods=['POST'])
@login_required
def follow(username):
form = EmptyForm()
if form.validate_on_submit():
user = User.query.filter_by(username=username).first()
if user is None:
flash(_('User %(username)s not found.', username=username))
return redirect(url_for('main.index'))
if user == current_user:
flash(_('You cannot follow yourself!'))
return redirect(url_for('main.user', username=username))
current_user.follow(user)
db.session.commit()
flash(_('You are following %(username)s!', username=username))
return redirect(url_for('main.user', username=username))
else:
return redirect(url_for('main.index'))
@bp.route('/unfollow/<username>', methods=['POST'])
@login_required
def unfollow(username):
form = EmptyForm()
if form.validate_on_submit():
user = User.query.filter_by(username=username).first()
if user is None:
flash(_('User %(username)s not found.', username=username))
return redirect(url_for('main.index'))
if user == current_user:
flash(_('You cannot unfollow yourself!'))
return redirect(url_for('main.user', username=username))
current_user.unfollow(user)
db.session.commit()
flash(_('You are not following %(username)s.', username=username))
return redirect(url_for('main.user', username=username))
else:
return redirect(url_for('main.index'))
@bp.route('/translate', methods=['POST'])
@login_required
def translate_text():
return jsonify({'text': translate(request.form['text'],
request.form['source_language'],
request.form['dest_language'])})
@bp.route('/search')
@login_required
def search():
if not g.search_form.validate():
return redirect(url_for('main.explore'))
page = request.args.get('page', 1, type=int)
posts, total = Post.search(g.search_form.q.data, page,
current_app.config['POSTS_PER_PAGE'])
next_url = url_for('main.search', q=g.search_form.q.data, page=page + 1) \
if total > page * current_app.config['POSTS_PER_PAGE'] else None
prev_url = url_for('main.search', q=g.search_form.q.data, page=page - 1) \
if page > 1 else None
return render_template('search.html', title=_('Search'), posts=posts,
next_url=next_url, prev_url=prev_url)
@bp.route('/send_message/<recipient>', methods=['GET', 'POST'])
@login_required
def send_message(recipient):
user = User.query.filter_by(username=recipient).first_or_404()
form = MessageForm()
if form.validate_on_submit():
msg = Message(author=current_user, recipient=user,
body=form.message.data)
db.session.add(msg)
user.add_notification('unread_message_count', user.new_messages())
db.session.commit()
flash(_('Your message has been sent.'))
return redirect(url_for('main.user', username=recipient))
return render_template('send_message.html', title=_('Send Message'),
form=form, recipient=recipient)
@bp.route('/tools')
@login_required
def tools():
# Retrieve tools owned by user
tools = Tool.query.filter_by(user_id=current_user.id)
    return render_template('tools.html', title='Stuff', user=current_user, tools=tools)  # pass the logged-in user, not the module-level view function
@bp.route('/tool/<int:tool_id>', methods=['GET'])
@login_required
def tool(tool_id):
tool = Tool.query.get_or_404(tool_id)
return render_template('tool.html', tool=tool)
@bp.route('/messages')
@login_required
def messages():
current_user.last_message_read_time = datetime.utcnow()
current_user.add_notification('unread_message_count', 0)
db.session.commit()
page = request.args.get('page', 1, type=int)
messages = current_user.messages_received.order_by(
Message.timestamp.desc()).paginate(
page, current_app.config['POSTS_PER_PAGE'], False)
next_url = url_for('main.messages', page=messages.next_num) \
if messages.has_next else None
prev_url = url_for('main.messages', page=messages.prev_num) \
if messages.has_prev else None
return render_template('messages.html', messages=messages.items,
next_url=next_url, prev_url=prev_url)
@bp.route('/export_posts')
@login_required
def export_posts():
if current_user.get_task_in_progress('export_posts'):
flash(_('An export task is currently in progress'))
else:
current_user.launch_task('export_posts', _('Exporting posts...'))
db.session.commit()
return redirect(url_for('main.user', username=current_user.username))
@bp.route('/notifications')
@login_required
def notifications():
since = request.args.get('since', 0.0, type=float)
notifications = current_user.notifications.filter(
Notification.timestamp > since).order_by(Notification.timestamp.asc())
return jsonify([{
'name': n.name,
'data': n.get_data(),
'timestamp': n.timestamp
} for n in notifications])
| 37.280374 | 104 | 0.654634 |
712787511adc3f3962be4c81c2d358cf40213f2d | 34,796 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/aio/operations_async/_public_ip_addresses_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | ["MIT"] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/aio/operations_async/_public_ip_addresses_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | ["MIT"] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/aio/operations_async/_public_ip_addresses_operations_async.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | ["MIT"] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPAddressesOperations:
"""PublicIPAddressesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
public_ip_address_name: str,
**kwargs
) -> None:
"""Deletes the specified public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def get(
self,
resource_group_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.PublicIPAddress":
"""Gets the specified public IP address in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "models.PublicIPAddress",
**kwargs
) -> "models.PublicIPAddress":
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPAddress')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "models.PublicIPAddress",
**kwargs
) -> "models.PublicIPAddress":
"""Creates or updates a static or dynamic public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to the create or update public IP address operation.
:type parameters: ~azure.mgmt.network.v2017_03_01.models.PublicIPAddress
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.PublicIPAddress
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets all the public IP addresses in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets all public IP addresses in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list_virtual_machine_scale_set_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses on a virtual machine scale set level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'} # type: ignore
def list_virtual_machine_scale_set_vm_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses in a virtual machine IP configuration in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The network interface name.
:type network_interface_name: str
:param ip_configuration_name: The IP configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_03_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddressListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore
async def get_virtual_machine_scale_set_public_ip_address(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.PublicIPAddress":
"""Get the specified public IP address in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the IP configuration.
:type ip_configuration_name: str
:param public_ip_address_name: The name of the public IP Address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_03_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPAddress"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
# Construct URL
url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore
| 50.502177 | 395 | 0.673238 |
2fac46f82daec4abf9e2e66bfb2e5d95e78a4510 | 3,564 | py | Python | download_mid_from_csv.py | gu-ma/Neural-Metal | b0ae8d41c924a33fe5946713f570525d5bfcd59d | ["MIT"] | null | null | null | download_mid_from_csv.py | gu-ma/Neural-Metal | b0ae8d41c924a33fe5946713f570525d5bfcd59d | ["MIT"] | null | null | null | download_mid_from_csv.py | gu-ma/Neural-Metal | b0ae8d41c924a33fe5946713f570525d5bfcd59d | ["MIT"] | null | null | null |
# Download files from a CSV scrapping
import os
import sys
import csv
import urllib.request
import urllib.parse
import urllib.error
import plistlib
import xattr
import re
from time import sleep
def parse_csv(file_name):
tracks_list = open(file_name, "r")
return csv.reader(tracks_list)
def write_xattr_tags(file_path, tags):
    # plistlib.writePlistToString is Python 2 only; dumps() is the Python 3 equivalent
    bpl_tags = plistlib.dumps(tags)
    optional_tag = "com.apple.metadata:"
    # use an explicit loop: map() is lazy in Python 3, so the original call never ran
    for attr_name in ["kMDItemFinderComment", "_kMDItemUserTags", "kMDItemOMUserTags"]:
        xattr.setxattr(file_path, optional_tag + attr_name, bpl_tags)
def create_path(path):
if not os.path.isdir(path):
os.makedirs(path)
# numeric defaults: the original empty-string defaults would break the index comparisons below
def save_midi_files(output_folder_fp, csv_fp, start=0, end=float("inf"), sleep_duration=.2):
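    # Walk the scraped CSV and download every MIDI file that is not already on disk, throttled by sleep_duration between requests.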
csv_reader = parse_csv(csv_fp)
header = next(csv_reader)
print(header)
# change headers here depending on the CSV format
# template for "midimelody_ru.csv"
if "midimelody_ru.csv" in csv_fp:
alphabet_index = header.index("alphabet")
artist_index = header.index("artist")
track_index = header.index("midi_files")
url_index = header.index("midi_files_href")
# template for "ninsheet.csv"
elif "ninsheet.csv" in csv_fp:
serie_index = header.index("series")
title_index = header.index("title")
url_index = header.index("mid-href")
for index, row in enumerate(csv_reader):
if index < end and index > start:
# template for "midimelody_ru.csv"
if "midimelody_ru.csv" in csv_fp:
alphabet = row[alphabet_index]
artist = row[artist_index]
track = row[track_index]
url = row[url_index]
artist_path = os.path.join(output_folder_fp, alphabet, artist)
song_path = os.path.join(artist_path, track)
label = "x"
if not os.path.isfile(song_path) and url:
create_path(artist_path)
urllib.request.urlretrieve(url, song_path)
label = "+"
print(label + " " + str(index) + "\t" + alphabet +
"\t" + artist + "\t" + track)
# template for "ninsheet.csv"
elif "ninsheet.csv" in csv_fp:
serie = row[serie_index]
title = row[title_index]
url = row[url_index]
artist_path = os.path.join(
output_folder_fp, re.sub('[^A-Za-z0-9]+', ' ', serie))
song_path = os.path.join(artist_path, re.sub(
'[^A-Za-z0-9]+', ' ', title) + ".mid")
label = "x"
if not os.path.isfile(song_path) and url:
create_path(artist_path)
urllib.request.urlretrieve(url, song_path)
label = "+"
print(label + " " + str(index) + "\t" + serie + "\t" + title)
# TBD
#
# tag_list1 = ['Rap', 'Yo']
# write_xattr_tags(song_path, tag_list1)
# plistlib.writePlist(tag_list1, song_path)
#
# with open(song_path, 'rb') as fp:
# pl = plistlib.readPlist(fp)
# print(pl)
# pl.append('Blue')
# plistlib.writePlist(pl, song_path)
sleep(sleep_duration)
def main():
script, csv_fp, output_folder_fp = sys.argv
save_midi_files(output_folder_fp, csv_fp, 0, 4000, .1)
if __name__ == '__main__':
sys.exit(main())
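# Example invocation (illustrative): python download_mid_from_csv.py <csv_file> <output_folder>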
avg_line_length: 33.308411 | max_line_length: 83 | alphanum_fraction: 0.565376

hexsha: 178bf55f04e7d0179f642648109ab02296d64292 | size: 589 | ext: py | lang: Python
path: Binarization/Binarization_avgIntensity.py | repo: PRAkTIKal24/CV | head_hexsha: 7cc2cce95283c32730a0a3976980efd89ef91085 | licenses: ["MIT"] (same for stars/issues/forks entries)
stars: null | issues: null | forks: null
import cv2
import numpy as np
from matplotlib import pyplot as plt
I = cv2.imread('hwf.jpeg')
cv2.namedWindow('hwf',cv2.WINDOW_AUTOSIZE)
cv2.imshow('',I)
cv2.waitKey(0)
r, c, cl = I.shape
Sum = 0
for i in range(r):
    for j in range(c):
        # cast to int so the three uint8 channels do not overflow when summed
        Sum += (int(I[i,j,0]) + int(I[i,j,1]) + int(I[i,j,2]))**2
#Finding mean of all pixel intensities and setting it as the threshold
T_avg = np.log(((Sum)/(r*c*cl))**0.5)
for i in range(r):
    for j in range(c):
        if (int(I[i,j,0]) + int(I[i,j,1]) + int(I[i,j,2])) > T_avg:
            I[i,j] = 0
        else:
            I[i,j] = 255
cv2.namedWindow('hw0',cv2.WINDOW_AUTOSIZE)
cv2.imshow('',I)
cv2.waitKey(0)
avg_line_length: 21.035714 | max_line_length: 70 | alphanum_fraction: 0.631579

hexsha: 099673bc3bbd846f4c0ad9148601d6780292c1d0 | size: 403 | ext: py | lang: Python
path: monzo_to_ynab/wsgi.py | repo: Ben-Hampson/monzo_to_ynab | head_hexsha: 44e4e6fb48133d006b73540205b7768c4fca47d3 | licenses: ["MIT"] (same for stars/issues/forks entries)
stars: null | issues: null | forks: null
"""
WSGI config for monzo_to_ynab project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'monzo_to_ynab.settings')
application = get_wsgi_application()
avg_line_length: 23.705882 | max_line_length: 78 | alphanum_fraction: 0.791563

hexsha: f7194df8986a3f798477abbd22f0e92a006cfa20 | size: 967 | ext: py | lang: Python
path: valid-sudoku/valid-sudoku.py | repo: Atri10/Leet-code---Atri_Patel | head_hexsha: 49fc59b9147a44ab04a66128fbb2ef259b5f7b7c | licenses: ["MIT"] (same for stars/issues/forks entries)
stars: 1 (2021-10-10T20:21:18.000Z to 2021-10-10T20:21:18.000Z) | issues: null | forks: null
from typing import List

class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
BZip = list(zip(*board))
def Checkline(li):
temp = [i for i in li if i!="."]
return len(set(temp))==len(temp)
def check_row(board):
for i in board:
if not Checkline(i):return False
return True
def check_col(board):
for i in BZip:
if not Checkline(i):return False
return True
def square(board):
for i in range(0,9,3):
for j in range(0,9,3):
sqr = [board[x][y] for x in range(i,i+3) for y in range(j,j+3)]
if not Checkline(sqr):return False
return True
def checkmat():
return (check_row(board) and check_col(board) and square(board))
return checkmat()
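# Illustrative usage (board is any 9x9 list of lists holding '1'-'9' or '.'):
#   Solution().isValidSudoku(board)  # True if no row, column, or 3x3 box repeats a digit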
avg_line_length: 31.193548 | max_line_length: 83 | alphanum_fraction: 0.458118

hexsha: 3b859f50d54f28879ae73a8835b483b3e63c7152 | size: 1,134 | ext: py | lang: Python
path: rasa_nlu_gao/models/lenet.py | repo: 1073521013/rasa_nlu_gq | head_hexsha: 6c8bea1b14390246b39770abc544986f4c7acf26 | licenses: ["Apache-2.0"] (same for stars/issues/forks entries)
stars: 298 (2018-10-10T04:15:45.000Z to 2022-03-23T12:03:41.000Z) | issues: 40 (2019-01-28T07:04:52.000Z to 2022-02-10T00:30:06.000Z) | forks: 107 (2018-11-28T05:54:54.000Z to 2022-03-31T09:34:05.000Z)
from __future__ import division, print_function, absolute_import
import numpy as np
import tensorflow as tf
# Build a convolutional neural network
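# (despite the name, the function below flattens its input and stacks
# fully-connected layers with dropout; it contains no convolutional layers)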
def conv_net(x, n_classes, num_layers, layer_size, C2, dropout, is_training):
# Define a scope for reusing the variables
with tf.variable_scope('ConvNet'):
# Flatten the data to a 1-D vector for the fully connected layer
x = tf.contrib.layers.flatten(x)
reg = tf.contrib.layers.l2_regularizer(C2)
name = 'dense'
for i in range(num_layers):
x = tf.layers.dense(inputs=x,
units=layer_size[i],
activation=tf.nn.relu,
kernel_regularizer=reg,
name='hidden_layer_{}_{}'.format(name, i))
x = tf.layers.dropout(x, rate=dropout, training=is_training)
out = tf.layers.dense(inputs=x,
units=n_classes,
kernel_regularizer=reg,
name='dense_layer_{}'.format(name))
return out
avg_line_length: 42 | max_line_length: 78 | alphanum_fraction: 0.554674

hexsha: e879fb672c74f7dea3595024bf8e4606e4bc7fb5 | size: 317 | ext: py | lang: Python
path: xamin/xamin/__init__.py | repo: michaelmunje/Xamin-Python-Interface | head_hexsha: d88ae596a32ec956beac045ec04c89a4ad8b5330 | licenses: ["MIT"] (same for stars/issues/forks entries)
stars: null | issues: null | forks: null
from .get_all_names import get_all_names
from .get_all import get_all_tables as get_all_raw
from .get import run_table_query
from .helpers import get_inline_script_output
from .helpers import get_script_output
from .install import install
from .get_downloads import download_tables, multi_core_download, mc_gz_to_csv
avg_line_length: 39.625 | max_line_length: 77 | alphanum_fraction: 0.870662

hexsha: c9e50e9a0437a27c0467e5cea272cc29b2fef2ce | size: 2,445 | ext: py | lang: Python
path: tools/ftp/spider_ftp.py | repo: ChristophBerg/pgweb | head_hexsha: bf6e7a81c0a5f329ffd2f7b3e50c4c7c68735ffc | licenses: ["PostgreSQL"] (same for stars/issues/forks entries)
stars: 1 (2020-12-03T21:52:50.000Z to 2020-12-03T21:52:50.000Z) | issues: null | forks: null
#!/usr/bin/python
#
# spider_ftp.py - spider the ftp site and generate an output file with all
# the metadata we require, that can be transferred over to
# the master web server.
#
import sys
import os
from datetime import datetime
import cPickle as pickle
import codecs
import urllib2
# Directories, specified from the root of the ftp tree and down, that
# will be recursively excluded from the pickle.
exclude_roots = ['/repos', ]
allnodes = {}
def read_file(fn):
f = codecs.open(fn, 'r', encoding='utf-8', errors='replace')
t = f.read()
f.close()
return t
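# Node layout built below: allnodes maps each path (relative to the ftp root)
# to a dict of entries, where every entry carries 't' ('f' file, 'd' directory,
# 'l' symlink), plus 's' (size) and 'd' (mtime) for files, 'd' (link target)
# for symlinks, and 'c' (file contents) for README/CURRENT_MAINTAINER/.message.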
def parse_directory(dirname, rootlen):
mynode = {}
for f in os.listdir(dirname):
if f.startswith(".") and not f == ".message": continue
if f == "sync_timestamp": continue
fn = os.path.join(dirname, f)
if os.path.isdir(fn):
# Can be a directory itself, or a symbolic link to a directory
if os.path.islink(fn):
# This is a symbolic link
mynode[f] = {
't': 'l',
'd': os.readlink(fn),
}
else:
# This is a subdirectory, recurse into it, unless it happens
# to be on our exclude list.
if not fn[rootlen:] in exclude_roots:
parse_directory(fn, rootlen)
mynode[f] = {
't': 'd',
}
else:
# This a file
stat = os.stat(fn)
mynode[f] = {
't': 'f',
's': stat.st_size,
'd': datetime.fromtimestamp(stat.st_mtime),
}
if f == "README" or f == "CURRENT_MAINTAINER" or f == ".message":
mynode[f]['c'] = read_file(fn)
allnodes[dirname[rootlen:].strip("/")] = mynode
def Usage():
print "Usage: spider_ftp.py <ftp_root> <pickle_file>"
print ""
print "If <pickle_file> starts with http[s]://, the file will be uploaded"
print "to that URL instead of written to the filesystem."
sys.exit(1)
if len(sys.argv) != 3: Usage()
parse_directory(sys.argv[1], len(sys.argv[1]))
if sys.argv[2].startswith("http://") or sys.argv[2].startswith("https://"):
o = urllib2.build_opener(urllib2.HTTPHandler)
r = urllib2.Request(sys.argv[2], data=pickle.dumps(allnodes))
r.add_header('Content-type', 'application/octet-stream')
r.add_header('Host', 'www.postgresql.org')
r.get_method = lambda: 'PUT'
u = o.open(r)
x = u.read()
if x != "NOT CHANGED" and x != "OK":
print "Failed to upload: %s" % x
sys.exit(1)
else:
f = open(sys.argv[2] + ".tmp", "wb")
pickle.dump(allnodes, f)
f.close()
os.rename(sys.argv[2] + ".tmp", sys.argv[2])
#pprint(allnodes)
avg_line_length: 26.290323 | max_line_length: 75 | alphanum_fraction: 0.641309

hexsha: f50acf7b1a4b6e69601329c546719420984c4c70 | size: 2,503 | ext: py | lang: Python
path: modules/basic.py | repo: eieio/pyy | head_hexsha: dface9d5c8914bb74ef1ee4df112269b65e62bec | licenses: ["Apache-2.0"] (same for stars/issues/forks entries)
stars: null | issues: 1 (2020-09-29T22:20:49.000Z to 2020-09-29T22:20:49.000Z) | forks: null
''' basic.py
Basic module
Copyright 2008 Corey Tabaka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from irclib import *
from admin import trusted
from bot import BotCommandHandler
import re, sys
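# Each handler below is registered as a bot command through @BotCommandHandler
# and gated by the @trusted decorator imported from the admin module.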
@BotCommandHandler('modules')
@trusted
def _modules(context):
'''Lists the loaded modules'''
context.reply('Modules loaded: %s' % ', '.join(sorted(context.proc.modules.keys())))
@BotCommandHandler('load')
@trusted('admin')
def _load(context):
'''Usage: load <module name>\nAttempts to load the named module'''
module = (context.args or '').strip()
try:
if module not in context.proc.modules:
context.proc.loadModule(module)
context.reply("Loaded module '%s' successfully" % module)
else:
context.reply("Module '%s' already loaded" % module)
except:
context.reply("Error loading module '%s': %s" % (module, str(sys.exc_value)))
@BotCommandHandler('do')
@trusted
def _do(context):
'''Usage: do [@(nick | channel)] <message>\nDirects the given message at nick or channel as a CTCP action'''
m = re.match('(?:@(#{0,2}[^\s]+)\s+)?(.*)', context.args)
if m:
where, what = m.groups()
if where:
context.proc.action(where, what)
else:
context.proc.action(context.replyto(), what)
else:
context.reply("I do not understand '%s'" % context.args)
@BotCommandHandler('say')
@trusted
def _say(context):
'''Usage: say [@(nick | channel)] <message>\nDirects the given message at nick or channel'''
m = re.match('(?:@(#{0,2}[^\s]+)\s+)?(.*)', context.args)
if m:
where, what = m.groups()
if where:
context.proc.privmsg(where, what)
else:
context.proc.privmsg(context.replyto(), what)
else:
context.reply("I do not understand '%s'" % context.args)
@BotCommandHandler('server')
@trusted('admin')
def _server(context):
'''Usage: server <IRC protocol string>\nSends the given raw protocol string to the server'''
context.proc._socket.sendall(context.args + '\r\n')
avg_line_length: 31.683544 | max_line_length: 110 | alphanum_fraction: 0.674391

hexsha: 6886740f5f89338575a8fca451eac138d0005385 | size: 5,502 | ext: py | lang: Python
path: examples/id_pools_vmac_ranges.py | repo: doziya/hpeOneView | head_hexsha: ef9bee2a0e1529e93bd6e8d84eff07fb8533049d | licenses: ["MIT"] (same for stars/issues/forks entries)
stars: 107 (2015-02-16T12:40:36.000Z to 2022-03-09T05:27:58.000Z) | issues: 148 (2015-03-17T16:09:39.000Z to 2020-02-09T16:28:06.000Z) | forks: 80 (2015-01-03T22:58:53.000Z to 2021-04-16T11:37:03.000Z)
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from hpOneView.exceptions import HPOneViewException
from config_loader import try_load_from_file
config = {
"ip": "",
"credentials": {
"userName": "administrator",
"password": ""
}
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
options = {
"type": "Range",
"startAddress": "E2:13:C5:F0:00:00",
"endAddress": "E2:13:C5:FF:FF:FF",
"rangeCategory": "Custom"
}
options_additional = {
"type": "Range",
"name": None,
"prefix": None,
"enabled": True,
"rangeCategory": "Generated",
"startAddress": "E2:13:C5:F0:00:00",
"endAddress": "E2:13:C5:FF:FF:FF",
"totalCount": 1048575,
"freeIdCount": 1048575,
"allocatedIdCount": 0,
"allocatorUri":
"/rest/id-pools/vmac/ranges/5613a502-9253-45c6-aa78-a83635241cf8/allocator",
"collectorUri":
"/rest/id-pools/vmac/ranges/5613a502-9253-45c6-aa78-a83635241cf8/collector",
"reservedIdCount": 0,
"freeFragmentUri":
"/rest/id-pools/vmac/ranges/5613a502-9253-45c6-aa78-a83635241cf8/free-fragments?start=0&count=-1",
"allocatedFragmentUri":
"/rest/id-pools/vmac/ranges/5613a502-9253-45c6-aa78-a83635241cf8/allocated-fragments?start=0&count=-1",
"uri":
"/rest/id-pools/vmac/ranges/5613a502-9253-45c6-aa78-a83635241cf8",
"category": "id-range-VMAC",
"eTag": None,
"created": "2013-03-20 01:29:10.570",
"modified": "2013-03-20 01:29:10.570"
}
# Create vmac Range for id pools
vmac_range = oneview_client.id_pools_vmac_ranges.create(options)
pprint(vmac_range)
# Get vmac range by uri
vmac_range_byuri = oneview_client.id_pools_vmac_ranges.get(vmac_range['uri'])
print("Got vmac range from '{}' to '{}' by uri:\n '{}'".format(vmac_range_byuri[
'startAddress'], vmac_range_byuri['endAddress'], vmac_range_byuri['uri']))
# Get vmac range by id
vmac_range_byId = oneview_client.id_pools_vmac_ranges.get(vmac_range['uri'])
print("Got vmac range from '{}' to '{}' by uri:\n '{}'".format(vmac_range_byId[
'startAddress'], vmac_range_byId['endAddress'], vmac_range_byId['uri']))
# Enable a vMAC range
information = {
"type": "Range",
"enabled": True
}
vmac_range = oneview_client.id_pools_vmac_ranges.enable(
information, vmac_range['uri'])
print("Successfully enabled vmac range at\n 'uri': {}\n with 'enabled': {}".format(
vmac_range['uri'], vmac_range['enabled']))
# Allocate a set of IDs from vmac range
information = {
"count": 10
}
successfully_allocated_ids = oneview_client.id_pools_vmac_ranges.allocate(
information, vmac_range['uri'])
print("Successfully allocated IDs:")
pprint(successfully_allocated_ids)
# Get all allocated fragments in vmac range
print("Get all allocated fragments in vmac range")
allocated_fragments = oneview_client.id_pools_vmac_ranges.get_allocated_fragments(
vmac_range['uri'])
pprint(allocated_fragments)
# Get all free fragments in vmac range
print("Get all free fragments in vmac range")
allocated_fragments = oneview_client.id_pools_vmac_ranges.get_free_fragments(
vmac_range['uri'])
pprint(allocated_fragments)
# Collect a set of IDs back to vmac range
try:
information = {
"idList": successfully_allocated_ids['idList']
}
successfully_collected_ids = oneview_client.id_pools_vmac_ranges.collect(
information, vmac_range['uri'])
except HPOneViewException as e:
print(e.msg)
# Disable a vmac range
information = {
"type": "Range",
"enabled": False
}
vmac_range = oneview_client.id_pools_vmac_ranges.enable(
information, vmac_range['uri'])
print("Successfully disabled vmac range at\n 'uri': {}\n with 'enabled': {}".format(
vmac_range['uri'], vmac_range['enabled']))
# Delete vmac_range
oneview_client.id_pools_vmac_ranges.delete(vmac_range)
print("Successfully deleted vmac range")
# Create vmac Range for id pools with more options specified
print("Create vMAC range with more options specified for id pools")
vmac_range = oneview_client.id_pools_vmac_ranges.create(options_additional)
pprint(vmac_range)
# Delete vmac_range
oneview_client.id_pools_vmac_ranges.delete(vmac_range)
print("Successfully deleted newly created vMAC range")
avg_line_length: 35.496774 | max_line_length: 111 | alphanum_fraction: 0.729553

hexsha: 7da30d5caed00294b86055736ab3a477c3d2d3d6 | size: 10,641 | ext: py | lang: Python
path: tests/test_schema_evolution.py | repo: spenczar/fastavro | head_hexsha: a6f18252415f23ad1ce06ce019ac2f1d9feef0aa | licenses: ["MIT"] (same for stars/issues/forks entries)
stars: null | issues: null | forks: null
from fastavro import writer as fastavro_writer
from fastavro.read import SchemaResolutionError
import fastavro
import pytest
from io import BytesIO
schema_dict_a = {
"namespace": "example.avro2",
"type": "record",
"name": "evtest",
"fields": [{"name": "a", "type": "int"}],
}
record_a = {"a": 123}
schema_dict_a_b = {
"namespace": "example.avro2",
"type": "record",
"name": "evtest",
"fields": [
{"name": "a", "type": "int"},
{"name": "b", "type": ["null", "int"], "default": None},
],
}
record_a_b = {"a": 234, "b": 345}
schema_dict_a_c = {
"namespace": "example.avro2",
"type": "record",
"name": "evtest",
"fields": [{"name": "a", "type": "int"}, {"name": "c", "type": ["null", "int"]}],
}
def avro_to_bytes_with_schema(avro_schema, avro_dict):
with BytesIO() as bytes_io:
fastavro_writer(bytes_io, avro_schema, [avro_dict])
return bytes_io.getvalue()
def bytes_with_schema_to_avro(avro_read_schema, binary):
with BytesIO(binary) as bytes_io:
reader = fastavro.reader(bytes_io, avro_read_schema)
return next(reader)
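# The two helpers above round-trip a record: it is written with one schema and
# read back with a (possibly different) reader schema, which is what exercises
# fastavro's schema-resolution logic in the tests below.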
def test_evolution_drop_field():
record_bytes_a_b = avro_to_bytes_with_schema(schema_dict_a_b, record_a_b)
record_a = bytes_with_schema_to_avro(schema_dict_a, record_bytes_a_b)
assert "b" not in record_a
def test_evolution_add_field_with_default():
record_bytes_a = avro_to_bytes_with_schema(schema_dict_a, record_a)
record_b = bytes_with_schema_to_avro(schema_dict_a_b, record_bytes_a)
assert "b" in record_b
assert record_b.get("b") is None
def test_evolution_add_field_without_default():
with pytest.raises(SchemaResolutionError):
record_bytes_a = avro_to_bytes_with_schema(schema_dict_a, record_a)
bytes_with_schema_to_avro(schema_dict_a_c, record_bytes_a)
def test_enum_evolution_no_default_failure():
original_schema = {
"type": "enum",
"name": "test",
"symbols": ["FOO", "BAR"],
}
new_schema = {
"type": "enum",
"name": "test",
"symbols": ["BAZ", "BAR"],
}
original_records = ["FOO"]
bio = BytesIO()
fastavro.writer(bio, original_schema, original_records)
bio.seek(0)
with pytest.raises(fastavro.read.SchemaResolutionError):
list(fastavro.reader(bio, new_schema))
def test_enum_evolution_using_default():
original_schema = {
"type": "enum",
"name": "test",
"symbols": ["A", "B"],
}
new_schema = {
"type": "enum",
"name": "test",
"symbols": ["C", "D"],
"default": "C",
}
original_records = ["A"]
bio = BytesIO()
fastavro.writer(bio, original_schema, original_records)
bio.seek(0)
new_records = list(fastavro.reader(bio, new_schema))
assert new_records == ["C"]
def test_schema_matching_with_records_in_arrays():
"""https://github.com/fastavro/fastavro/issues/363"""
original_schema = {
"type": "record",
"name": "DataRecord",
"fields": [
{
"name": "string1",
"type": "string",
},
{
"name": "subrecord",
"type": {
"type": "array",
"items": {
"type": "record",
"name": "SubRecord",
"fields": [
{
"name": "string2",
"type": "string",
}
],
},
},
},
],
}
new_schema = {
"type": "record",
"name": "DataRecord",
"fields": [
{
"name": "string1",
"type": "string",
},
{
"name": "subrecord",
"type": {
"type": "array",
"items": {
"type": "record",
"name": "SubRecord",
"fields": [
{
"name": "string2",
"type": "string",
},
{
"name": "logs",
"default": None,
"type": [
"null",
{
"type": "array",
"items": {
"type": "record",
"name": "LogRecord",
"fields": [
{
"name": "msg",
"type": "string",
"default": "",
}
],
},
},
],
},
],
},
},
},
],
}
record = {
"string1": "test",
"subrecord": [{"string2": "foo"}],
}
binary = avro_to_bytes_with_schema(original_schema, record)
output_using_original_schema = bytes_with_schema_to_avro(original_schema, binary)
assert output_using_original_schema == record
output_using_new_schema = bytes_with_schema_to_avro(new_schema, binary)
assert output_using_new_schema == {
"string1": "test",
"subrecord": [{"string2": "foo", "logs": None}],
}
def test_schema_migrate_record_to_union():
"""https://github.com/fastavro/fastavro/issues/406"""
original_schema = {
"name": "Item",
"type": "record",
"fields": [
{
"name": "category",
"type": {
"type": "record",
"name": "Category",
"fields": [{"name": "name", "type": "string"}],
},
}
],
}
new_schema_record_first = {
"name": "Item",
"type": "record",
"fields": [
{
"name": "category",
"type": [
{
"type": "record",
"name": "Category",
"fields": [{"name": "name", "type": "string"}],
},
"null",
],
}
],
}
new_schema_null_first = {
"name": "Item",
"type": "record",
"fields": [
{
"name": "category",
"type": [
"null",
{
"type": "record",
"name": "Category",
"fields": [{"name": "name", "type": "string"}],
},
],
}
],
}
record = {"category": {"name": "my-category"}}
binary = avro_to_bytes_with_schema(original_schema, record)
output_using_original_schema = bytes_with_schema_to_avro(original_schema, binary)
assert output_using_original_schema == record
output_using_new_schema_record_first = bytes_with_schema_to_avro(
new_schema_record_first, binary
)
assert output_using_new_schema_record_first == record
output_using_new_schema_null_first = bytes_with_schema_to_avro(
new_schema_null_first, binary
)
assert output_using_new_schema_null_first == record
def test_union_of_lists_evolution_with_doc():
"""https://github.com/fastavro/fastavro/issues/486"""
original_schema = {
"name": "test_union_of_lists_evolution_with_doc",
"type": "record",
"fields": [
{
"name": "id",
"type": [
"null",
{
"name": "some_record",
"type": "record",
"fields": [{"name": "field", "type": "string"}],
},
],
}
],
}
new_schema = {
"name": "test_union_of_lists_evolution_with_doc",
"type": "record",
"fields": [
{
"name": "id",
"type": [
"null",
{
"name": "some_record",
"type": "record",
"doc": "some documentation",
"fields": [{"name": "field", "type": "string"}],
},
],
}
],
}
record = {"id": {"field": "foo"}}
binary = avro_to_bytes_with_schema(original_schema, record)
output_using_new_schema = bytes_with_schema_to_avro(new_schema, binary)
assert output_using_new_schema == record
def test_union_of_lists_evolution_with_extra_type():
"""https://github.com/fastavro/fastavro/issues/486"""
original_schema = {
"name": "test_union_of_lists_evolution_with_extra_type",
"type": "record",
"fields": [
{
"name": "id",
"type": [
"null",
{
"name": "some_record",
"type": "record",
"fields": [{"name": "field", "type": "string"}],
},
],
}
],
}
new_schema = {
"name": "test_union_of_lists_evolution_with_extra_type",
"type": "record",
"fields": [
{
"name": "id",
"type": [
"null",
{
"name": "some_record",
"type": "record",
"fields": [{"name": "field", "type": "string"}],
},
"string",
],
}
],
}
record = {"id": {"field": "foo"}}
binary = avro_to_bytes_with_schema(original_schema, record)
output_using_new_schema = bytes_with_schema_to_avro(new_schema, binary)
assert output_using_new_schema == record
avg_line_length: 28.52815 | max_line_length: 85 | alphanum_fraction: 0.428813

hexsha: c963b336d95e318394b319d0bcd9c3f3425079d8 | size: 290 | ext: py | lang: Python
path: files/read_binary.py | repo: janbodnar/Python-Course | head_hexsha: 51705ab5a2adef52bcdb99a800e94c0d67144a38 | licenses: ["BSD-2-Clause"] (same for stars/issues/forks entries)
stars: 13 (2017-08-22T12:26:07.000Z to 2021-07-29T16:13:50.000Z) | issues: 1 (2021-02-08T10:24:33.000Z to 2021-02-08T10:24:33.000Z) | forks: 17 (2018-08-13T11:10:33.000Z to 2021-07-29T16:14:02.000Z)
#!/usr/bin/python
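# Prints a hex dump of web.png, 20 byte values per line.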
with open('web.png', 'rb') as f:
hexdata = f.read().hex()
n = 2
data = [hexdata[i:i+n] for i in range(0, len(hexdata), n)]
i = 0
for e in data:
print(e, end=' ')
i += 1
if i % 20 == 0:
print()
print()
avg_line_length: 14.5 | max_line_length: 62 | alphanum_fraction: 0.427586

hexsha: 751695f991698f8fdd4543246c38e13a80c36f7f | size: 14,877 | ext: py | lang: Python
path: PaddleCV/PaddleGAN/trainer/CycleGAN.py | repo: FrancisLiang/models-1 | head_hexsha: e14d5bc1ab36d0dd11977f27cff54605bf99c945 | licenses: ["Apache-2.0"] (same for stars/issues/forks entries)
stars: 2 (2021-09-13T06:48:23.000Z to 2021-09-13T06:48:28.000Z) | issues: null | forks: 1 (2019-08-05T11:32:13.000Z to 2019-08-05T11:32:13.000Z)
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from network.CycleGAN_network import CycleGAN_model
from util import utility
import paddle.fluid as fluid
import sys
import time
lambda_A = 10.0
lambda_B = 10.0
lambda_identity = 0.5
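# Loss weights: lambda_A / lambda_B scale the cycle-consistency terms and,
# together with lambda_identity, the identity-mapping losses built in GTrainer.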
class GTrainer():
def __init__(self, input_A, input_B, cfg, step_per_epoch):
self.program = fluid.default_main_program().clone()
with fluid.program_guard(self.program):
model = CycleGAN_model()
self.fake_B = model.network_G(input_A, name="GA", cfg=cfg)
self.fake_B.persistable = True
self.fake_A = model.network_G(input_B, name="GB", cfg=cfg)
self.fake_A.persistable = True
self.cyc_A = model.network_G(self.fake_B, name="GB", cfg=cfg)
self.cyc_B = model.network_G(self.fake_A, name="GA", cfg=cfg)
self.infer_program = self.program.clone()
# Cycle Loss
diff_A = fluid.layers.abs(
fluid.layers.elementwise_sub(
x=input_A, y=self.cyc_A))
diff_B = fluid.layers.abs(
fluid.layers.elementwise_sub(
x=input_B, y=self.cyc_B))
self.cyc_A_loss = fluid.layers.reduce_mean(diff_A) * lambda_A
self.cyc_A_loss.persistable = True
self.cyc_B_loss = fluid.layers.reduce_mean(diff_B) * lambda_B
self.cyc_B_loss.persistable = True
self.cyc_loss = self.cyc_A_loss + self.cyc_B_loss
# GAN Loss D_A(G_A(A))
self.fake_rec_A = model.network_D(self.fake_B, name="DA", cfg=cfg)
self.G_A = fluid.layers.reduce_mean(
fluid.layers.square(self.fake_rec_A - 1))
self.G_A.persistable = True
# GAN Loss D_B(G_B(B))
self.fake_rec_B = model.network_D(self.fake_A, name="DB", cfg=cfg)
self.G_B = fluid.layers.reduce_mean(
fluid.layers.square(self.fake_rec_B - 1))
self.G_B.persistable = True
self.G = self.G_A + self.G_B
# Identity Loss G_A
self.idt_A = model.network_G(input_B, name="GA", cfg=cfg)
self.idt_loss_A = fluid.layers.reduce_mean(
fluid.layers.abs(
fluid.layers.elementwise_sub(
x=input_B, y=self.idt_A))) * lambda_B * lambda_identity
self.idt_loss_A.persistable = True
# Identity Loss G_B
self.idt_B = model.network_G(input_A, name="GB", cfg=cfg)
self.idt_loss_B = fluid.layers.reduce_mean(
fluid.layers.abs(
fluid.layers.elementwise_sub(
x=input_A, y=self.idt_B))) * lambda_A * lambda_identity
self.idt_loss_B.persistable = True
self.idt_loss = fluid.layers.elementwise_add(self.idt_loss_A,
self.idt_loss_B)
self.g_loss = self.cyc_loss + self.G + self.idt_loss
vars = []
for var in self.program.list_vars():
if fluid.io.is_parameter(var) and (var.name.startswith("GA") or
var.name.startswith("GB")):
vars.append(var.name)
self.param = vars
lr = cfg.learning_rate
if cfg.epoch <= 100:
optimizer = fluid.optimizer.Adam(
learning_rate=lr, beta1=0.5, beta2=0.999, name="net_G")
else:
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=[99 * step_per_epoch] + [
x * step_per_epoch
for x in range(100, cfg.epoch - 1)
],
values=[lr] + [
lr * (1.0 - (x - 99.0) / 101.0)
for x in range(100, cfg.epoch)
]),
beta1=0.5,
beta2=0.999,
name="net_G")
optimizer.minimize(self.g_loss, parameter_list=vars)
class DATrainer():
def __init__(self, input_B, fake_pool_B, cfg, step_per_epoch):
self.program = fluid.default_main_program().clone()
with fluid.program_guard(self.program):
model = CycleGAN_model()
self.rec_B = model.network_D(input_B, name="DA", cfg=cfg)
self.fake_pool_rec_B = model.network_D(
fake_pool_B, name="DA", cfg=cfg)
self.d_loss_A = (fluid.layers.square(self.fake_pool_rec_B) +
fluid.layers.square(self.rec_B - 1)) / 2.0
self.d_loss_A = fluid.layers.reduce_mean(self.d_loss_A)
self.d_loss_A.persistable = True
vars = []
for var in self.program.list_vars():
if fluid.io.is_parameter(var) and var.name.startswith("DA"):
vars.append(var.name)
self.param = vars
lr = cfg.learning_rate
if cfg.epoch <= 100:
optimizer = fluid.optimizer.Adam(
learning_rate=lr, beta1=0.5, beta2=0.999, name="net_DA")
else:
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=[99 * step_per_epoch] + [
x * step_per_epoch
for x in range(100, cfg.epoch - 1)
],
values=[lr] + [
lr * (1.0 - (x - 99.0) / 101.0)
for x in range(100, cfg.epoch)
]),
beta1=0.5,
beta2=0.999,
name="net_DA")
optimizer.minimize(self.d_loss_A, parameter_list=vars)
class DBTrainer():
def __init__(self, input_A, fake_pool_A, cfg, step_per_epoch):
self.program = fluid.default_main_program().clone()
with fluid.program_guard(self.program):
model = CycleGAN_model()
self.rec_A = model.network_D(input_A, name="DB", cfg=cfg)
self.fake_pool_rec_A = model.network_D(
fake_pool_A, name="DB", cfg=cfg)
self.d_loss_B = (fluid.layers.square(self.fake_pool_rec_A) +
fluid.layers.square(self.rec_A - 1)) / 2.0
self.d_loss_B = fluid.layers.reduce_mean(self.d_loss_B)
self.d_loss_B.persistable = True
vars = []
for var in self.program.list_vars():
if fluid.io.is_parameter(var) and var.name.startswith("DB"):
vars.append(var.name)
self.param = vars
lr = 0.0002
if cfg.epoch <= 100:
optimizer = fluid.optimizer.Adam(
learning_rate=lr, beta1=0.5, beta2=0.999, name="net_DA")
else:
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=[99 * step_per_epoch] + [
x * step_per_epoch
for x in range(100, cfg.epoch - 1)
],
values=[lr] + [
lr * (1.0 - (x - 99.0) / 101.0)
for x in range(100, cfg.epoch)
]),
beta1=0.5,
beta2=0.999,
name="net_DB")
optimizer.minimize(self.d_loss_B, parameter_list=vars)
class CycleGAN(object):
def add_special_args(self, parser):
parser.add_argument(
'--net_G',
type=str,
default="resnet_9block",
help="Choose the CycleGAN generator's network, choose in [resnet_9block|resnet_6block|unet_128|unet_256]"
)
parser.add_argument(
'--net_D',
type=str,
default="basic",
help="Choose the CycleGAN discriminator's network, choose in [basic|nlayers|pixel]"
)
parser.add_argument(
'--d_nlayers',
type=int,
default=3,
help="only used when CycleGAN discriminator is nlayers")
return parser
def __init__(self,
cfg=None,
A_reader=None,
B_reader=None,
A_test_reader=None,
B_test_reader=None,
batch_num=1):
self.cfg = cfg
self.A_reader = A_reader
self.B_reader = B_reader
self.A_test_reader = A_test_reader
self.B_test_reader = B_test_reader
self.batch_num = batch_num
def build_model(self):
data_shape = [-1, 3, self.cfg.crop_size, self.cfg.crop_size]
input_A = fluid.layers.data(
name='input_A', shape=data_shape, dtype='float32')
input_B = fluid.layers.data(
name='input_B', shape=data_shape, dtype='float32')
fake_pool_A = fluid.layers.data(
name='fake_pool_A', shape=data_shape, dtype='float32')
fake_pool_B = fluid.layers.data(
name='fake_pool_B', shape=data_shape, dtype='float32')
gen_trainer = GTrainer(input_A, input_B, self.cfg, self.batch_num)
d_A_trainer = DATrainer(input_B, fake_pool_B, self.cfg, self.batch_num)
d_B_trainer = DBTrainer(input_A, fake_pool_A, self.cfg, self.batch_num)
# prepare environment
place = fluid.CUDAPlace(0) if self.cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
A_pool = utility.ImagePool()
B_pool = utility.ImagePool()
if self.cfg.init_model:
utility.init_checkpoints(self.cfg, exe, gen_trainer, "net_G")
utility.init_checkpoints(self.cfg, exe, d_A_trainer, "net_DA")
utility.init_checkpoints(self.cfg, exe, d_B_trainer, "net_DB")
### memory optim
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
build_strategy.memory_optimize = False
gen_trainer_program = fluid.CompiledProgram(
gen_trainer.program).with_data_parallel(
loss_name=gen_trainer.g_loss.name,
build_strategy=build_strategy)
d_A_trainer_program = fluid.CompiledProgram(
d_A_trainer.program).with_data_parallel(
loss_name=d_A_trainer.d_loss_A.name,
build_strategy=build_strategy)
d_B_trainer_program = fluid.CompiledProgram(
d_B_trainer.program).with_data_parallel(
loss_name=d_B_trainer.d_loss_B.name,
build_strategy=build_strategy)
losses = [[], []]
t_time = 0
for epoch_id in range(self.cfg.epoch):
batch_id = 0
for i in range(self.batch_num):
data_A = next(self.A_reader())
data_B = next(self.B_reader())
tensor_A = fluid.LoDTensor()
tensor_B = fluid.LoDTensor()
tensor_A.set(data_A, place)
tensor_B.set(data_B, place)
s_time = time.time()
# optimize the g_A network
g_A_loss, g_A_cyc_loss, g_A_idt_loss, g_B_loss, g_B_cyc_loss,\
g_B_idt_loss, fake_A_tmp, fake_B_tmp = exe.run(
gen_trainer_program,
fetch_list=[
gen_trainer.G_A, gen_trainer.cyc_A_loss,
gen_trainer.idt_loss_A, gen_trainer.G_B,
gen_trainer.cyc_B_loss, gen_trainer.idt_loss_B,
gen_trainer.fake_A, gen_trainer.fake_B
],
feed={"input_A": tensor_A,
"input_B": tensor_B})
fake_pool_B = B_pool.pool_image(fake_B_tmp)
fake_pool_A = A_pool.pool_image(fake_A_tmp)
# optimize the d_A network
d_A_loss = exe.run(
d_A_trainer_program,
fetch_list=[d_A_trainer.d_loss_A],
feed={"input_B": tensor_B,
"fake_pool_B": fake_pool_B})[0]
# optimize the d_B network
d_B_loss = exe.run(
d_B_trainer_program,
fetch_list=[d_B_trainer.d_loss_B],
feed={"input_A": tensor_A,
"fake_pool_A": fake_pool_A})[0]
batch_time = time.time() - s_time
t_time += batch_time
if batch_id % self.cfg.print_freq == 0:
print("epoch{}: batch{}: \n\
d_A_loss: {}; g_A_loss: {}; g_A_cyc_loss: {}; g_A_idt_loss: {}; \n\
d_B_loss: {}; g_B_loss: {}; g_B_cyc_loss: {}; g_B_idt_loss: {}; \n\
Batch_time_cost: {:.2f}".format(
epoch_id, batch_id, d_A_loss[0], g_A_loss[0],
g_A_cyc_loss[0], g_A_idt_loss[0], d_B_loss[0], g_B_loss[
0], g_B_cyc_loss[0], g_B_idt_loss[0], batch_time))
losses[0].append(g_A_loss[0])
losses[1].append(d_A_loss[0])
sys.stdout.flush()
batch_id += 1
if self.cfg.run_test:
test_program = gen_trainer.infer_program
utility.save_test_image(epoch_id, self.cfg, exe, place,
test_program, gen_trainer,
self.A_test_reader, self.B_test_reader)
if self.cfg.save_checkpoints:
utility.checkpoints(epoch_id, self.cfg, exe, gen_trainer,
"net_G")
utility.checkpoints(epoch_id, self.cfg, exe, d_A_trainer,
"net_DA")
utility.checkpoints(epoch_id, self.cfg, exe, d_B_trainer,
"net_DB")
avg_line_length: 43.121739 | max_line_length: 117 | alphanum_fraction: 0.53734