Dataset schema (29 columns; ⌀ marks a nullable column):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 2 to 1.05M |
| ext | string | 9 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 193 |
| max_stars_repo_name | string | lengths 6 to 109 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | |
| max_stars_count | int64 | 1 to 36.6k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, ⌀ |
| max_issues_repo_path | string | lengths 4 to 193 |
| max_issues_repo_name | string | lengths 6 to 109 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | |
| max_issues_count | int64 | 1 to 29.8k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, ⌀ |
| max_forks_repo_path | string | lengths 4 to 193 |
| max_forks_repo_name | string | lengths 6 to 109 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | |
| max_forks_count | int64 | 1 to 11.2k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, ⌀ |
| content | string | lengths 2 to 1.05M |
| avg_line_length | float64 | 1 to 404k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

The data rows below keep this column order: hexsha | size | ext | lang | max_stars_* | max_issues_* | max_forks_* | content | avg_line_length | max_line_length | alphanum_fraction.
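A minimal sketch of loading rows with this schema, assuming they are stored in a parquet shard (the file name data.parquet is an assumption):

    import pandas as pd

    df = pd.read_parquet("data.parquet")

    # Columns marked ⌀ above may be null, e.g. files whose repository was never starred.
    starred = df[df["max_stars_count"].notna()]

    # hexsha is the 40-character git blob hash of each file.
    assert df["hexsha"].str.len().eq(40).all()
    print(starred[["max_stars_repo_name", "max_stars_count", "size"]].head())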
73dd760eb7eeee35ecd7836a780605903f466cd6 | 42 | py | Python | astwro/exttools/__init__.py | majkelx/astwro | 4a9bbe3e4757c4076ad7c0d90cf08e38dab4e794 | [
"MIT"
] | 6 | 2017-06-15T20:34:51.000Z | 2020-04-15T14:21:43.000Z | astwro/exttools/__init__.py | majkelx/astwro | 4a9bbe3e4757c4076ad7c0d90cf08e38dab4e794 | [
"MIT"
] | 18 | 2017-08-15T20:53:55.000Z | 2020-10-05T23:40:34.000Z | astwro/exttools/__init__.py | majkelx/astwro | 4a9bbe3e4757c4076ad7c0d90cf08e38dab4e794 | [
"MIT"
] | 2 | 2017-11-06T15:33:53.000Z | 2020-10-02T21:06:05.000Z | # coding=utf-8
from .Runner import Runner
| 14 | 26 | 0.761905 |
73ddbbe445acccbb58d676dedfdc5ce96db4c141 | 708 | py | Python | setup.py | Wykleph/Swarm | c0b10c673a555213fdf46dd0648a2c729dd33494 | [
"MIT"
] | 7 | 2018-10-04T01:15:03.000Z | 2018-11-03T01:51:28.000Z | setup.py | Wykleph/Swarm | c0b10c673a555213fdf46dd0648a2c729dd33494 | [
"MIT"
] | 2 | 2018-10-04T05:47:58.000Z | 2018-10-04T15:48:49.000Z | setup.py | Wykleph/Swarm | c0b10c673a555213fdf46dd0648a2c729dd33494 | [
"MIT"
] | 1 | 2018-10-04T03:57:46.000Z | 2018-10-04T03:57:46.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="Swarm",
version="0.0.3",
author="py-am-i",
author_email="duckpuncherirl@gmail.com",
    description="Swarm is a strategy rogue-like space simulator game written with `pygame/python3`.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Wykleph/Swarm",
packages=setuptools.find_packages(),
install_requires=[
'pygame'
],
classifiers=[
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 28.32 | 101 | 0.65678 |
73ddbcbad82a5d4cb55abfeca11847ac41c7e831 | 8,280 | py | Python | tests/integration/test_submission.py | ohsu-comp-bio/gen3-etl-lite | c91093e6c7b269db5baca41619a93356a68c4e97 | [
"MIT"
] | null | null | null | tests/integration/test_submission.py | ohsu-comp-bio/gen3-etl-lite | c91093e6c7b269db5baca41619a93356a68c4e97 | [
"MIT"
] | 1 | 2021-03-25T22:26:08.000Z | 2021-03-25T22:26:08.000Z | tests/integration/test_submission.py | ohsu-comp-bio/gen3-etl-lite | c91093e6c7b269db5baca41619a93356a68c4e97 | [
"MIT"
] | null | null | null | import uuid
import json
import pytest
import time
import requests
SLEEP_TIME=15
try:
from types import SimpleNamespace as SN
except ImportError as error:
class SN (object):
def __init__ (self, **kwargs):
self.__dict__.update(kwargs)
def __repr__ (self):
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__ (self, other):
return self.__dict__ == other.__dict__
@pytest.fixture(scope="session")
def program_name():
return 'smmart'
@pytest.fixture(scope="session")
def project_name():
return 'atac'
@pytest.fixture(scope="session")
def case_name():
return 'case-1.1'
@pytest.fixture(scope="session")
def sample_name():
return 'sample-1'
@pytest.fixture(scope="session")
def aliquot_name():
return 'aliquot-1'
@pytest.fixture(scope="session")
def submitted_methylation_name():
return 'submitted_methylation'
sample_name = 'sample-1'
aliquot_name = 'aliquot-1'
submitted_methylation_name = 'submitted_methylation-1'
def test_submission(submission_client):
assert submission_client, 'should have a configured submission_client'
def test_list_projects(submission_client):
q = '{ project { id, type, code } }'
graph = submission_client.query(q)
assert graph, 'should have a response to graphQL query'
assert graph['data'], 'should have a data node {}'.format(graph)
assert graph['data']['project'], 'should have a project(s) node {}'.format(graph)
projects = list(map(lambda x: SN(**x), graph['data']['project']))
assert len(projects), 'should have at least one project'
assert projects[0].type == 'project', 'first element should be a project'
print(projects)
def create_program(submission_client, program_name):
program = SN(name=program_name, dbgap_accession_number=program_name, type='program').__dict__
response = json.loads(submission_client.create_program(program))
assert 'id' in response, 'could not create program {}'.format(response['message'])
return response
def create_project(submission_client, program_name, project_name):
project = SN(name=project_name,
state="open", availability_type="Open",
dbgap_accession_number=project_name, code=project_name, type='project').__dict__
response = json.loads(submission_client.create_project(program_name, project))
assert response['code']==200 , 'could not create project {}'.format(response['message'])
return response
def create_node(submission_client, program_name, project_code, node):
response = json.loads(submission_client.submit_node(program_name, project_code, node))
assert response['code']==200 , 'could not create {} {}'.format(node['type'], response['message'])
print('created {} {}'.format(response['entities'][0]['type'], response['entities'][0]['id']))
return response
def create_experiment(submission_client, program_name, project_code, submitter_id):
experiment = {
'*projects': {'code': project_code},
'*submitter_id': submitter_id,
'type': 'experiment'
}
return create_node(submission_client, program_name, project_code, experiment)
def create_case(submission_client, program_name, project_code, submitter_id):
case = {
'*experiments': {'submitter_id': project_code},
'*submitter_id': submitter_id,
'type': 'case'
}
return create_node(submission_client, program_name, project_code, case)
def create_sample(submission_client, program_name, project_code, case_name, submitter_id):
sample = {
'*cases': {'submitter_id': case_name},
'*submitter_id': submitter_id,
'type': 'sample'
}
return create_node(submission_client, program_name, project_code, sample)
def create_aliquot(submission_client, program_name, project_code, sample_name, submitter_id):
aliquot = {
'*samples': {'submitter_id': sample_name},
'*submitter_id': submitter_id,
'type': 'aliquot'
}
return create_node(submission_client, program_name, project_code, aliquot)
def create_submitted_methylation(submission_client, program_name, project_code, aliquot_name, submitter_id):
submitted_methylation = {
"*data_category": 'Methylation Data',
"*data_format": 'IDAT',
"type": "submitted_methylation",
"*submitter_id": submitter_id,
"*data_type": 'Methylation Intensity Values',
"*md5sum": '12345678901234567890123456789012',
"*file_size": 1000,
"aliquots": {
"submitter_id": aliquot_name
},
'urls': 'foo',
"*file_name": 'my-file-name',
}
return create_node(submission_client, program_name, project_code, submitted_methylation)
def delete_all(submission_client, program_name, project_name):
types = ['submitted_methylation', 'aliquot', 'sample', 'case', 'experiment']
for t in types:
response = submission_client.export_node_all_type("smmart", "atac", t)
if 'data' not in response:
print('no data?', response)
else:
for n in response['data']:
delete_response = json.loads(submission_client.delete_node(program_name, project_name, n['node_id']))
assert delete_response['code'] == 200, delete_response
print('deleted {} {}'.format(t, n['node_id']))
submission_client.delete_project(program_name, project_name)
def create_all(submission_client, program_name, project_name, case_name, sample_name, aliquot_name, submitted_methylation_name):
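    # Builds the full Gen3 graph for this test top-down (summary of the calls below):
    #   program -> project -> experiment -> case -> sample -> aliquot -> submitted_methylation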
program = create_program(submission_client, program_name)
print('created program {}'.format(program_name))
project = create_project(submission_client, program_name, project_name)
print('created project {}'.format(project_name))
experiment = create_experiment(submission_client, program_name, project_name, submitter_id=project_name)
case = create_case(submission_client, program_name, project_name, submitter_id=case_name)
sample = create_sample(submission_client, program_name, project_name, case_name, submitter_id=sample_name)
aliquot = create_aliquot(submission_client, program_name, project_name, sample_name, submitter_id=aliquot_name)
submitted_methylation = create_submitted_methylation(submission_client, program_name, project_name, aliquot_name, submitter_id=submitted_methylation_name)
def test_delete_all(submission_client, program_name, project_name, elastic_host):
delete_all(submission_client, program_name, project_name)
print('waiting {} secs to check replication'.format(SLEEP_TIME))
time.sleep(SLEEP_TIME)
url = '{}/submitted_methylation/_search'.format(elastic_host)
response = requests.get(url)
assert response.status_code in [200, 404] , '{} should return 200 or 404 status'.format(url)
# assert response.json()['hits']['total'] == 0, 'should have 0 record {}'.format(url)
url = '{}/aliquot/_search'.format(elastic_host)
response = requests.get(url)
assert response.status_code in [200, 404] , '{} should return 200 or 404 status'.format(url)
# assert response.json()['hits']['total'] == 0, 'should have 0 record {}'.format(url)
print('delete_all OK')
def test_create_program_project(submission_client, program_name, project_name, case_name, sample_name, aliquot_name, submitted_methylation_name, elastic_host):
create_all(submission_client, program_name, project_name, case_name, sample_name, aliquot_name, submitted_methylation_name)
print('waiting {} secs to check replication'.format(SLEEP_TIME))
time.sleep(SLEEP_TIME)
url = '{}/submitted_methylation/_search'.format(elastic_host)
response = requests.get(url)
assert response.status_code == 200, '{} should return 200 status'.format(url)
assert response.json()['hits']['total'] == 1, 'should have 1 record {}'.format(url)
url = '{}/aliquot/_search'.format(elastic_host)
response = requests.get(url)
assert response.status_code == 200, '{} should return 200 status'.format(url)
assert response.json()['hits']['total'] == 1, 'should have 1 record {}'.format(url)
print('replication OK')
| 40.788177 | 159 | 0.7093 |
73ddc3664c541f75e13cc636fe83f0a3c8181948 | 18,617 | py | Python | dali/python/nvidia/dali/plugin/paddle.py | truthiswill/DALI | 1c96cb62018138585b616888d4616646135cedad | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-12-25T01:36:32.000Z | 2019-12-25T01:36:32.000Z | dali/python/nvidia/dali/plugin/paddle.py | CZZLEGEND/DALI | efd1f39b32b893c320ad580e7e84557df8f73983 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | dali/python/nvidia/dali/plugin/paddle.py | CZZLEGEND/DALI | efd1f39b32b893c320ad580e7e84557df8f73983 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import logging
import math
import numpy as np
from nvidia.dali import types
from nvidia.dali.backend import TensorListCPU, TensorGPU, TensorListGPU
from paddle import fluid
dtype_map = {
"=?": fluid.core.VarDesc.VarType.BOOL,
"=e": fluid.core.VarDesc.VarType.FP16,
"=f": fluid.core.VarDesc.VarType.FP32,
"=d": fluid.core.VarDesc.VarType.FP64,
"=B": fluid.core.VarDesc.VarType.UINT8,
"=b": fluid.core.VarDesc.VarType.INT8,
"=h": fluid.core.VarDesc.VarType.INT16,
"=i": fluid.core.VarDesc.VarType.INT32,
"=q": fluid.core.VarDesc.VarType.INT64,
"=l": fluid.core.VarDesc.VarType.INT64
}
def to_paddle_type(tensor):
r"""
Get paddle dtype for given tensor or tensor list
Args:
tensor: tensor or tensor list
Returns: fluid.core.VarDesc.VarType
"""
if isinstance(tensor, (TensorListCPU, TensorListGPU)):
tensor = tensor.at(0)
dtype = tensor.dtype
if callable(dtype):
dtype = dtype()
else:
dtype = '=' + dtype.char
return dtype_map[dtype]
def feed_ndarray(dali_tensor, ptr):
"""
Copy contents of DALI tensor to Paddle's Tensor.
Parameters
----------
`dali_tensor` : dali.backend.TensorCPU or dali.backend.TensorGPU
Tensor from which to copy
`ptr` : LoDTensor data pointer
Destination of the copy
"""
c_type_pointer = ctypes.c_void_p(ptr)
dali_tensor.copy_to_external(c_type_pointer)
return ptr
def recursive_length(tensor, lod_level):
def _recurse(data, result, level):
if level > 0:
if isinstance(data, (TensorListCPU, TensorListGPU)):
# handle tensor list
length = len(data)
result[0].append(length)
for i in range(length):
_recurse(data.at(i), result[1:], level - 1)
elif hasattr(data, 'shape'):
# handle dense GPU tensors and numpy.ndarray
shape = data.shape
if callable(shape):
shape = shape()
length = shape[0]
result[0].append(length)
for i in range(length):
_recurse(shape[1:], result[1:], level - 1)
else:
# handle shape
length = data[0]
result[0].append(length)
for i in range(length):
_recurse(data[1:], result[1:], level - 1)
seq_len = [[] for _ in range(lod_level)]
_recurse(tensor, seq_len, lod_level)
return seq_len
def lod_tensor_clip(lod_tensor, size):
output = fluid.core.LoDTensor()
ndarray = np.array(lod_tensor)
seq_len = lod_tensor.recursive_sequence_lengths()
if not seq_len:
output.set(ndarray[0:size], fluid.CPUPlace())
else:
last_len = size
out_seq_len = []
for lengths in seq_len:
lengths = lengths[0:last_len]
out_seq_len.append(lengths)
last_len = sum(lengths)
output.set(ndarray[0:sum(out_seq_len[-1])], fluid.CPUPlace())
output.set_recursive_sequence_lengths(out_seq_len)
return output
class DALIGenericIterator(object):
"""
General DALI iterator for Paddle. It can return any number of
outputs from the DALI pipeline in the form of Paddle's Tensors.
Please keep in mind that Tensors returned by the iterator are
still owned by DALI. They are valid till the next iterator call.
If the content needs to be preserved please copy it to another tensor.
Parameters
----------
pipelines : list of nvidia.dali.pipeline.Pipeline
List of pipelines to use
output_map : list of str or pair of type (str, int)
The strings maps consecutive outputs of DALI pipelines to
user specified name. Outputs will be returned from iterator
as dictionary of those names. Each name should be distinct.
Item can also be a pair of (str, int), where the int value
specifies the LoD level of the resulting LoDTensor.
size : int
Number of samples in the epoch (Usually the size of the dataset).
auto_reset : bool, optional, default = False
Whether the iterator resets itself for the next epoch
or it requires reset() to be called separately.
fill_last_batch : bool, optional, default = True
Whether to return a fraction of a full batch of data
such that the total entries returned by the
iterator == 'size'. Setting this flag to False will
cause the iterator to return the first integer multiple
of self._num_gpus * self.batch_size which exceeds 'size'.
dynamic_shape: bool, optional, default = False
Whether the shape of the output of the DALI pipeline can
change during execution. If True, the LoDTensor will be
resized accordingly if the shape of DALI returned tensors
changes during execution.
If False, the iterator will fail in case of change.
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In the conjunction with `fill_last_batch` it tells
if the iterator returning last batch with data only partially filled with
data from the current epoch is dropping padding samples or samples from
the next epoch. If set to False next epoch will end sooner as data from
it was consumed but dropped. If set to True next epoch would be the
    same length as the first one. For this to happen, the option `pad_last_batch`
    in the reader needs to be set to `True` as well.
Example
-------
With the data set [1,2,3,4,5,6,7] and the batch size 2:
fill_last_batch = False, last_batch_padded = True -> last batch = [7], next iteration will return [1, 2]
fill_last_batch = False, last_batch_padded = False -> last batch = [7], next iteration will return [2, 3]
fill_last_batch = True, last_batch_padded = True -> last batch = [7, 7], next iteration will return [1, 2]
fill_last_batch = True, last_batch_padded = False -> last batch = [7, 1], next iteration will return [2, 3]
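    Usage sketch (illustration only; assumes `pipe` is an nvidia.dali Pipeline
    whose two outputs are a dense image batch and a LoD-level-1 label sequence)::

        train_iter = DALIGenericIterator(
            [pipe], ["data", ("label", 1)], size=50000, auto_reset=True)
        for batches in train_iter:
            data = batches[0]["data"]    # one dict of LoDTensors per GPU
            label = batches[0]["label"]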
"""
def __init__(self,
pipelines,
output_map,
size,
auto_reset=False,
fill_last_batch=True,
dynamic_shape=False,
last_batch_padded=False):
if not isinstance(pipelines, list):
pipelines = [pipelines]
self._num_gpus = len(pipelines)
assert pipelines is not None, \
"Number of provided pipelines has to be at least 1"
self.batch_size = pipelines[0].batch_size
self._size = int(size)
self._auto_reset = auto_reset
self._dynamic_shape = dynamic_shape
self._fill_last_batch = fill_last_batch
self._last_batch_padded = last_batch_padded
self._pipes = pipelines
# Build all pipelines
for p in self._pipes:
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
p.build()
# Use double-buffering of data batches
self._data_batches = [None for i in range(self._num_gpus)]
self._counter = 0
normalized_map = {}
for v in output_map:
if isinstance(v, str):
normalized_map[v] = 0
else:
normalized_map[v[0]] = v[1]
self.normalized_map = normalized_map
output_map = [isinstance(v, str) and v or v[0] for v in output_map]
assert len(set(output_map)) == len(output_map), \
"output_map names should be distinct"
self.output_map = output_map
# We need data about the batches (like shape information),
# so we need to run a single batch as part of setup to get that info
for p in self._pipes:
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
p.schedule_run()
self._first_batch = None
self._first_batch = self.next()
def __next__(self):
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
if self._counter >= self._size:
if self._auto_reset:
self.reset()
raise StopIteration
# Gather outputs
outputs = []
for p in self._pipes:
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
outputs.append(p.share_outputs())
for i in range(self._num_gpus):
dev_id = self._pipes[i].device_id
# Initialize dict for all output categories
category_outputs = dict()
# Segregate outputs into categories
for j, out in enumerate(outputs[i]):
category_outputs[self.output_map[j]] = out
pd_gpu_place = fluid.CUDAPlace(dev_id)
pd_cpu_place = fluid.CPUPlace()
category_pd_type = dict()
category_place = dict()
category_tensors = dict()
category_shapes = dict()
category_lengths = dict()
for cat, out in category_outputs.items():
lod = self.normalized_map[cat]
assert out.is_dense_tensor() or lod > 0, \
"non-dense tensor lists must have LoD > 0"
if lod > 0:
# +1 for batch dim
seq_len = recursive_length(out, lod + 1)[1:]
shape = out.at(0).shape
if callable(shape):
shape = shape()
shape = [sum(seq_len[-1])] + list(shape[lod:])
category_shapes[cat] = shape
category_lengths[cat] = seq_len
else:
out = out.as_tensor()
category_shapes[cat] = out.shape()
category_lengths[cat] = []
category_tensors[cat] = out
category_pd_type[cat] = to_paddle_type(out)
if isinstance(out, (TensorGPU, TensorListGPU)):
category_place[cat] = pd_gpu_place
else:
category_place[cat] = pd_cpu_place
if self._data_batches[i] is None:
pd_tensors = {}
for cat, lod in self.normalized_map.items():
lod_tensor = fluid.core.LoDTensor()
lod_tensor._set_dims(category_shapes[cat])
pd_tensors[cat] = lod_tensor
self._data_batches[i] = pd_tensors
else:
pd_tensors = self._data_batches[i]
# Copy data from DALI Tensors to LoDTensors
for cat, tensor in category_tensors.items():
if hasattr(tensor, 'shape'): # could be tensor list
assert self._dynamic_shape or \
tensor.shape() == pd_tensors[cat].shape(), \
("Shapes do not match: DALI tensor has size {0}, "
"but LoDTensor has size {1}".format(
tensor.shape(), pd_tensors[cat].shape()))
lod_tensor = pd_tensors[cat]
lod_tensor._set_dims(category_shapes[cat])
seq_len = category_lengths[cat]
lod_tensor.set_recursive_sequence_lengths(seq_len)
ptr = lod_tensor._mutable_data(category_place[cat],
category_pd_type[cat])
feed_ndarray(tensor, ptr)
for p in self._pipes:
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
p.release_outputs()
p.schedule_run()
self._counter += self._num_gpus * self.batch_size
if (not self._fill_last_batch) and (self._counter > self._size):
# First calculate how much data is required to
# return exactly self._size entries.
diff = self._num_gpus * self.batch_size - (self._counter
- self._size)
# Figure out how many GPUs to grab from.
num_gpus_to_grab = int(math.ceil(diff / self.batch_size))
# Figure out how many results to grab from the last GPU
# (as a fractional GPU batch may be required to bring us
# right up to self._size).
mod_diff = diff % self.batch_size
data_from_last_gpu = mod_diff if mod_diff else self.batch_size
# Grab the relevant data.
# 1) Grab everything from the relevant GPUs.
# 2) Grab the right data from the last GPU.
# 3) Append data together correctly and return.
output = self._data_batches[0:num_gpus_to_grab]
output[-1] = output[-1].copy()
for cat in self.output_map:
lod_tensor = output[-1][cat]
output[-1][cat] = lod_tensor_clip(lod_tensor,
data_from_last_gpu)
return output
return self._data_batches
def next(self):
"""
Returns the next batch of data.
"""
return self.__next__()
def __iter__(self):
return self
def reset(self):
"""
Resets the iterator after the full epoch.
DALI iterators do not support resetting before the end of the epoch
and will ignore such request.
"""
if self._counter >= self._size:
if self._fill_last_batch and not self._last_batch_padded:
self._counter = self._counter % self._size
else:
self._counter = 0
for p in self._pipes:
p.reset()
if p.empty():
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
p.schedule_run()
else:
logging.warning("DALI iterator does not support resetting while epoch is not finished. Ignoring...")
class DALIClassificationIterator(DALIGenericIterator):
"""
DALI iterator for classification tasks for Paddle. It returns 2 outputs
(data and label) in the form of LoDTensor.
Calling
.. code-block:: python
DALIClassificationIterator(pipelines, size)
is equivalent to calling
.. code-block:: python
DALIGenericIterator(pipelines, ["data", "label"], size)
Please keep in mind that Tensors returned by the iterator are
still owned by DALI. They are valid till the next iterator call.
If the content needs to be preserved please copy it to another tensor.
Parameters
----------
pipelines : list of nvidia.dali.pipeline.Pipeline
List of pipelines to use
size : int
Number of samples in the epoch (Usually the size of the dataset).
auto_reset : bool, optional, default = False
Whether the iterator resets itself for the next epoch
or it requires reset() to be called separately.
fill_last_batch : bool, optional, default = True
Whether to return a fraction of a full batch of data
such that the total entries returned by the
iterator == 'size'. Setting this flag to False will
cause the iterator to return the first integer multiple
dynamic_shape: bool, optional, default = False
Whether the shape of the output of the DALI pipeline can
change during execution. If True, the LoDtensor will be resized accordingly
if the shape of DALI returned tensors changes during execution.
If False, the iterator will fail in case of change.
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In the conjunction with `fill_last_batch` it tells
if the iterator returning last batch with data only partially filled with
data from the current epoch is dropping padding samples or samples from
the next epoch. If set to False next epoch will end sooner as data from
it was consumed but dropped. If set to True next epoch would be the
same length as the first one.
Example
-------
With the data set [1,2,3,4,5,6,7] and the batch size 2:
fill_last_batch = False, last_batch_padded = True -> last batch = [7], next iteration will return [1, 2]
fill_last_batch = False, last_batch_padded = False -> last batch = [7], next iteration will return [2, 3]
fill_last_batch = True, last_batch_padded = True -> last batch = [7, 7], next iteration will return [1, 2]
fill_last_batch = True, last_batch_padded = False -> last batch = [7, 1], next iteration will return [2, 3]
"""
def __init__(self,
pipelines,
size,
auto_reset=False,
fill_last_batch=True,
dynamic_shape=False,
last_batch_padded=False):
super(DALIClassificationIterator, self).__init__(
pipelines, ["data", "label"], size, auto_reset=auto_reset,
fill_last_batch=fill_last_batch,
dynamic_shape=dynamic_shape,
last_batch_padded=last_batch_padded)
| 41.64877 | 112 | 0.596337 |
73ddf5ea7fc34a59fb06937bd5e05dc5452b67d7 | 775 | py | Python | core/apps/home/urls.py | yavuzbektas/StudentAnalayze | 794f433f650633f646bee5cbe08d04afdbd75e84 | [
"MIT"
] | null | null | null | core/apps/home/urls.py | yavuzbektas/StudentAnalayze | 794f433f650633f646bee5cbe08d04afdbd75e84 | [
"MIT"
] | null | null | null | core/apps/home/urls.py | yavuzbektas/StudentAnalayze | 794f433f650633f646bee5cbe08d04afdbd75e84 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.urls import path, re_path
from apps.home import views
def repath_func(request):
from django.http import HttpResponseRedirect
return HttpResponseRedirect(request.path_info)
urlpatterns = [
# The home page
path('', views.index, name='home'),
# Matches any html file
path('profil.html', views.profilUpdate, name='users-edit'),
path('view/<int:pk>', views.profilView, name='users-view'),
path('usr-ogretmenler.html', views.profilShowList,name='kullanicilar'),
path('delete/<int:pk>',views.profilDelete, name='delete'),
path('yetki/<int:pk>',views.issuperUser, name='yetki'),
#re_path(r'^.*\.*', views.pages, name='pages'),
]
| 26.724138 | 75 | 0.659355 |
73de01103f3993ddf915e121cfc1b5e897750f3d | 9,361 | py | Python | Implement_google_AdaptiveFL/ServerAndClient/FlowerEC10/server.py | kuihao/KuihaoFL | 69c9161497f2a82ab8ef2d785a0c7e2e6975a328 | [
"Apache-2.0"
] | 4 | 2021-12-13T06:03:35.000Z | 2022-03-22T05:12:41.000Z | Implement_google_AdaptiveFL/ServerAndClient/FlowerEC10/server.py | kuihao/KuihaoFL | 69c9161497f2a82ab8ef2d785a0c7e2e6975a328 | [
"Apache-2.0"
] | null | null | null | Implement_google_AdaptiveFL/ServerAndClient/FlowerEC10/server.py | kuihao/KuihaoFL | 69c9161497f2a82ab8ef2d785a0c7e2e6975a328 | [
"Apache-2.0"
] | null | null | null | from enum import Flag
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import flwr as fl
from flwr.server.strategy import FedAvg, FedYogi, FedAdam, FedAdagrad
from mypkg import (
ServerArg,
ModelNameGenerator,
secure_mkdir,
MyFedAdagrad,
MyFedYogi,
MyFedAdam
)
# --------
# [Welcome prompt] Make model name
# --------
args = ServerArg()
model_name = ModelNameGenerator(args.name)
print(f"*** This model name: {model_name} ***\n")
# --------
# [Hardware setting] CPU only or limit the GPU usage
# --------
if args.cpu:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
else:
#from mypkg.TF import setGPU
#setGPU(mode=1)
if args.gpu is not None:
os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu)
from mypkg.TF import setGPU
setGPU(mode=3, device_num=args.gpu)
else:
from mypkg.TF import setGPU
        setGPU(mode=1) # Dataset size affects how much GPU memory is required
import tensorflow as tf
from mypkg.TF import CNN_Model, myResNet
# --------
# [Hyperparameter]
# --------
SEED = 2021
'''fix random seed'''
np.random.seed(SEED)
tf.random.set_seed(SEED)
model_input_shape = (32,32,3)
model_class_number = 100 # This is LABEL
SAVE = True
'''(bool) save log or not'''
HyperSet_Model = myResNet().ResNet18(model_input_shape,model_class_number)
#CNN_Model(model_input_shape,model_class_number)
#myResNet().ResNet18(model_input_shape,model_class_number)
HyperSet_Aggregation = FedAvg #MyFedAdagrad
HyperSet_Agg_eta = 1e-3 #1e-3
HyperSet_Agg_tau = 1e-2
HyperSet_client_number = 10
HyperSet_round = 10
# --------
# [Global variables]
# --------
Training_result_distributed = {'loss':[],'accuracy':[],'top_k_categorical_accuracy':[]}
'''Aggregated results of the clients' training'''
Testing_result_distributed = {'loss':[],'accuracy':[],'top_k_categorical_accuracy':[]}
'''[unused] Aggregated results of the clients' testing'''
Training_result_centralized = {'loss':[],'accuracy':[],'top_k_categorical_accuracy':[]}
'''[unused] Server-side training results'''
Testing_result_centralized = {'loss':[],'accuracy':[],'top_k_categorical_accuracy':[]}
'''Server-side testing results'''
# --------
# [Main]
# --------
def main() -> None:
"""Create Global model, set FL strategy, start Server, save FL results"""
    # Step 1. Build the global model
model = HyperSet_Model
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
model.compile(optimizer,
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  # `from_logits=True` means the loss applies softmax to the model's raw logit outputs
metrics=["accuracy", 'top_k_categorical_accuracy'])
    # Step 2. Define the federated learning strategy
    strategy = MyAggregation(
        fraction_fit=1.0, # fraction of clients sampled for training each round
        #fraction_eval=1.0, # fraction of clients sampled for evaluation each round
        min_fit_clients=HyperSet_client_number, # minimum number of connected clients used for training each round (takes precedence over the fraction if they conflict)
        #min_eval_clients=3, # minimum number of connected clients used for evaluation each round (takes precedence over the fraction if they conflict)
        min_available_clients=HyperSet_client_number, # minimum number of connected clients required before federated learning starts
        on_fit_config_fn=fit_config, # sets the client-side training hyperparameters
        on_evaluate_config_fn=None, #evaluate_config, # sets the client-side evaluation hyperparameters
        eval_fn=get_eval_fn(model), # server-side evaluation (uses the global dataset)
        initial_parameters=fl.common.weights_to_parameters(model.get_weights()), # initial global model parameters
)
# Step 3. Run the server with the strategy
#fl.server.start_server("localhost:8080", config={"num_rounds": 3}, strategy=strategy) #windows
fl.server.start_server("[::]:8080", config={"num_rounds": HyperSet_round}, strategy=strategy) #linux
# [Kuihao addition] Save FL results to numpy-zip
global Training_result_distributed, Testing_result_centralized
if SAVE:
FL_Results_folder = secure_mkdir("FL_Results"+"/"+model_name)
np.savez(f"{FL_Results_folder}/Training_result_distributed.npz", Training_result_distributed)
np.savez(f"{FL_Results_folder}/Testing_result_centralized.npz", Testing_result_centralized)
# --------
# [Customized Aggregation Strategy]
# --------
class MyAggregation(HyperSet_Aggregation):
'''
    (1) Defines the aggregation algorithm (FedAvg, MyFedAdagrad, MyFedAdam, MyFedYogi)\n
    (2) Saves the clients' training results\n
    (3) Saves the global model weights
'''
def aggregate_fit(
self,
rnd: int,
results: List[Tuple[fl.server.client_proxy.ClientProxy, fl.common.FitRes]],
failures: List[BaseException],
) -> Optional[fl.common.Weights]:
'''Override'''
# Call Parent-class's aggregate_fit()
aggregated_weights = super().aggregate_fit(rnd=rnd,
#K_defining_eta=HyperSet_Agg_eta, # default: 1e-1
#K_defining_tau=HyperSet_Agg_tau, # default: 1e-9
results=results,
failures=failures)
# Aggregate clients' training results (loss, acc., top-k-acc.)
examples = [r.num_examples for _, r in results]
accuracies = [r.metrics["accuracy"] * r.num_examples for _, r in results]
accuracy_aggregated = sum(accuracies) / sum(examples)
losses = [r.metrics["loss"] * r.num_examples for _, r in results]
loss_aggregated = sum(losses) / sum(examples)
topK_accuracies = [r.metrics["top_k_categorical_accuracy"] * r.num_examples for _, r in results]
topK_accuracies_aggregated = sum(topK_accuracies) / sum(examples)
        # [Kuihao addition] Cache the client-side training results
global Training_result_distributed
Training_result_distributed["loss"].append(loss_aggregated)
Training_result_distributed["accuracy"].append(accuracy_aggregated)
Training_result_distributed["top_k_categorical_accuracy"].append(topK_accuracies_aggregated)
print(f"\n****\nRound {rnd}, train results from clients after aggregation:\n"\
f"Loss:{loss_aggregated} Acc.:{accuracy_aggregated} TopK Acc.:{topK_accuracies_aggregated}"\
f"\n****")
        # [Kuihao addition] Save the global model weights (written only on the final round, per the condition below)
if SAVE and rnd==HyperSet_round:
checkpoint_folder = secure_mkdir("ckpoint"+"/"+model_name)
if aggregated_weights is not None:
# Save aggregated_weights
print(f"****Saving round {rnd} aggregated_weights...****")
np.savez(f"{checkpoint_folder}/round-{rnd}-weights.npz", *aggregated_weights)
return aggregated_weights
# --------
# [Server-side function] config and evaluate
# --------
def fit_config(rnd: int):
"""
[Client-side, training hyperparameter]
    * Sets the hyperparameters for client-side training: batch_size, epochs,
    learning rate, etc. can all be configured here.
    * Different FL rounds can even hand different hyperparameters to the clients.
* Return training configuration dict for each round.
    The configuration below keeps the batch size fixed at 16 and uses one local
    epoch per round.
"""
config = {
"batch_size": 16,
"local_epochs": 1,
"rnd":rnd,
}
return config
def evaluate_config(rnd: int):
"""
[Client-side, evaluating hyperparameter]
    * Sets the hyperparameters for client-side testing: epochs and steps (the total
    number of steps, i.e. the number of batches of samples).
    * Different FL rounds can hand different hyperparameters to the clients.
* Return evaluation configuration dict for each round.
Perform five local evaluation steps on each client (i.e., use five
batches) during rounds one to three, then increase to ten local
evaluation steps.
"""
val_steps = 1 # not use
return {"val_steps": val_steps}
def get_eval_fn(model):
'''
[Server-side, evaluating hyperparameter]
* Return an evaluation function for server-side evaluation.
    * Evaluates the global model on the global dataset (no training involved)
'''
# Load data and model here to avoid the overhead of doing it in `evaluate` itself
#_ ,(x_test,y_test) = tf.keras.datasets.cifar10.load_data()
tfds_test_server = tf.data.experimental.load(f'dataset/cifar100_test_server/content/zip/cifar100_client/test_server/federated_test_data_all')
# Data preprocessing
#x_test = x_test / 255.0
#y_test = tf.squeeze(y_test,axis=1) # cifar10 version
# The `evaluate` function will be called after every round
def evaluate(
weights: fl.common.Weights,
) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]:
model.set_weights(weights) # Update model with the latest parameters
loss, accuracy,top_k_categorical_accuracy = model.evaluate(tfds_test_server) #model.evaluate(x_test, y_test)
        # [Kuihao addition] Cache the server-side evaluation results
global Testing_result_centralized
Testing_result_centralized["loss"].append(loss)
Testing_result_centralized["accuracy"].append(accuracy)
Testing_result_centralized["top_k_categorical_accuracy"].append(top_k_categorical_accuracy)
return loss, {"accuracy": accuracy, 'top_k_categorical_accuracy':top_k_categorical_accuracy}
return evaluate
# --------
# [Main Exe.]
# --------
if __name__ == "__main__":
main() | 39.834043 | 145 | 0.67653 |
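# Client-side sketch matching the server above (illustration only): it assumes a
# compiled Keras `model` and numpy arrays `x_train, y_train, x_test, y_test`
# already exist, and uses the same pre-1.0 Flower API as fl.server.start_server
# on port 8080. The fit() metric names mirror what MyAggregation.aggregate_fit
# reads from each client result.
#
#   import flwr as fl
#
#   class CifarClient(fl.client.NumPyClient):
#       def get_parameters(self):
#           return model.get_weights()
#
#       def fit(self, parameters, config):
#           model.set_weights(parameters)
#           hist = model.fit(x_train, y_train, batch_size=config["batch_size"],
#                            epochs=config["local_epochs"], verbose=0)
#           metrics = {k: float(hist.history[k][-1])
#                      for k in ("loss", "accuracy", "top_k_categorical_accuracy")}
#           return model.get_weights(), len(x_train), metrics
#
#       def evaluate(self, parameters, config):
#           model.set_weights(parameters)
#           loss, acc, top_k = model.evaluate(x_test, y_test, verbose=0)
#           return loss, len(x_test), {"accuracy": acc}
#
#   fl.client.start_numpy_client("localhost:8080", client=CifarClient())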
73de1a01211701cb3610f9942677e6795fc4c8ed | 7,218 | py | Python | core/argo/core/optimizers/ExtendedNesterovNonconst.py | szokejokepu/natural-rws | bb1ad4ca3ec714e6bf071d2136593dc853492b68 | [
"MIT"
] | 4 | 2020-12-07T19:13:13.000Z | 2022-01-30T18:52:18.000Z | core/argo/core/optimizers/ExtendedNesterovNonconst.py | szokejokepu/natural-rws | bb1ad4ca3ec714e6bf071d2136593dc853492b68 | [
"MIT"
] | 12 | 2020-09-25T22:41:28.000Z | 2022-02-09T23:46:34.000Z | core/argo/core/optimizers/ExtendedNesterovNonconst.py | szokejokepu/natural-rws | bb1ad4ca3ec714e6bf071d2136593dc853492b68 | [
"MIT"
] | 2 | 2021-03-02T18:31:04.000Z | 2021-03-02T21:56:43.000Z | '''
DOCUMENTATION:
Nesterov method with constant momentum factor is given in the work of Defazio:
https://arxiv.org/abs/1812.04634
See Table 1, page 3 - 'Modern Momentum' (here: beta is the momentum factor)
The Extended Nesterov method (without prox argument), is given in the following articles:
https://arxiv.org/pdf/1908.02574.pdf
Here we have 2 non-constant momentum-type sequences, 'alpha_n' and 'beta_n' that depend on 3 coefficients:
'alpha > 0', 'beta in R' and 'gamma >= 0'.
We impose, as in the continuous version, the following conditions: 'alpha > 3', 'beta in R' and 'gamma > 0'.
Some notes on default values and preliminary computational simulations:
1) The default value of 'alpha' is set to 4, since we require 'alpha > 3'
2) For 'beta = 3' or similarly high values it does not work, so the default is set to 'beta = 0.1'
3) A moderate-to-small value of 'gamma = 0.5' seems good enough
'''
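# Reading of the inertial sequences implemented in _apply_dense below
# (n is the iteration counter; alpha, beta, gamma are the constructor arguments):
#   alpha_n = n / (n + alpha)
#   beta_n  = (n * gamma + beta) / (n + alpha)
# With the defaults alpha=4, beta=0.1, gamma=0.5, alpha_n -> 1 and beta_n -> gamma
# as n grows, so both momentum-type factors stay bounded.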
# Loading modules
from tensorflow.python.training import optimizer # Here we have the 'Optimizer' class
from tensorflow.python.framework import ops # From here we need the function that converts to 'Tensor' object
from tensorflow.python.ops import math_ops # From here we need mathematical operations for 'Tensor' objects
from tensorflow.python.ops import state_ops # From here we need 'Operations' on 'Tensor' objects
from tensorflow.python.ops import control_flow_ops # From here we need the function 'group'
# The subclass of Optimizer class, containing Nesterov method with constant momentum coefficients, namely 'alpha' and 'beta'
class ExtendedNesterovNonconst(optimizer.Optimizer):
# The constructor of the class
def __init__(self, model, learning_rate = 1e-2, alpha = 4, beta = 0.1, gamma = 0.5, use_locking = False, name = 'ExtendedNesterovNonconst'):
# Call the constructor of the 'Optimizer' superclass using the parameters 'use_locking' and 'name'
super(ExtendedNesterovNonconst, self).__init__(use_locking, name)
# Initialize the private Python variables of the current subclass
self._lr = learning_rate
self._alpha = alpha
self._beta = beta
self._gamma = gamma
self._model = model
# Initialize the private 'Tensor' objects of the current subclass
self._lr_t = None
self._alpha_t = None
self._beta_t = None
self._gamma_t = None
# We construct all the 'Tensor' objects before we apply the gradients
# Except the learning rate, all the coefficients are part of the momentum-type terms
# Private function
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name = 'learning_rate')
self._alpha_t = ops.convert_to_tensor(self._alpha, name = 'alpha')
self._beta_t = ops.convert_to_tensor(self._beta, name = 'beta')
self._gamma_t = ops.convert_to_tensor(self._gamma, name = 'gamma')
# We create the slots for the variables. A 'Slot' is an additional variable associated with the variables to train
# We allocate and manage these auxiliary variables
# Private function
def _create_slots(self, var_list):
for v in var_list:
# The accumulator variable is 'p^{k+1}' in the work of Defazio
self._zeros_slot(v, "old_accum", self._name)
self._zeros_slot(v, "accum", self._name)
self._zeros_slot(v, "curr_it", self._name)
# The actual Extended Nesterov implementation for the general case when we have dense 'Tensor' objects
# All of the operations are applied to 'Tensor' variables
# 'apply_gradients', 'compute_gradients' and 'minimize' are public functions of 'Optimizer' class
# Order of functions:
# minimize(loss, global_step, var_list)
# => grads_and_vars = compute_gradients(loss, var_list)
# => grads_and_vars = list(zip(grads, var_list))
# => grads = gradients.gradients(loss, var_refs)
# var_list = (variables.trainable_variables() + ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
# apply_gradients(grads_and_vars, global_step)
# => for g, v in grads_and vars: p = _get_processor(v)
# => _TensorProcessor(v), _DenseResourceVariableProcessor(v), _DenseResourceVariableProcessor(v), _RefVariableProcessor(v)
# => for grad, var, processor in converted_grads_and_vars: processor.update_op(grad)
# => update_op(self, optimizer, g)
# => return update_op = optimizer._apply_dense(g, self._v)
def _apply_dense(self, grad, var):
# 1st step: we convert our 'Tensor' objects to have the type of the training variables
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
gamma_t = math_ops.cast(self._gamma_t, var.dtype.base_dtype)
# 2nd step: we define the gradient accumulations, using the identifier 'accum' from '_create_slots()'
# We also memorize the old accumulator, since we will update the 'accum' variable. Here, 'old_accum' is 'p^{k+1}'
old_accum = self.get_slot(var, "old_accum")
accum = self.get_slot(var, "accum")
# 3rd step: define the current iteration needed for the momentum inertial sequence
# It must be converted to the same type as the trainable variables
# We have here the inertial sequences 'alpha_n' and 'beta_n'
curr_it = self.get_slot(var, "curr_it")
n = curr_it + 1
alpha_iteration = n / (n + alpha_t)
beta_iteration = (n * gamma_t + beta_t) / (n + alpha_t)
beta_iteration_plus_1 = ((n+1) * gamma_t + beta_t) / (n + alpha_t + 1)
# 4th step: we have the Extended Nesterov formula 'accum_t <- accum_t * alpha_t + grad', i.e. 'p^{k+1}' from Defazio
# We update 'accum' by assigning the value 'momentum_t * accum + grad' to it. Furthermore, the new value is return in the 'Tensor' object 'accum_t'
old_accum_t = state_ops.assign(old_accum, accum)
with ops.control_dependencies([old_accum_t]):
accum_t = state_ops.assign(accum, alpha_iteration * accum + grad, use_locking = False)
# 5th step: variables updates by using 'var_update <- var - ( lr_t * grad + lr_t * beta_t * accum_t + (alpha_t-beta_t) * old_accum )', i.e. 'x^{k+1}' from Defazio
# Here, 'accum_t' is 'p^{k+1}' because was already updated before
with ops.control_dependencies([old_accum, accum_t]):
var_update = state_ops.assign_sub(var, lr_t * grad + lr_t * beta_iteration_plus_1 * accum_t + (alpha_iteration - beta_iteration) * old_accum_t)
# 6th step: return the updates, i.e. we return the Graph 'Operation' that will group multiple 'Tensor' ops.
# For more complex algorithms, the 'control_flow_ops.group' is used in the '_finish()' function, after '_apply_dense()'
return control_flow_ops.group(*[var_update, old_accum_t, accum_t, n])
# I did not implemented the algorithm for the case of 'Sparse Tensor' variables
def _apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
| 53.466667 | 170 | 0.694514 |
73de1bffb46de3df866150585652ad860bb192ae | 2,706 | py | Python | bcherry-web/playground/main.py | bcherry/bcherry | 5d2f1144dbdbf35d6284018fa2c9e24ec5cecec6 | [
"MIT"
] | 3 | 2016-11-13T09:06:41.000Z | 2021-09-11T23:36:19.000Z | bcherry-web/playground/main.py | bcherry/bcherry | 5d2f1144dbdbf35d6284018fa2c9e24ec5cecec6 | [
"MIT"
] | null | null | null | bcherry-web/playground/main.py | bcherry/bcherry | 5d2f1144dbdbf35d6284018fa2c9e24ec5cecec6 | [
"MIT"
] | 2 | 2017-04-04T10:03:18.000Z | 2021-09-11T23:36:26.000Z | #!/usr/bin/env python
import os
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
import time
class MainHandler(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path, {}))
class SetTimeoutHandler(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'settimeout.html')
self.response.out.write(template.render(path, {}))
class ComparingComparisonsHandler(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'comparisonperformance.html')
self.response.out.write(template.render(path, {}))
class DefaultValuesHandler(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'defaultvalues.html')
self.response.out.write(template.render(path, {}))
class ChainingHandler(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'chaining.html')
self.response.out.write(template.render(path, {}))
class SpyConstructorsHandler(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'spying-constructors.html')
self.response.out.write(template.render(path, {}))
class KeyboardShortcutsHandler(webapp.RequestHandler):
def get(self):
path = os.path.join(os.path.dirname(__file__), 'keyboardshortcuts.html')
self.response.out.write(template.render(path, {}))
class SaneHTML5HistoryHandler(webapp.RequestHandler):
def get(self, *args):
path = os.path.join(os.path.dirname(__file__), 'html5history.html')
self.response.out.write(template.render(path, {}))
class PushStateImgHandler(webapp.RequestHandler):
def get(self):
time.sleep(1)
self.response.headers["Content-Type"] = "image/jpeg"
self.response.out.write('')
class AnyHandler(webapp.RequestHandler):
def get(self, path):
file_path = os.path.join(os.path.dirname(__file__), path + '.html')
self.response.out.write(open(file_path).read())
def main():
application = webapp.WSGIApplication([
('/playground', MainHandler),
('/playground/settimeout', SetTimeoutHandler),
('/playground/comparisons', ComparingComparisonsHandler),
('/playground/defaultvalues', DefaultValuesHandler),
('/playground/spying-constructors', SpyConstructorsHandler),
('/playground/keyboard-shortcuts', KeyboardShortcutsHandler),
('/playground/pushstate.jpg', PushStateImgHandler),
('/playground/sanerhtml5history(/(.+)?)?', SaneHTML5HistoryHandler),
('/playground/(.*)', AnyHandler),
], debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| 34.692308 | 78 | 0.746489 |
73de2f8ecea34a0ee1bced8502ba1bd0cdcc0760 | 2,170 | py | Python | python/sdk/setup.py | Omrisnyk/merlin | cc2dbeabe52ac6e413db7f7647ed54c7edb7695f | [
"Apache-2.0"
] | 97 | 2020-10-15T08:03:56.000Z | 2022-03-31T22:30:59.000Z | python/sdk/setup.py | babywyrm/merlin | 29f669ab613d6808d0186067b948496b508caa96 | [
"Apache-2.0"
] | 91 | 2020-10-26T03:15:27.000Z | 2022-03-31T10:19:55.000Z | python/sdk/setup.py | babywyrm/merlin | 29f669ab613d6808d0186067b948496b508caa96 | [
"Apache-2.0"
] | 26 | 2020-10-21T03:53:36.000Z | 2022-03-16T06:43:15.000Z | #!/usr/bin/env python
# Copyright 2020 The Merlin Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import os
from setuptools import setup, find_packages
version = imp.load_source(
'merlin.version', os.path.join('merlin', 'version.py')).VERSION
REQUIRES = [
"certifi>=2017.4.17",
"python-dateutil>=2.1",
"six>=1.10",
"mlflow>=1.2.0",
"google-cloud-storage <=1.28.1",
"boto3>=1.9.84",
"urllib3>=1.23",
"PyPrind>=2.11.2",
"google-auth>=1.11.0,<2.0dev",
'Click>=7.0',
"cloudpickle==1.2.2",
"cookiecutter>=1.7.2",
"docker>=4.2.1",
"google-api-core<=1.17.0", # https://github.com/googleapis/python-pubsub/issues/115
"grpcio<=1.27.2",
"google-cloud-core==1.3.0",
"PyYAML>=5.3.1"
]
TEST_REQUIRES = [
"pytest",
"pytest-dependency",
"pytest-cov",
"pytest-xdist",
"urllib3-mock>=0.3.3",
"requests",
"xgboost==0.82",
"scikit-learn==0.20.3",
"joblib>=0.13.0",
"mypy==0.812",
"google-cloud-bigquery==1.22.0",
"google-cloud-bigquery-storage==0.7.0",
"grpcio==1.22.0"
]
setup(
name="merlin-sdk",
version=version,
description="Python SDK for Merlin",
url="https://github.com/gojek/merlin",
author="Merlin",
packages=find_packages(),
package_data={"merlin": [
"docker/pyfunc.Dockerfile", "docker/standard.Dockerfile"]},
zip_safe=True,
install_requires=REQUIRES,
setup_requires=["setuptools_scm"],
tests_require=TEST_REQUIRES,
extras_require={'test': TEST_REQUIRES},
python_requires='>=3.7',
entry_points='''
[console_scripts]
merlin=merlin.merlin:cli
'''
)
| 27.468354 | 88 | 0.646544 |
73de4d0bb16a37bc57add7f17f9a5babc091adfa | 3,483 | py | Python | scripts/converter_v3.py | ATLAS-Titan/allocation-modeling | b315aa7ac0cf613ed02c59188ff19e9738f36aca | [
"Apache-2.0"
] | null | null | null | scripts/converter_v3.py | ATLAS-Titan/allocation-modeling | b315aa7ac0cf613ed02c59188ff19e9738f36aca | [
"Apache-2.0"
] | null | null | null | scripts/converter_v3.py | ATLAS-Titan/allocation-modeling | b315aa7ac0cf613ed02c59188ff19e9738f36aca | [
"Apache-2.0"
] | null | null | null | #
# Copyright European Organization for Nuclear Research (CERN)
# National Research Centre "Kurchatov Institute"
# Rutgers University
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Author(s):
# - Mikhail Titov, <mikhail.titov@cern.ch>, 2018
#
IS_ANALYSIS_FORMAT = False
if IS_ANALYSIS_FORMAT:
OUTPUT_FORMAT = 'analysis'
else:
OUTPUT_FORMAT = 'stream'
INPUT_FILE_NAMES = []
OUTPUT_FILE_NAME = 'titan-logs-v3-{0}-format.csv'.format(OUTPUT_FORMAT)
FTR = [3600, 60, 1]
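# Illustration (made-up value): a log field "Resource_List.walltime=02:30:15" is
# converted below via sum(a * b for a, b in zip(FTR, [2, 30, 15]))
#   = 2*3600 + 30*60 + 15 = 9015.0 seconds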
def converter(input_files, output_file):
"""
Converter.
@param input_files: File names with input data (from TITAN support team).
@type input_files: list
@param output_file: File name for derived data (of simulator format).
@type output_file: str
"""
output = []
for input_file_name in input_files:
with open(input_file_name, 'r') as f_in:
for line in f_in:
params = line.split('\r\n')[0].split(' ')
output_line = [None, None, None, None, None, None, None]
for p in params:
try:
if 'user=' in p:
output_line[5] = p.split('user=')[1]
elif p.startswith('account='):
output_line[6] = p.split('=')[1]
elif p.startswith('qtime='):
output_line[0] = float(p.split('=')[1])
elif p.startswith('start='):
output_line[1] = float(p.split('=')[1])
elif p.startswith('end='):
output_line[2] = float(p.split('=')[1])
elif 'Resource_List.walltime' in p:
output_line[3] = float(
sum([a * b for a, b in zip(FTR, map(
int, p.split('=')[1].split(':')))]))
elif 'Resource_List.nodes' in p:
output_line[4] = int(p.split('=')[1].split(':')[0])
# dependency -> 'Resource_List.depend'
except Exception, e:
print 'Portion: {0} | Line: {1}'.format(p, line)
raise Exception(e)
if None in output_line:
continue
output.append((output_line[0],
output_line[1],
','.join([str(output_line[3]),
str(output_line[2] - output_line[1]),
str(output_line[4]),
output_line[5],
output_line[6]])))
output.sort()
if IS_ANALYSIS_FORMAT:
with open(output_file, 'a') as f_out:
for x in output:
f_out.write(','.join([str(x[0]), str(x[1]), x[2]]) + '\n')
else:
init_time = output[0][0] - 1
with open(output_file, 'a') as f_out:
for x in output:
f_out.write(','.join([str(x[0] - init_time), x[2]]) + '\n')
if __name__ == '__main__':
converter(input_files=INPUT_FILE_NAMES,
output_file=OUTPUT_FILE_NAME)
| 35.540816 | 79 | 0.480907 |
73de68566a7a5cadd2256d48bf99e427ce678fe8 | 2,816 | py | Python | src/fidesops/service/processors/post_processor_strategy/post_processor_strategy_filter.py | mohan-pogala/fidesops | 5c686362d4fb3b85253dd7e2898be1131a5071ab | [
"Apache-2.0"
] | null | null | null | src/fidesops/service/processors/post_processor_strategy/post_processor_strategy_filter.py | mohan-pogala/fidesops | 5c686362d4fb3b85253dd7e2898be1131a5071ab | [
"Apache-2.0"
] | null | null | null | src/fidesops/service/processors/post_processor_strategy/post_processor_strategy_filter.py | mohan-pogala/fidesops | 5c686362d4fb3b85253dd7e2898be1131a5071ab | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Any, Optional, Dict
from fidesops.schemas.saas.strategy_configuration import (
FilterPostProcessorConfiguration,
StrategyConfiguration,
IdentityParamRef,
)
from fidesops.service.processors.post_processor_strategy.post_processor_strategy import (
PostProcessorStrategy,
)
STRATEGY_NAME = "filter"
logger = logging.getLogger(__name__)
class FilterPostProcessorStrategy(PostProcessorStrategy):
"""
Filters object or array given field name and value
Value can be reference a dynamic identity passed in through the request OR hard-coded value.
E.g.
data = [
{
"id": 1397429347
"email_contact": somebody@email.com
"name": Somebody Awesome
},
{
"id": 238475234
"email_contact": somebody-else@email.com
"name": Somebody Cool
}
]
field: email_contact
value: {"identity": email}, where email == somebody@email.com
result = {
id: 1397429347
email_contact: somebody@email.com
name: Somebody Awesome
}
"""
def __init__(self, configuration: FilterPostProcessorConfiguration):
self.field = configuration.field
self.value = configuration.value
def get_strategy_name(self) -> str:
return STRATEGY_NAME
def process(self, data: Any, identity_data: Dict[str, Any] = None) -> Optional[Any]:
"""
:param data: A list or an object
:param identity_data: Dict of cached identity data
:return: filtered object or None
"""
if not data:
return None
filter_value = self.value
if isinstance(self.value, IdentityParamRef):
if (
identity_data is None
or identity_data.get(self.value.identity, None) is None
):
logger.warning(
f"Could not retrieve identity reference '{self.value.identity}' due to missing identity data for the following post processing strategy: {self.get_strategy_name()}"
)
return None
filter_value = identity_data.get(self.value.identity)
try:
if isinstance(data, list):
filtered = [item for item in data if item[self.field] == filter_value]
return filtered if filtered else None
return data if data[self.field] == filter_value else None
except KeyError:
logger.warning(
f"{self.field} could not be found on data for the following post processing strategy: {self.get_strategy_name()}"
)
return None
@staticmethod
def get_configuration_model() -> StrategyConfiguration:
return FilterPostProcessorConfiguration
| 33.129412 | 184 | 0.628551 |
73de72a27148de0df4d07890ab3151d6fae7be4b | 4,061 | py | Python | models/learned_loss_old.py | allenai/interactron | 2be41b34adf6917348cb2440cded9975ec4f5d0d | [
"Apache-2.0"
] | 4 | 2022-03-30T06:07:32.000Z | 2022-03-31T02:05:18.000Z | models/learned_loss.py | allenai/interactron | 2be41b34adf6917348cb2440cded9975ec4f5d0d | [
"Apache-2.0"
] | null | null | null | models/learned_loss.py | allenai/interactron | 2be41b34adf6917348cb2440cded9975ec4f5d0d | [
"Apache-2.0"
] | 2 | 2022-03-30T10:05:42.000Z | 2022-03-31T02:05:23.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
import cv2
from models.detectron2_detector import Detectron2Detector
from models.gpt import GPT
from models.components import LinearBlock
from utils.constants import tlvis_classes
from utils.model_utils import merge_batch_seq, unmerge_batch_seq
from utils.detection_utils import iou
from utils.time_utils import Timer
from utils.viz_utils import draw_box
class LinearBlock(nn.Module):
def __init__(self, in_dim, out_dim, bias=False):
super().__init__()
self.model = nn.Sequential(
nn.Linear(in_features=in_dim, out_features=out_dim, bias=bias),
nn.LayerNorm(out_dim),
nn.GELU(),
)
def forward(self, x):
og_shape = x.shape
x = self.model(x.view(-1, og_shape[-1]))
return x.view(*og_shape[:-1], -1)
class LearnedLossModel(nn.Module):
def __init__(self, cfg):
super().__init__()
self.model = GPT(cfg.TRANSFORMER)
self.proposal_encoder = LinearBlock(2264, cfg.TRANSFORMER.EMBEDDING_DIM, bias=False)
self.img_feature_encoder = LinearBlock(2048, cfg.TRANSFORMER.EMBEDDING_DIM, bias=False)
self.box_decoder = nn.Linear(in_features=1024, out_features=4, bias=False)
self.category_decoder = nn.Linear(in_features=1024, out_features=1236, bias=False)
self.cfg = cfg
self.is_train = True
self.timer = Timer()
self.logger = None
self.mode = 'train'
        if cfg.TRANSFORMER.PREDICT_ACTIONS:
            # nn.Parameter expects a tensor, not bare dimensions; zero initialization is an assumption here
            self.policy_tokens = nn.Parameter(torch.zeros(1, 5, cfg.TRANSFORMER.EMBEDDING_DIM))
def forward(self, predictions, images):
seq = self.fold_sequence(predictions)
        if self.cfg.TRANSFORMER.PREDICT_ACTIONS:
            # broadcast the single set of learned policy tokens across the batch before appending
            seq = torch.cat((seq, self.policy_tokens.expand(seq.shape[0], -1, -1)), dim=1)
out = self.model(seq)
pred_embs = out[:, :250]
learned_loss = torch.norm(pred_embs, p=2)
if self.cfg.TRANSFORMER.PREDICT_ACTIONS:
action_predictions = out[:, -5:]
return learned_loss, action_predictions
return learned_loss
def configure_optimizer(self, train_config):
optim_groups = self.model.get_optimizer_groups(train_config)
        assert train_config.OPTIM_TYPE in ["Adam", "AdamW", "SGD"], \
            "Invalid optimizer type {}. Please select Adam, AdamW or SGD".format(train_config.OPTIM_TYPE)
if train_config.OPTIM_TYPE == "AdamW":
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.LEARNING_RATE,
betas=(train_config.BETA1, train_config.BETA2))
elif train_config.OPTIM_TYPE == "Adam":
optimizer = torch.optim.Adam(optim_groups, lr=train_config.LEARNING_RATE,
betas=(train_config.BETA1, train_config.BETA2))
else:
optimizer = torch.optim.SGD(optim_groups, lr=train_config.LEARNING_RATE, momentum=train_config.MOMENTUM)
return optimizer
def eval(self):
return self.train(False)
def train(self, mode=True):
self.mode = 'train' if mode else 'test'
self.model.train(mode)
self.is_train = mode
return self
def fold_sequence(self, predictions):
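        # Build the transformer input sequence: per-proposal tokens (box features,
        # box coordinates and class logits concatenated) followed by flattened
        # per-frame image-feature tokens, each projected to the embedding dimension.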
img_features = predictions.get_image_features()
box_features = predictions.get_box_features()
boxes = predictions.get_boxes()
logits = predictions.get_logits()
detections = torch.cat((box_features, boxes, logits), dim=-1)
b, t = img_features.shape[:2]
img_features = img_features.permute(0, 1, 3, 4, 2)
seq_img_features = self.img_feature_encoder(img_features.reshape(b, -1, img_features.shape[-1]))
det_image_features = self.proposal_encoder(detections.reshape(b, -1, detections.shape[-1]))
return torch.cat((det_image_features, seq_img_features), dim=1)
def set_logger(self, logger):
assert self.logger is None, "This model already has a logger!"
self.logger = logger
| 39.427184 | 116 | 0.664615 |
73de7b41da5d0cb9fcaa2203fc40713383b71f3a | 508 | py | Python | components/amp-utility/python/uhashlib.py | ekmixon/AliOS-Things | 00334295af8aa474d818724149726ca93da4645d | [
"Apache-2.0"
] | 4,538 | 2017-10-20T05:19:03.000Z | 2022-03-30T02:29:30.000Z | components/amp-utility/python/uhashlib.py | ekmixon/AliOS-Things | 00334295af8aa474d818724149726ca93da4645d | [
"Apache-2.0"
] | 1,088 | 2017-10-21T07:57:22.000Z | 2022-03-31T08:15:49.000Z | components/amp-utility/python/uhashlib.py | willianchanlovegithub/AliOS-Things | 637c0802cab667b872d3b97a121e18c66f256eab | [
"Apache-2.0"
] | 1,860 | 2017-10-20T05:22:35.000Z | 2022-03-27T10:54:14.000Z | # * coding: UTF8 *
"""
This module implements a subset of the corresponding CPython module, as described below. It provides hashing algorithms for binary data. Currently the SHA256 algorithm is implemented. SHA256 is a deliberate choice: it is a modern, cryptographically secure algorithm, so a single algorithm covers both generic hashing use cases and security-related uses, and legacy algorithms such as MD5 or SHA1 are omitted to save space.
SHA256 - the latest generation, a modern hashing algorithm
Classes
------------------------------
"""
class sha256(object):
"""
    Create a hasher object and optionally feed data into it
"""
def __init__(self,data):
pass
def update(self,data):
"""
        Feed more binary data into the hash
"""
pass
def digest(self):
"""
        Return the hash of all data passed through the hasher so far
"""
pass
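# Illustrative usage sketch (not part of the original stub; it mirrors the
# CPython hashlib interface, and the exact constructor behaviour on this port
# is assumed):
#
#     import uhashlib
#     h = uhashlib.sha256(b"hello ")
#     h.update(b"world")
#     digest = h.digest()   # 32-byte SHA256 digest of b"hello world"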
| 14.111111 | 150 | 0.588583 |
73de89ee3a6f19f38f86e246786fc059f9542160 | 801 | py | Python | features/steps/calculating_tubing_descriptors.py | tonyroberts/project-tetra-display | ada1169d3884e61c06e90fe50a9886b50564c7dd | [
"MIT"
] | 4 | 2020-07-29T09:18:16.000Z | 2021-05-19T22:31:23.000Z | features/steps/calculating_tubing_descriptors.py | tonyroberts/project-tetra-display | ada1169d3884e61c06e90fe50a9886b50564c7dd | [
"MIT"
] | 44 | 2020-07-29T09:12:07.000Z | 2021-07-04T01:50:57.000Z | features/steps/calculating_tubing_descriptors.py | tonyroberts/project-tetra-display | ada1169d3884e61c06e90fe50a9886b50564c7dd | [
"MIT"
] | 4 | 2020-07-31T20:02:47.000Z | 2021-05-14T08:48:38.000Z | from behave import given, when, then
import server
from tetra_constants import DESCRIPTORS, NUMBER_OF_PATIENTS
@given("there are sensors connected")
def step_impl(context):
context.sensors = server.Sensors()
@given("there is a calculator to parse sensor data")
def step_impl(context):
context.calculator = server.Calculator()
@when("data is requested from the sensors")
def step_impl(context):
context.sensor_data = context.sensors.poll()
@then("the sensors yield all of the descriptors")
def step_impl(context):
context.calculator.add_datum(context.sensor_data)
assert all(descriptor
in context.calculator.get_datum()[f"patient-{patient_number}"]
for descriptor in DESCRIPTORS
for patient_number in range(NUMBER_OF_PATIENTS))
| 27.62069 | 77 | 0.734082 |
73de95734ae8fc0d79749d49185671ba98ba9b5f | 5,644 | py | Python | src/test/tests/operators/lcs.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 226 | 2018-12-29T01:13:49.000Z | 2022-03-30T19:16:31.000Z | src/test/tests/operators/lcs.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 5,100 | 2019-01-14T18:19:25.000Z | 2022-03-31T23:08:36.000Z | src/test/tests/operators/lcs.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 84 | 2019-01-24T17:41:50.000Z | 2022-03-10T10:01:46.000Z | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: LCS.py
#
# Tests: operator - LCS
#
# Programmer: Allen Sanderson
# Date: August 25, 2015
#
# Modifications:
# Kathleen Biagas, Thur Sep 17, 2020
# Shorten name used for 'Testing database' TestSection. Turn of warnings.
# ----------------------------------------------------------------------------
RequiredDatabasePlugin("PICS_Tester")
#-vargs="-debug 5"
# For testing using the CLI
#def data_path( db_name ):
# db="/Projects/VisIt/trunk/build/data/%s" %(db_name)
# return db
#def Test(case_name):
# swatts = SaveWindowAttributes()
# swatts.family = 0
# swatts.fileName = "/Projects/tmp/lcs/ser/%s" %(case_name)
# SetSaveWindowAttributes(swatts)
# SaveWindow()
# return
#def TestSection(tmpstr):
# return
#def Exit():
# return
# Open the database here and add a plot as for some reason it fails
# within a loop. It only happens with all-in-one plots with an operator
# such as with "Pseudocolor" and "operators/LCS/velocity"
db=data_path("pics_test_data/ftle_double_gyre_1_domain.pics")
OpenDatabase(db)
AddPlot("Pseudocolor", "operators/LCS/velocity")
LCSAtts = LCSAttributes()
LCSAtts.Resolution = (101, 51, 1)
LCSAtts.integrationDirection = LCSAtts.Forward
LCSAtts.auxiliaryGridSpacing = 0.005
LCSAtts.maxSteps = 1000000
LCSAtts.operationType = LCSAtts.Lyapunov
LCSAtts.cauchyGreenTensor = LCSAtts.Right
LCSAtts.eigenComponent = LCSAtts.Largest
LCSAtts.operatorType = LCSAtts.BaseValue
LCSAtts.terminationType = LCSAtts.Time
LCSAtts.terminateByTime = 1
LCSAtts.termTime = 4
LCSAtts.maxStepLength = 0.001
LCSAtts.integrationType = LCSAtts.AdamsBashforth
LCSAtts.parallelizationAlgorithmType = LCSAtts.ParallelStaticDomains
LCSAtts.pathlines = 1
LCSAtts.pathlinesCMFE = LCSAtts.CONN_CMFE
LCSAtts.issueAdvectionWarnings = 0
LCSAtts.issueBoundaryWarnings = 0
LCSAtts.issueTerminationWarnings = 0
LCSAtts.issueStepsizeWarnings = 0
LCSAtts.issueStiffnessWarnings = 0
LCSAtts.issueCriticalPointsWarnings = 0
SetOperatorOptions(LCSAtts, 0)
databases=["ftle_double_gyre_1_domain", "ftle_double_gyre_2_domains"]
src_type=[LCSAtts.RegularGrid, LCSAtts.NativeMesh]
src_type_str=["RegularGrid", "NativeMesh"]
aux_grid=[LCSAtts.NONE, LCSAtts.TwoDim]
aux_grid_str=["NoAuxGrid", "2DAuxGrid"]
TestSection("Basic FTLE function")
for i in range(len(databases)):
db=data_path("pics_test_data/%s.pics") %(databases[i])
tmpstr="Testing database = %s.pics" %(databases[i])
TestSection(tmpstr)
OpenDatabase(db)
# Replace the database from before with this one as a new plot can
# not be opened within the loop when using runtest. This issue is a
# bug.
ReplaceDatabase(db)
# DeleteAllPlots()
# AddPlot("Pseudocolor", "operators/LCS/velocity")
for j in range(len(src_type)):
tmpstr="Testing sample source = %s" %(src_type_str[j])
TestSection(tmpstr)
LCSAtts.sourceType = src_type[j] # NativeMesh, RegularGrid
for k in range(len(aux_grid)):
tmpstr="Testing auxiliary grid = %s" %(aux_grid_str[k])
TestSection(tmpstr)
LCSAtts.auxiliaryGrid = aux_grid[k] # None, TwoDim
SetOperatorOptions(LCSAtts, 0)
DrawPlots()
tmpstr="lcs_%s_%s_%s" %(databases[i], src_type_str[j], aux_grid_str[k])
Test(tmpstr)
# FTLE with smallest exponent
LCSAtts.eigenComponent = LCSAtts.Smallest # Smallest, Intermediate, Largest, PosShearVector, NegShearVector, PosLambdaShearVector, NegLambdaShearVector
SetOperatorOptions(LCSAtts, 0)
DrawPlots()
tmpstr="lcs_%s_%s_%s_Smallest" %(databases[i], src_type_str[j], aux_grid_str[k])
Test(tmpstr)
# FTLE with left Cauchy Green Tensor
LCSAtts.cauchyGreenTensor = LCSAtts.Left # Left, Right
LCSAtts.eigenComponent = LCSAtts.Largest # Smallest, Intermediate, Largest, PosShearVector, NegShearVector, PosLambdaShearVector, NegLambdaShearVector
SetOperatorOptions(LCSAtts, 0)
DrawPlots()
tmpstr="lcs_%s_%s_%s_Left" %(databases[i], src_type_str[j], aux_grid_str[k])
Test(tmpstr)
# FTLE with eigen value
LCSAtts.operationType = LCSAtts.EigenValue # IntegrationTime, ArcLength, AverageDistanceFromSeed, EigenValue, EigenVector, Lyapunov
LCSAtts.cauchyGreenTensor = LCSAtts.Right # Left, Right
LCSAtts.eigenComponent = LCSAtts.Largest # Smallest, Intermediate, Largest, PosShearVector, NegShearVector, PosLambdaShearVector, NegLambdaShearVector
SetOperatorOptions(LCSAtts, 0)
DrawPlots()
tmpstr="lcs_%s_%s_%s_EigenValue" %(databases[i], src_type_str[j], aux_grid_str[k])
Test(tmpstr)
Exit()
# 1 processor:
#wo/aux grid
# Native 1 -0.04343 - 1.066 190 zeros # Match
# Rect 1 -0.04343 - 1.066 190 zeros #
# Native 2 -0.04343 - 1.066 193 zeros # Match
# Rect 2 -0.04343 - 1.233 190 zeros # Match with ghost cells but for a few cells.
# Errors in the domain boundary gradients
#w/aux grid
# Native 1 0.004539 - 1.396 304 exited / 680 zeros # Match
# Rect 1 0.004539 - 1.396 304 exited / 680 zeros #
# Native 2 0.004539 - 1.396 308 exited / 690 zeros # Match
# Rect 2 0.004539 - 1.396 304 exited / 680 zeros #
# 4 processors:
#wo/aux grid
# Native 1 -0.04343 - 1.066 190 zeros # Match
# Rect 1 -0.04343 - 1.066 190 zeros #
# Native 2 -0.04343 - 1.066 193 zeros # Match
# Rect 2 -0.04343 - 1.233 190 zeros # Match with ghost cells but for a few cells.
#w/aux grid
# Native 1 0.004539 - 1.396 304 exited / 680 zeros # Match
# Rect 1 0.004539 - 1.396 304 exited / 680 zeros #
# Native 2 0.004539 - 1.396 308 exited / 690 zeros # Match
# Rect 2 0.004539 - 1.396 304 exited / 680 zeros #
| 31.18232 | 152 | 0.709426 |
73dec6057c60d6406f21eb32b61317c770f12e4c | 1,444 | py | Python | src/external/jemalloc.py | cychan-lbnl/devastator | a23bb21f9da5841fa2036edc68f3a89a2241831b | [
"BSD-3-Clause-LBNL"
] | 1 | 2022-03-16T08:11:02.000Z | 2022-03-16T08:11:02.000Z | src/external/jemalloc.py | cychan-lbnl/devastator | a23bb21f9da5841fa2036edc68f3a89a2241831b | [
"BSD-3-Clause-LBNL"
] | null | null | null | src/external/jemalloc.py | cychan-lbnl/devastator | a23bb21f9da5841fa2036edc68f3a89a2241831b | [
"BSD-3-Clause-LBNL"
] | null | null | null | import shlex
@brutal.rule(caching='file')
def jemalloc_source():
url = "https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2"
tgz = brutal.download(url)
return brutal.untar(tgz)
@brutal.rule(caching='file')
@brutal.coroutine
def jemalloc_context():
src_dir = yield jemalloc_source()
build_dir = brutal.mkpath('jemalloc-build')
brutal.os.mkdir(build_dir)
install_dir = brutal.mkpath('jemalloc-install')
cc = c_compiler()
cxx = cxx_compiler()
cg_flags = base_cg_flags()
configure = brutal.os.path.join(src_dir, 'configure')
configure = [configure, '--prefix', install_dir, '--with-jemalloc-prefix=je_', '--enable-cxx', '--disable-fill', '--disable-shared']
yield brutal.process(configure, cwd=build_dir, env_add={'CC':cc, 'CFLAGS':cg_flags, 'CXX':cxx, 'CXXFLAGS': cxx14_flags() + cg_flags})
yield brutal.process(['make','install'], cwd=build_dir)
jemalloc_config = brutal.os.path.join(install_dir, 'bin', 'jemalloc-config')
pp_misc = yield brutal.process([jemalloc_config, '--cppflags'])
pp_misc = shlex.split(pp_misc)
lib_misc = yield brutal.process([jemalloc_config, '--libs'])
lib_misc = shlex.split(lib_misc)
yield CodeContext(
compiler = cxx,
pp_misc = pp_misc,
#lib_misc = ['-L'+brutal.os.path.join(install_dir, 'lib'), '-ljemalloc'] + lib_misc
lib_misc = [brutal.os.path.join(install_dir, 'lib', 'libjemalloc.a')] + lib_misc
)
| 33.581395 | 135 | 0.698061 |
73dec81bad9814ee479aeb16c7b10c65c3486981 | 1,496 | py | Python | setup.py | jdidion/htsget-server | cd00c4165aadb66b21be4a0e5dbaaaddfa97ee64 | [
"CC0-1.0"
] | 1 | 2019-08-22T16:51:23.000Z | 2019-08-22T16:51:23.000Z | setup.py | jdidion/htsget-server | cd00c4165aadb66b21be4a0e5dbaaaddfa97ee64 | [
"CC0-1.0"
] | null | null | null | setup.py | jdidion/htsget-server | cd00c4165aadb66b21be4a0e5dbaaaddfa97ee64 | [
"CC0-1.0"
] | null | null | null | """
Build htsget-server.
"""
import sys
from setuptools import setup, find_packages
# Define install and test requirements based on python version
version_info = sys.version_info
if sys.version_info < (3, 6):
sys.stdout.write("At least Python 3.6 is required.\n")
sys.exit(1)
with open('README.md') as f:
README = f.read()
with open('LICENSE') as f:
LICENSE = f.read()
setup(
name='atropos',
version='0.1.0',
author='John Didion',
author_email='github@didion.net',
url='https://github.com/jdidion/htsget-server',
description='Reference implementation of an htsget server.',
long_description=README,
license=LICENSE,
packages=find_packages(),
install_requires=[
'ngsindex'
],
    tests_require=['pytest'],
entry_points={
'console_scripts': [
'htsget-server=htsgetserver.__main__:main'
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"License :: OSI Approved :: MIT License",
"License :: Public Domain",
"Natural Language :: English",
"Programming Language :: Cython",
"Programming Language :: Python :: 3.6"
]
)
| 25.355932 | 64 | 0.639037 |
73dee999b8fe71d0515f80076913941dcabb76d2 | 8,742 | py | Python | tests/calendar_cli/model/test_event.py | mogproject/calendar-cli | 095fe4cfe76bc1890742d22accef48a5f9f6ca45 | [
"Apache-2.0"
] | 12 | 2015-10-14T09:05:48.000Z | 2021-06-27T11:32:48.000Z | tests/calendar_cli/model/test_event.py | mogproject/calendar-cli | 095fe4cfe76bc1890742d22accef48a5f9f6ca45 | [
"Apache-2.0"
] | 18 | 2015-10-14T06:20:26.000Z | 2017-01-23T23:33:01.000Z | tests/calendar_cli/model/test_event.py | mogproject/calendar-cli | 095fe4cfe76bc1890742d22accef48a5f9f6ca45 | [
"Apache-2.0"
] | 4 | 2016-10-12T18:35:24.000Z | 2019-12-31T17:13:02.000Z | # encoding: utf-8
from __future__ import division, print_function, absolute_import, unicode_literals
from datetime import datetime
import pytz
from mog_commons import unittest
from calendar_cli.model import EventTime, Event
from calendar_cli.i18n import MSG_ALL_DAY, MSG_WEEK_DAY
class TestEventTime(unittest.TestCase):
tz_tokyo = pytz.timezone('Asia/Tokyo')
d0 = datetime(2015, 10, 17, 0, 0, 0, 0, pytz.timezone('Asia/Tokyo'))
d1 = datetime(2015, 10, 18, 0, 0, 0, 0, pytz.timezone('Asia/Tokyo'))
d2 = datetime(2015, 10, 17, 0, 0, 0, 0, pytz.utc)
d3 = datetime(2016, 10, 17, 0, 0, 0, 0, pytz.utc)
d4 = datetime(2015, 10, 17, 12, 34, 56, 0, pytz.timezone('Asia/Tokyo'))
d5 = datetime(2015, 10, 18, 12, 34, 56, 0, pytz.timezone('Asia/Tokyo'))
d6 = datetime(2015, 10, 17, 3, 34, 56, 0, pytz.utc)
d7 = datetime(2015, 10, 17, 3, 34, 57, 0, pytz.utc)
def test_init(self):
t = EventTime(False, self.d1)
self.assertFalse(t.has_time)
self.assertEqual(t.datetime_tz, self.d1)
t = EventTime(True, self.d5)
self.assertTrue(t.has_time)
self.assertEqual(t.datetime_tz, self.d5)
def test_init_error(self):
self.assertRaisesRegexp(AssertionError, 'datetime_tz must be timezone-aware',
EventTime, True, datetime(2015, 10, 17, 12, 34, 56))
def test_cmp(self):
ts1 = [EventTime(False, t) for t in [self.d0, self.d1, self.d2, self.d3]]
ts2 = [EventTime(True, t) for t in [self.d4, self.d5, self.d6, self.d7]]
ts = ts1 + ts2
self.assertEqual(sorted(ts), [ts[0], ts[2], ts[1], ts[3], ts[6], ts[4], ts[7], ts[5]])
def test_to_short_summary(self):
self.assertEqual(EventTime(False, self.d0).to_short_summary(), None)
self.assertEqual(EventTime(False, self.d2).to_short_summary(), None)
self.assertEqual(EventTime(True, self.d4).to_short_summary(), '12:34')
self.assertEqual(EventTime(True, self.d6).to_short_summary(), '03:34')
def test_to_long_summary(self):
self.assertEqual(EventTime(False, self.d0).to_long_summary(), '2015-10-17 %s' % MSG_WEEK_DAY[5])
self.assertEqual(EventTime(False, self.d2).to_long_summary(), '2015-10-17 %s' % MSG_WEEK_DAY[5])
self.assertEqual(EventTime(True, self.d4).to_long_summary(), '2015-10-17 %s' % MSG_WEEK_DAY[5])
self.assertEqual(EventTime(True, self.d6).to_long_summary(), '2015-10-17 %s' % MSG_WEEK_DAY[5])
def test_to_dict(self):
self.assertEqual(EventTime(False, self.d0).to_dict(),
{'date': '2015-10-17', 'timeZone': 'Asia/Tokyo'})
self.assertEqual(EventTime(False, self.d2).to_dict(),
{'date': '2015-10-17', 'timeZone': 'UTC'})
self.assertEqual(EventTime(True, self.d4).to_dict(),
{'dateTime': '2015-10-17T12:34:56+09:00', 'timeZone': 'Asia/Tokyo'})
self.assertEqual(EventTime(True, self.d6).to_dict(),
{'dateTime': '2015-10-17T03:34:56+00:00', 'timeZone': 'UTC'})
def test_parse_dict(self):
self.assertEqual(EventTime.parse_dict({'date': '2015-10-17'}, 'Asia/Tokyo'),
EventTime(False, datetime(2015, 10, 17, 0, 0, 0, 0, self.tz_tokyo)))
self.assertEqual(EventTime.parse_dict({'dateTime': '2015-10-01T10:30:00+09:00'}, 'Asia/Tokyo'),
EventTime(True, datetime(2015, 10, 1, 10, 30, 0, 0, self.tz_tokyo)))
self.assertEqual(EventTime.parse_dict({'dateTime': '2015-10-01T10:30:00+09:00', 'timeZone': 'Asia/Tokyo'},
'Asia/Tokyo'),
EventTime(True, datetime(2015, 10, 1, 10, 30, 0, 0, self.tz_tokyo)))
self.assertEqual(EventTime.parse_dict({'dateTime': '2015-10-01T10:30:00Z'}, 'Asia/Tokyo'),
EventTime(True, datetime(2015, 10, 1, 19, 30, 0, 0, self.tz_tokyo)))
self.assertEqual(EventTime.parse_dict({'dateTime': '2015-10-01T10:30:00-04:00'}, 'Asia/Tokyo'),
EventTime(True, datetime(2015, 10, 1, 23, 30, 0, 0, self.tz_tokyo)))
# When the timezone doesn't match the timeZone field, ignore timeZone.
self.assertEqual(EventTime.parse_dict({'dateTime': '2015-10-01T10:30:00-04:00'}, 'UTC'),
EventTime(True, datetime(2015, 10, 1, 14, 30, 0, 0, pytz.utc)))
# When the timezone is missing in dateTime field, use timeZone filed to parse.
self.assertEqual(EventTime.parse_dict({'dateTime': '2015-10-01T10:30:00', 'timeZone': 'Asia/Tokyo'}, 'UTC'),
EventTime(True, datetime(2015, 10, 1, 1, 30, 0, 0, pytz.utc)))
class TestEvent(unittest.TestCase):
tz_la = pytz.timezone('America/Los_Angeles')
tz_tokyo = pytz.timezone('Asia/Tokyo')
t0 = EventTime(True, tz_la.localize(datetime(2015, 5, 28, 9, 0, 0, 0)))
t1 = EventTime(True, tz_la.localize(datetime(2015, 5, 28, 17, 0, 0, 0)))
t2 = EventTime(False, tz_tokyo.localize(datetime(2015, 10, 17, 0, 0, 0, 0)))
t3 = EventTime(False, tz_tokyo.localize(datetime(2015, 10, 18, 0, 0, 0, 0)))
t4 = EventTime(False, datetime(2015, 10, 18, 0, 0, 0, 0, pytz.utc))
e0 = Event(t0, t1, 'Google I/O 2015', 'Foo Bar', 'foo@example.com')
e1 = Event(t2, t3, 'あいうえお', None, None)
e2 = Event(t2, t3, 'あいうえお', None, 'foo@example.com')
e3 = Event(t0, t1, 'Google I/O 2016', 'Foo Bar', 'foo@example.com', 'Mountain View')
def test_init(self):
self.assertEqual(self.e0.start_time, self.t0)
self.assertEqual(self.e0.end_time, self.t1)
self.assertEqual(self.e0.summary, 'Google I/O 2015')
self.assertEqual(self.e0.creator_name, 'Foo Bar')
self.assertEqual(self.e0.creator_email, 'foo@example.com')
self.assertEqual(self.e1.start_time, self.t2)
self.assertEqual(self.e1.end_time, self.t3)
self.assertEqual(self.e1.summary, 'あいうえお')
self.assertEqual(self.e1.creator_name, None)
self.assertEqual(self.e1.creator_email, None)
def test_init_error(self):
"""todo"""
def test_to_format(self):
self.assertEqual(self.e0.to_format('[%T] %S%L%C'), '[09:00-17:00] Google I/O 2015 (Foo Bar)')
self.assertEqual(self.e1.to_format('[%T] %S%L%C'), '[%s] あいうえお' % MSG_ALL_DAY)
self.assertEqual(self.e2.to_format('[%T] %S%L%C'), '[%s] あいうえお (foo@example.com)' % MSG_ALL_DAY)
self.assertEqual(self.e3.to_format('[%T] %S%L%C'), '[09:00-17:00] Google I/O 2016 @Mountain View (Foo Bar)')
def test_to_long_summary(self):
self.assertEqual(self.e0.to_long_summary(), '2015-05-28 %s [09:00-17:00] Google I/O 2015' % MSG_WEEK_DAY[3])
self.assertEqual(self.e1.to_long_summary(), '2015-10-17 %s [%s] あいうえお' % (MSG_WEEK_DAY[5], MSG_ALL_DAY))
self.assertEqual(self.e2.to_long_summary(), '2015-10-17 %s [%s] あいうえお' % (MSG_WEEK_DAY[5], MSG_ALL_DAY))
def test_to_dict(self):
self.assertEqual(self.e0.to_dict(), {
'summary': 'Google I/O 2015',
'start': {'dateTime': '2015-05-28T09:00:00-07:00', 'timeZone': 'America/Los_Angeles'},
'end': {'dateTime': '2015-05-28T17:00:00-07:00', 'timeZone': 'America/Los_Angeles'},
'creator': {'displayName': 'Foo Bar', 'email': 'foo@example.com', },
})
self.assertEqual(self.e1.to_dict(), {
'summary': 'あいうえお',
'start': {'date': '2015-10-17', 'timeZone': 'Asia/Tokyo'},
'end': {'date': '2015-10-18', 'timeZone': 'Asia/Tokyo'},
})
self.assertEqual(self.e2.to_dict(), {
'summary': 'あいうえお',
'start': {'date': '2015-10-17', 'timeZone': 'Asia/Tokyo'},
'end': {'date': '2015-10-18', 'timeZone': 'Asia/Tokyo'},
'creator': {'email': 'foo@example.com'},
})
def test_parse_dict(self):
self.assertEqual(Event.parse_dict({
'summary': 'Google I/O 2015',
'start': {'dateTime': '2015-05-28T09:00:00-07:00', 'timeZone': 'America/Los_Angeles'},
'end': {'dateTime': '2015-05-28T17:00:00-07:00', 'timeZone': 'America/Los_Angeles'},
'creator': {'displayName': 'Foo Bar', 'email': 'foo@example.com', },
}, 'America/Los_Angeles'), self.e0)
self.assertEqual(Event.parse_dict({
'summary': 'あいうえお',
'start': {'date': '2015-10-17'},
'end': {'date': '2015-10-18'},
}, 'Asia/Tokyo'), self.e1)
self.assertEqual(Event.parse_dict({
'summary': 'あいうえお',
'start': {'date': '2015-10-17'},
'end': {'date': '2015-10-18'},
'creator': {'email': 'foo@example.com'},
}, 'Asia/Tokyo'), self.e2)
| 53.304878 | 116 | 0.597461 |
73defb0da4dc8164f81c8b2c90d2ee6f8689cc24 | 47 | py | Python | geokey_wegovnow/management/commands/__init__.py | ExCiteS/geokey-wegonow | 66df7e17fa3eb2d8da2e56e39236b019f98a2a08 | [
"MIT"
] | null | null | null | geokey_wegovnow/management/commands/__init__.py | ExCiteS/geokey-wegonow | 66df7e17fa3eb2d8da2e56e39236b019f98a2a08 | [
"MIT"
] | 2 | 2017-02-22T13:20:29.000Z | 2018-11-07T16:39:33.000Z | geokey_wegovnow/management/commands/__init__.py | ExCiteS/geokey-wegonow | 66df7e17fa3eb2d8da2e56e39236b019f98a2a08 | [
"MIT"
] | null | null | null | """All commands for the WeGovNow extension."""
| 23.5 | 46 | 0.723404 |
73defe04b20cb398fb6e5d985e57b4f8a7e89e50 | 2,375 | py | Python | sdks/python/appcenter_sdk/models/Symbols.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/appcenter_sdk/models/Symbols.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/appcenter_sdk/models/Symbols.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class Symbols(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
available = "available"
ignored = "ignored"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""Symbols - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Symbols):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.685393 | 80 | 0.550316 |
73df0f98ab8f4b7beafc278117fb89ae875e0922 | 2,320 | py | Python | python/pathilico/pygletelm/message.py | OtaYuji/pathilico | b28cb0d3db043de1f8a70723f8a8142b73b8c8bd | [
"Apache-2.0"
] | null | null | null | python/pathilico/pygletelm/message.py | OtaYuji/pathilico | b28cb0d3db043de1f8a70723f8a8142b73b8c8bd | [
"Apache-2.0"
] | null | null | null | python/pathilico/pygletelm/message.py | OtaYuji/pathilico | b28cb0d3db043de1f8a70723f8a8142b73b8c8bd | [
"Apache-2.0"
] | null | null | null | # Copyright
# 2019 Department of Dermatology, School of Medicine, Tohoku University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Message(object): # Mock class
def __init__(self, *args):
self.args = args
def __call__(self, *args, **kwargs):
return self
class MetaEq(type):
def __eq__(cls, other):
return getattr(cls, "identity", 0) == other
class MessageBase(object, metaclass=MetaEq):
identity = 0
__slots__ = tuple()
def __init__(self, *args, **kwargs):
d = dict(zip(self.__slots__, args), **kwargs)
for k in self.__slots__:
setattr(self, k, d[k])
def __eq__(self, other):
return self.identity == other
def __str__(self):
msg = "<[ {0} : {1} ]>".format(
getattr(self, "__class__"),
str(["{}: {}".format(k, getattr(self, k)) for k in self.__slots__])
)
return msg
class MetaMessage(type): # Meta class
id_count = 100
def __new__(meta, name, bases, class_dict):
new_cls_dict = dict()
for key, value in class_dict.items():
if isinstance(value, Message):
new_cls_dict[key] = type(
key, (MessageBase, ),
{"__slots__": value.args, "identity": meta.id_count}
)
meta.id_count += 1
cls = type.__new__(meta, name, bases, new_cls_dict)
return cls
class UnionMessage(object, metaclass=MetaMessage):
Ok = Message("value")
Err = Message("error")
WindowResized = Message("width", "height")
WindowApiChangeCursorIcon = Message("cursor")
Ok = UnionMessage.Ok
Err = UnionMessage.Err
WindowResized = UnionMessage.WindowResized
WindowApiChangeCursor = UnionMessage.WindowApiChangeCursorIcon
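# Illustrative usage sketch (not part of the original module): the classes
# generated by MetaMessage behave like tagged records that compare equal to
# their own message type through the identity-based __eq__ defined above.
#
#     msg = WindowResized(800, 600)
#     assert msg == WindowResized                    # identity comparison via MessageBase/MetaEq
#     assert (msg.width, msg.height) == (800, 600)   # slots filled positionally by __init__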
| 29.367089 | 79 | 0.632759 |
73df28ddff17914b28a2ac223a604cdc97e21b05 | 3,899 | py | Python | apps/Analyzer_Datasets/time_series/lib_utils_time.py | c-hydro/hat | 6342f87237ae9e71f05d54264bce97299ab4eb61 | [
"MIT"
] | 1 | 2022-03-21T03:30:43.000Z | 2022-03-21T03:30:43.000Z | apps/Analyzer_Datasets/time_series/lib_utils_time.py | c-hydro/hat | 6342f87237ae9e71f05d54264bce97299ab4eb61 | [
"MIT"
] | 3 | 2021-05-06T15:22:37.000Z | 2021-05-06T16:31:05.000Z | apps/Analyzer_Datasets/time_series/lib_utils_time.py | c-hydro/hat | 6342f87237ae9e71f05d54264bce97299ab4eb61 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------------------
# Libraries
import logging
import pandas as pd
from datetime import date
from lib_info_args import logger_name
# Logging
log_stream = logging.getLogger(logger_name)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to set time run
def set_time(time_run_args=None, time_run_file=None, time_format='%Y-%m-%d %H:%M',
time_run_file_start=None, time_run_file_end=None,
time_period=1, time_frequency='H', time_rounding='H', time_reverse=True):
log_stream.info(' ----> Set time period ... ')
if (time_run_file_start is None) and (time_run_file_end is None):
log_stream.info(' -----> Time info defined by "time_run" argument ... ')
if time_run_args is not None:
time_run = time_run_args
log_stream.info(' ------> Time ' + time_run + ' set by argument')
elif (time_run_args is None) and (time_run_file is not None):
time_run = time_run_file
log_stream.info(' ------> Time ' + time_run + ' set by user')
elif (time_run_args is None) and (time_run_file is None):
time_now = date.today()
time_run = time_now.strftime(time_format)
            log_stream.info(' ------> Time ' + time_run + ' set by system')
else:
log_stream.info(' ----> Set time period ... FAILED')
log_stream.error(' ===> Argument "time_run" is not correctly set')
raise IOError('Time type or format is wrong')
time_tmp = pd.Timestamp(time_run)
time_run = time_tmp.floor(time_rounding)
if time_period > 0:
time_range = pd.date_range(end=time_run, periods=time_period, freq=time_frequency)
else:
            log_stream.warning(' ===> TimePeriod must be greater than 0. TimePeriod is set automatically to 1')
            # time_now is only defined on the system-time branch above, so fall back to time_run here
            time_range = pd.DatetimeIndex([time_run], freq=time_frequency)
log_stream.info(' -----> Time info defined by "time_run" argument ... DONE')
elif (time_run_file_start is not None) and (time_run_file_end is not None):
log_stream.info(' -----> Time info defined by "time_start" and "time_end" arguments ... ')
time_run_file_start = pd.Timestamp(time_run_file_start)
time_run_file_start = time_run_file_start.floor(time_rounding)
time_run_file_end = pd.Timestamp(time_run_file_end)
time_run_file_end = time_run_file_end.floor(time_rounding)
time_now = date.today()
time_run = time_now.strftime(time_format)
time_run = pd.Timestamp(time_run)
time_run = time_run.floor(time_rounding)
time_range = pd.date_range(start=time_run_file_start, end=time_run_file_end, freq=time_frequency)
log_stream.info(' -----> Time info defined by "time_start" and "time_end" arguments ... DONE')
else:
log_stream.info(' ----> Set time period ... FAILED')
log_stream.error(' ===> Arguments "time_start" and/or "time_end" is/are not correctly set')
raise IOError('Time type or format is wrong')
if time_reverse:
time_range = time_range[::-1]
time_chunks = set_chunks(time_range)
log_stream.info(' ----> Set time period ... DONE')
return time_run, time_range, time_chunks
# -------------------------------------------------------------------------------------
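# Illustrative usage sketch (not part of the original module); the argument
# values below are examples only:
#
#     time_run, time_range, time_chunks = set_time(
#         time_run_args='2021-01-02 03:00',
#         time_period=24, time_frequency='H', time_rounding='H')
#     # time_range covers the 24 hours ending at 2021-01-02 03:00 (newest first
#     # when time_reverse=True), and time_chunks groups those hours by day.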
# -------------------------------------------------------------------------------------
# Method to set chunks
def set_chunks(time_range, time_period='D'):
time_groups = time_range.to_period(time_period)
time_chunks = time_range.groupby(time_groups)
return time_chunks
# -------------------------------------------------------------------------------------
| 41.042105 | 111 | 0.568351 |
73df3c0eb8fec2b7771ceae6cfb845c8b3b75d43 | 5,726 | py | Python | platon/beacon/main.py | shinnng/platon.py | 3197fac3839896290210da04dd0d45f0bdc731ce | [
"MIT"
] | null | null | null | platon/beacon/main.py | shinnng/platon.py | 3197fac3839896290210da04dd0d45f0bdc731ce | [
"MIT"
] | null | null | null | platon/beacon/main.py | shinnng/platon.py | 3197fac3839896290210da04dd0d45f0bdc731ce | [
"MIT"
] | null | null | null | from typing import (
Any,
Dict,
)
import requests
from platon.module import (
Module,
)
class Beacon(Module):
    def __getattribute__(self, name):
raise ModuleNotFoundError('This module is not available')
# def __init__(
# self,
# base_url: str,
# session: requests.Session = requests.Session(),
# ) -> None:
# self.base_url = base_url
# self.session = session
#
# def _make_get_request(self, endpoint: str) -> Dict[str, Any]:
# url = self.base_url + endpoint
# response = self.session.get(url)
# response.raise_for_status()
# return response.json()
#
# # [ BEACON endpoints ]
#
# def get_genesis(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/beacon/genesis"
# return self._make_get_request(endpoint)
#
# def get_hash_root(self, state_id: str = "head") -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/states/{state_id}/root"
# return self._make_get_request(endpoint)
#
# def get_fork_data(self, state_id: str = "head") -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/states/{state_id}/fork"
# return self._make_get_request(endpoint)
#
# def get_finality_checkpoint(self, state_id: str = "head") -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/states/{state_id}/finality_checkpoints"
# return self._make_get_request(endpoint)
#
# def get_validators(self, state_id: str = "head") -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/states/{state_id}/validators"
# return self._make_get_request(endpoint)
#
# def get_validator(
# self, validator_id: str, state_id: str = "head"
# ) -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/states/{state_id}/validators/{validator_id}"
# return self._make_get_request(endpoint)
#
# def get_validator_balances(self, state_id: str = "head") -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/states/{state_id}/validator_balances"
# return self._make_get_request(endpoint)
#
# def get_epoch_committees(self, state_id: str = "head") -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/states/{state_id}/committees"
# return self._make_get_request(endpoint)
#
# def get_block_headers(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/beacon/headers"
# return self._make_get_request(endpoint)
#
# def get_block_header(self, block_id: str) -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/headers/{block_id}"
# return self._make_get_request(endpoint)
#
# def get_block(self, block_id: str) -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/blocks/{block_id}"
# return self._make_get_request(endpoint)
#
# def get_block_root(self, block_id: str) -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/blocks/{block_id}/root"
# return self._make_get_request(endpoint)
#
# def get_block_attestations(self, block_id: str) -> Dict[str, Any]:
# endpoint = f"/platon/v1/beacon/blocks/{block_id}/attestations"
# return self._make_get_request(endpoint)
#
# def get_attestations(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/beacon/pool/attestations"
# return self._make_get_request(endpoint)
#
# def get_attester_slashings(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/beacon/pool/attester_slashings"
# return self._make_get_request(endpoint)
#
# def get_proposer_slashings(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/beacon/pool/proposer_slashings"
# return self._make_get_request(endpoint)
#
# def get_voluntary_exits(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/beacon/pool/voluntary_exits"
# return self._make_get_request(endpoint)
#
# # [ CONFIG endpoints ]
#
# def get_fork_schedule(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/config/fork_schedule"
# return self._make_get_request(endpoint)
#
# def get_spec(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/config/spec"
# return self._make_get_request(endpoint)
#
# def get_deposit_contract(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/config/deposit_contract"
# return self._make_get_request(endpoint)
#
# # [ DEBUG endpoints ]
#
# def get_beacon_state(self, state_id: str = "head") -> Dict[str, Any]:
# endpoint = f"/platon/v1/debug/beacon/states/{state_id}"
# return self._make_get_request(endpoint)
#
# def get_beacon_heads(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/debug/beacon/heads"
# return self._make_get_request(endpoint)
#
# # [ NODE endpoints ]
#
# def get_node_identity(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/node/identity"
# return self._make_get_request(endpoint)
#
# def get_peers(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/node/peers"
# return self._make_get_request(endpoint)
#
# def get_peer(self, peer_id: str) -> Dict[str, Any]:
# endpoint = f"/platon/v1/node/peers/{peer_id}"
# return self._make_get_request(endpoint)
#
# def get_health(self) -> int:
# endpoint = "/platon/v1/node/health"
# url = self.base_url + endpoint
# response = self.session.get(url)
# return response.status_code
#
# def get_version(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/node/version"
# return self._make_get_request(endpoint)
#
# def get_syncing(self) -> Dict[str, Any]:
# endpoint = "/platon/v1/node/syncing"
# return self._make_get_request(endpoint)
| 36.941935 | 85 | 0.634125 |
73df615a6efac0b92b33237a3a04b2320bdb4875 | 1,096 | py | Python | ui/app_list/PRESUBMIT.py | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 27 | 2016-04-27T01:02:03.000Z | 2021-12-13T08:53:19.000Z | ui/app_list/PRESUBMIT.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | 2 | 2017-03-09T09:00:50.000Z | 2017-09-21T15:48:20.000Z | ui/app_list/PRESUBMIT.py | maidiHaitai/haitaibrowser | a232a56bcfb177913a14210e7733e0ea83a6b18d | [
"BSD-3-Clause"
] | 17 | 2016-04-27T02:06:39.000Z | 2019-12-18T08:07:00.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for app_list.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.(cc|h)$',
)
EXCLUDE = (
# Objective C confuses everything.
r'.*/cocoa/.*',
)
def CheckChangeLintsClean(input_api, output_api):
"""Makes sure that the ui/app_list/ code is cpplint clean."""
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list = INCLUDE_CPP_FILES_ONLY, black_list = black_list)
return input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources, lint_filters=[], verbose_level=1)
def CheckChangeOnUpload(input_api, output_api):
results = []
results += CheckChangeLintsClean(input_api, output_api)
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
return results
| 33.212121 | 79 | 0.760949 |
73df7ab71f5006a9b92cb7673900fa39c40718f8 | 45,836 | py | Python | tests/curve_dags/test_data.py | kevin0120/airflow | fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd | [
"Apache-2.0"
] | 1 | 2021-03-03T07:00:02.000Z | 2021-03-03T07:00:02.000Z | tests/curve_dags/test_data.py | kevin0120/airflow | fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd | [
"Apache-2.0"
] | 36 | 2021-11-26T00:08:49.000Z | 2021-11-26T00:09:33.000Z | tests/curve_dags/test_data.py | kevin0120/airflow | fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd | [
"Apache-2.0"
] | 3 | 2020-06-30T02:38:17.000Z | 2022-01-19T06:14:08.000Z | curve_param = {
"torque_low_limit": 4.0,
"torque_up_limit_min": 26.0,
"torque_up_limit_max": 34.0,
"threshold": 0.30000001192092896,
"slope_threshold": 16.5905818939209,
"torque_threshold": 2.5,
"angle_threshold": 0.07500000298023224,
"angle_up_limit_min": 20.0,
"angle_up_limit_max": 114.0,
"sampling_time": 0.004999999888241291
}
curve = {
"cur_m": [
0.015539206012,
0.031082325196000002,
0.031082325196000002,
0.015539206012,
0.015539206012,
0.015539206012,
0.015539206012,
0.015539206012,
0.015539206012,
0.015539206012,
0.023310765604,
0.023310765604,
0.015539206012,
0.023310765604,
0.015539206012,
0.023310765604,
0.023310765604,
0.015539206012,
0.015539206012,
0.015539206012,
0.023310765604,
0.015539206012,
0.023310765604,
0.015539206012,
0.015539206012,
0.015539206012,
0.015539206012,
0.023310765604,
0.023310765604,
0.015539206012,
0.023310765604,
0.023310765604,
0.023310765604,
0.023310765604,
0.023310765604,
0.031082325196000002,
0.031082325196000002,
0.023310765604,
0.031082325196000002,
0.031082325196000002,
0.023310765604,
0.031082325196000002,
0.031082325196000002,
0.023310765604,
0.031082325196000002,
0.031082325196000002,
0.031082325196000002,
0.031082325196000002,
0.031082325196000002,
0.031082325196000002,
0.038853884788000004,
0.031082325196000002,
0.038853884788000004,
0.038853884788000004,
0.038853884788000004,
0.038853884788000004,
0.038853884788000004,
0.04662544438,
0.04662544438,
0.04662544438,
0.04662544438,
0.04662544438,
0.04662544438,
0.04662544438,
0.04662544438,
0.04662544438,
0.054397003972,
0.04662544438,
0.054397003972,
0.04662544438,
0.04662544438,
0.054397003972,
0.062168563564,
0.062168563564,
0.062168563564,
0.062168563564,
0.062168563564,
0.069940123156,
0.062168563564,
0.062168563564,
0.062168563564,
0.069940123156,
0.069940123156,
0.069940123156,
0.069940123156,
0.069940123156,
0.069940123156,
0.077711682748,
0.077711682748,
0.077711682748,
0.077711682748,
0.077711682748,
0.077711682748,
0.077711682748,
0.077711682748,
0.08548324234,
0.09325088876,
0.077711682748,
0.08548324234,
0.08548324234,
0.09325088876,
0.09325088876,
0.09325088876,
0.09325088876,
0.09325088876,
0.09325088876,
0.101022448352,
0.101022448352,
0.101022448352,
0.101022448352,
0.101022448352,
0.108794007944,
0.101022448352,
0.108794007944,
0.108794007944,
0.108794007944,
0.116565567536,
0.116565567536,
0.116565567536,
0.116565567536,
0.116565567536,
0.124337127128,
0.116565567536,
0.124337127128,
0.116565567536,
0.124337127128,
0.124337127128,
0.13210868672,
0.13210868672,
0.13210868672,
0.139880246312,
0.139880246312,
0.139880246312,
0.139880246312,
0.147651805904,
0.147651805904,
0.147651805904,
0.147651805904,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.16319492508800001,
0.16319492508800001,
0.16319492508800001,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.155423365496,
0.155423365496,
0.147651805904,
0.147651805904,
0.147651805904,
0.155423365496,
0.155423365496,
0.147651805904,
0.147651805904,
0.147651805904,
0.139880246312,
0.139880246312,
0.139880246312,
0.147651805904,
0.147651805904,
0.155423365496,
0.147651805904,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.155423365496,
0.16319492508800001,
0.155423365496,
0.155423365496,
0.16319492508800001,
0.17096648468,
0.16319492508800001,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.16319492508800001,
0.16319492508800001,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.16319492508800001,
0.155423365496,
0.17096648468,
0.17096648468,
0.17096648468,
0.16319492508800001,
0.16319492508800001,
0.155423365496,
0.17096648468,
0.17096648468,
0.16319492508800001,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.1787341311,
0.17096648468,
0.1787341311,
0.17096648468,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.17096648468,
0.17096648468,
0.17096648468,
0.17096648468,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.194277250284,
0.18650569069200001,
0.18650569069200001,
0.1787341311,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.18650569069200001,
0.1787341311,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.194277250284,
0.194277250284,
0.194277250284,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.1787341311,
0.1787341311,
0.1787341311,
0.1787341311,
0.18650569069200001,
0.1787341311,
0.18650569069200001,
0.1787341311,
0.18650569069200001,
0.18650569069200001,
0.18650569069200001,
0.194277250284,
0.194277250284,
0.20204880987600002,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.18650569069200001,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.20204880987600002,
0.20204880987600002,
0.20204880987600002,
0.194277250284,
0.20204880987600002,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.20204880987600002,
0.194277250284,
0.20204880987600002,
0.194277250284,
0.20204880987600002,
0.20204880987600002,
0.194277250284,
0.194277250284,
0.20204880987600002,
0.20204880987600002,
0.20204880987600002,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.20204880987600002,
0.194277250284,
0.194277250284,
0.194277250284,
0.194277250284,
0.20204880987600002,
0.20204880987600002,
0.194277250284,
0.20204880987600002,
0.20204880987600002,
0.20204880987600002,
0.20204880987600002,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.20204880987600002,
0.20204880987600002,
0.209820369468,
0.209820369468,
0.20204880987600002,
0.209820369468,
0.20204880987600002,
0.20204880987600002,
0.20204880987600002,
0.209820369468,
0.209820369468,
0.20204880987600002,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.21759192906,
0.21759192906,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.20204880987600002,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.21759192906,
0.209820369468,
0.209820369468,
0.21759192906,
0.21759192906,
0.21759192906,
0.209820369468,
0.21759192906,
0.21759192906,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.209820369468,
0.21759192906,
0.21759192906,
0.225363488652,
0.21759192906,
0.233135048244,
0.21759192906,
0.225363488652,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.209820369468,
0.21759192906,
0.209820369468,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.225363488652,
0.21759192906,
0.225363488652,
0.225363488652,
0.225363488652,
0.225363488652,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.233135048244,
0.233135048244,
0.225363488652,
0.225363488652,
0.225363488652,
0.225363488652,
0.21759192906,
0.225363488652,
0.225363488652,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.233135048244,
0.225363488652,
0.233135048244,
0.225363488652,
0.233135048244,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.225363488652,
0.21759192906,
0.21759192906,
0.225363488652,
0.225363488652,
0.21759192906,
0.225363488652,
0.225363488652,
0.225363488652,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906,
0.225363488652,
0.225363488652,
0.21759192906,
0.225363488652,
0.225363488652,
0.225363488652,
0.233135048244,
0.233135048244,
0.225363488652,
0.233135048244,
0.225363488652,
0.225363488652,
0.225363488652,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.225363488652,
0.233135048244,
0.233135048244,
0.24090660783600001,
0.233135048244,
0.233135048244,
0.225363488652,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.225363488652,
0.233135048244,
0.24090660783600001,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.24090660783600001,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.24090660783600001,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.24090660783600001,
0.233135048244,
0.233135048244,
0.24090660783600001,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.248678167428,
0.248678167428,
0.248678167428,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.233135048244,
0.233135048244,
0.233135048244,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.248678167428,
0.24090660783600001,
0.24090660783600001,
0.233135048244,
0.233135048244,
0.233135048244,
0.233135048244,
0.24090660783600001,
0.248678167428,
0.248678167428,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.233135048244,
0.24090660783600001,
0.233135048244,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.248678167428,
0.24090660783600001,
0.248678167428,
0.248678167428,
0.248678167428,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.24090660783600001,
0.248678167428,
0.24090660783600001,
0.24090660783600001,
0.248678167428,
0.248678167428,
0.248678167428,
0.256445813848,
0.248678167428,
0.24090660783600001,
0.24090660783600001,
0.248678167428,
0.248678167428,
0.248678167428,
0.248678167428,
0.248678167428,
0.248678167428,
0.248678167428,
0.24090660783600001,
0.248678167428,
0.248678167428,
0.248678167428,
0.248678167428,
0.24090660783600001,
0.24090660783600001,
0.248678167428,
0.248678167428,
0.256445813848,
0.248678167428,
0.248678167428,
0.248678167428,
0.21759192906,
0.20204880987600002,
0.209820369468,
0.21759192906,
0.21759192906,
0.225363488652,
0.225363488652,
0.21759192906,
0.225363488652,
0.21759192906,
0.225363488652,
0.21759192906,
0.21759192906,
0.21759192906,
0.21759192906
],
"cur_w": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0.1,
0.1,
0.1,
0.2,
0.2,
0.2,
0.2,
0.30000000000000004,
0.30000000000000004,
0.30000000000000004,
0.30000000000000004,
0.4,
0.4,
0.5,
0.5,
0.5,
0.5,
0.6000000000000001,
0.6000000000000001,
0.7000000000000001,
0.7000000000000001,
0.8,
0.8,
0.8,
0.8,
0.9,
0.9,
1,
1,
1.1,
1.1,
1.2000000000000002,
1.2000000000000002,
1.2000000000000002,
1.2000000000000002,
1.3,
1.3,
1.4000000000000001,
1.4000000000000001,
1.5,
1.5,
1.6,
1.6,
1.7000000000000002,
1.7000000000000002,
1.9000000000000001,
1.9000000000000001,
2,
2,
2,
2,
2.2,
2.2,
2.3000000000000003,
2.3000000000000003,
2.4000000000000004,
2.4000000000000004,
2.5,
2.5,
2.7,
2.7,
2.8000000000000003,
2.8000000000000003,
2.9000000000000004,
2.9000000000000004,
3.1,
3.1,
3.2,
3.2,
3.4000000000000004,
3.4000000000000004,
3.5,
3.5,
3.6,
3.6,
3.8000000000000003,
3.8000000000000003,
4,
4,
4.1000000000000005,
4.1000000000000005,
4.3,
4.3,
4.4,
4.4,
4.6000000000000005,
4.6000000000000005,
4.7,
4.7,
4.9,
4.9,
5.1000000000000005,
5.1000000000000005,
5.2,
5.2,
5.4,
5.4,
5.6000000000000005,
5.6000000000000005,
5.800000000000001,
5.800000000000001,
6,
6,
6.2,
6.2,
6.300000000000001,
6.300000000000001,
6.5,
6.5,
6.7,
6.7,
6.9,
6.9,
7.1000000000000005,
7.1000000000000005,
7.300000000000001,
7.300000000000001,
7.5,
7.5,
7.7,
7.7,
7.9,
7.9,
8.200000000000001,
8.200000000000001,
8.4,
8.4,
8.6,
8.6,
8.8,
8.8,
9,
9,
9.3,
9.3,
9.5,
9.5,
9.8,
9.8,
10,
10,
10.200000000000001,
10.200000000000001,
10.5,
10.5,
10.8,
10.8,
11,
11,
11.200000000000001,
11.200000000000001,
11.5,
11.5,
11.700000000000001,
11.700000000000001,
12,
12,
12.200000000000001,
12.200000000000001,
12.5,
12.5,
12.700000000000001,
12.700000000000001,
13,
13,
13.3,
13.3,
13.600000000000001,
13.600000000000001,
13.9,
13.9,
14.200000000000001,
14.200000000000001,
14.4,
14.4,
14.700000000000001,
14.700000000000001,
15,
15,
15.3,
15.3,
15.600000000000001,
15.600000000000001,
15.9,
15.9,
16.2,
16.2,
16.400000000000002,
16.400000000000002,
16.8,
16.8,
17.1,
17.1,
17.400000000000002,
17.400000000000002,
17.7,
17.7,
18,
18,
18.3,
18.3,
18.7,
18.7,
19,
19,
19.3,
19.3,
19.6,
19.6,
20,
20,
20.3,
20.3,
20.6,
20.6,
21,
21,
21.3,
21.3,
21.6,
21.6,
22,
22,
22.3,
22.3,
22.700000000000003,
22.700000000000003,
23,
23,
23.400000000000002,
23.400000000000002,
23.8,
23.8,
24.200000000000003,
24.200000000000003,
24.5,
24.5,
24.900000000000002,
24.900000000000002,
25.3,
25.3,
25.700000000000003,
25.700000000000003,
26,
26,
26.400000000000002,
26.400000000000002,
26.700000000000003,
26.700000000000003,
27.1,
27.1,
27.5,
27.5,
27.900000000000002,
27.900000000000002,
28.3,
28.3,
28.8,
28.8,
29.200000000000003,
29.200000000000003,
29.6,
29.6,
29.900000000000002,
29.900000000000002,
30.3,
30.3,
30.700000000000003,
30.700000000000003,
31.1,
31.1,
31.5,
31.5,
32,
32,
32.4,
32.4,
32.800000000000004,
32.800000000000004,
33.300000000000004,
33.300000000000004,
33.7,
33.7,
34.1,
34.1,
34.6,
34.6,
35,
35,
35.4,
35.4,
35.9,
35.9,
36.300000000000004,
36.300000000000004,
36.7,
36.7,
37.2,
37.2,
37.6,
37.6,
38.1,
38.1,
38.6,
38.6,
39.1,
39.1,
39.6,
39.6,
40,
40,
40.400000000000006,
40.400000000000006,
40.900000000000006,
40.900000000000006,
41.300000000000004,
41.300000000000004,
41.800000000000004,
41.800000000000004,
42.300000000000004,
42.300000000000004,
42.800000000000004,
42.800000000000004,
43.300000000000004,
43.300000000000004,
43.900000000000006,
43.900000000000006,
44.300000000000004,
44.300000000000004,
44.800000000000004,
44.800000000000004,
45.2,
45.2,
45.7,
45.7,
46.2,
46.2,
46.800000000000004,
46.800000000000004,
47.300000000000004,
47.300000000000004,
47.800000000000004,
47.800000000000004,
48.300000000000004,
48.300000000000004,
48.800000000000004,
48.800000000000004,
49.400000000000006,
49.400000000000006,
49.900000000000006,
49.900000000000006,
50.400000000000006,
50.400000000000006,
50.900000000000006,
50.900000000000006,
51.400000000000006,
51.400000000000006,
51.900000000000006,
51.900000000000006,
52.5,
52.5,
53,
53,
53.6,
53.6,
54.2,
54.2,
54.7,
54.7,
55.2,
55.2,
55.7,
55.7,
56.300000000000004,
56.300000000000004,
56.800000000000004,
56.800000000000004,
57.400000000000006,
57.400000000000006,
58,
58,
58.6,
58.6,
59.1,
59.1,
59.7,
59.7,
60.2,
60.2,
60.800000000000004,
60.800000000000004,
61.400000000000006,
61.400000000000006,
62,
62,
62.6,
62.6,
63.2,
63.2,
63.800000000000004,
63.800000000000004,
64.3,
64.3,
64.9,
64.9,
65.5,
65.5,
66.10000000000001,
66.10000000000001,
66.7,
66.7,
67.3,
67.3,
67.9,
67.9,
68.60000000000001,
68.60000000000001,
69.2,
69.2,
69.8,
69.8,
70.4,
70.4,
71,
71,
71.60000000000001,
71.60000000000001,
72.3,
72.3,
73,
73,
73.60000000000001,
73.60000000000001,
74.10000000000001,
74.10000000000001,
74.8,
74.8,
75.4,
75.4,
76,
76,
76.7,
76.7,
77.4,
77.4,
78,
78,
78.7,
78.7,
79.30000000000001,
79.30000000000001,
79.9,
79.9,
80.60000000000001,
80.60000000000001,
81.30000000000001,
81.30000000000001,
81.9,
81.9,
82.7,
82.7,
83.4,
83.4,
84,
84,
84.60000000000001,
84.60000000000001,
85.30000000000001,
85.30000000000001,
86,
86,
86.7,
86.7,
87.5,
87.5,
88.10000000000001,
88.10000000000001,
88.80000000000001,
88.80000000000001,
89.4,
89.4,
90.2,
90.2,
90.9,
90.9,
91.60000000000001,
91.60000000000001,
92.30000000000001,
92.30000000000001,
93,
93,
93.7,
93.7,
94.4,
94.4,
95.10000000000001,
95.10000000000001,
95.80000000000001,
95.80000000000001,
96.60000000000001,
96.60000000000001,
97.4,
97.4,
98.10000000000001,
98.10000000000001,
98.80000000000001,
98.80000000000001,
99.5,
99.5,
100.2,
100.2,
101,
101,
101.80000000000001,
101.80000000000001,
102.5,
102.5,
103.2,
103.2,
103.9,
103.9,
104.7,
104.7,
105.5,
105.5,
106.2,
106.2,
107,
107,
107.80000000000001,
107.80000000000001,
108.5,
108.5,
109.30000000000001,
109.30000000000001,
110,
110,
110.80000000000001,
110.80000000000001,
111.60000000000001,
111.60000000000001,
112.4,
112.4,
113.2,
113.2,
113.9,
113.9,
114.7,
114.7,
115.60000000000001,
115.60000000000001,
116.4,
116.4,
117.2,
117.2,
117.9,
117.9,
118.7,
118.7,
119.5,
119.5,
120.30000000000001,
120.30000000000001,
121.2,
121.2,
122,
122,
122.80000000000001,
122.80000000000001,
123.60000000000001,
123.60000000000001,
124.30000000000001,
124.30000000000001,
125.2,
125.2,
126.10000000000001,
126.10000000000001,
126.9,
126.9,
127.7,
127.7,
128.5,
128.5,
129.4,
129.4,
130.3,
130.3,
131.1,
131.1,
131.9,
131.9,
132.70000000000002,
132.70000000000002,
133.6,
133.6,
134.5,
134.5,
135.3,
135.3,
136.20000000000002,
136.20000000000002,
137,
137,
137.9,
137.9,
138.70000000000002,
138.70000000000002,
139.6,
139.6,
140.5,
140.5,
141.4,
141.4,
142.3,
142.3,
143.1,
143.1,
144,
144,
145,
145,
145.8,
145.8,
146.70000000000002,
146.70000000000002,
147.6,
147.6,
148.5,
148.5,
149.4,
149.4,
150.3,
150.3,
151.1,
151.1,
151.6,
151.6,
151.8,
151.8,
151.9,
151.9,
152,
152,
152,
152,
152,
152,
152,
152
],
"cur_t": [
0.5,
1,
1.5,
2,
2.5,
3,
3.5,
4,
4.5,
5,
5.5,
6,
6.5,
7,
7.5,
8,
8.5,
9,
9.5,
10,
10.5,
11,
11.5,
12,
12.5,
13,
13.5,
14,
14.5,
15,
15.5,
16,
16.5,
17,
17.5,
18,
18.5,
19,
19.5,
20,
20.5,
21,
21.5,
22,
22.5,
23,
23.5,
24,
24.5,
25,
25.5,
26,
26.5,
27,
27.5,
28,
28.5,
29,
29.5,
30,
30.5,
31,
31.5,
32,
32.5,
33,
33.5,
34,
34.5,
35,
35.5,
36,
36.5,
37,
37.5,
38,
38.5,
39,
39.5,
40,
40.5,
41,
41.5,
42,
42.5,
43,
43.5,
44,
44.5,
45,
45.5,
46,
46.5,
47,
47.5,
48,
48.5,
49,
49.5,
50,
50.5,
51,
51.5,
52,
52.5,
53,
53.5,
54,
54.5,
55,
55.5,
56,
56.5,
57,
57.5,
58,
58.5,
59,
59.5,
60,
60.5,
61,
61.5,
62,
62.5,
63,
63.5,
64,
64.5,
65,
65.5,
66,
66.5,
67,
67.5,
68,
68.5,
69,
69.5,
70,
70.5,
71,
71.5,
72,
72.5,
73,
73.5,
74,
74.5,
75,
75.5,
76,
76.5,
77,
77.5,
78,
78.5,
79,
79.5,
80,
80.5,
81,
81.5,
82,
82.5,
83,
83.5,
84,
84.5,
85,
85.5,
86,
86.5,
87,
87.5,
88,
88.5,
89,
89.5,
90,
90.5,
91,
91.5,
92,
92.5,
93,
93.5,
94,
94.5,
95,
95.5,
96,
96.5,
97,
97.5,
98,
98.5,
99,
99.5,
100,
100.5,
101,
101.5,
102,
102.5,
103,
103.5,
104,
104.5,
105,
105.5,
106,
106.5,
107,
107.5,
108,
108.5,
109,
109.5,
110,
110.5,
111,
111.5,
112,
112.5,
113,
113.5,
114,
114.5,
115,
115.5,
116,
116.5,
117,
117.5,
118,
118.5,
119,
119.5,
120,
120.5,
121,
121.5,
122,
122.5,
123,
123.5,
124,
124.5,
125,
125.5,
126,
126.5,
127,
127.5,
128,
128.5,
129,
129.5,
130,
130.5,
131,
131.5,
132,
132.5,
133,
133.5,
134,
134.5,
135,
135.5,
136,
136.5,
137,
137.5,
138,
138.5,
139,
139.5,
140,
140.5,
141,
141.5,
142,
142.5,
143,
143.5,
144,
144.5,
145,
145.5,
146,
146.5,
147,
147.5,
148,
148.5,
149,
149.5,
150,
150.5,
151,
151.5,
152,
152.5,
153,
153.5,
154,
154.5,
155,
155.5,
156,
156.5,
157,
157.5,
158,
158.5,
159,
159.5,
160,
160.5,
161,
161.5,
162,
162.5,
163,
163.5,
164,
164.5,
165,
165.5,
166,
166.5,
167,
167.5,
168,
168.5,
169,
169.5,
170,
170.5,
171,
171.5,
172,
172.5,
173,
173.5,
174,
174.5,
175,
175.5,
176,
176.5,
177,
177.5,
178,
178.5,
179,
179.5,
180,
180.5,
181,
181.5,
182,
182.5,
183,
183.5,
184,
184.5,
185,
185.5,
186,
186.5,
187,
187.5,
188,
188.5,
189,
189.5,
190,
190.5,
191,
191.5,
192,
192.5,
193,
193.5,
194,
194.5,
195,
195.5,
196,
196.5,
197,
197.5,
198,
198.5,
199,
199.5,
200,
200.5,
201,
201.5,
202,
202.5,
203,
203.5,
204,
204.5,
205,
205.5,
206,
206.5,
207,
207.5,
208,
208.5,
209,
209.5,
210,
210.5,
211,
211.5,
212,
212.5,
213,
213.5,
214,
214.5,
215,
215.5,
216,
216.5,
217,
217.5,
218,
218.5,
219,
219.5,
220,
220.5,
221,
221.5,
222,
222.5,
223,
223.5,
224,
224.5,
225,
225.5,
226,
226.5,
227,
227.5,
228,
228.5,
229,
229.5,
230,
230.5,
231,
231.5,
232,
232.5,
233,
233.5,
234,
234.5,
235,
235.5,
236,
236.5,
237,
237.5,
238,
238.5,
239,
239.5,
240,
240.5,
241,
241.5,
242,
242.5,
243,
243.5,
244,
244.5,
245,
245.5,
246,
246.5,
247,
247.5,
248,
248.5,
249,
249.5,
250,
250.5,
251,
251.5,
252,
252.5,
253,
253.5,
254,
254.5,
255,
255.5,
256,
256.5,
257,
257.5,
258,
258.5,
259,
259.5,
260,
260.5,
261,
261.5,
262,
262.5,
263,
263.5,
264,
264.5,
265,
265.5,
266,
266.5,
267,
267.5,
268,
268.5,
269,
269.5,
270,
270.5,
271,
271.5,
272,
272.5,
273,
273.5,
274,
274.5,
275,
275.5,
276,
276.5,
277,
277.5,
278,
278.5,
279,
279.5,
280,
280.5,
281,
281.5,
282,
282.5,
283,
283.5,
284,
284.5,
285,
285.5,
286,
286.5,
287,
287.5,
288,
288.5,
289,
289.5,
290,
290.5,
291,
291.5,
292,
292.5,
293,
293.5,
294,
294.5,
295,
295.5,
296,
296.5,
297,
297.5,
298,
298.5,
299,
299.5,
300,
300.5,
301,
301.5,
302,
302.5,
303,
303.5,
304,
304.5,
305,
305.5,
306,
306.5,
307,
307.5,
308,
308.5,
309,
309.5,
310,
310.5,
311,
311.5,
312,
312.5,
313,
313.5,
314,
314.5,
315,
315.5,
316,
316.5,
317,
317.5,
318,
318.5,
319,
319.5,
320,
320.5,
321,
321.5,
322,
322.5,
323,
323.5,
324,
324.5,
325,
325.5,
326,
326.5,
327,
327.5,
328,
328.5,
329,
329.5,
330,
330.5,
331,
331.5,
332,
332.5,
333,
333.5,
334,
334.5,
335,
335.5,
336,
336.5,
337,
337.5,
338,
338.5,
339,
339.5,
340,
340.5,
341,
341.5,
342,
342.5,
343,
343.5,
344
]
}
cluster_data = {
"algorithm_version": 1,
"curve_template_groups_k": 1,
"curve_template_group_array": [
{
"template_centroid_index": 0,
"template_data_array": [
{
"template_angle": [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.019819820299744606,
0.04144144058227539,
0.061261262744665146,
0.08288288116455078,
0.10450451076030731,
0.12612612545490265,
0.14774774014949799,
0.16756756603717804,
0.18918919563293457,
0.2108108103275299,
0.23063063621520996,
0.2522522509098053,
0.27387386560440063,
0.29549548029899597,
0.315315306186676,
0.33693695068359375,
0.3567567467689514,
0.37837839126586914,
0.3981982171535492,
0.41981980204582214,
0.44144144654273987,
0.4630630910396576,
0.48288285732269287,
0.5045045018196106,
0.5261261463165283,
0.5477477312088013,
0.569369375705719,
0.5891892313957214,
0.6108108162879944,
0.6324324011802673,
0.6540540456771851,
0.6738739013671875,
0.6954954862594604,
0.7153153419494629,
0.7369369864463806,
0.7567567825317383,
0.7783783674240112,
0.800000011920929,
0.8216215968132019,
0.8432432413101196,
0.8630630970001221,
0.884684681892395,
0.906306266784668,
0.9279279112815857,
0.9495495557785034,
0.9693693518638611,
0.9981982111930847,
1.0
],
"template_torque": [
4.795041561126709,
6.183536529541016,
7.269715785980225,
7.903450012207031,
8.448033332824707,
8.843896865844727,
9.172185897827148,
9.437216758728027,
9.691280364990234,
9.950499534606934,
10.18355655670166,
10.387411117553711,
10.586793899536133,
10.82284164428711,
11.091927528381348,
11.359953880310059,
11.659576416015625,
11.960306167602539,
12.280977249145508,
12.595656394958496,
12.9249267578125,
13.279316902160645,
13.665048599243164,
14.09547233581543,
14.5075044631958,
14.879796028137207,
15.208659172058105,
15.522689819335938,
15.811670303344727,
16.069271087646484,
16.33078956604004,
16.603214263916016,
16.912200927734375,
17.238882064819336,
17.58001708984375,
17.919179916381836,
18.27083969116211,
18.658002853393555,
19.094768524169922,
19.555355072021484,
20.05722427368164,
20.53191566467285,
20.939228057861328,
21.25922966003418,
21.541889190673828,
21.82962417602539,
22.084102630615234,
22.314346313476562,
22.549890518188477,
22.820247650146484,
23.118629455566406,
23.422975540161133,
23.735721588134766,
24.05453872680664,
24.3846492767334,
24.74663734436035,
25.165857315063477,
25.60770034790039,
26.075511932373047,
26.52484130859375,
26.927270889282227,
27.2296199798584,
27.49077796936035,
27.742076873779297,
27.989971160888672,
28.173410415649414,
28.33307456970215,
28.513696670532227,
28.720355987548828,
28.97340965270996,
29.275951385498047,
29.591073989868164,
29.831607818603516,
26.774738311767578
],
"start_point": 6
}
]
}
]
}
| 20.34443 | 45 | 0.418012 |
73dfa520d48157846e90bf0a83321dc34ebe7176 | 123 | py | Python | webapp/admin.py | saintlyzero/Django-API-Assignment | 3e582f3388536df05c9c8e2ea2b6ab2fd6349a2d | [
"MIT"
] | 1 | 2019-11-20T10:00:56.000Z | 2019-11-20T10:00:56.000Z | webapp/admin.py | saintlyzero/Django-API-Assignment | 3e582f3388536df05c9c8e2ea2b6ab2fd6349a2d | [
"MIT"
] | null | null | null | webapp/admin.py | saintlyzero/Django-API-Assignment | 3e582f3388536df05c9c8e2ea2b6ab2fd6349a2d | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Employees
# Register your models here.
admin.site.register(Employees) | 24.6 | 32 | 0.821138 |
73dfb28d8fc1858b1963cfe4cbb25f4e3947caaf | 781 | py | Python | func_tests/tests/projects/testsmsquestionnaire/project_test_sms_questionnaire_data.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | func_tests/tests/projects/testsmsquestionnaire/project_test_sms_questionnaire_data.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | func_tests/tests/projects/testsmsquestionnaire/project_test_sms_questionnaire_data.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | from tests.logintests.login_data import USERNAME, PASSWORD
VALID_CREDENTIALS = {USERNAME: "quotareached@mailinator.com",
PASSWORD: "test123"}
PROJECT_NAME = "clinic3 test project"
UPGRADE_INSTRUCTION_MSG = u'You have reached your limit of 1000 free Submissions. Subscribe to a monthly subscription to continue submitting data for your questionnaires.'
MY_DATASENDERS_PAGE_TITLE = "Questionnaires - Data Senders"
MY_SUBJECTS_PAGE_TITLE = "Questionnaires - Identification Numbers"
REMINDERS_PAGE_TITLE = "Questionnaire - Reminders"
REVIEW_N_TEST_PAGE_TITLE = "Review & Test"
DATA_ANALYSIS_PAGE_TITLE = "Data Analysis"
SEND_MESSAGE_PAGE_TITLE = "Questionnaire - Messages"
SUBMISSION_LOG_PAGE_TITLE = "Submission Log"
WEB_SUBMISSION_PAGE_TITLE = "Web Submission" | 52.066667 | 171 | 0.805378 |
73dfb2f89834f89c63524cd67d09c68a6f584df0 | 2,344 | py | Python | HackerRank/DynamicProg_MaxArraySum.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | 5 | 2019-09-07T17:31:17.000Z | 2022-03-05T09:59:46.000Z | HackerRank/DynamicProg_MaxArraySum.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | null | null | null | HackerRank/DynamicProg_MaxArraySum.py | RafayAK/CodingPrep | 718eccb439db0f6e727806964766a40e8234c8a9 | [
"MIT"
] | 2 | 2019-09-07T17:31:24.000Z | 2019-10-28T16:10:52.000Z | '''Given an array of integers, find the subset of non-adjacent elements with the maximum sum. Calculate the sum of that subset.
For example, given an array arr = [-2, 1, 3, -4, 5], we have the following possible subsets:
Subset        Sum
[-2, 3, 5]      6
[-2, 3]         1
[-2, -4]       -6
[-2, 5]         3
[1, -4]        -3
[1, 5]          6
[3, 5]          8
Our maximum subset sum is 8.
Function Description
Complete the maxSubsetSum function in the editor below. It should return an integer representing the maximum subset sum for the given array.
maxSubsetSum has the following parameter(s):
arr: an array of integers
Return the maximum sum described in the statement.
Sample Input 0
5
3 7 4 6 5
Sample Output 0
13
Explanation 0
Our possible subsets include {3, 4, 5}, {3, 6}, {7, 6} and {7, 5}. The largest subset sum is 13, from the subset {7, 6}.
Sample Input 1
5
2 1 5 8 4
Sample Output 1
11
Explanation 1
Our subsets include {2, 5, 4}, {2, 8}, {1, 8} and {1, 4}. The maximum subset sum is 11, from the subset {2, 5, 4}.
Sample Input 2
5
3 5 -7 8 10
Sample Output 2
15
Explanation 2
Our subsets include {3, -7, 10}, {3, 8}, {5, 8} and {5, 10}. The maximum subset sum is 15, from the subset {5, 10}.
'''
# !/bin/python3
import math
import os
import random
import re
import sys
# Complete the maxSubsetSum function below.
'''
Intro: Since we know it's DP, we can solve the problem by solving subproblems of smaller size.
Because of the condition that no two adjacent elements can be picked, for each element we
either take it and skip its neighbour, or skip it and solve the remaining subproblem.
We can solve this problem in linear time and constant space ;)
Idea: store solutions for the problems of size i-2 and i-1, where i is the size of the subproblem.
The solution for the problem of size i is the maximum of:
1- the solution for problem i-1, or
2- the solution for problem i-2, or
3- the solution for problem i-2 + arr[i], or
4- arr[i] on its own.
Iterate for every i. The code below seeds both running solutions with -infinity (the problems of
size -2 and -1), so arrays containing only negative numbers are handled correctly.
'''
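# Worked trace of the recurrence on Sample Input 0, arr = [3, 7, 4, 6, 5]:
#   start:      a = -inf, b = -inf
#   value = 3:  a = -inf, b = max(-inf, -inf + 3, -inf, 3) = 3
#   value = 7:  a = 3,    b = max(-inf, -inf + 7, 3, 7)    = 7
#   value = 4:  a = 7,    b = max(3, 3 + 4, 7, 4)          = 7
#   value = 6:  a = 7,    b = max(7, 7 + 6, 7, 6)          = 13
#   value = 5:  a = 13,   b = max(7, 7 + 5, 13, 5)         = 13
# Final answer: b = 13, matching Sample Output 0.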
def maxSubsetSum(arr):
a = float('-inf') # store solution for set of size i-2
b = float('-inf') # store solution for set of size i-1
for value in arr:
a,b = b, max(a, a+value, b, value)
return b
if __name__ == '__main__':
#fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
arr = list(map(int, input().rstrip().split()))
res = maxSubsetSum(arr)
print(res)
# fptr.write(str(res) + '\n')
#
# fptr.close()
| 21.504587 | 140 | 0.682167 |
73dff2c940155d5cc3d594399f925533d8ecf12b | 10,772 | py | Python | doc/conf.py | glotzerlab/signac | a03a6d05d0f994ac9c4d5353533883e49cf6b386 | [
"BSD-3-Clause"
] | 100 | 2019-01-31T01:37:20.000Z | 2022-03-29T10:35:34.000Z | doc/conf.py | glotzerlab/signac | a03a6d05d0f994ac9c4d5353533883e49cf6b386 | [
"BSD-3-Clause"
] | 607 | 2019-01-31T14:08:17.000Z | 2022-03-31T21:51:48.000Z | doc/conf.py | glotzerlab/signac | a03a6d05d0f994ac9c4d5353533883e49cf6b386 | [
"BSD-3-Clause"
] | 31 | 2019-01-31T14:36:50.000Z | 2022-03-14T03:48:32.000Z | """signac documentation build configuration file.
This file is executed with the current directory set to its containing dir.
Note that not all possible configuration values are present in this
autogenerated file.
All configuration values have a default; values that are commented out serve
to show the default.
"""
import sys
from unittest.mock import MagicMock
import sphinx_rtd_theme
class Mock(MagicMock):
"""Mocks modules and their contained objects."""
@classmethod
def __getattr__(cls, name):
if name == "_mock_methods":
return []
if name == "version_tuple":
return (3, 0)
return Mock()
MOCK_MODULES = ["pymongo", "gridfs", "mpi4py"]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
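# Installing these mocks in sys.modules lets Sphinx autodoc import the package on a
# docs builder where pymongo, gridfs, and mpi4py are not installed: attribute access
# on a mocked module simply returns another Mock instead of raising ImportError.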
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"IPython.sphinxext.ipython_console_highlighting",
"nbsphinx",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinxcontrib.programoutput",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "signac"
copyright = "The Regents of the University of Michigan"
author = (
"Carl S. Adorf, Vyas Ramasubramani, Bradley D. Dice, "
"Michael M. Henry, Brandon Butler, Paul M. Dodd, Sharon C. Glotzer"
)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.7.0"
# The full version, including alpha/beta/rc tags.
release = "1.7.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Show the [+] icon to expand headings in the sidebar. Default is True.
"collapse_navigation": False,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = [
"css/signac-theme.css",
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "signacdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements: dict = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "signac.tex", "signac Documentation", "Carl Simon Adorf", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "signac", "signac Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"signac",
"signac Documentation",
author,
"signac",
"A simple data management framework.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pymongo": ("https://pymongo.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"h5py": ("https://docs.h5py.org/en/stable/", None),
"zarr": ("https://zarr.readthedocs.io/en/stable", None),
"redis": ("https://redis-py.readthedocs.io/en/stable/", None),
"numcodecs": ("https://numcodecs.readthedocs.io/en/stable/", None),
}
| 32.841463 | 85 | 0.702748 |
73dff4f34405910632f481fc2e5a3b3700c44449 | 3,842 | py | Python | PythonProjects/99-CapstoneProject-202020-DavidAndVIbha/libs/rosebot.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | PythonProjects/99-CapstoneProject-202020-DavidAndVIbha/libs/rosebot.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | PythonProjects/99-CapstoneProject-202020-DavidAndVIbha/libs/rosebot.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | """
Capstone Team Project. Code to run on a ROBOT (NOT a laptop).
This code defines the RoseBot class (the top-level class for a robot).
In the code that you write for making a robot do things,
you should construct a RoseBot object and then use it as in this example:
------------------------------------------------
import libs.rosebot as rb
def main():
robot = rb.RoseBot()
robot.drive_system.go(100, -40)
robot.touch_sensor.wait_until_pressed()
etc
------------------------------------------------
Authors: Your professors (for the framework).
Winter term, 2019-2020.
"""
###############################################################################
# STUDENTS: *** READ this file. Understanding it is CRITICAL. ***
# *** But do NOT change ANYTHING in this module. ***
###############################################################################
# -----------------------------------------------------------------------------
# Note below how to write an IMPORT statement
# that imports a module that is in the LIBS sub-folder.
# -----------------------------------------------------------------------------
import libs.rosebot_drive_system as drive
import libs.rosebot_touch_sensor as touch
import libs.rosebot_arm_and_claw as arm
import libs.rosebot_leds as leds
import libs.rosebot_brick_buttons as bb
import libs.rosebot_remote_control as rc
import libs.rosebot_color_sensor as color
import libs.rosebot_infrared_proximity_sensor as proximity
import libs.rosebot_camera_sensor as camera
import libs.rosebot_sound as sound
import libs.rosebot_beacon_sensor as b_sensor
# -----------------------------------------------------------------------------
# The following import OPTIONAL modules, which you will implement or not
# as you choose. They contain "stubs" so they will not break any code as is.
# -----------------------------------------------------------------------------
import libs.rosebot_beacon_seeker as b_seeker
import libs.rosebot_line_follower as follower
import libs.rosebot_camera_tracker as tracker
###############################################################################
# RoseBot.
###############################################################################
class RoseBot(object):
""" The top-level class for making a robot do things. """
def __init__(self):
"""
Constructs instances of each of the sub-systems of a Snatch3r robot
and sets instance variables to them.
"""
self.drive_system = drive.DriveSystem("B", "C")
self.touch_sensor = touch.TouchSensor(1)
self.arm_and_claw = arm.ArmAndClaw(self.touch_sensor)
self.leds = leds.Leds()
self.brick_buttons = bb.BrickButtons()
self.remote_control = rc.RemoteControl()
self.color_sensor = color.ColorSensor(3)
self.infrared_proximity_sensor = proximity.InfraredProximitySensor(4)
self.camera = camera.CameraSensor(2)
self.sound = sound.Sound()
self.beacon_sensor = b_sensor.BeaconSensor(4, 1)
# ---------------------------------------------------------------------
# The following are OPTIONAL sub-systems, which you will implement
# or not as you choose. If you do not implement them,
# then of course these sub-systems will not work.
# ---------------------------------------------------------------------
self.beacon_seeker = b_seeker.BeaconSeeker(self.beacon_sensor,
self.drive_system)
self.line_follower = follower.LineFollower(self.color_sensor,
self.drive_system)
self.camera_tracker = tracker.CameraTracker(self.camera,
self.drive_system)
| 45.2 | 79 | 0.530713 |
73dffd013f513141da172fae9acad35f1d938d13 | 7,036 | py | Python | djstripe/migrations/0006_2_3.py | ExtraE113/dj-stripe | 1b50be13fc99b624388a005b8aa1e26c57392203 | [
"MIT"
] | 937 | 2017-06-04T18:44:20.000Z | 2022-03-27T07:28:32.000Z | djstripe/migrations/0006_2_3.py | ExtraE113/dj-stripe | 1b50be13fc99b624388a005b8aa1e26c57392203 | [
"MIT"
] | 969 | 2017-06-05T01:57:20.000Z | 2022-03-31T23:42:54.000Z | djstripe/migrations/0006_2_3.py | ExtraE113/dj-stripe | 1b50be13fc99b624388a005b8aa1e26c57392203 | [
"MIT"
] | 309 | 2017-06-12T03:18:10.000Z | 2022-03-29T17:05:18.000Z | # Generated by Django 3.0.5 on 2020-04-10 02:30
import django.db.models.deletion
from django.conf import settings
from django.db import migrations
import djstripe.enums
import djstripe.fields
class Migration(migrations.Migration):
dependencies = [("djstripe", "0001_initial")]
operations = [
migrations.RemoveField(
model_name="invoice",
name="closed",
),
migrations.RemoveField(
model_name="invoice",
name="forgiven",
),
migrations.RemoveField(
model_name="upcominginvoice",
name="closed",
),
migrations.RemoveField(
model_name="upcominginvoice",
name="forgiven",
),
migrations.AddField(
model_name="invoice",
name="status",
field=djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceStatus,
help_text="The status of the invoice, one of draft, open, paid, uncollectible, or void.",
max_length=13,
),
),
migrations.AddField(
model_name="upcominginvoice",
name="status",
field=djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceStatus,
help_text="The status of the invoice, one of draft, open, paid, uncollectible, or void.",
max_length=13,
),
),
migrations.RenameField(
model_name="subscription",
old_name="billing",
new_name="collection_method",
),
migrations.AddField(
model_name="invoice",
name="discount",
field=djstripe.fields.JSONField(
blank=True,
help_text="Describes the current discount applied to this subscription, if there is one. When billing, a discount applied to a subscription overrides a discount applied on a customer-wide basis.",
null=True,
),
),
migrations.AddField(
model_name="subscription",
name="default_payment_method",
field=djstripe.fields.StripeForeignKey(
blank=True,
help_text="The default payment method for the subscription. It must belong to the customer associated with the subscription. If not set, invoices will use the default payment method in the customer's invoice settings.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.PaymentMethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="subscription",
name="discount",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="subscription",
name="next_pending_invoice_item_invoice",
field=djstripe.fields.StripeDateTimeField(
blank=True,
help_text="Specifies the approximate timestamp on which any pending invoice items will be billed according to the schedule provided at pending_invoice_item_interval.",
null=True,
),
),
migrations.AddField(
model_name="subscription",
name="pending_invoice_item_interval",
field=djstripe.fields.JSONField(
blank=True,
help_text="Specifies an interval for how often to bill for any pending invoice items. It is analogous to calling Create an invoice for the given subscription at the specified interval.",
null=True,
),
),
migrations.AddField(
model_name="subscription",
name="pending_update",
field=djstripe.fields.JSONField(
blank=True,
help_text="If specified, pending updates that will be applied to the subscription once the latest_invoice has been paid.",
null=True,
),
),
migrations.AddField(
model_name="subscription",
name="start_date",
field=djstripe.fields.StripeDateTimeField(
blank=True,
help_text="Date when the subscription was first created. The date might differ from the created date due to backdating.",
null=True,
),
),
migrations.AddField(
model_name="upcominginvoice",
name="default_source",
field=djstripe.fields.PaymentMethodForeignKey(
help_text="The default payment source for the invoice. It must belong to the customer associated with the invoice and be in a chargeable state. If not set, defaults to the subscription's default source, if any, or to the customer's default source.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="upcoming_invoices",
to="djstripe.DjstripePaymentMethod",
),
),
migrations.AddField(
model_name="upcominginvoice",
name="discount",
field=djstripe.fields.JSONField(
blank=True,
help_text="Describes the current discount applied to this subscription, if there is one. When billing, a discount applied to a subscription overrides a discount applied on a customer-wide basis.",
null=True,
),
),
migrations.AddField(
model_name="invoice",
name="default_source",
field=djstripe.fields.PaymentMethodForeignKey(
blank=True,
help_text="The default payment source for the invoice. It must belong to the customer associated with the invoice and be in a chargeable state. If not set, defaults to the subscription's default source, if any, or to the customer's default source.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="invoices",
to="djstripe.DjstripePaymentMethod",
),
),
migrations.AddField(
model_name="subscription",
name="default_source",
field=djstripe.fields.PaymentMethodForeignKey(
blank=True,
help_text="The default payment source for the subscription. It must belong to the customer associated with the subscription and be in a chargeable state. If not set, defaults to the customer's default source.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="subscriptions",
to="djstripe.DjstripePaymentMethod",
),
),
]
| 42.131737 | 265 | 0.584565 |
73dfffedbdf487042aae7f145e1657c9691e0c82 | 16,308 | py | Python | easyml/helpers/model_builder.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | null | null | null | easyml/helpers/model_builder.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | null | null | null | easyml/helpers/model_builder.py | evancasey1/EasyML | 69f0c246cb7e1d6f7167eb504c30693088e703fd | [
"MIT"
] | 1 | 2020-10-25T08:14:33.000Z | 2020-10-25T08:14:33.000Z | import json
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from .constants import COLUMN_TYPE, ALGORITHM, ALGORITHM_NAME_MAP, ALGORITHM_TYPES
from mainsite.models import CsvFile, CsvFileData, MLModel
from .util import get_dataframe
from .util import get_match_acc
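# Illustrative call of the entry point below (hypothetical values, for reference only):
#   create_model(ALGORITHM.LINEAR_REGRESSION, file_id=1,
#                parameters={'linreg_fit_intercept': True, 'linreg_normalize': False})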
def create_model(algorithm_type_num, file_id, parameters):
file_data = CsvFileData.objects.filter(parent_file_id=file_id)\
.exclude(type=COLUMN_TYPE.IGNORE).order_by('column_num')
if file_data.count() == 0:
print("Error: No data for file {}".format(file_id))
return
input_data = file_data.filter(type=COLUMN_TYPE.INPUT).order_by('column_num')
target_data = file_data.filter(type=COLUMN_TYPE.TARGET)
alg_type = ALGORITHM_NAME_MAP[algorithm_type_num]
input_df = get_dataframe(input_data)
target_df = get_dataframe(target_data)
target_df = target_df.values.ravel()
algorithm_type_nums = [algorithm_type_num]
if algorithm_type_num == ALGORITHM.AUTOMATIC:
alg_method = parameters['auto_alg_type']
if alg_method == 'auto_classification':
algorithm_type_nums = ALGORITHM_TYPES.CLASSIFICATION
else:
algorithm_type_nums = ALGORITHM_TYPES.REGRESSION
best_acc = None
best_acc_type = None
best_model = None
temp_model = None
best_alg_type = alg_type
for alg_type_num in algorithm_type_nums:
if alg_type_num == ALGORITHM.LINEAR_REGRESSION:
temp_model = create_linear_regression_model(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.K_NEAREST_NEIGHBORS_CLASSIFIER:
temp_model = create_k_nearest_neighbors_classifier(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.K_NEAREST_NEIGHBORS_REGRESSOR:
temp_model = create_k_nearest_neighbors_regressor(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.LOGISTIC_REGRESSION:
temp_model = create_logistic_regression_model(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.NEAREST_CENTROID:
temp_model = create_nearest_centroid(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.LINEAR_DISCRIMINANT_ANALYSIS:
temp_model = create_linear_discriminant_analysis(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.DECISION_TREE_REGRESSOR:
temp_model = create_decision_tree_regressor(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.GAUSSIAN_NAIVE_BAYES:
temp_model = create_gaussian_naive_bayes(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.RANDOM_FOREST_CLASSIFIER:
temp_model = create_random_forest_classifier(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.RANDOM_FOREST_REGRESSOR:
temp_model = create_random_forest_regressor(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.SUPPORT_VECTOR_MACHINE_CLASSIFIER:
temp_model = create_support_vector_machine_classifier(input_df, target_df, parameters)
elif alg_type_num == ALGORITHM.SUPPORT_VECTOR_MACHINE_REGRESSOR:
temp_model = create_support_vector_machine_regressor(input_df, target_df, parameters)
if not best_acc or parameters['accuracy'] > best_acc:
best_acc = parameters['accuracy']
best_acc_type = parameters['accuracy_type']
best_model = temp_model
if algorithm_type_num == ALGORITHM.AUTOMATIC:
best_alg_type = 'Automatic_' + ALGORITHM_NAME_MAP[alg_type_num]
else:
best_alg_type = ALGORITHM_NAME_MAP[alg_type_num]
if best_model:
save_model(best_model, best_alg_type, algorithm_type_num, file_id, parameters, best_acc, best_acc_type)
def save_model(model, alg_type, algorithm_type_num, file_id, parameters, best_acc, best_acc_type):
parent_file = CsvFile.objects.get(id=file_id)
display_name = "{}_{}".format(parent_file.display_name, alg_type)
same_name_count = MLModel.objects.filter(name=parent_file.display_name, type=alg_type).count()
if same_name_count > 0:
display_name += ' ({})'.format(same_name_count)
display_name = display_name.replace(' ', '_')
model_obj = MLModel()
model_obj.type = alg_type
model_obj.type_num = algorithm_type_num
model_obj.data = model
model_obj.name = parent_file.display_name
model_obj.display_name = display_name
model_obj.parameters = json.dumps(parameters)
model_obj.parent_file = CsvFile.objects.get(id=file_id)
model_obj.accuracy = best_acc
model_obj.accuracy_type = best_acc_type
model_obj.save()
def create_linear_regression_model(input_df, target_df, parameters):
fit_intercept = bool(parameters.get('linreg_fit_intercept', False))
normalize = bool(parameters.get('linreg_normalize', False))
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
lin_reg = LinearRegression(fit_intercept=fit_intercept, normalize=normalize)
lin_reg_test = lin_reg.fit(x_train, y_train)
score = round(lin_reg_test.score(x_test, y_test), 4)
parameters['accuracy'] = score
parameters['accuracy_type'] = 'R^2'
lin_reg = lin_reg.fit(input_df, target_df)
return lin_reg
def create_logistic_regression_model(input_df, target_df, parameters):
logreg_penalty = parameters.get('logreg_penalty', 'l2')
logreg_c_select = parameters.get('logreg_C_select', 'custom')
logreg_fit_intercept = bool(parameters.get('logreg_fit_intercept', False))
if logreg_penalty == 'l1':
solver = 'liblinear'
else:
solver = 'lbfgs'
if logreg_c_select == 'custom':
logreg_c = int(parameters.get('logreg_C', 1.0))
logreg = LogisticRegression(C=logreg_c,
penalty=logreg_penalty,
fit_intercept=logreg_fit_intercept,
solver=solver)
else:
steps = [('std_scaler', StandardScaler())]
steps += [('log_regression', LogisticRegression(penalty=logreg_penalty,
multi_class='auto',
fit_intercept=logreg_fit_intercept,
solver=solver))]
pipe = Pipeline(steps)
param_grid = {'log_regression__C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000]}
logreg = GridSearchCV(estimator=pipe, param_grid=param_grid, cv=5)
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
clf_test = logreg.fit(x_train, y_train)
acc = get_match_acc(clf_test.predict(x_test), y_test)
parameters['accuracy'] = acc
parameters['accuracy_type'] = 'Accuracy [%]'
clf = logreg.fit(input_df, target_df)
return clf
def create_linear_discriminant_analysis(input_df, target_df, parameters):
solver = parameters.get('lda_solver', 'svd')
clf = LinearDiscriminantAnalysis(solver=solver)
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
clf_test = clf.fit(x_train, y_train)
acc = get_match_acc(clf_test.predict(x_test), y_test)
parameters['accuracy'] = acc
parameters['accuracy_type'] = 'Accuracy [%]'
clf.fit(input_df, target_df)
return clf
def create_decision_tree_regressor(input_df, target_df, parameters):
criterion = parameters.get('dtr_criterion', 'mse')
presort = bool(parameters.get('dtr_presort', False))
max_depth_choice = parameters.get('dtr_max_depth', 'none')
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
if max_depth_choice == 'none':
best_depth = None
elif max_depth_choice == 'custom':
best_depth = parameters.get('dtr_custom_depth', None)
else:
r2_lst = []
depth_iter = 5
depth_start = 10
depth_lst = []
for i in range(depth_iter):
depth_lst.append(depth_start**i)
# Select model with best r^2 and least depth
for depth in depth_lst:
dt_regr = DecisionTreeRegressor(max_depth=depth, presort=presort, criterion=criterion)
dt_regr.fit(x_train, y_train)
r2_lst.append(dt_regr.score(x_test, y_test))
depth_index, r2 = min(enumerate(r2_lst), key=lambda x: abs(x[1] - 1))
best_depth = depth_lst[depth_index]
dt_regr = DecisionTreeRegressor(max_depth=best_depth,
presort=presort,
criterion=criterion)
regr_test = dt_regr.fit(x_train, y_train)
score = round(regr_test.score(x_test, y_test), 4)
parameters['accuracy'] = score
parameters['accuracy_type'] = 'R^2'
dt_regr.fit(input_df, target_df)
return dt_regr
def create_gaussian_naive_bayes(input_df, target_df, parameters):
gnb = GaussianNB()
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
clf_test = gnb.fit(x_train, y_train)
acc = get_match_acc(clf_test.predict(x_test), y_test)
parameters['accuracy'] = acc
parameters['accuracy_type'] = 'Accuracy [%]'
gnb.fit(input_df, target_df)
return gnb
def create_random_forest_classifier(input_df, target_df, parameters):
criterion = parameters.get('rfc_criterion', 'gini')
n_estimators = int(parameters.get('rfc_n_estimators', 100))
depth_select = parameters.get('rfc_max_depth', 'none')
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
if depth_select == 'none':
best_depth = None
elif depth_select == 'custom':
best_depth = parameters.get('rfc_custom_depth', None)
else:
r2_lst = []
depth_iter = 5
depth_start = 10
depth_lst = []
for i in range(depth_iter):
depth_lst.append(depth_start**i)
        # Select model with the best out-of-bag accuracy (oob_score_) and least depth
for depth in depth_lst:
rf_clf = RandomForestClassifier(n_estimators=n_estimators,
max_depth=depth,
criterion=criterion,
oob_score=True)
rf_clf.fit(x_train, y_train)
r2_lst.append(rf_clf.oob_score_)
depth_index, r2 = min(enumerate(r2_lst), key=lambda x: abs(x[1] - 1))
best_depth = depth_lst[depth_index]
rf_clf = RandomForestClassifier(n_estimators=n_estimators,
max_depth=best_depth,
criterion=criterion)
clf_test = rf_clf.fit(x_train, y_train)
acc = get_match_acc(clf_test.predict(x_test), y_test)
parameters['accuracy'] = acc
parameters['accuracy_type'] = 'Accuracy [%]'
rf_clf.fit(input_df, target_df)
return rf_clf
def create_random_forest_regressor(input_df, target_df, parameters):
criterion = parameters.get('rfc_criterion', 'mse')
n_estimators = int(parameters.get('rfc_n_estimators', 100))
depth_select = parameters.get('rfc_max_depth', 'none')
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
if depth_select == 'none':
best_depth = None
elif depth_select == 'custom':
best_depth = parameters.get('rfc_custom_depth', None)
else:
r2_lst = []
depth_iter = 5
depth_start = 10
depth_lst = []
for i in range(depth_iter):
depth_lst.append(depth_start**i)
# Select model with best r^2 and least depth
for depth in depth_lst:
rf_regr_test = RandomForestRegressor(n_estimators=n_estimators,
max_depth=depth,
criterion=criterion,
oob_score=True)
rf_regr_test.fit(x_train, y_train)
r2_lst.append(rf_regr_test.oob_score_)
depth_index, r2 = min(enumerate(r2_lst), key=lambda x: abs(x[1] - 1))
best_depth = depth_lst[depth_index]
rf_regr = RandomForestRegressor(n_estimators=n_estimators,
max_depth=best_depth,
criterion=criterion)
regr_test = rf_regr.fit(x_train, y_train)
score = round(regr_test.score(x_test, y_test), 4)
parameters['accuracy'] = score
parameters['accuracy_type'] = 'R^2'
rf_regr.fit(input_df, target_df)
return rf_regr
def create_k_nearest_neighbors_classifier(input_df, target_df, parameters):
n_neighbors = int(parameters.get('nnc_k', 5))
weights = parameters.get('weights', 'uniform')
algorithm = parameters.get('algorithm', 'auto')
p = int(parameters.get('nnc_p', 2))
neighbors = KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm=algorithm,
weights=weights,
p=p)
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
clf_test = neighbors.fit(x_train, y_train)
acc = get_match_acc(clf_test.predict(x_test), y_test)
parameters['accuracy'] = acc
parameters['accuracy_type'] = 'Accuracy [%]'
neighbors.fit(input_df, target_df)
return neighbors
def create_k_nearest_neighbors_regressor(input_df, target_df, parameters):
n_neighbors = int(parameters.get('nnc_k', 5))
weights = parameters.get('weights', 'uniform')
algorithm = parameters.get('algorithm', 'auto')
p = int(parameters.get('nnc_p', 2))
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
neighbors = KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm=algorithm,
weights=weights,
p=p)
regr_test = neighbors.fit(x_train, y_train)
score = round(regr_test.score(x_test, y_test), 4)
parameters['accuracy'] = score
parameters['accuracy_type'] = 'R^2'
neighbors.fit(input_df, target_df)
return neighbors
def create_nearest_centroid(input_df, target_df, parameters):
clf = NearestCentroid()
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
clf_test = clf.fit(x_train, y_train)
acc = get_match_acc(clf_test.predict(x_test), y_test)
parameters['accuracy'] = acc
parameters['accuracy_type'] = 'Accuracy [%]'
clf.fit(input_df, target_df)
return clf
def create_support_vector_machine_classifier(input_df, target_df, parameters):
kernel = parameters.get('svc_kernel', 'rbf')
degree = int(parameters.get('svc_degree', 3))
c = parameters.get('svc_C', 1.0)
clf = svm.SVC(kernel=kernel, degree=degree, C=c)
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
clf_test = clf.fit(x_train, y_train)
acc = get_match_acc(clf_test.predict(x_test), y_test)
parameters['accuracy'] = acc
parameters['accuracy_type'] = 'Accuracy [%]'
clf.fit(input_df, target_df)
return clf
def create_support_vector_machine_regressor(input_df, target_df, parameters):
kernel = parameters.get('svr_kernel', 'rbf')
degree = int(parameters.get('svr_degree', 3))
svm_reg = svm.SVR(kernel=kernel, degree=degree)
x_train, x_test, y_train, y_test = train_test_split(input_df, target_df)
regr_test = svm_reg.fit(x_train, y_train)
score = round(regr_test.score(x_test, y_test), 4)
parameters['accuracy'] = score
parameters['accuracy_type'] = 'R^2'
svm_reg.fit(input_df, target_df)
return svm_reg
| 37.40367 | 111 | 0.67194 |
73e016bee0bbdbd050f1186b02c0f18ced34620a | 93,863 | py | Python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2019_08_01/operations/_app_service_certificate_orders_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2019_08_01/operations/_app_service_certificate_orders_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2019_08_01/operations/_app_service_certificate_orders_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_validate_purchase_information_request(
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/validateCertificateRegistrationInformation')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_by_resource_group_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_list_certificates_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_certificate_request(
resource_group_name: str,
certificate_order_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_certificate_request_initial(
resource_group_name: str,
certificate_order_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_certificate_request(
resource_group_name: str,
certificate_order_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_certificate_request(
resource_group_name: str,
certificate_order_name: str,
name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_reissue_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/reissue')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_renew_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/renew')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_resend_email_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendEmail')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_resend_request_emails_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendRequestEmails')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_retrieve_site_seal_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/retrieveSiteSeal')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_verify_domain_ownership_request(
resource_group_name: str,
certificate_order_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/verifyDomainOwnership')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"certificateOrderName": _SERIALIZER.url("certificate_order_name", certificate_order_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_retrieve_certificate_actions_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveCertificateActions')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_retrieve_certificate_email_history_request(
resource_group_name: str,
name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
"name": _SERIALIZER.url("name", name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
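# NOTE (editorial sketch, not part of the generated client): the build_* helpers above
# follow the azure-core request-builder pattern -- each one assembles a plain
# azure.core.rest.HttpRequest (HTTP method, URL with formatted path parameters,
# api-version query parameter and headers), and the operations class below sends that
# request through the client pipeline. A minimal, hedged illustration of the flow
# (values such as "my-rg" and `sub_id` are placeholders, not names defined here):
#
#     request = build_get_request(
#         resource_group_name="my-rg",
#         certificate_order_name="my-order",
#         subscription_id=sub_id,
#     )
#     # inside the operations methods the request is then converted and executed:
#     #   request = _convert_request(request)
#     #   request.url = self._client.format_url(request.url)
#     #   pipeline_response = self._client._pipeline.run(request, stream=False)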
class AppServiceCertificateOrdersOperations(object):
"""AppServiceCertificateOrdersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.AppServiceCertificateOrderCollection"]:
"""List all certificate orders in a subscription.
Description for List all certificate orders in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceCertificateOrderCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateOrderCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrderCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AppServiceCertificateOrderCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders'} # type: ignore
@distributed_trace
def validate_purchase_information(
self,
app_service_certificate_order: "_models.AppServiceCertificateOrder",
**kwargs: Any
) -> None:
"""Validate information for a certificate order.
Description for Validate information for a certificate order.
:param app_service_certificate_order: Information for a certificate order.
:type app_service_certificate_order:
~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateOrder
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(app_service_certificate_order, 'AppServiceCertificateOrder')
request = build_validate_purchase_information_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.validate_purchase_information.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
validate_purchase_information.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/validateCertificateRegistrationInformation'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.AppServiceCertificateOrderCollection"]:
"""Get certificate orders in a resource group.
Description for Get certificate orders in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceCertificateOrderCollection or the result
of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateOrderCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrderCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AppServiceCertificateOrderCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
certificate_order_name: str,
**kwargs: Any
) -> "_models.AppServiceCertificateOrder":
"""Get a certificate order.
Description for Get a certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
        :param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceCertificateOrder, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateOrder
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrder"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
certificate_order_name: str,
certificate_distinguished_name: "_models.AppServiceCertificateOrder",
**kwargs: Any
) -> "_models.AppServiceCertificateOrder":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrder"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrder')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
certificate_order_name: str,
certificate_distinguished_name: "_models.AppServiceCertificateOrder",
**kwargs: Any
) -> LROPoller["_models.AppServiceCertificateOrder"]:
"""Create or update a certificate purchase order.
Description for Create or update a certificate purchase order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param certificate_distinguished_name: Distinguished name to use for the certificate order.
:type certificate_distinguished_name:
~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateOrder
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either AppServiceCertificateOrder or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateOrder]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrder"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
certificate_distinguished_name=certificate_distinguished_name,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
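    # Editorial sketch: begin_create_or_update returns an LROPoller, so callers wait on
    # .result() (or poll manually) to obtain the final AppServiceCertificateOrder. The
    # resource group, order name and payload below are placeholders, and the exact
    # AppServiceCertificateOrder fields the service requires are an assumption here:
    #
    #     poller = client.app_service_certificate_orders.begin_create_or_update(
    #         "my-rg",
    #         "my-order",
    #         certificate_distinguished_name=order_payload,  # an AppServiceCertificateOrder
    #     )
    #     order = poller.result()  # blocks until the long-running operation completes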
@distributed_trace
def delete(
self,
resource_group_name: str,
certificate_order_name: str,
**kwargs: Any
) -> None:
"""Delete an existing certificate order.
Description for Delete an existing certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
@distributed_trace
def update(
self,
resource_group_name: str,
certificate_order_name: str,
certificate_distinguished_name: "_models.AppServiceCertificateOrderPatchResource",
**kwargs: Any
) -> "_models.AppServiceCertificateOrder":
"""Create or update a certificate purchase order.
Description for Create or update a certificate purchase order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param certificate_distinguished_name: Distinguished name to use for the certificate order.
:type certificate_distinguished_name:
~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateOrderPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceCertificateOrder, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateOrder
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateOrder"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrderPatchResource')
request = build_update_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateOrder', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'} # type: ignore
@distributed_trace
def list_certificates(
self,
resource_group_name: str,
certificate_order_name: str,
**kwargs: Any
) -> Iterable["_models.AppServiceCertificateCollection"]:
"""List all certificates associated with a certificate order.
Description for List all certificates associated with a certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceCertificateCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_certificates_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
template_url=self.list_certificates.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_certificates_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AppServiceCertificateCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates'} # type: ignore
@distributed_trace
def get_certificate(
self,
resource_group_name: str,
certificate_order_name: str,
name: str,
**kwargs: Any
) -> "_models.AppServiceCertificateResource":
"""Get the certificate associated with a certificate order.
Description for Get the certificate associated with a certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceCertificateResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_certificate_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.get_certificate.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
def _create_or_update_certificate_initial(
self,
resource_group_name: str,
certificate_order_name: str,
name: str,
key_vault_certificate: "_models.AppServiceCertificateResource",
**kwargs: Any
) -> "_models.AppServiceCertificateResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(key_vault_certificate, 'AppServiceCertificateResource')
request = build_create_or_update_certificate_request_initial(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_certificate_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_certificate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
@distributed_trace
def begin_create_or_update_certificate(
self,
resource_group_name: str,
certificate_order_name: str,
name: str,
key_vault_certificate: "_models.AppServiceCertificateResource",
**kwargs: Any
) -> LROPoller["_models.AppServiceCertificateResource"]:
"""Creates or updates a certificate and associates with key vault secret.
Description for Creates or updates a certificate and associates with key vault secret.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:param key_vault_certificate: Key vault certificate resource Id.
:type key_vault_certificate: ~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either AppServiceCertificateResource or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_certificate_initial(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
name=name,
key_vault_certificate=key_vault_certificate,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
@distributed_trace
def delete_certificate(
self,
resource_group_name: str,
certificate_order_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete the certificate associated with a certificate order.
Description for Delete the certificate associated with a certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_certificate_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.delete_certificate.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
@distributed_trace
def update_certificate(
self,
resource_group_name: str,
certificate_order_name: str,
name: str,
key_vault_certificate: "_models.AppServiceCertificatePatchResource",
**kwargs: Any
) -> "_models.AppServiceCertificateResource":
"""Creates or updates a certificate and associates with key vault secret.
Description for Creates or updates a certificate and associates with key vault secret.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:param key_vault_certificate: Key vault certificate resource Id.
:type key_vault_certificate:
~azure.mgmt.web.v2019_08_01.models.AppServiceCertificatePatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceCertificateResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.AppServiceCertificateResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceCertificateResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(key_vault_certificate, 'AppServiceCertificatePatchResource')
request = build_update_certificate_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
name=name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.update_certificate.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'} # type: ignore
@distributed_trace
def reissue(
self,
resource_group_name: str,
certificate_order_name: str,
reissue_certificate_order_request: "_models.ReissueCertificateOrderRequest",
**kwargs: Any
) -> None:
"""Reissue an existing certificate order.
Description for Reissue an existing certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param reissue_certificate_order_request: Parameters for the reissue.
:type reissue_certificate_order_request:
~azure.mgmt.web.v2019_08_01.models.ReissueCertificateOrderRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(reissue_certificate_order_request, 'ReissueCertificateOrderRequest')
request = build_reissue_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.reissue.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
reissue.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/reissue'} # type: ignore
@distributed_trace
def renew(
self,
resource_group_name: str,
certificate_order_name: str,
renew_certificate_order_request: "_models.RenewCertificateOrderRequest",
**kwargs: Any
) -> None:
"""Renew an existing certificate order.
Description for Renew an existing certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param renew_certificate_order_request: Renew parameters.
:type renew_certificate_order_request:
~azure.mgmt.web.v2019_08_01.models.RenewCertificateOrderRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(renew_certificate_order_request, 'RenewCertificateOrderRequest')
request = build_renew_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.renew.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
renew.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/renew'} # type: ignore
@distributed_trace
def resend_email(
self,
resource_group_name: str,
certificate_order_name: str,
**kwargs: Any
) -> None:
"""Resend certificate email.
Description for Resend certificate email.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resend_email_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
template_url=self.resend_email.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
resend_email.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendEmail'} # type: ignore
@distributed_trace
def resend_request_emails(
self,
resource_group_name: str,
certificate_order_name: str,
name_identifier: "_models.NameIdentifier",
**kwargs: Any
) -> None:
"""Verify domain ownership for this certificate order.
Description for Verify domain ownership for this certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name_identifier: Email address.
:type name_identifier: ~azure.mgmt.web.v2019_08_01.models.NameIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(name_identifier, 'NameIdentifier')
request = build_resend_request_emails_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.resend_request_emails.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
resend_request_emails.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendRequestEmails'} # type: ignore
@distributed_trace
def retrieve_site_seal(
self,
resource_group_name: str,
certificate_order_name: str,
site_seal_request: "_models.SiteSealRequest",
**kwargs: Any
) -> "_models.SiteSeal":
"""Verify domain ownership for this certificate order.
Description for Verify domain ownership for this certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param site_seal_request: Site seal request.
:type site_seal_request: ~azure.mgmt.web.v2019_08_01.models.SiteSealRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SiteSeal, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2019_08_01.models.SiteSeal
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SiteSeal"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(site_seal_request, 'SiteSealRequest')
request = build_retrieve_site_seal_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.retrieve_site_seal.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SiteSeal', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_site_seal.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/retrieveSiteSeal'} # type: ignore
@distributed_trace
def verify_domain_ownership(
self,
resource_group_name: str,
certificate_order_name: str,
**kwargs: Any
) -> None:
"""Verify domain ownership for this certificate order.
Description for Verify domain ownership for this certificate order.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_verify_domain_ownership_request(
resource_group_name=resource_group_name,
certificate_order_name=certificate_order_name,
subscription_id=self._config.subscription_id,
template_url=self.verify_domain_ownership.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
verify_domain_ownership.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/verifyDomainOwnership'} # type: ignore
@distributed_trace
def retrieve_certificate_actions(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> List["_models.CertificateOrderAction"]:
"""Retrieve the list of certificate actions.
Description for Retrieve the list of certificate actions.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate order.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of CertificateOrderAction, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2019_08_01.models.CertificateOrderAction]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.CertificateOrderAction"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_retrieve_certificate_actions_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.retrieve_certificate_actions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[CertificateOrderAction]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_certificate_actions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveCertificateActions'} # type: ignore
@distributed_trace
def retrieve_certificate_email_history(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> List["_models.CertificateEmail"]:
"""Retrieve email history.
Description for Retrieve email history.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate order.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of CertificateEmail, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2019_08_01.models.CertificateEmail]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.CertificateEmail"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_retrieve_certificate_email_history_request(
resource_group_name=resource_group_name,
name=name,
subscription_id=self._config.subscription_id,
template_url=self.retrieve_certificate_email_history.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[CertificateEmail]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_certificate_email_history.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory'} # type: ignore
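    # Usage sketch (not part of the generated file): these operations are normally
    # reached through the management client rather than instantiated directly; the
    # attribute name below is an assumption based on this operation group's URL paths.
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.web import WebSiteManagementClient
    #
    #     client = WebSiteManagementClient(DefaultAzureCredential(), subscription_id)
    #     client.app_service_certificate_orders.resend_email(
    #         resource_group_name, certificate_order_name)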
| 43.576137 | 251 | 0.687886 |
73e0278697b45741896112b5a9a1676cd7d48dba | 654 | py | Python | scripts/Bluetooth/BLEClient.py | arsakhar/FitViz | 49bcfa74db6a023e2032c3662dc92ab27b554746 | [
"MIT"
] | 1 | 2020-11-24T14:13:52.000Z | 2020-11-24T14:13:52.000Z | scripts/Bluetooth/BLEClient.py | arsakhar/FitViz | 49bcfa74db6a023e2032c3662dc92ab27b554746 | [
"MIT"
] | null | null | null | scripts/Bluetooth/BLEClient.py | arsakhar/FitViz | 49bcfa74db6a023e2032c3662dc92ab27b554746 | [
"MIT"
] | null | null | null | import asyncio
from bleak import BleakClient
"""
BLE Client Class. Inherits from BleakClient.
"""
class BLEClient(BleakClient):
def __init__(self, address, loop):
super().__init__(address, loop)
"""
Connect to a specified device. **kwargs specifies timeout period
"""
def connect(self, **kwargs):
return super().connect(**kwargs)
"""
Check if connection is active.
"""
def is_connected(self):
return super().is_connected()
"""
Retrieve GATT Services (https://www.bluetooth.com/specifications/gatt/services/)
"""
def get_services(self):
return super().get_services() | 23.357143 | 84 | 0.646789 |
73e03e0047e6a3346b25645ca5934ce1fac17921 | 1,531 | py | Python | active_learning/acquisition_functions.py | a-r-j/FlowMO | 8109be8e2e095e4122873bc4e3d9e290c1986bfa | [
"MIT"
] | null | null | null | active_learning/acquisition_functions.py | a-r-j/FlowMO | 8109be8e2e095e4122873bc4e3d9e290c1986bfa | [
"MIT"
] | null | null | null | active_learning/acquisition_functions.py | a-r-j/FlowMO | 8109be8e2e095e4122873bc4e3d9e290c1986bfa | [
"MIT"
] | null | null | null | """
Module containing acquisition functions for active learning
"""
import numpy as np
from scipy.stats import norm
import gpflow
def gp_var(X: np.ndarray, model) -> np.ndarray:
"""
Suggests samples by maximising the predicted uncertainty (no exploitation, full exploration)
:param X: input data
:param model: GPFlow model
:return: y_var
"""
_, y_var = model.predict_f(X)
return y_var.numpy().flatten()
def gp_ei(
X_test: np.ndarray, y_train: np.ndarray, model, xi: float = 0.01
) -> np.ndarray:
"""
Suggests samples by maximising the predicted expected improvement
Balance exploitation & exploration via parameter xi
:param X_test: input features of points to be sampled
:param y_train: target values from observed samples
:param model: GPFlow model
:param xi: Controls balance of explore/exploit
:return: EI scores
"""
y_pred, y_var = model.predict_y(X_test)
y_best = np.amax(y_train) # Best sample so far
y_std = np.sqrt(y_var)
with np.errstate(divide="warn"):
imp = y_pred - y_best - xi
Z = imp / y_std
ei = imp * norm.cdf(Z) + y_std * norm.pdf(Z)
# ei[y_var<1e-8] = 0.0
return ei.flatten()
def gp_greed(X: np.ndarray, model) -> np.ndarray:
"""
Suggests samples by maximising the predicted y (full exploitation, no exploration)
:param X: input data
:param model: GPFlow model
    :return: y_pred
"""
y_pred, _ = model.predict_y(X)
return y_pred.numpy().flatten()
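# Usage sketch (not part of the original module): given a trained GPflow model `m`,
# observed targets `y_train`, and a candidate pool `X_pool` (np.ndarray), the scores
# can be ranked to choose the next points to label; all names here are placeholders.
#
#     scores = gp_ei(X_pool, y_train, m, xi=0.01)
#     next_idx = np.argsort(-scores)[:batch_size]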
| 28.351852 | 96 | 0.663619 |
73e06726e327812647777baf08e220f3fc49f2f0 | 1,964 | py | Python | td_gen_layout_event_handles.py | sandeep-gh/PySimpleGui_UI_Generator | 602daef1035e0ea7173c8f3dd124ee806e6eb2bd | [
"MIT"
] | null | null | null | td_gen_layout_event_handles.py | sandeep-gh/PySimpleGui_UI_Generator | 602daef1035e0ea7173c8f3dd124ee806e6eb2bd | [
"MIT"
] | 1 | 2021-01-18T00:47:26.000Z | 2021-01-18T16:45:10.000Z | td_gen_layout_event_handles.py | sandeep-gh/PySimpleGui_UI_generator | 602daef1035e0ea7173c8f3dd124ee806e6eb2bd | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
import time
from layout_directive_definitions import Gelem, BlockLD
from layout_directive_definitions import TreeNodeLD
import random
import event_codegen as ecm
import layout_generator_step_by_step as lg
from everything_bagel_dictionary import everything_bagel
import event_codegen as ecm
bld1 = BlockLD([
Gelem("button1", "button 1", sg.Button, sty={"auto_size_button": 'False',
"size": (8, 4)
}, ex_toggle_attrs=[('button_color', (
('white', 'green'), ('blue', 'black')))]),
Gelem("button2", "button 2", sg.Button, sty={"auto_size_button": 'False',
"size": (8, 4)
})
], stacked='H', framed=True
)
bld2 = BlockLD([
Gelem("text1", "text 1", sg.Text, sty={"auto_size_text": 'False',
"size": (8, 4)
}),
Gelem("text2", "text 2", sg.Text, sty={"auto_size_text": 'False',
"size": (8, 4)
})
], stacked='H', framed=False
)
tnld = TreeNodeLD(bld1,
bld2, stacked='H', framed=True)
for bld in ecm.walk_tld(tnld):
print(bld)
lg.set_bld_layout_generator(tnld)
ecm.gen_event_actions(tnld, ['A', 'B'])
lgen = lg.get_layout_generator_tnld(tnld)
the_layout = lg.build_layout_set(
lgen, ['A', 'B'], stacked='H', framed=False)
print(the_layout)
layout = []
exit_button_row = [[
sg.Button('Exit')
]
]
layout = layout + [[the_layout]] + exit_button_row
window = sg.Window('PGAppAnalytics', layout)
while True:
event, values = window.read()
print("event pressed = ", event)
if event == 'Exit':
break
everything_bagel(window, event)
window.close()
| 33.288136 | 96 | 0.528513 |
73e06dbb955d7a5aeebada93f18e58d6eadb1094 | 4,700 | py | Python | mwe_discov_eval/databases/NgramDb.py | nicolaCirillo/mwe_discov_eval | 36f544a802f68d7e274627d57a65531ff23c4c5e | [
"MIT"
] | null | null | null | mwe_discov_eval/databases/NgramDb.py | nicolaCirillo/mwe_discov_eval | 36f544a802f68d7e274627d57a65531ff23c4c5e | [
"MIT"
] | null | null | null | mwe_discov_eval/databases/NgramDb.py | nicolaCirillo/mwe_discov_eval | 36f544a802f68d7e274627d57a65531ff23c4c5e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 1 20:58:38 2021
@author: nicol
"""
from shutil import copyfile
from .SqlDatabase import SqlDatabase
from . import sql_commands as sql
from mwe_discov_eval import messages
from mwe_discov_eval.utils import utils
COMMIT_EACH = 1000
class NgramDb(SqlDatabase):
"""An interface to an SQL database that stores n-gram statistics.
    An interface to an SQLite database containing n-gram statistics. Instances of
    this class are passed to the functions that compute n-gram measures, which
    store their results back into the database. Use
'from_NgramCounter' to create a new instance and 'load' to load an NgramDb
from file.
Parameters
----------
fileroot: str
Path to the file.
Examples
----------
To create a new NgramDb:
>>> from mwe_discov_eval.databases import NgramCounter, NgramDb
>>> counter = NgramCounter.load('sample_counter')
>>> ngram_db = NgramDb.from_NgramCounter(counter, 'sample_ngram_db')
To compute an n-gram measure:
        >>> from mwe_discov_eval.databases import NgramDb
>>> from mwe_discov_eval.measures.am_extended import compute_measure, pmi
>>> ngram_db = NgramDb.load('sample_ngram_db')
        >>> compute_measure(ngram_db, pmi)
"""
def __init__(self, fileroot: str):
self.fileroot = fileroot
db_file = fileroot + '.db'
self.info_file = fileroot + '.info.json'
super().__init__(db_file, new=False)
self._query = None
@classmethod
def from_NgramCounter(cls, ngram_counter, fileroot: str):
"""Creates a new NgramDb from an NgramCounter.
Parameters
----------
ngram_counter: NgramCounter
The NgramCounter from which to create the NgramDb.
fileroot: str
            Path to the file of the new NgramDb.
Returns
-------
NgramDb
"""
messages.new_display()
copyfile(ngram_counter.db, fileroot + '.db')
new_cls = cls(fileroot)
new_cls.connect()
new_cls._gen_info()
new_cls.disconnect()
new_cls._load_info()
return new_cls
@classmethod
def load(cls, fileroot: str):
"""Loads an NgramDb from file
Parameters
----------
fileroot: str
Path to the file.
Returns
-------
NgramDb
"""
new_cls = cls(fileroot)
new_cls._load_info()
return new_cls
def _load_info(self):
info = utils.load_json(self.info_file)
self.n_max = info['n_max']
self.num_rows = info['num_rows']
def _gen_info(self):
messages.msg("Generating info file...")
num_rows = dict()
tables = self.get_tables()
if tables:
n_max = self.query(sql.MAX_LEN.format(table=tables[0]))[0][0]
for t in tables:
for n in range(1, n_max+1):
n_rows = self.query(sql.ROW_COUNT.format(table=t), n)
num_rows.setdefault(t, dict())[n] = n_rows[0][0]
else:
n_max, num_rows = 0, None
info = {'n_max': n_max, 'num_rows': num_rows}
utils.save_json(info, self.info_file)
messages.done()
def upd_info(self, table: str):
"""Updates the .info.json file with regards to a table.
Parameters
----------
table: str
The name of the table of which to update the info
"""
for n in range(1, self.n_max+1):
n_rows = self.query(sql.ROW_COUNT.format(table=table), n)
self.num_rows.setdefault(table, dict())[n] = n_rows[0][0]
info = {'n_max': self.n_max, 'num_rows': self.num_rows}
utils.save_json(info, self.info_file)
messages.done()
def _load_ngrams(self, n):
query = sql.format_query(self._query_table, self.query_fields,
cond=True, limit=self._limit,
offset=self._offset)
self.execute(query, n)
def __getitem__(self, n):
self._load_ngrams(n)
num = self.num_rows[self._query_table][str(n)]
return self.cur, num
def set_query(self, table: str, fields=['*'], limit=None, offset=None):
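        """Configures the query used by __getitem__.
        Parameters
        ----------
        table: str
            Name of the table to query.
        fields: list, optional
            Columns to select (default: all columns).
        limit, offset: int, optional
            Optional LIMIT/OFFSET applied when loading n-grams.
        """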
self._limit = limit
self._offset = offset
self._query_table = table
self.query_fields = fields
def frequency_threshold(self, table, threshold):
messages.msg("Filtering ngrams...")
delete = sql.DELETE.format(table=table)
self.execute(delete, threshold)
self.commit()
messages.done()
self.upd_info(table)
| 29.559748 | 79 | 0.597234 |
73e07aaedeb89a21636a8561748cf0fbc8085ab8 | 125 | py | Python | dAAMs/__init__.py | yuxiang-zhou/DensePoseModel | 382d9cc2ccee629c64ec873110c3653bcc3a30fe | [
"MIT"
] | 1 | 2019-05-07T15:01:16.000Z | 2019-05-07T15:01:16.000Z | dAAMs/__init__.py | yuxiang-zhou/DenseHumanPose | 382d9cc2ccee629c64ec873110c3653bcc3a30fe | [
"MIT"
] | null | null | null | dAAMs/__init__.py | yuxiang-zhou/DenseHumanPose | 382d9cc2ccee629c64ec873110c3653bcc3a30fe | [
"MIT"
] | null | null | null | from .svs import SVS
from .base import dAAMs
from .fitter import LucasKanadeDAAMFitter
from .results import DAAMFitterResult
| 25 | 41 | 0.84 |
73e08ce7d5969de2ae54e2505fa7b449bfaf631a | 16,639 | py | Python | tensorflow/python/debug/wrappers/framework_test.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 522 | 2016-06-08T02:15:50.000Z | 2022-03-02T05:30:36.000Z | tensorflow/python/debug/wrappers/framework_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/python/debug/wrappers/framework_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 108 | 2016-06-16T15:34:05.000Z | 2022-03-12T13:23:11.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug-wrapped sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
from tensorflow.python.util import tf_inspect
class TestDebugWrapperSession(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test."""
def __init__(self, sess, dump_root, observer, thread_name_filter=None):
# Supply dump root.
self._dump_root = dump_root
# Supply observer.
self._obs = observer
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
def on_session_init(self, request):
"""Override abstract on-session-init callback method."""
self._obs["sess_init_count"] += 1
self._obs["request_sess"] = request.session
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Override abstract on-run-start callback method."""
self._obs["on_run_start_count"] += 1
self._obs["run_fetches"] = request.fetches
self._obs["run_feed_dict"] = request.feed_dict
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
["file://" + self._dump_root])
def on_run_end(self, request):
"""Override abstract on-run-end callback method."""
self._obs["on_run_end_count"] += 1
self._obs["performed_action"] = request.performed_action
self._obs["tf_error"] = request.tf_error
return framework.OnRunEndResponse()
class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test.
This class intentionally puts a bad action value in OnSessionInitResponse
and/or in OnRunStartAction to test the handling of such invalid cases.
"""
def __init__(
self,
sess,
bad_init_action=None,
bad_run_start_action=None,
bad_debug_urls=None):
"""Constructor.
Args:
sess: The TensorFlow Session object to be wrapped.
bad_init_action: (str) bad action value to be returned during the
on-session-init callback.
      bad_run_start_action: (str) bad action value to be returned during
        the on-run-start callback.
bad_debug_urls: Bad URL values to be returned during the on-run-start
callback.
"""
self._bad_init_action = bad_init_action
self._bad_run_start_action = bad_run_start_action
self._bad_debug_urls = bad_debug_urls
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(self, sess)
def on_session_init(self, request):
if self._bad_init_action:
return framework.OnSessionInitResponse(self._bad_init_action)
else:
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
debug_urls = self._bad_debug_urls or []
if self._bad_run_start_action:
return framework.OnRunStartResponse(
self._bad_run_start_action, debug_urls)
else:
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN, debug_urls)
def on_run_end(self, request):
return framework.OnRunEndResponse()
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def setUp(self):
self._observer = {
"sess_init_count": 0,
"request_sess": None,
"on_run_start_count": 0,
"run_fetches": None,
"run_feed_dict": None,
"on_run_end_count": 0,
"performed_action": None,
"tf_error": None,
}
self._dump_root = tempfile.mkdtemp()
self._sess = session.Session(config=self._no_rewrite_session_config())
self._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
self._b_init_val = np.array([[2.0], [-1.0]])
self._c_val = np.array([[-4.0], [6.0]])
self._a_init = constant_op.constant(
self._a_init_val, shape=[2, 2], name="a_init")
self._b_init = constant_op.constant(
self._b_init_val, shape=[2, 1], name="b_init")
self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph")
self._a = variables.Variable(self._a_init, name="a1")
self._b = variables.Variable(self._b_init, name="b")
self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
self._p = math_ops.matmul(self._a, self._b, name="p1")
# Matrix product of a and ph.
self._q = math_ops.matmul(self._a, self._ph, name="q")
# Sum of two vectors.
self._s = math_ops.add(self._p, self._c, name="s")
# Initialize the variables.
self._sess.run(self._a.initializer)
self._sess.run(self._b.initializer)
def tearDown(self):
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
ops.reset_default_graph()
def testSessionInit(self):
self.assertEqual(0, self._observer["sess_init_count"])
wrapper_sess = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# Assert that on-session-init callback is invoked.
self.assertEqual(1, self._observer["sess_init_count"])
# Assert that the request to the on-session-init callback carries the
# correct session object.
self.assertEqual(self._sess, self._observer["request_sess"])
# Verify that the wrapper session implements the session.SessionInterface.
self.assertTrue(isinstance(wrapper_sess, session.SessionInterface))
self.assertEqual(self._sess.sess_str, wrapper_sess.sess_str)
self.assertEqual(self._sess.graph, wrapper_sess.graph)
self.assertEqual(self._sess.graph_def, wrapper_sess.graph_def)
# Check that the partial_run_setup and partial_run are not implemented for
# the debug wrapper session.
with self.assertRaises(NotImplementedError):
wrapper_sess.partial_run_setup(self._p)
def testInteractiveSessionInit(self):
"""The wrapper should work also on other subclasses of session.Session."""
TestDebugWrapperSession(
session.InteractiveSession(), self._dump_root, self._observer)
def testSessionRun(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer)
# Check initial state of the observer.
self.assertEqual(0, self._observer["on_run_start_count"])
self.assertEqual(0, self._observer["on_run_end_count"])
s = wrapper.run(self._s)
# Assert the run return value is correct.
self.assertAllClose(np.array([[3.0], [4.0]]), s)
# Assert the on-run-start method is invoked.
self.assertEqual(1, self._observer["on_run_start_count"])
# Assert the on-run-start request reflects the correct fetch.
self.assertEqual(self._s, self._observer["run_fetches"])
# Assert the on-run-start request reflects the correct feed_dict.
self.assertIsNone(self._observer["run_feed_dict"])
# Assert the file debug URL has led to dump on the filesystem.
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(7, len(dump.dumped_tensor_data))
# Assert the on-run-end method is invoked.
self.assertEqual(1, self._observer["on_run_end_count"])
# Assert the performed action field in the on-run-end callback request is
# correct.
self.assertEqual(
framework.OnRunStartAction.DEBUG_RUN,
self._observer["performed_action"])
# No TensorFlow runtime error should have happened.
self.assertIsNone(self._observer["tf_error"])
def testSessionInitInvalidSessionType(self):
"""Attempt to wrap a non-Session-type object should cause an exception."""
wrapper = TestDebugWrapperSessionBadAction(self._sess)
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
TestDebugWrapperSessionBadAction(wrapper)
def testSessionInitBadActionValue(self):
with self.assertRaisesRegexp(
ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
TestDebugWrapperSessionBadAction(
self._sess, bad_init_action="nonsense_action")
def testRunStartBadActionValue(self):
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_run_start_action="nonsense_action")
with self.assertRaisesRegexp(
ValueError, "Invalid OnRunStartAction value: nonsense_action"):
wrapper.run(self._s)
def testRunStartBadURLs(self):
# debug_urls ought to be a list of str, not a str. So an exception should
# be raised during a run() call.
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_debug_urls="file://foo")
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
wrapper.run(self._s)
def testErrorDuringRun(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# No matrix size mismatch.
self.assertAllClose(
np.array([[11.0], [-1.0]]),
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertIsNone(self._observer["tf_error"])
# Now there should be a matrix size mismatch error.
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])})
self.assertEqual(2, self._observer["on_run_end_count"])
self.assertTrue(
isinstance(self._observer["tf_error"], errors.InvalidArgumentError))
def testUsingWrappedSessionShouldWorkAsContextManager(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper as sess:
self.assertAllClose([[3.0], [4.0]], self._s.eval())
self.assertEqual(1, self._observer["on_run_start_count"])
self.assertEqual(self._s, self._observer["run_fetches"])
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertAllClose(
[[11.0], [-1.0]],
sess.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(2, self._observer["on_run_start_count"])
self.assertEqual(self._q, self._observer["run_fetches"])
self.assertEqual(2, self._observer["on_run_end_count"])
def testUsingWrappedSessionShouldSupportEvalWithAsDefault(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
with wrapper.as_default():
foo = constant_op.constant(42, name="foo")
self.assertEqual(42, foo.eval())
self.assertEqual(foo, self._observer["run_fetches"])
def testWrapperShouldSupportSessionClose(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
wrapper.close()
def testWrapperThreadNameFilterMainThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter="MainThread")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("a_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterChildThread(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=r"Child.*")
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump.size)
self.assertEqual("b_init", dump.dumped_tensor_data[0].node_name)
def testWrapperThreadNameFilterBothThreads(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer,
thread_name_filter=None)
child_run_output = []
def child_thread_job():
child_run_output.append(wrapper.run(self._b_init))
thread = threading.Thread(name="ChildThread", target=child_thread_job)
thread.start()
self.assertAllClose(self._a_init_val, wrapper.run(self._a_init))
thread.join()
self.assertAllClose([self._b_init_val], child_run_output)
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertEqual(2, dump.size)
self.assertItemsEqual(
["a_init", "b_init"],
[datum.node_name for datum in dump.dumped_tensor_data])
def _is_public_method_name(method_name):
return (method_name.startswith("__") and method_name.endswith("__")
or not method_name.startswith("_"))
class SessionWrapperPublicMethodParityTest(test_util.TensorFlowTestCase):
def testWrapperHasAllPublicMethodsOfSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(session.Session, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
def testWrapperHasAllPublicMethodsOfMonitoredSession(self):
session_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(monitored_session.MonitoredSession,
predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
wrapper_public_methods = [
method_tuple[0] for method_tuple in
tf_inspect.getmembers(
framework.BaseDebugWrapperSession, predicate=tf_inspect.ismethod)
if _is_public_method_name(method_tuple[0])]
missing_public_methods = [
method for method in session_public_methods
if method not in wrapper_public_methods]
self.assertFalse(missing_public_methods)
if __name__ == "__main__":
googletest.main()
| 36.89357 | 88 | 0.719274 |
73e0a59ba2647a2bcd63d30ed890d89cba61ee37 | 5,079 | py | Python | pytorch/torch/optim/adagrad.py | zhou3968322/dl-code-read | aca204a986dabe2755becff0f42de1082299d791 | [
"MIT"
] | null | null | null | pytorch/torch/optim/adagrad.py | zhou3968322/dl-code-read | aca204a986dabe2755becff0f42de1082299d791 | [
"MIT"
] | null | null | null | pytorch/torch/optim/adagrad.py | zhou3968322/dl-code-read | aca204a986dabe2755becff0f42de1082299d791 | [
"MIT"
] | null | null | null | import torch
from .optimizer import Optimizer
class Adagrad(Optimizer):
"""Implements Adagrad algorithm.
It has been proposed in `Adaptive Subgradient Methods for Online Learning
and Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
lr_decay (float, optional): learning rate decay (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
.. _Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization: http://jmlr.org/papers/v12/duchi11a.html
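    Example (a minimal usage sketch, not part of the original file; ``model``,
    ``loss_fn``, ``input`` and ``target`` are placeholders)::
        >>> optimizer = Adagrad(model.parameters(), lr=1e-2, weight_decay=1e-4)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()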
"""
def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= lr_decay:
raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= initial_accumulator_value:
raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
defaults = dict(lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value)
super(Adagrad, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.full_like(p, initial_accumulator_value, memory_format=torch.preserve_format)
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad = grad.add(p, alpha=group['weight_decay'])
clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
# >>> i = [[1, 1]]
# >>> v = [3, 4]
# >>> s=torch.sparse_coo_tensor(i, v, (3,))
# >>> s
# tensor(indices=tensor([[1, 1]]),
# values=tensor( [3, 4]),
# size=(3,), nnz=2, layout=torch.sparse_coo)
                # a sparse tensor's layout is torch.sparse_coo
if grad.is_sparse:
# the coalescing process will accumulate the multi-valued elements into a single value using summation:
# >>> s.coalesce()
# tensor(indices=tensor([[1]]),
# values=tensor([7]),
# size=(3,), nnz=1, layout=torch.sparse_coo)
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
                    # accumulate the sum of squared sparse gradient values into state['sum']
state['sum'].add_(make_sparse(grad_values.pow(2)))
std = state['sum'].sparse_mask(grad)
                    # take the square root of the accumulated sum to get the adaptive denominator (std)
std_values = std._values().sqrt_().add_(group['eps'])
p.add_(make_sparse(grad_values / std_values), alpha=-clr)
else:
state['sum'].addcmul_(grad, grad, value=1)
std = state['sum'].sqrt().add_(group['eps'])
                    # dense Adagrad update: p <- p - clr * grad / (sqrt(sum) + eps)
p.addcdiv_(grad, std, value=-clr)
return loss
| 41.631148 | 123 | 0.534554 |
73e0a9dc80cd46ff5074a070812f55801da17266 | 4,010 | py | Python | chapter_7_design/wake_transcribe.py | fancyerii/voicebook | def82da8577086d0361643a05fec2463006533a9 | [
"Apache-2.0"
] | 1 | 2020-03-05T01:19:17.000Z | 2020-03-05T01:19:17.000Z | chapter_7_design/wake_transcribe.py | fancyerii/voicebook | def82da8577086d0361643a05fec2463006533a9 | [
"Apache-2.0"
] | null | null | null | chapter_7_design/wake_transcribe.py | fancyerii/voicebook | def82da8577086d0361643a05fec2463006533a9 | [
"Apache-2.0"
] | null | null | null | '''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## WAKE_TRANSCRIBE.PY ##
================================================
Use asynchronous transcription as a wakeword detector.
'''
import soundfile as sf
import sounddevice as sd
import speech_recognition as sr_audio
import pyttsx3
import os, time
# transcribe with pocketsphinx (open-source)
def speak():
engine = pyttsx3.init()
engine.say("hello!!")
engine.runAndWait()
def find_wake(transcript, hotwords):
for i in range(len(hotwords)):
## print(transcript)
## print(transcript.lower().find(hotwords[i]))
if transcript.lower().find(hotwords[i])>=0:
print('%s wakeword found!!'%(hotwords[i].upper()))
speak()
break
def transcribe_sphinx(file):
try:
r=sr_audio.Recognizer()
with sr_audio.AudioFile(file) as source:
audio = r.record(source)
transcript=r.recognize_sphinx(audio)
print('sphinx transcript: '+transcript)
except:
transcript=''
print(transcript)
return transcript
def async_record(hotwords, filename, filename2, duration, fs, channels):
print('recording')
myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
transcript=transcribe_sphinx(filename2)
find_wake(transcript, hotwords)
sd.wait()
sf.write(filename, myrecording, fs)
print('done recording')
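# Design note: sd.rec() returns immediately, so while the new clip is being captured
# the previous clip (filename2) is transcribed and checked for the wakeword;
# sd.wait() then blocks until recording finishes before the new clip is written out.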
# initial parameters
hotwords=['test', 'testing']
i=0
t=1
filename2='n/a'
# create infinite loop
while t>0:
    # record a mono file asynchronously, transcribe the previous clip, and look for the wakeword
filename=str(i+1)+'.wav'
async_record(hotwords, filename, filename2, 3, 16000, 1)
filename2=filename
i=i+1
try:
os.remove(str(i-2)+'.wav')
except:
pass
| 33.140496 | 121 | 0.61995 |
73e0ae8db048d9e437287582bf92c241a88d16b4 | 3,063 | py | Python | brian2/monitors/ratemonitor.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | brian2/monitors/ratemonitor.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | brian2/monitors/ratemonitor.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
from brian2.core.variables import Variables
from brian2.units.allunits import second, hertz
from brian2.units.fundamentalunits import Unit, Quantity
from brian2.groups.group import CodeRunner, Group
__all__ = ['PopulationRateMonitor']
class PopulationRateMonitor(Group, CodeRunner):
'''
Record instantaneous firing rates, averaged across neurons from a
`NeuronGroup` or other spike source.
Parameters
----------
source : (`NeuronGroup`, `SpikeSource`)
The source of spikes to record.
name : str, optional
A unique name for the object, otherwise will use
``source.name+'_ratemonitor_0'``, etc.
codeobj_class : class, optional
The `CodeObject` class to run code with.
Notes
-----
Currently, this monitor can only monitor the instantaneous firing rates at
each time step of the source clock. Any binning/smoothing of the firing
rates has to be done manually afterwards.
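    Examples
    --------
    A minimal sketch (not part of the original file; assumes a Brian2 script with a
    `NeuronGroup` called ``group`` and the usual unit/plotting imports)::
        rate_mon = PopulationRateMonitor(group)
        run(100*ms)
        plot(rate_mon.t/ms, rate_mon.rate/Hz)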
'''
invalidates_magic_network = False
add_to_magic_network = True
def __init__(self, source, name='ratemonitor*',
codeobj_class=None):
#: The group we are recording from
self.source = source
self.codeobj_class = codeobj_class
CodeRunner.__init__(self, group=self, code='', template='ratemonitor',
clock=source.clock, when='end', order=0, name=name)
self.add_dependency(source)
self.variables = Variables(self)
# Handle subgroups correctly
start = getattr(source, 'start', 0)
stop = getattr(source, 'stop', len(source))
self.variables.add_constant('_source_start', Unit(1), start)
self.variables.add_constant('_source_stop', Unit(1), stop)
self.variables.add_reference('_spikespace', source)
self.variables.add_dynamic_array('rate', size=0, unit=hertz,
constant_size=False)
self.variables.add_dynamic_array('t', size=0, unit=second,
constant_size=False)
self.variables.add_reference('_num_source_neurons', source, 'N')
self.variables.add_attribute_variable('N', unit=Unit(1), obj=self,
attribute='_N', dtype=np.int32)
self.variables.create_clock_variables(self._clock,
prefix='_clock_')
self._enable_group_attributes()
@property
def _N(self):
return len(self.variables['t'].get_value())
def resize(self, new_size):
self.variables['rate'].resize(new_size)
self.variables['t'].resize(new_size)
def __len__(self):
return self._N
def reinit(self):
'''
Clears all recorded rates
'''
raise NotImplementedError()
def __repr__(self):
description = '<{classname}, recording {source}>'
return description.format(classname=self.__class__.__name__,
source=self.source.name)
| 36.035294 | 79 | 0.624878 |
73e0d1840d2263197b0fc5f8c337460e5faa6272 | 478 | py | Python | askci/apps/api/admin.py | hpsee/askci | ef1e2e75481b71db7fbe774cb81938055aa596d0 | [
"MIT"
] | 3 | 2019-11-21T09:04:36.000Z | 2019-11-23T13:29:43.000Z | askci/apps/api/admin.py | hpsee/askci | ef1e2e75481b71db7fbe774cb81938055aa596d0 | [
"MIT"
] | 13 | 2019-11-21T20:28:23.000Z | 2019-11-26T19:34:22.000Z | askci/apps/api/admin.py | hpsee/askci | ef1e2e75481b71db7fbe774cb81938055aa596d0 | [
"MIT"
] | null | null | null | """
Copyright (C) 2019-2020 Vanessa Sochat.
This Source Code Form is subject to the terms of the
Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from django.contrib import admin
from askci.apps.api.models import Webhook
class WebhookAdmin(admin.ModelAdmin):
list_display = ("name", "secret", "app_from", "created", "modified")
admin.site.register(Webhook, WebhookAdmin)
| 23.9 | 72 | 0.736402 |
73e0e6413e54202a25babe46c933b015592be710 | 3,442 | py | Python | synapseutils/monitor.py | zimingd/synapsePythonClient | 6bddd4d9de2f06f6f79d8ce814ac57970d6bebee | [
"Apache-2.0"
] | null | null | null | synapseutils/monitor.py | zimingd/synapsePythonClient | 6bddd4d9de2f06f6f79d8ce814ac57970d6bebee | [
"Apache-2.0"
] | null | null | null | synapseutils/monitor.py | zimingd/synapsePythonClient | 6bddd4d9de2f06f6f79d8ce814ac57970d6bebee | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import traceback
import sys
from multiprocessing import Value, Lock
from synapseclient.utils import printTransferProgress
def notifyMe(syn, messageSubject='', retries=0):
"""Function decorator that notifies you via email whenever an function completes running or
there is a failure.
:param syn: A synapse object as obtained with syn = synapseclient.login()
:param messageSubject: A string with subject line for sent out messages.
:param retries: Number of retries to attempt on failure (default=0)
Example::
# to decorate a function that you define
from synapseutils import notifyMe
import synapseclient
syn = synapseclient.login()
@notifyMe(syn, 'Long running function', retries=2)
def my_function(x):
doing_something()
return long_runtime_func(x)
my_function(123)
#############################
# to wrap a function that already exists
from synapseutils import notifyMe
import synapseclient
syn = synapseclient.login()
notify_decorator = notifyMe(syn, 'Long running query', retries=2)
my_query = notify_decorator(syn.tableQuery)
results = my_query("select id from syn1223")
#############################
"""
def notify_decorator(func):
@functools.wraps(func)
def with_retry_and_messaging(*args, **kwargs):
attempt = 0
destination = syn.getUserProfile()['ownerId']
while attempt<=retries:
try:
output = func(*args, **kwargs)
syn.sendMessage([destination], messageSubject,
messageBody='Call to %s completed successfully!' %func.__name__)
return output
except Exception as e:
sys.stderr.write(traceback.format_exc())
syn.sendMessage([destination], messageSubject,
messageBody = ('Encountered a temporary Failure during upload. '
'Will retry %i more times. \n\n Error message was:\n%s\n\n%s'
%(retries-attempt, e, traceback.format_exc())))
attempt +=1
return with_retry_and_messaging
return notify_decorator
def with_progress_bar(func, totalCalls, prefix = '', postfix='', isBytes=False):
"""Wraps a function to add a progress bar based on the number of calls to that function.
:param func: Function being wrapped with progress Bar
:param totalCalls: total number of items/bytes when completed
:param prefix: String printed before progress bar
    :param postfix: String printed after progress bar
    :param isBytes: A boolean indicating whether to convert bytes to kB, MB, GB etc.
:return: a wrapped function that contains a progress bar
"""
completed = Value('d', 0)
lock = Lock()
def progress(*args, **kwargs):
with lock:
completed.value +=1
printTransferProgress(completed.value, totalCalls, prefix, postfix, isBytes)
return func(*args, **kwargs)
return progress
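# Usage sketch (not part of the original module): wrap a per-item worker before
# mapping it over a collection so each call advances the shared progress bar;
# `process_item` and `items` are placeholders for the caller's function and data.
#
#     wrapped = with_progress_bar(process_item, len(items), prefix='Processing')
#     results = [wrapped(item) for item in items]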
| 38.674157 | 112 | 0.612725 |
73e0ff5c2f299cb000f037297a6e449078332f9b | 6,065 | py | Python | cotk/dataloader/context.py | ishine/cotk | 2242c16523830254b0ac509f7739b4cbcb03dea4 | [
"Apache-2.0"
] | 117 | 2019-03-14T15:06:42.000Z | 2022-02-28T07:06:34.000Z | cotk/dataloader/context.py | ishine/cotk | 2242c16523830254b0ac509f7739b4cbcb03dea4 | [
"Apache-2.0"
] | 149 | 2019-03-12T09:43:21.000Z | 2020-08-24T02:56:34.000Z | cotk/dataloader/context.py | ishine/cotk | 2242c16523830254b0ac509f7739b4cbcb03dea4 | [
"Apache-2.0"
] | 51 | 2019-03-14T15:12:48.000Z | 2021-08-09T03:37:48.000Z |
from typing import List, Any, Dict, Union, Optional
from .._utils.metaclass import DocStringInheritor
# For type checking
if False: #pylint: disable=using-constant-test
#pylint: disable=unused-import
from .vocab import Vocab
from .tokenizer import Tokenizer
from collections import OrderedDict
class _UNDEFINED():
pass
class Context(metaclass=DocStringInheritor):
'''An abstract base class for context manager.
This class is used for setting default parameters
for :class:`Field` or :class:`Vocab`, without directly
passing parameters to ``__init__`` of the object.
See :ref:`examples<dataloader_context_ref>` for how to use context manager.
Arguments:
parameter_dict (Dict[str, Any]): Key-value dict for changed parameters.
weak (bool, optional): When ``False``, overwrite existing parameters. Default: ``False``.
none_as_ignored (bool, optional): When ``True``, ``None`` values in ``parameter_dict`` are ignored.
Otherwise, the corresponding key will be set to ``None``.
Default: ``True``.
'''
context_dict: Dict[str, Any] = {}
corrupted = False
UNDEFINED = _UNDEFINED()
def __init__(self, parameter_dict: Dict[str, Any], weak=False, none_as_ignored=True):
if self.__class__.corrupted:
raise RuntimeError("A context object do not close before becoming invalid. Use ``with`` statement, " \
"or make sure of calling close.")
self._old_parameters = self._set_parameters(parameter_dict, weak=weak, none_as_ignored=none_as_ignored)
self._closed = False
@classmethod
def _set_parameters(cls, parameter_dict: Dict[str, Any], weak=False, none_as_ignored=True) -> Dict[str, Any]:
old_parameters: Dict[str, Any] = {}
for key, value in parameter_dict.items():
old_parameters[key] = cls.set(key, value, weak=weak, none_as_ignored=none_as_ignored)
return old_parameters
@classmethod
def get(cls, key: str, default: Any = None, no_default=False) -> Any:
'''Get the value of parameter named ``key`` stored in this class.
Arguments:
key (str): name of the parameter
default (Any, optional): Default value if ``key`` is not set. Defaults: ``None``.
no_default (bool, optional): When ``True``, Raise ``KeyError`` if ``key`` is not set. Defaults: ``False``.
'''
if key in cls.context_dict:
return cls.context_dict[key]
else:
if no_default:
raise KeyError("Must specify %s in Context." % key)
else:
return default
WEAK_ARGS = r'''
weak (bool, optional): When ``False``, overwrite existing parameters. Defaults: ``False``.
'''
NONE_AS_IGNORED_ARGS = r'''
none_as_ignored (bool, optional): When ``True``, ``None`` values in ``parameter_dict`` are ignored.
Otherwise, the corresponding value will be set to ``None``.
Default: ``True``.
'''
@classmethod
def set(cls, key: str, value: Any, weak=False, none_as_ignored=True) -> Any:
'''Set the parameter named ``key`` to ``value``, stored in this class.
If weak is ``True``, do not overwrite if ``key`` is already set.
Return the old value.
Arguments:
key (str): The name of the changed parameter.
value (Any): The new value of changed parameter.
If want to delete the key, use ``Context.UNDEFINED``.
{WEAK_ARGS}
{NONE_AS_IGNORED_ARGS}
'''
if key not in cls.context_dict:
old = Context.UNDEFINED
if value or not none_as_ignored:
cls.context_dict[key] = value
return old
old = cls.context_dict[key]
if not weak:
if value is Context.UNDEFINED:
del cls.context_dict[key]
elif value is not None or not none_as_ignored:
cls.context_dict[key] = value
return old
def __enter__(self):
'''Enter a context'''
return self
@classmethod
def _restore(cls, old_parameters):
for name, param in old_parameters.items():
if name not in cls.context_dict:
continue
if param is Context.UNDEFINED:
del cls.context_dict[name]
else:
cls.context_dict[name] = param
def __exit__(self, exc_type, exc_val, exc_tb):
'''Exit the context and restore the old parameter.'''
self.close()
def close(self):
'''Restore the old parameter.'''
self._restore(self._old_parameters)
self._closed = True
def __del__(self):
if hasattr(self, "_closed") and not self._closed:
self.__class__.corrupted = True
raise RuntimeError("A context object do not close before becoming invalid. Use ``with`` statement, " \
"or make sure of calling close.")
class FieldContext(Context):
'''Bases: :class:`.dataloader.Context`
A context class for setting default parameters for :class:`.Field`.
'''
context_dict: Dict[str, Any] = {}
corrupted = False
UNDEFINED = Context.UNDEFINED
NONE_AS_IGNORED_ARGS = Context.NONE_AS_IGNORED_ARGS.replace("``parameter_dict``", "``kwargs``")
# pylint: disable=unused-argument
@classmethod
def set_parameters(cls, *, weak=False, none_as_ignored=True, **kwargs) -> "FieldContext":
'''Set a context for initialization of :class:`Field`.
See :ref:`examples<dataloader_context_ref>` for how to use context manager.
Arguments:
{WEAK_ARGS}
{NONE_AS_IGNORED_ARGS}
\*\*kwargs: Any parameters to be set. Set ``key`` to ``FieldContext.UNDEFINED`` to delete a parameter.
'''
return FieldContext(kwargs, weak=weak, none_as_ignored=none_as_ignored)
class VocabContext(Context):
'''Bases: :class:`.dataloader.Context`
A context class for setting default parameters for :class:`.Vocab`.
'''
context_dict: Dict[str, Any] = {}
corrupted = False
UNDEFINED = Context.UNDEFINED
NONE_AS_IGNORED_ARGS = Context.NONE_AS_IGNORED_ARGS.replace("``parameter_dict``", "``kwargs``")
# pylint: disable=unused-argument
@classmethod
def set_parameters(cls, *, weak=False, none_as_ignored=True, **kwargs) -> "VocabContext":
'''Set a context for initialization of :class:`Vocab`.
See :ref:`examples<dataloader_context_ref>` for how to use context manager.
Arguments:
{WEAK_ARGS}
{NONE_AS_IGNORED_ARGS}
\*\*kwargs: Any parameters to be set. Set ``key`` to ``VocabContext.UNDEFINED`` to delete a parameter.
'''
return VocabContext(kwargs, weak=weak, none_as_ignored=none_as_ignored)
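
# Illustrative usage sketch (an added note, not part of the original module).
# The mechanism is the one defined above (set_parameters / get); the parameter
# name "tokenizer" is only an example and not a guaranteed Field argument:
#
#     with FieldContext.set_parameters(tokenizer="space"):
#         default_tokenizer = FieldContext.get("tokenizer")  # -> "space"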
| 32.961957 | 110 | 0.713768 |
73e1257004bfd59befcb10a0f4cedad704ba387d | 1,177 | py | Python | analysis/computation/ambitus.py | msampaio/FlautaSolo | 407253513712462e3521c06bcc35a9b95da0fe19 | [
"BSD-3-Clause"
] | null | null | null | analysis/computation/ambitus.py | msampaio/FlautaSolo | 407253513712462e3521c06bcc35a9b95da0fe19 | [
"BSD-3-Clause"
] | 4 | 2019-03-18T20:11:30.000Z | 2019-03-18T20:11:59.000Z | analysis/computation/ambitus.py | msampaio/FlautaSolo | 407253513712462e3521c06bcc35a9b95da0fe19 | [
"BSD-3-Clause"
] | null | null | null | from collections import Counter
from analysis.computation import utils
def frequency(ambitus_list):
freq = Counter(ambitus_list)
r = [['Ambitus', 'Pieces']]
for k, v in sorted(freq.items()):
r.append([k, v])
return r
def frequency_pie(ambitus_list):
r = utils.aux_pie_chart(Counter(ambitus_list))
r.insert(0, ['Ambitus', 'Amount'])
return r
def analysis(compositions):
ambitus_list = utils.get_music_data_attrib(compositions, 'ambitus', 'append')
if ambitus_list:
basic_stats = utils.aux_basic_stats(ambitus_list, 'Pieces number', False)
dist_value = utils.distribution(ambitus_list, basic_stats, False)
args = {
'basic_stats': basic_stats,
'frequency': frequency(ambitus_list),
'histogram': utils.histogram(ambitus_list, 10, ['Ambitus', 'Pieces'], False, True),
'distribution_value': dist_value,
'distribution_amount': utils.distribution(ambitus_list, basic_stats, True),
'frequency_pie': frequency_pie(ambitus_list),
'boxplot': utils.boxplot(basic_stats),
}
return args
else:
return {}
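

# Illustrative sketch (an added note, not part of the original module): with
# ambitus values given as integers (e.g. semitones), frequency() simply
# tabulates how many pieces share each ambitus:
#
#     frequency([12, 12, 19]) -> [['Ambitus', 'Pieces'], [12, 2], [19, 1]]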
| 30.179487 | 95 | 0.649108 |
73e152dd67f965304d1b73b9f581bc57f6f82e12 | 1,752 | py | Python | setup.py | textX/EasyColorLang | b7b660777ef04422a9a9df5b59b15bb80df0e3b9 | [
"MIT"
] | null | null | null | setup.py | textX/EasyColorLang | b7b660777ef04422a9a9df5b59b15bb80df0e3b9 | [
"MIT"
] | null | null | null | setup.py | textX/EasyColorLang | b7b660777ef04422a9a9df5b59b15bb80df0e3b9 | [
"MIT"
] | null | null | null | import codecs
import os
from setuptools import find_packages, setup
PACKAGE_NAME = "EasyColorLang"
VERSION = "0.1"
AUTHOR = "Igor Majic"
AUTHOR_EMAIL = "majic753@gmail.com"
DESCRIPTION = "A syntax highlighting generator for any textX language"
KEYWORDS = "textX DSL python all languages highlighting coloring"
LICENSE = "MIT"
URL = "https://github.com/IgorMaj/SyntaxColoring"
packages = find_packages()
print("packages:", packages)
README = codecs.open(
os.path.join(os.path.dirname(__file__), "README.md"), "r", encoding="utf-8").read()
setup(
name=PACKAGE_NAME,
version=VERSION,
description=DESCRIPTION,
long_description=README,
long_description_content_type="text/markdown",
url=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
keywords=KEYWORDS,
license=LICENSE,
packages=packages,
include_package_data=True,
install_requires=["click", "jinja2", "textx"],
entry_points={
"textx_generators": ["easy_gen_coloring = gen_coloring.coloring_entry_point:textmate_gen_coloring"],
"textx_languages": ["easy_coloring_lang = gen_coloring.coloring_entry_point:easy_coloring_lang"]
},
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
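
# Quick post-install check (a sketch; assumes the textX CLI that comes with the
# ``textx`` dependency declared above):
#
#     pip install .
#     textx list-languages     # easy_coloring_lang should appear
#     textx list-generators    # the coloring generator should appear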
| 31.854545 | 108 | 0.681507 |
73e1fd387d797e62b65c558c725eb9486b1e4469 | 267 | py | Python | Battleships/model/rules.py | ruslanabdulin1985/TheTask | 99e53bd283273a90233b29c8aa7aa4380211263b | [
"MIT"
] | null | null | null | Battleships/model/rules.py | ruslanabdulin1985/TheTask | 99e53bd283273a90233b29c8aa7aa4380211263b | [
"MIT"
] | null | null | null | Battleships/model/rules.py | ruslanabdulin1985/TheTask | 99e53bd283273a90233b29c8aa7aa4380211263b | [
"MIT"
] | null | null | null |
class Rules:
def __init__(self, mode, opponent):
if mode == 'standard':
            self.ship_stack = [4,3,3,2,2,2,1,1,1,1]  # each element is a ship, e.g. [4,3,3] says there is 1 ship of size 4 and 2 of size 3
self.opponent = opponent # human or computer
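

# Illustrative sketch (an added note, not part of the original class):
#
#     rules = Rules('standard', 'computer')
#     rules.ship_stack  # -> [4, 3, 3, 2, 2, 2, 1, 1, 1, 1]
#
# Note that ship_stack is only set when mode == 'standard'.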
| 33.375 | 127 | 0.595506 |
73e242f22a9d8a0fc453ee5907bc2cb97f50d328 | 4,823 | py | Python | src/calc/serial/calc_B.py | paytonrodman/athena-analysis | f635338122e15c318dfd754d06cc3dbaa42273d2 | [
"BSD-3-Clause"
] | null | null | null | src/calc/serial/calc_B.py | paytonrodman/athena-analysis | f635338122e15c318dfd754d06cc3dbaa42273d2 | [
"BSD-3-Clause"
] | null | null | null | src/calc/serial/calc_B.py | paytonrodman/athena-analysis | f635338122e15c318dfd754d06cc3dbaa42273d2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# calc_B.py
#
# A program to calculate the effective magnetic field derived from the magnetic flux at the inner
# simulation edge, and compare it to the average field in the disk.
#
# Usage: python calc_B.py [options]
#
import numpy as np
import os
import sys
#sys.path.insert(0, '/home/per29/rds/rds-accretion-zyNhkonJSR8/athena-analysis/dependencies')
sys.path.insert(0, '/Users/paytonrodman/athena-sim/athena-analysis/dependencies')
import athena_read
import AAT
import glob
import re
import csv
import argparse
def main(**kwargs):
problem = args.prob_id
root_dir = "/Users/paytonrodman/athena-sim/"
#root_dir = '/home/per29/rds/rds-accretion-zyNhkonJSR8/'
prob_dir = root_dir + problem + '/'
data_dir = prob_dir + 'data/'
runfile_dir = prob_dir + 'runfiles/'
os.chdir(data_dir)
csv_time = np.empty(0)
csv_array = []
with open(prob_dir + 'flux_with_time.csv', 'r', newline='') as f:
csv_reader = csv.reader(f, delimiter='\t')
next(csv_reader, None) # skip header
for row in csv_reader:
csv_time = np.append(csv_time, float(row[0]))
array_entry = np.array([float(row[0]),float(row[2]),float(row[3])]) # [time, mf_u, mf_l]
csv_array.append(array_entry)
files = glob.glob('./*.athdf')
times = np.empty(0)
for f in files:
time_sec = re.findall(r'\b\d+\b', f)
if args.update:
if float(time_sec[0]) not in times and float(time_sec[0]) in csv_time:
times = np.append(times, float(time_sec[0]))
else:
if float(time_sec[0]) in csv_time:
times = np.append(times, float(time_sec[0]))
if len(times)==0:
sys.exit('No new timesteps to analyse in the given directory. Exiting.')
#times = [0.,5000.,10000.,15000.,20000.,25000.,30000.]
#times = [25000.]
data_input = athena_read.athinput(runfile_dir + 'athinput.' + problem)
x1min = data_input['mesh']['x1min']
x1max = data_input['refinement3']['x1max']
scale_height = data_input['problem']['h_r']
angles = [0.,1.*scale_height,2.*scale_height,3.*scale_height]
data_init = athena_read.athdf(problem + '.cons.00000.athdf', quantities=['x2v'])
x2v = data_init['x2v']
th_l_id = []
th_u_id = []
for val in angles:
th_u_id.append(AAT.find_nearest(x2v, np.pi/2. + val))
th_l_id.append(AAT.find_nearest(x2v, np.pi/2. - val))
Bav = []
Bcc1_av_0 = []
Bcc1_av_1 = []
Bcc1_av_2 = []
Bcc1_av_3 = []
sim_time = []
for t in times:
str_t = str(int(t)).zfill(5)
data_cons = athena_read.athdf(problem + '.cons.' + str_t + '.athdf', quantities=['Bcc1','Bcc2','Bcc3'])
for item in csv_array:
if (item[0] == data_cons['Time']):
mf_u = item[1]
mf_l = item[2]
continue
Bav_l = mf_l / (2.*np.pi*(x1min**2.))
Bav_u = mf_u / (2.*np.pi*(x1min**2.))
Bav.append([Bav_l,Bav_u])
Bcc1 = data_cons['Bcc1']
Bcc2 = data_cons['Bcc2']
Bcc3 = data_cons['Bcc3']
B = np.sqrt(Bcc1**2. + Bcc2**2. + Bcc3**2.)
Bcc1_av_0.append(np.average(B[:x1max,th_l_id[0],:]))
Bcc1_av_1.append([np.average(B[:x1max,th_l_id[1],:]),np.average(B[:x1max,th_u_id[1],:])])
Bcc1_av_2.append([np.average(B[:x1max,th_l_id[2],:]),np.average(B[:x1max,th_u_id[2],:])])
Bcc1_av_3.append([np.average(B[:x1max,th_l_id[3],:]),np.average(B[:x1max,th_u_id[3],:])])
sim_time.append(data_cons['Time'])
sim_time,Bav,Bcc1_av_0,Bcc1_av_1,Bcc1_av_2,Bcc1_av_3 = (list(t) for t in zip(*sorted(zip(sim_time,Bav,Bcc1_av_0,Bcc1_av_1,Bcc1_av_2,Bcc1_av_3))))
os.chdir(prob_dir)
if args.update:
with open('B_strength_with_time.csv', 'a', newline='') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerows(zip(sim_time, Bav, Bcc1_av_0, Bcc1_av_1, Bcc1_av_2, Bcc1_av_3))
else:
with open('B_strength_with_time.csv', 'w', newline='') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow(["sim_time", "Bav", "Bcc1_0sh", "B_1sh", "B_2sh", "B_3sh"])
writer.writerows(zip(sim_time, Bav, Bcc1_av_0, Bcc1_av_1, Bcc1_av_2, Bcc1_av_3))
# Execute main function
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculate various quality factors from raw simulation data.')
parser.add_argument('prob_id',
help='base name of the data being analysed, e.g. inflow_var or disk_base')
parser.add_argument('-u', '--update',
action="store_true",
help='specify whether the results being analysed are from a restart')
args = parser.parse_args()
main(**vars(args))
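
# An added note (not part of the original script): the effective field above is
# computed as B_av = Phi / (2 * pi * x1min**2), i.e. the measured flux divided
# by 2*pi times the square of the inner radius. For example, with Phi = 6.28
# and x1min = 1.0 this gives B_av ~= 6.28 / (2 * pi) ~= 1.0 in code units.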
| 38.584 | 149 | 0.614763 |
73e248c16f95da21f08d549ef951aa4d3f852761 | 13,061 | gyp | Python | extensions/extensions.gyp | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-01-28T08:09:58.000Z | 2021-11-15T15:32:10.000Z | extensions/extensions.gyp | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | extensions/extensions.gyp | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6 | 2020-09-23T08:56:12.000Z | 2021-11-18T03:40:49.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'extensions.gypi',
],
'variables': {
'chromium_code': 1,
},
'targets': [
{
# GN version: //extensions/common
'target_name': 'extensions_common_constants',
'type': 'static_library',
'include_dirs': [
'..',
'<(INTERMEDIATE_DIR)',
],
'sources': [
'<@(extensions_common_constants_sources)',
],
# Disable c4267 warnings until we fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
# GN version: //extensions/common:mojo
'target_name': 'extensions_common_mojo',
# The type of this target must be none. This is so that resources can
# depend upon this target for generating the js bindings files. Any
# generated cpp files must be listed explicitly in extensions_common
'type': 'none',
'includes': [
'../mojo/mojom_bindings_generator.gypi',
],
'sources': [
'<@(extensions_common_mojo_sources)',
],
'conditions': [
['proprietary_codecs==1 and enable_wifi_display==1', {
'sources': [
'<@(extensions_common_mojo_sources_wifi_display)',
],
}],
],
},
{
# GN version: //extensions/common
'target_name': 'extensions_common',
'type': 'static_library',
'dependencies': [
'../components/components.gyp:crx_file',
'../components/components.gyp:url_matcher',
'../content/content.gyp:content_common',
'../crypto/crypto.gyp:crypto',
'../device/bluetooth/bluetooth.gyp:device_bluetooth',
# For Mojo generated headers for generated_api.cc.
'../device/serial/serial.gyp:device_serial_mojo',
'../device/power_save_blocker/power_save_blocker.gyp:device_power_save_blocker',
'../device/usb/usb.gyp:device_usb',
'../ipc/ipc.gyp:ipc',
'../net/net.gyp:net',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/re2/re2.gyp:re2',
'../ui/base/ui_base.gyp:ui_base',
'../ui/gfx/gfx.gyp:gfx_geometry',
'../ui/gfx/ipc/geometry/gfx_ipc_geometry.gyp:gfx_ipc_geometry',
'../ui/gfx/ipc/gfx_ipc.gyp:gfx_ipc',
'../ui/gfx/ipc/skia/gfx_ipc_skia.gyp:gfx_ipc_skia',
'../url/url.gyp:url_lib',
'../third_party/boringssl/boringssl.gyp:boringssl',
'../third_party/libxml/libxml.gyp:libxml',
'../url/ipc/url_ipc.gyp:url_ipc',
'common/api/api.gyp:extensions_api',
'extensions_resources.gyp:extensions_resources',
'extensions_strings.gyp:extensions_strings',
'extensions_common_constants',
'extensions_common_mojo',
],
'include_dirs': [
'..',
'<(INTERMEDIATE_DIR)',
],
'sources': [
'<@(extensions_common_sources)',
# Mojom generated files should not be included in the common gypi source
# list because GN has proper mojom dependency support.
'<(SHARED_INTERMEDIATE_DIR)/extensions/common/mojo/keep_alive.mojom.cc',
'<(SHARED_INTERMEDIATE_DIR)/extensions/common/mojo/keep_alive.mojom.h',
'<(SHARED_INTERMEDIATE_DIR)/extensions/common/mojo/stash.mojom.cc',
'<(SHARED_INTERMEDIATE_DIR)/extensions/common/mojo/stash.mojom.h',
],
# Disable c4267 warnings until we fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
'conditions': [
['disable_nacl==0', {
# NaClModulesHandler does not use any code in NaCl, so no dependency
# on nacl_common.
'sources': [
'<@(extensions_common_sources_nacl)',
],
}],
],
},
{
# GN version: //extensions/browser
'target_name': 'extensions_browser',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../components/components.gyp:browsing_data',
'../components/components.gyp:cast_certificate',
'../components/components.gyp:device_event_log_component',
'../components/components.gyp:guest_view_browser',
'../components/components.gyp:keyed_service_content',
'../components/components.gyp:keyed_service_core',
'../components/components.gyp:onc_component',
'../components/components.gyp:pref_registry',
'../components/components.gyp:sessions_content',
'../components/components.gyp:storage_monitor',
'../components/components.gyp:update_client',
'../components/components.gyp:variations',
'../components/components.gyp:version_info',
'../components/components.gyp:web_cache_browser',
'../components/components.gyp:web_modal',
'../components/components.gyp:zoom',
'../components/prefs/prefs.gyp:prefs',
'../content/content.gyp:content_browser',
'../device/bluetooth/bluetooth.gyp:device_bluetooth',
'../device/serial/serial.gyp:device_serial',
'../google_apis/google_apis.gyp:google_apis',
'../skia/skia.gyp:skia',
'../third_party/leveldatabase/leveldatabase.gyp:leveldatabase',
'../third_party/re2/re2.gyp:re2',
'browser/api/api_registration.gyp:extensions_api_registration',
'common/api/api.gyp:cast_channel_proto',
'common/api/api.gyp:extensions_api',
'extensions_common',
'extensions_resources.gyp:extensions_resources',
'extensions_strings.gyp:extensions_strings',
],
'include_dirs': [
'..',
'<(INTERMEDIATE_DIR)',
# Needed to access generated API headers.
'<(SHARED_INTERMEDIATE_DIR)',
],
'sources': [
'<@(extensions_browser_sources)',
],
'conditions': [
# This condition exists only because the extensions_common_constants
# target is always built and thus this file gets evaluated by GYP.
# This does not need to be replicated into extensions/browser/BUILD.gn.
['OS == "ios" or OS == "android"', {
'dependencies!': [
'../components/components.gyp:storage_monitor',
],
}],
['chromeos == 1', {
'dependencies': [
'../chromeos/chromeos.gyp:chromeos',
],
'sources': [
'<@(extensions_browser_sources_chromeos)',
],
}, { # chromeos==0
'sources': [
'<@(extensions_browser_sources_nonchromeos)',
],
}],
['OS == "win" or OS == "mac"', {
'dependencies': [
'../components/components.gyp:wifi_component',
],
'sources': [
'<@(extensions_browser_sources_win_or_mac)',
],
}],
['OS == "linux" and chromeos == 0', {
'dependencies': [
'../build/linux/system.gyp:dbus',
'../dbus/dbus.gyp:dbus',
],
'sources': [
'<@(extensions_browser_sources_linux_nonchromeos)',
],
}],
['proprietary_codecs==1 and enable_wifi_display == 1', {
'sources': [
'<@(extensions_browser_sources_wifi_display)',
'<(SHARED_INTERMEDIATE_DIR)/extensions/common/mojo/wifi_display_session_service.mojom.cc',
'<(SHARED_INTERMEDIATE_DIR)/extensions/common/mojo/wifi_display_session_service.mojom.h',
],
}],
],
# Disable c4267 warnings until we fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
# GN version: //extensions/renderer
'target_name': 'extensions_renderer',
'type': 'static_library',
'dependencies': [
'extensions_resources.gyp:extensions_resources',
'../components/components.gyp:guest_view_renderer',
'../content/content.gyp:content_common',
'../content/content.gyp:content_resources',
'../gin/gin.gyp:gin',
'../mojo/mojo_public.gyp:mojo_js_bindings',
'../third_party/WebKit/public/blink.gyp:blink',
],
'include_dirs': [
'..',
],
'sources': [
'<@(extensions_renderer_sources)',
],
# Disable c4267 warnings until we fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
'conditions': [
['proprietary_codecs==1 and enable_wifi_display==1', {
'dependencies': [
'../third_party/openh264/openh264.gyp:openh264_encoder',
'../third_party/wds/wds.gyp:libwds',
],
'sources': [
'<@(extensions_renderer_sources_wifi_display)',
],
}],
],
},
{
# GN version: //extensions/utility
'target_name': 'extensions_utility',
'type': 'static_library',
'dependencies': [
'../content/content.gyp:content_common',
'../content/content.gyp:content_utility',
'../skia/skia.gyp:skia',
'../third_party/zlib/google/zip.gyp:zip',
'extensions_common',
'extensions_strings.gyp:extensions_strings',
],
'include_dirs': [
'..',
],
'sources': [
'<@(extensions_utility_sources)',
],
},
{
# GN version: //extensions:test_support
'target_name': 'extensions_test_support',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../components/components.gyp:pref_registry_test_support',
'../components/components.gyp:user_prefs',
'../components/prefs/prefs.gyp:prefs_test_support',
'../content/content.gyp:content_browser',
'../content/content.gyp:content_common',
'../content/content_shell_and_tests.gyp:test_support_content',
'../net/net.gyp:net_test_support',
'../testing/gtest.gyp:gtest',
'../third_party/cld_2/cld_2.gyp:cld_2',
'browser/api/api_registration.gyp:extensions_api_registration',
'common/api/api.gyp:cast_channel_proto',
'common/api/api.gyp:extensions_api',
'extensions_browser',
'extensions_common',
'extensions_resources.gyp:extensions_resources',
],
'include_dirs': [
'..',
'<(SHARED_INTERMEDIATE_DIR)',
],
'sources': [
'<@(extensions_test_support_sources)',
],
# Disable c4267 warnings until we fix size_t to int truncations.
'msvs_disabled_warnings': [ 4267, ],
},
{
# The pak file generated by this target is intended to be shared by
# both shell and test targets. It was combined because it might help a
# little bit with build time by avoiding a repack step (one instead of
# two).
'target_name': 'extensions_shell_and_test_pak',
'type': 'none',
'dependencies': [
'../content/browser/devtools/devtools_resources.gyp:devtools_resources',
'../content/content.gyp:content_resources',
'../content/content_shell_and_tests.gyp:content_shell_resources',
'../third_party/WebKit/public/blink_resources.gyp:blink_resources',
'../ui/resources/ui_resources.gyp:ui_resources',
'../ui/strings/ui_strings.gyp:ui_strings',
'extensions_resources.gyp:extensions_resources',
'extensions_strings.gyp:extensions_strings',
'shell/app_shell_resources.gyp:app_shell_resources',
],
'actions': [
{
'action_name': 'repack_extensions_shell_and_test_pak',
'variables': {
'pak_inputs': [
'<(SHARED_INTERMEDIATE_DIR)/blink/public/resources/blink_image_resources_100_percent.pak',
'<(SHARED_INTERMEDIATE_DIR)/blink/public/resources/blink_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/content/app/strings/content_strings_en-US.pak',
'<(SHARED_INTERMEDIATE_DIR)/content/content_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/content/shell_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/extensions/extensions_browser_resources_100_percent.pak',
'<(SHARED_INTERMEDIATE_DIR)/extensions/extensions_renderer_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/extensions/extensions_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/extensions/shell/app_shell_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/extensions/strings/extensions_strings_en-US.pak',
'<(SHARED_INTERMEDIATE_DIR)/ui/resources/ui_resources_100_percent.pak',
'<(SHARED_INTERMEDIATE_DIR)/ui/strings/app_locale_settings_en-US.pak',
'<(SHARED_INTERMEDIATE_DIR)/ui/strings/ui_strings_en-US.pak',
'<(SHARED_INTERMEDIATE_DIR)/blink/devtools_resources.pak',
],
'pak_output': '<(PRODUCT_DIR)/extensions_shell_and_test.pak',
},
'includes': [ '../build/repack_action.gypi' ],
},
],
},
]
}
| 39.222222 | 104 | 0.605696 |
73e24c193dfdeaaf093d4484761adb8ae1a8f670 | 1,838 | py | Python | Python_files/OD_matrix_estimation_GLS_Jan_weekday_PM_ext.py | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | Python_files/OD_matrix_estimation_GLS_Jan_weekday_PM_ext.py | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | Python_files/OD_matrix_estimation_GLS_Jan_weekday_PM_ext.py | jingzbu/InverseVITraffic | c0d33d91bdd3c014147d58866c1a2b99fb8a9608 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = "Jing Zhang"
__email__ = "jingzbu@gmail.com"
__status__ = "Development"
from util import *
import numpy as np
from numpy.linalg import inv
import json
# load logit_route_choice_probability_matrix
P = zload('../temp_files/logit_route_choice_probability_matrix_ext.pkz')
P = np.matrix(P)
# print(np.size(P,0), np.size(P,1))
# load path-link incidence matrix
A = zload('../temp_files/path-link_incidence_matrix_ext.pkz')
# load link counts data
with open('../temp_files/link_day_minute_Jan_dict_ext_JSON_insert_links_adjusted.json', 'r') as json_file:
link_day_minute_Jan_dict_ext_JSON = json.load(json_file)
# week_day_Jan_list = [2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20, 23, 24, 25, 26, 27, 30, 31]
week_day_Jan_list = [25, 26, 27, 30, 31]
link_day_minute_Jan_list = []
for link_idx in range(74):
for day in week_day_Jan_list:
for minute_idx in range(120):
key = 'link_' + str(link_idx) + '_' + str(day)
link_day_minute_Jan_list.append(link_day_minute_Jan_dict_ext_JSON[key] ['PM_flow_minute'][minute_idx])
# print(len(link_day_minute_Jan_list))
x = np.matrix(link_day_minute_Jan_list)
x = np.matrix.reshape(x, 74, 600)
x = np.nan_to_num(x)
y = np.array(np.transpose(x))
y = y[np.all(y != 0, axis=1)]
x = np.transpose(y)
x = np.matrix(x)
# print(np.size(x,0), np.size(x,1))
# print(x[:,:2])
# print(np.size(A,0), np.size(A,1))
L = 22 * (22 - 1) # dimension of lam
lam_list = GLS(x, A, P, L)
# write estimation result to file
n = 22 # number of nodes
with open('../temp_files/OD_demand_matrix_Jan_weekday_PM_ext.txt', 'w') as the_file:
idx = 0
for i in range(n + 1)[1:]:
for j in range(n + 1)[1:]:
if i != j:
the_file.write("%d,%d,%f\n" %(i, j, lam_list[idx]))
idx += 1
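
# An added sketch (not part of the original script): the file written above is a
# plain "origin,destination,demand" CSV, so it can be read back with e.g.
#
#     import csv
#     with open('../temp_files/OD_demand_matrix_Jan_weekday_PM_ext.txt') as f:
#         od_pairs = [(int(i), int(j), float(d)) for i, j, d in csv.reader(f)]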
| 27.848485 | 114 | 0.662677 |
73e252228f2622299f80336e10321317810bb8f5 | 18,823 | py | Python | cockroach/test_client.py | abhishekgahlot/cockroach-python | 72903f583da182530e208cf41644d74e1a7ef505 | [
"Apache-2.0"
] | 1 | 2015-11-05T19:20:27.000Z | 2015-11-05T19:20:27.000Z | cockroach/test_client.py | abhishekgahlot/cockroach-python | 72903f583da182530e208cf41644d74e1a7ef505 | [
"Apache-2.0"
] | null | null | null | cockroach/test_client.py | abhishekgahlot/cockroach-python | 72903f583da182530e208cf41644d74e1a7ef505 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import threading
import unittest
from cockroach import errors
from cockroach.interface import KVSender, TransactionOptions
from cockroach.kv import KV
from cockroach.http_sender import HTTPSender
from cockroach.methods import Methods
from cockroach.proto import api_pb2, config_pb2, data_pb2
class NotifyingSender(KVSender):
"""NotifyingSender wraps a KVSender to provide notifications when an RPC is sent."""
def __init__(self, sender):
self.wrapped = sender
self.callback = None
def reset(self, callback):
"""Schedules callback to be run after the next send.
It is undefined to call reset while a previous callback is pending.
"""
self.callback = callback
def send(self, call):
self.wrapped.send(call)
if self.callback is not None:
self.callback()
self.callback = None
def close(self):
self.wrapped.close()
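

# Illustrative sketch (an added note, not part of the original tests):
# NotifyingSender lets a test block until the next RPC has been processed,
# roughly as done inside test_retry_non_txn below:
#
#     sender = NotifyingSender(HTTPSender(addr))   # addr is a placeholder
#     event = threading.Event()
#     sender.reset(event.set)
#     # ... issue one call through a KV client built on ``sender`` ...
#     event.wait()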
@unittest.skipIf('COCKROACH_PORT' not in os.environ, "not running under docker-compose")
class ClientTest(unittest.TestCase):
def setUp(self):
self.assertTrue(os.environ['COCKROACH_PORT'].startswith('tcp://'))
addr = os.environ['COCKROACH_PORT'][len('tcp://'):]
self.notifying_sender = NotifyingSender(HTTPSender(addr))
self.client = KV(self.notifying_sender, user="root")
# This import is delayed because the module is not present by default
# on python 2. We only run this test with python 3, but it needs to be
# importable on py2.
from concurrent.futures import ThreadPoolExecutor
self.executor = ThreadPoolExecutor(2)
def tearDown(self):
self.client.close()
self.executor.shutdown()
# Verify that we can make a simple RPC to the server.
def test_basic(self):
reply = self.client.call(
Methods.Increment,
api_pb2.IncrementRequest(
header=api_pb2.RequestHeader(key=b"a"),
increment=3,
))
self.assertEqual(reply.new_value, 3)
    # Verify that a non-transactional client will succeed despite write/write and read/write
# conflicts. In the case where the non-transactional put can push the txn,
# we expect the transaction's value to be written after all retries are complete.
def test_retry_non_txn(self):
# method, isolation, can push, expected attempts
test_cases = [
# Write/write conflicts.
(Methods.Put, data_pb2.SNAPSHOT, True, 2),
(Methods.Put, data_pb2.SERIALIZABLE, True, 2),
# Some of the can't-push test cases take 15 seconds each,
# and are currently failing when uncommented.
# TODO: why? because the go version uses Store.SetRangeRetryOptions?
#(Methods.Put, data_pb2.SNAPSHOT, False, 1),
#(Methods.Put, data_pb2.SERIALIZABLE, False, 1),
# Read/write conflicts.
(Methods.Get, data_pb2.SNAPSHOT, True, 1),
(Methods.Get, data_pb2.SERIALIZABLE, True, 2),
(Methods.Get, data_pb2.SNAPSHOT, False, 1),
#(Methods.Get, data_pb2.SERIALIZABLE, False, 1),
]
# Lay down a write intent using a txn and attempt to write to same
# key. Try this twice--once with priorities which will allow the
# intent to be pushed and once with priorities which will not.
for i, test_case in enumerate(test_cases):
method, isolation, can_push, exp_attempts = test_case
logging.info("starting test case %d", i)
key = ("key-%d" % i).encode('ascii')
txn_pri = -1
client_pri = -1
if can_push:
client_pri = -2
else:
txn_pri = -2
self.client.user_priority = client_pri
done_call = threading.Event()
count = [0]
def callback(txn):
txn.user_priority = txn_pri
count[0] += 1
# Lay down the intent.
txn.call(Methods.Put, api_pb2.PutRequest(
header=api_pb2.RequestHeader(key=key),
value=data_pb2.Value(bytes=b"txn-value")))
# On the first attempt, send the non-txn put or get.
if count[0] == 1:
event = threading.Event()
# We use a "notifying" sender here, which allows us to know exactly
# when the call has been processed; otherwise, we'd be dependent on
# timing.
self.notifying_sender.reset(event.set)
def non_txn_op():
if method is Methods.Put:
args = api_pb2.PutRequest()
args.value.bytes = b"value"
elif method is Methods.Get:
args = api_pb2.GetRequest()
else:
raise Exception("unexpected method %s" % method)
args.header.key = key
while True:
try:
self.client.call(method, args)
except errors.WriteIntentError:
continue
except Exception:
                                # Run until we either succeed or get a non-write-intent error.
pass
break
done_call.set()
self.executor.submit(non_txn_op)
event.wait()
self.client.run_transaction(TransactionOptions(isolation=isolation), callback)
# Make sure non-txn put or get has finished.
done_call.wait()
# Get the current value to verify whether the txn happened first.
get_reply = self.client.call(
Methods.Get, api_pb2.GetRequest(header=api_pb2.RequestHeader(key=key)))
if can_push or method is Methods.Get:
self.assertEqual(get_reply.value.bytes, b"txn-value")
else:
self.assertEqual(get_reply.value.bytes, b"value")
self.assertEqual(count[0], exp_attempts)
def test_run_transaction(self):
for commit in [True, False]:
value = b"value"
key = ("key-%s" % commit).encode("ascii")
def callback(txn):
# Put transactional value.
txn.call(Methods.Put, api_pb2.PutRequest(header=api_pb2.RequestHeader(key=key),
value=data_pb2.Value(bytes=value)))
# Attempt to read outside of txn.
gr = self.client.call(
Methods.Get, api_pb2.GetRequest(header=api_pb2.RequestHeader(key=key)))
self.assertFalse(gr.HasField('value'))
# Read within the transaction.
gr = txn.call(
Methods.Get, api_pb2.GetRequest(header=api_pb2.RequestHeader(key=key)))
self.assertEqual(gr.value.bytes, value)
if not commit:
raise ValueError("purposefully failing transaction")
# Use snapshot isolation so non-transactional read can always push.
try:
self.client.run_transaction(
TransactionOptions(isolation=data_pb2.SNAPSHOT), callback)
except ValueError as e:
self.assertTrue((not commit) and str(e) == "purposefully failing transaction")
else:
self.assertTrue(commit)
# Verify the value is now visible on commit==True, and not visible otherwise.
gr = self.client.call(
Methods.Get, api_pb2.GetRequest(header=api_pb2.RequestHeader(key=key)))
if commit:
self.assertEqual(gr.value.bytes, value)
else:
self.assertFalse(gr.HasField('value'))
# Verify gets and puts of protobufs using the kv client's convenience methods.
def test_get_and_put_proto(self):
zone_config = config_pb2.ZoneConfig(
replica_attrs=[config_pb2.Attributes(attrs=["dc1", "mem"]),
config_pb2.Attributes(attrs=["dc2", "mem"])],
range_min_bytes=1<<10, # 1k
range_max_bytes=1<<18, # 256k
)
key = b"zone-config"
self.client.put_proto(key, zone_config)
read_zone_config = config_pb2.ZoneConfig()
ok, ts = self.client.get_proto(key, read_zone_config)
self.assertTrue(ok)
self.assertNotEqual(ts.wall_time, 0)
self.assertEqual(read_zone_config, zone_config)
# Verify that empty values are preserved for both empty bytes and integer=0.
def test_empty_values(self):
self.client.put_bytes(b"a", b"")
self.client.call(Methods.Put, api_pb2.PutRequest(
header=api_pb2.RequestHeader(key=b"b"),
value=data_pb2.Value(integer=0)))
get_resp = self.client.call(Methods.Get, api_pb2.GetRequest(
header=api_pb2.RequestHeader(key=b"a")))
self.assertTrue(get_resp.value.HasField('bytes'))
self.assertFalse(get_resp.value.HasField('integer'))
self.assertEqual(get_resp.value.bytes, b"")
get_resp = self.client.call(Methods.Get, api_pb2.GetRequest(
header=api_pb2.RequestHeader(key=b"b")))
self.assertFalse(get_resp.value.HasField('bytes'))
self.assertTrue(get_resp.value.HasField('integer'))
self.assertEqual(get_resp.value.integer, 0)
# Prepare a sequence of increment calls then flush them and verify the results.
def test_prepare_and_flush(self):
keys = []
replies = []
for i in range(10):
key = ("key %02d" % i).encode('ascii')
keys.append(key)
reply = api_pb2.IncrementResponse()
replies.append(reply)
self.client.prepare(
Methods.Increment,
api_pb2.IncrementRequest(header=api_pb2.RequestHeader(key=key), increment=i),
reply)
self.client.flush()
for i, reply in enumerate(replies):
self.assertEqual(i, reply.new_value)
# Now try 2 scans.
scan1 = api_pb2.ScanResponse()
scan2 = api_pb2.ScanResponse()
self.client.prepare(
Methods.Scan, api_pb2.ScanRequest(header=api_pb2.RequestHeader(
key=b"key 00", end_key=b"key 05")), scan1)
self.client.prepare(
Methods.Scan, api_pb2.ScanRequest(header=api_pb2.RequestHeader(
key=b"key 05", end_key=b"key 10")), scan2)
self.client.flush()
self.assertEqual(len(scan1.rows), 5)
self.assertEqual(len(scan2.rows), 5)
for i in range(5):
self.assertEqual(scan1.rows[i].key, keys[i])
self.assertEqual(scan1.rows[i].value.integer, i)
self.assertEqual(scan2.rows[i].key, keys[i+5])
self.assertEqual(scan2.rows[i].value.integer, i+5)
# This is an example for using the call() method to Put and then get a value for a
# given key.
# TODO: In the go version this is an example test; when we have docs consider making it
# a doctest.
def test_example_call(self):
key = b"a"
value = b"asdf"
# Store test value.
self.client.call(Methods.Put,
api_pb2.PutRequest(header=api_pb2.RequestHeader(key=key),
value=data_pb2.Value(bytes=value)))
# Retrieve test value using the same key.
get_resp = self.client.call(Methods.Get,
api_pb2.GetRequest(header=api_pb2.RequestHeader(key=key)))
self.assertTrue(get_resp.HasField('value'))
self.assertEqual(get_resp.value.bytes, value)
# This is an example for using the prepare() method to submit multiple key value
# API operations to be run in parallel. Flush() is then used to begin execution of all
# the prepared operations.
# TODO: In the go version this is an example test; when we have docs consider making
# it a doctest.
def test_example_prepare(self):
batch_size = 12
keys = []
values = []
for i in range(batch_size):
keys.append(("key-%03d" % i).encode('ascii'))
values.append(("value-%0d3" % i).encode('ascii'))
self.client.prepare(
Methods.Put, api_pb2.PutRequest(header=api_pb2.RequestHeader(
key=keys[i]), value=data_pb2.Value(bytes=values[i])),
api_pb2.PutResponse())
# Flush all puts for parallel execution.
self.client.flush()
# Scan for the newly inserted rows in parallel.
num_scans = 3
rows_per_scan = batch_size // num_scans
scan_responses = []
for i in range(num_scans):
first_key = keys[i*rows_per_scan]
last_key = keys[((i+1)*rows_per_scan)-1]
reply = api_pb2.ScanResponse()
scan_responses.append(reply)
self.client.prepare(
Methods.Scan, api_pb2.ScanRequest(
header=api_pb2.RequestHeader(key=first_key, end_key=last_key+b"\x00"),
max_results=rows_per_scan),
reply)
# Flush all scans for parallel execution.
self.client.flush()
# Check results.
for i in range(num_scans):
for j in range(rows_per_scan):
row = scan_responses[i].rows[j]
self.assertEqual(row.key, keys[i*rows_per_scan+j])
self.assertEqual(row.value.bytes, values[i*rows_per_scan+j])
# This is an example for using the run_transaction method to submit multiple key value
# API operations inside a transaction.
# TODO: In the go version this is an example test; when we have docs consider making
# it a doctest.
def test_example_run_transaction(self):
# Create test data.
num_pairs = 10
keys = []
values = []
for i in range(num_pairs):
            keys.append(('testkey-%03d' % i).encode('ascii'))
values.append(('testvalue-%03d' % i).encode('ascii'))
# Insert all KV pairs inside a transaction.
def callback(txn):
for i in range(num_pairs):
txn.prepare(Methods.Put,
api_pb2.PutRequest(header=api_pb2.RequestHeader(key=keys[i]),
value=data_pb2.Value(bytes=values[i])),
api_pb2.PutResponse())
# Note that the KV client is flushed automatically on transaction commit.
# Invoking flush after individual API methods is only required if the result
# needs to be received to take conditional action.
put_opts = TransactionOptions(name="example put")
self.client.run_transaction(put_opts, callback)
# Read back KV pairs inside a transaction.
get_responses = []
def callback(txn):
for i in range(num_pairs):
get_responses.append(api_pb2.GetResponse())
txn.prepare(Methods.Get,
api_pb2.GetRequest(header=api_pb2.RequestHeader(key=keys[i])),
get_responses[-1])
get_opts = TransactionOptions(name="example get")
self.client.run_transaction(get_opts, callback)
# Check results.
for i, get_resp in enumerate(get_responses):
self.assertTrue(get_resp.HasField('value'))
self.assertEqual(get_resp.value.bytes, values[i])
def concurrent_increments(self):
"""Start two threads in parallel, both of which read the integers stored
at the other's key and add it onto their own. It is checked that the outcome is
serializable, i.e. exactly one of the two threads (the later write) sees the
previous write by the other.
"""
start = threading.Barrier(2)
end = threading.Barrier(2+1)
for i in range(2):
def func(i):
try:
# Read the other key, write key i.
read_key = ("value-%d" % ((i+1) % 2)).encode('ascii')
write_key = ("value-%d" % i).encode('ascii')
# Wait until the other threads are running.
start.wait()
def callback(txn):
# Retrieve the other key.
gr = txn.call(Methods.Get, api_pb2.GetRequest(
header=api_pb2.RequestHeader(key=read_key)))
other_value = gr.value.integer
txn.call(Methods.Increment, api_pb2.IncrementRequest(
header=api_pb2.RequestHeader(key=write_key),
increment=1+other_value))
txn_opts = TransactionOptions(name='test-%d' % i)
self.client.run_transaction(txn_opts, callback)
finally:
end.wait()
self.executor.submit(func, i)
# Wait for the threads to finish.
end.wait()
# Verify that both keys contain something and, more importantly, that one key
# actually contains the value of the first writer and not only its own.
total = 0
results = []
for i in range(2):
read_key = ('value-%d' % i).encode('ascii')
gr = self.client.call(
Methods.Get, api_pb2.GetRequest(header=api_pb2.RequestHeader(key=read_key)))
self.assertTrue(gr.HasField('value'))
self.assertTrue(gr.value.HasField('integer'))
total += gr.value.integer
results.append(gr.value.integer)
# First writer should have 1, second one 2.
self.assertEqual(total, 3, "got unserializable values %r" % results)
# test_concurrent_increments is a simple explicit test for serializability
# for the concrete situation described in:
# https://groups.google.com/forum/#!topic/cockroach-db/LdrC5_T0VNw
def test_concurrent_increments(self):
# Convenience loop: Crank up this number for testing this
# more often. It'll increase test duration though.
for i in range(5):
self.client.call(Methods.DeleteRange,
api_pb2.DeleteRangeRequest(header=api_pb2.RequestHeader(
key=b"value-0", end_key=b"value-1x")))
self.concurrent_increments()
| 42.974886 | 95 | 0.579185 |
73e2554d4ceae16e3f4c0c8a20fa218a5d49a165 | 1,485 | py | Python | tests/configs/test_zoo_urls.py | kmarathe10/mmf | 93cd2a7f333ad14e88fa6c2453a0f54b64b36210 | [
"BSD-3-Clause"
] | 1 | 2020-06-03T07:47:36.000Z | 2020-06-03T07:47:36.000Z | tests/configs/test_zoo_urls.py | kmarathe10/mmf | 93cd2a7f333ad14e88fa6c2453a0f54b64b36210 | [
"BSD-3-Clause"
] | null | null | null | tests/configs/test_zoo_urls.py | kmarathe10/mmf | 93cd2a7f333ad14e88fa6c2453a0f54b64b36210 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from omegaconf import OmegaConf
from mmf.utils.configuration import load_yaml
from mmf.utils.download import DownloadableFile, check_header
from tests.test_utils import skip_if_no_network
class TestConfigsForKeys(unittest.TestCase):
def _test_zoo_for_keys(self, path):
zoo_config = load_yaml(path)
self._recurse_on_config(zoo_config)
def _recurse_on_config(self, config):
if OmegaConf.is_list(config) and len(config) > 0 and "url" in config[0]:
# Found the urls, let's test them
for item in config:
# First try making the DownloadableFile class to make sure
# everything is fine
download = DownloadableFile(**item)
# Now check the actual header
check_header(download._url, from_google=download._from_google)
elif OmegaConf.is_dict(config):
# Both version and resources should be present
if "version" in config:
self.assertIn("resources", config)
if "resources" in config:
self.assertIn("version", config)
# Let's continue recursing
for item in config:
self._recurse_on_config(config[item])
@skip_if_no_network
def test_zoos(self):
self._test_zoo_for_keys("configs/zoo/datasets.yaml")
self._test_zoo_for_keys("configs/zoo/models.yaml")
| 37.125 | 80 | 0.653872 |
73e25eacd8f566ad7c9b269120b9ab0e32ecc589 | 103 | py | Python | tests/_root_dir.py | sebastian-sz/tfdata-image-loader | 7ba03a37086a4bc318961d39b72db76ed1cd85cf | [
"MIT"
] | 1 | 2020-05-12T05:33:17.000Z | 2020-05-12T05:33:17.000Z | tests/_root_dir.py | sebastian-sz/tfdata-image-loader | 7ba03a37086a4bc318961d39b72db76ed1cd85cf | [
"MIT"
] | null | null | null | tests/_root_dir.py | sebastian-sz/tfdata-image-loader | 7ba03a37086a4bc318961d39b72db76ed1cd85cf | [
"MIT"
] | null | null | null | """Module for handling data paths."""
import os
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
| 20.6 | 53 | 0.737864 |
73e268ced93d4575771ba66687b9f2c9b34dbb5a | 22,563 | py | Python | sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/aio/operations/_policies_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/aio/operations/_policies_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/aio/operations/_policies_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._policies_operations import build_create_or_update_request_initial, build_delete_request, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PoliciesOperations:
"""PoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.cdn.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.CdnWebApplicationFirewallPolicyList"]:
"""Lists all of the protection policies within a resource group.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CdnWebApplicationFirewallPolicyList or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cdn.models.CdnWebApplicationFirewallPolicyList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CdnWebApplicationFirewallPolicyList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CdnWebApplicationFirewallPolicyList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/cdnWebApplicationFirewallPolicies'} # type: ignore
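
    # Illustrative sketch (an added note, not part of the generated client):
    # with an authenticated ``azure.mgmt.cdn.aio.CdnManagementClient`` named
    # ``client`` (the names below are placeholders), the pager returned by
    # ``list`` is typically consumed as:
    #
    #     async for policy in client.policies.list("my-resource-group"):
    #         print(policy.name)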
@distributed_trace_async
async def get(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> "_models.CdnWebApplicationFirewallPolicy":
"""Retrieve protection policy with specified name within a resource group.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param policy_name: The name of the CdnWebApplicationFirewallPolicy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CdnWebApplicationFirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.cdn.models.CdnWebApplicationFirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CdnWebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
policy_name=policy_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CdnWebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/cdnWebApplicationFirewallPolicies/{policyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
policy_name: str,
cdn_web_application_firewall_policy: "_models.CdnWebApplicationFirewallPolicy",
**kwargs: Any
) -> "_models.CdnWebApplicationFirewallPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CdnWebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(cdn_web_application_firewall_policy, 'CdnWebApplicationFirewallPolicy')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CdnWebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('CdnWebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('CdnWebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/cdnWebApplicationFirewallPolicies/{policyName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
policy_name: str,
cdn_web_application_firewall_policy: "_models.CdnWebApplicationFirewallPolicy",
**kwargs: Any
) -> AsyncLROPoller["_models.CdnWebApplicationFirewallPolicy"]:
"""Create or update policy with specified rule set name within a resource group.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param policy_name: The name of the CdnWebApplicationFirewallPolicy.
:type policy_name: str
:param cdn_web_application_firewall_policy: Policy to be created.
:type cdn_web_application_firewall_policy:
~azure.mgmt.cdn.models.CdnWebApplicationFirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CdnWebApplicationFirewallPolicy or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cdn.models.CdnWebApplicationFirewallPolicy]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CdnWebApplicationFirewallPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
cdn_web_application_firewall_policy=cdn_web_application_firewall_policy,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('CdnWebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/cdnWebApplicationFirewallPolicies/{policyName}'} # type: ignore
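    # --- Editorial usage sketch (hedged; not part of the generated SDK source). It shows
    # how begin_create_or_update could be driven from application code.  The client
    # construction, the ``policies`` operations-group attribute and the resource names
    # below are illustrative assumptions, not guarantees from this file.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.cdn.aio import CdnManagementClient
    #
    #   async def create_waf_policy(policy_model):
    #       async with CdnManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #           poller = await client.policies.begin_create_or_update(
    #               resource_group_name="my-resource-group",
    #               policy_name="my-waf-policy",
    #               cdn_web_application_firewall_policy=policy_model,
    #           )
    #           # poller.result() resolves to the created/updated CdnWebApplicationFirewallPolicy
    #           return await poller.result()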
async def _update_initial(
self,
resource_group_name: str,
policy_name: str,
cdn_web_application_firewall_policy_patch_parameters: "_models.CdnWebApplicationFirewallPolicyPatchParameters",
**kwargs: Any
) -> "_models.CdnWebApplicationFirewallPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CdnWebApplicationFirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(cdn_web_application_firewall_policy_patch_parameters, 'CdnWebApplicationFirewallPolicyPatchParameters')
request = build_update_request_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CdnWebApplicationFirewallPolicy', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('CdnWebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/cdnWebApplicationFirewallPolicies/{policyName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
policy_name: str,
cdn_web_application_firewall_policy_patch_parameters: "_models.CdnWebApplicationFirewallPolicyPatchParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.CdnWebApplicationFirewallPolicy"]:
"""Update an existing CdnWebApplicationFirewallPolicy with the specified policy name under the
specified subscription and resource group.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param policy_name: The name of the CdnWebApplicationFirewallPolicy.
:type policy_name: str
:param cdn_web_application_firewall_policy_patch_parameters: CdnWebApplicationFirewallPolicy
parameters to be patched.
:type cdn_web_application_firewall_policy_patch_parameters:
~azure.mgmt.cdn.models.CdnWebApplicationFirewallPolicyPatchParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CdnWebApplicationFirewallPolicy or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.cdn.models.CdnWebApplicationFirewallPolicy]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CdnWebApplicationFirewallPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
policy_name=policy_name,
cdn_web_application_firewall_policy_patch_parameters=cdn_web_application_firewall_policy_patch_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('CdnWebApplicationFirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/cdnWebApplicationFirewallPolicies/{policyName}'} # type: ignore
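    # --- Editorial usage sketch (hedged; assumptions as in the note after
    # begin_create_or_update).  The patch model is assumed to carry only mutable
    # fields such as tags.
    #
    #   from azure.mgmt.cdn.models import CdnWebApplicationFirewallPolicyPatchParameters
    #
    #   patch = CdnWebApplicationFirewallPolicyPatchParameters(tags={"env": "test"})
    #   poller = await client.policies.begin_update("my-resource-group", "my-waf-policy", patch)
    #   updated = await poller.result()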
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> None:
"""Deletes Policy.
:param resource_group_name: Name of the Resource group within the Azure subscription.
:type resource_group_name: str
:param policy_name: The name of the CdnWebApplicationFirewallPolicy.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
policy_name=policy_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/cdnWebApplicationFirewallPolicies/{policyName}'} # type: ignore
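    # --- Editorial usage sketch (hedged; assumptions as above).  delete is a plain
    # awaitable in this operations class rather than a poller and returns None on HTTP 200/204.
    #
    #   await client.policies.delete("my-resource-group", "my-waf-policy")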
| 48.006383 | 205 | 0.692018 |
73e26c42d03be84833d32be1b861e6d28d243796 | 1,173 | py | Python | deepreg/log.py | Zhiyuan-w/DeepReg | 3e372d1835fdc9468c026db3767dcf9e8d4a4b0e | ["Apache-2.0"] | 379 | 2020-07-18T22:00:53.000Z | 2022-03-31T05:17:29.000Z | deepreg/log.py | Zhiyuan-w/DeepReg | 3e372d1835fdc9468c026db3767dcf9e8d4a4b0e | ["Apache-2.0"] | 646 | 2020-07-18T08:55:48.000Z | 2022-03-29T02:24:54.000Z | deepreg/log.py | Zhiyuan-w/DeepReg | 3e372d1835fdc9468c026db3767dcf9e8d4a4b0e | ["Apache-2.0"] | 62 | 2020-07-26T05:00:23.000Z | 2022-02-22T21:58:19.000Z |
"""Module for logger."""
import logging
import os
import sys
def get(name: str) -> logging.Logger:
"""
Configure the logger with formatter and handlers.
The logger should be used as:
.. code-block:: python
from deepreg import log
logger = log.get(__name__)
The log level depends on the environment variable `DEEPREG_LOG_LEVEL`.
- 0: NOTSET, will be set to DEBUG
- 1: DEBUG
- 2: INFO (default)
- 3: WARNING
- 4: ERROR
- 5: CRITICAL
https://docs.python.org/3/library/logging.html#levels
:param name: module name.
:return: configured logger.
"""
logger = logging.getLogger(name=name)
logger.propagate = False
log_level = os.environ.get("DEEPREG_LOG_LEVEL", "2")
log_level_int = max(int(log_level) * 10, 10)
logger.setLevel(log_level_int)
formatter = logging.Formatter(
fmt="%(asctime)s | %(levelname)-8s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
stdout_handler = logging.StreamHandler(stream=sys.stdout)
stdout_handler.setFormatter(formatter)
stdout_handler.setLevel(log_level_int)
logger.addHandler(stdout_handler)
return logger
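# --- Editorial usage sketch (hedged; not part of the original module).  It illustrates the
# level control described in the docstring; the module name passed to get() is illustrative.
#
#   import os
#   os.environ["DEEPREG_LOG_LEVEL"] = "3"   # WARNING and above
#   from deepreg import log
#   logger = log.get("deepreg.example")
#   logger.warning("visible")               # emitted
#   logger.info("suppressed")               # filtered out at level 3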
| 25.5 | 86 | 0.660699 |
73e2703337e31d071623cfa6b64d721f4de6f113 | 989 | py | Python | setup.py | JHay0112/jmath | 3bccf7353c07152ff49ec6aa56785918c9f710ed | ["MIT"] | 7 | 2021-06-25T12:57:08.000Z | 2021-12-29T21:53:30.000Z | setup.py | JHay0112/jmath | 3bccf7353c07152ff49ec6aa56785918c9f710ed | ["MIT"] | 1 | 2021-08-11T02:16:12.000Z | 2021-08-12T06:05:21.000Z | setup.py | JHay0112/jmath | 3bccf7353c07152ff49ec6aa56785918c9f710ed | ["MIT"] | 1 | 2021-09-20T21:25:55.000Z | 2021-09-20T21:25:55.000Z |
'''
setup.py
Author: Jordan Hay
Date: 2021-06-17
'''
from setuptools import setup, find_packages
# Open readme
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# Open version
with open("version.txt", "r", encoding="utf-8") as fh:
version = fh.readline()
setup(
name = 'jmath',
packages = find_packages(include = [
"jmath",
"jmath.*"
]),
version = version,
description = "Mathematics Tools",
long_description = long_description,
long_description_content_type = "text/markdown",
author = "Jordan Hay",
license = "MIT",
url = "https://github.com/JHay0112/jmath",
project_urls={
"Bug Tracker": "https://github.com/JHay0112/jmath/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[],
python_requires=">=3.6"
)
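# --- Editorial note (hedged; not part of the original file): with this configuration the
# distribution can be built and installed locally in the usual way, e.g.
#   python -m pip install build
#   python -m build                  # reads version.txt and README.md as configured above
#   python -m pip install dist/jmath-*.whl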
| 24.121951 | 66 | 0.608696 |
73e27040e83af8d285bcc929f8ab591bf957b9d5 | 3,998 | py | Python | alipay/aop/api/request/AlipayOpenAppAppcontentItemDeleteRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayOpenAppAppcontentItemDeleteRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayOpenAppAppcontentItemDeleteRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenAppAppcontentItemDeleteModel import AlipayOpenAppAppcontentItemDeleteModel
class AlipayOpenAppAppcontentItemDeleteRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayOpenAppAppcontentItemDeleteModel):
self._biz_content = value
else:
self._biz_content = AlipayOpenAppAppcontentItemDeleteModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.app.appcontent.item.delete'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
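# --- Editorial usage sketch (hedged; not part of the generated SDK file).  It shows how the
# request object assembles its wire parameters; the notify URL and the empty model are
# illustrative.
#
#   model = AlipayOpenAppAppcontentItemDeleteModel()
#   request = AlipayOpenAppAppcontentItemDeleteRequest(biz_model=model)
#   request.notify_url = "https://example.com/alipay/notify"
#   params = request.get_params()
#   # params carries the fixed interface name 'alipay.open.app.appcontent.item.delete',
#   # the serialized biz_content JSON, and any optional fields that were set.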
| 27.572414 | 148 | 0.646823 |
73e293d9729126dcbc6df04ad609fd3212e9bcf9 | 81,399 | py | Python | tests/test_tools.py | MichaelTrumbull/regolith | 881f543cfa7bbf80b38ed2b357209c5bc009a6af | ["CC0-1.0"] | null | null | null | tests/test_tools.py | MichaelTrumbull/regolith | 881f543cfa7bbf80b38ed2b357209c5bc009a6af | ["CC0-1.0"] | null | null | null | tests/test_tools.py | MichaelTrumbull/regolith | 881f543cfa7bbf80b38ed2b357209c5bc009a6af | ["CC0-1.0"] | null | null | null |
from copy import copy
import habanero
import pytest
import datetime as dt
from regolith.tools import (
filter_publications,
filter_presentations,
fuzzy_retrieval,
fragment_retrieval,
number_suffix,
latex_safe,
update_schemas,
group,
is_fully_appointed,
group_member_ids,
group_member_employment_start_end,
merge_collections_all,
merge_collections_intersect,
merge_collections_superior,
month_and_year,
remove_duplicate_docs,
awards_grants_honors,
get_id_from_name,
get_person_contact,
date_to_rfc822,
key_value_pair_filter,
collection_str,
search_collection,
collect_appts,
grant_burn,
validate_meeting,
get_formatted_crossref_reference,
compound_dict,
compound_list, filter_employment_for_advisees,
get_tags, dereference_institution
)
PEOPLE_COLL = [
{"_id": "m1",
"name": "member1",
"education": [{
"group": "bg",
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
{
"_id": "nm1",
"name": "non-member1",
"education": [{
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
{
"_id": "m2",
"name": "member2",
"education": [{
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"group": "bg",
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
]
CONTACTS_COLL = [
{"_id": "c1",
"name": "contact1",
"institution": "columbiau"
}]
@pytest.mark.parametrize(
"input, expected", [
(["m1", PEOPLE_COLL, CONTACTS_COLL],
{"_id": "m1",
"name": "member1",
"education": [{
"group": "bg",
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
}),
(["c1", PEOPLE_COLL, CONTACTS_COLL],
{"_id": "c1",
"name": "contact1",
"institution": "columbiau"
}),
(["bad1", PEOPLE_COLL, CONTACTS_COLL], None),
])
def test_get_person_contact(input, expected):
print(input)
actual = get_person_contact(input[0],input[1],input[2])
assert actual == expected
CITATIONS = [{"_id": "paper",
"author": ["m1","cleese"],
"ackno": "thanks",
"grant": "fwp, dmref",
"month": "apr",
"year": "2021"},
{"_id": "paper2",
"author": ["m1","palin"],
"ackno": "thanks",
"grant": "fwp2",
"year": "2020"},
{"_id": "paper3",
"author": ["m1", "jones"],
"ackno": "thanks",
"grant": "fwp2",
"month": "jun",
"year": "2020"}
]
@pytest.mark.parametrize(
"args, kwargs, expected", [
([CITATIONS, set(["m1"])],
{},
[{"_id": "paper3",
"author": ["\\textbf{m1}", "jones"],
"ackno": "thanks",
"grant": "fwp2",
"month": "jun",
"year": "2020"},
{"_id": "paper2",
"author": ["\\textbf{m1}", "palin"],
"ackno": "thanks",
"grant": "fwp2",
"year": "2020"},
{'_id': 'paper',
'ackno': 'thanks',
'author': ['\\textbf{m1}', 'cleese'],
'grant': 'fwp, dmref',
'month': 'apr',
'year': '2021'}
]
),
([CITATIONS, set(["m1"])],
{"bold": False, "ackno": True},
[{"_id": "paper3",
"author": ["m1", "jones"],
"ackno": "thanks",
"grant": "fwp2",
"month": "jun",
"note": "\\newline\\newline\\noindent Acknowledgement:\\newline\\noindent thanks\\newline\\newline\\noindent ",
"year": "2020"},
{"_id": "paper2",
"author": ["m1", "palin"],
"ackno": "thanks",
"grant": "fwp2",
"note": "\\newline\\newline\\noindent Acknowledgement:\\newline\\noindent thanks\\newline\\newline\\noindent ",
"year": "2020"},
{'_id': 'paper',
'ackno': 'thanks',
'author': ['m1', 'cleese'],
'grant': 'fwp, dmref',
'month': 'apr',
"note": "\\newline\\newline\\noindent Acknowledgement:\\newline\\noindent thanks\\newline\\newline\\noindent ",
'year': '2021'}
]
),
([CITATIONS, set(["m1"])],
{"bold":False},
[{"_id": "paper3",
"author": ["m1", "jones"],
"ackno": "thanks",
"grant": "fwp2",
"month": "jun",
"year": "2020"},
{"_id": "paper2",
"author": ["m1", "palin"],
"ackno": "thanks",
"grant": "fwp2",
"year": "2020"},
{'_id': 'paper',
'ackno': 'thanks',
'author': ['m1', 'cleese'],
'grant': 'fwp, dmref',
'month': 'apr',
'year': '2021'}
]
),
([CITATIONS, set(["m1"])],
{"bold": False, "grants": "fwp2"},
[{"_id": "paper3",
"author": ["m1", "jones"],
"ackno": "thanks",
"grant": "fwp2",
"month": "jun",
"year": "2020"},
{"_id": "paper2",
"author": ["m1", "palin"],
"ackno": "thanks",
"grant": "fwp2",
"year": "2020"}
]
),
([CITATIONS, set(["m1"])],
{"bold": False, "grants": ["fwp2", "dmref"]},
[{"_id": "paper3",
"author": ["m1", "jones"],
"ackno": "thanks",
"grant": "fwp2",
"month": "jun",
"year": "2020"},
{"_id": "paper2",
"author": ["m1", "palin"],
"ackno": "thanks",
"grant": "fwp2",
"year": "2020"},
{'_id': 'paper',
'ackno': 'thanks',
'author': ['m1', 'cleese'],
'grant': 'fwp, dmref',
'month': 'apr',
'year': '2021'}
]
),
([CITATIONS, set(["m1"])],
{"bold": False, "since": dt.date(2021,1,1)},
[{'_id': 'paper',
'ackno': 'thanks',
'author': ['m1', 'cleese'],
'grant': 'fwp, dmref',
'month': 'apr',
'year': '2021'}
]
),
([CITATIONS, set(["m1"])],
{"bold": False, "since": dt.date(2020, 5, 1), "before": dt.date(2021,1,1)},
[{"_id": "paper3",
"author": ["m1", "jones"],
"ackno": "thanks",
"grant": "fwp2",
"month": "jun",
"year": "2020"},
{"_id": "paper2",
"author": ["m1", "palin"],
"ackno": "thanks",
"grant": "fwp2",
"year": "2020"},
]
),
]
)
def test_filter_publications(args, kwargs, expected):
actual = filter_publications(*args, **kwargs)
assert actual == expected
def test_author_publications():
citations = [{"author": ["CJ", "SJLB"]}, {"editor": "SJLB"}]
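    # smoke test: filter_publications should accept entries keyed by "editor" as well as
    # "author" without raising; no return value is asserted here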
filter_publications(citations, {"SJLB"})
def test_fuzzy_retrieval():
person = {
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"name": "Anthony Scopatz",
}
assert fuzzy_retrieval([person], ["aka", "name", "_id"],
"scopatz") == person
assert fuzzy_retrieval([person], ["aka", "name", "_id"],
"scopatz, a") is None
assert (
fuzzy_retrieval(
[person], ["aka", "name", "_id"], "scopatz, a",
case_sensitive=False,
)
== person
)
def test_get_formatted_crossref_reference(monkeypatch):
def mockreturn(*args, **kwargs):
mock_article = {'message': {'author': [{"given": "SJL", "family": "Billinge"}],
"short-container-title": ["J. Great Results"],
"volume": 10,
"title": ["Whamo"],
"page": "231-233",
"issued": {"date-parts": [[1971,8,20]]}}
}
return mock_article
monkeypatch.setattr(habanero.Crossref, "works", mockreturn)
expected = ("Whamo, SJL Billinge, J. Great Results, v. 10, pp. 231-233, (1971).",
dt.date(1971, 8, 20))
actual = get_formatted_crossref_reference("test")
assert actual == expected
@pytest.mark.parametrize(
"input,expected",
[
(0, "th"),
(1, "st"),
(2, "nd"),
(3, "rd"),
(4, "th"),
(10, "th"),
(13, "th"),
(33, "rd"),
(None, ""),
("0", ""),
],
)
def test_number_suffix(input, expected):
assert number_suffix(input) == expected
@pytest.mark.parametrize(
"input,expected",
[
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
}
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}
],
),
[
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
[
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
"linked_to": "proposal2"
},
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
"linked_to": "proposal2",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
],
[{"_id": "grant1",
"linked_to": "proposal1",
"amount": "100 mph"},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
],
)
def test_merge_collections_all(input, expected):
a = input[0]
b = input[1]
target_id = "linked_to"
assert merge_collections_all(a, b, target_id) == expected
@pytest.mark.parametrize(
"input,expected",
[
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
}
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
"linked_to": "proposal2"
},
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
"linked_to": "proposal2",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
],
[{"_id": "grant1",
"linked_to": "proposal1",
"amount": "100 mph"},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
],
)
def test_merge_collections_superior(input, expected):
a = input[0]
b = input[1]
target_id = "linked_to"
assert merge_collections_superior(a, b, target_id) == expected
@pytest.mark.parametrize(
"input,expected",
[
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
}
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
}
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"}
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"},
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
{
"_id": "proposal2",
"title": "African swallow",
"author": "king arthur",
},
],
[{"_id": "grant1", "linked_to": "proposal1",
"amount": "100 mph"},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
"linked_to": "proposal2"
},
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
"linked_to": "proposal2",
},
],
),
(
(
[
{
"_id": "proposal1",
"title": "European swallow",
"author": "king arthur",
"amount": "50 mph",
},
],
[{"_id": "grant1",
"linked_to": "proposal1",
"amount": "100 mph"},
{
"_id": "grant2",
"title": "African swallow",
"author": "king arthur",
},
],
),
[
{
"_id": "grant1",
"title": "European swallow",
"author": "king arthur",
"linked_to": "proposal1",
"amount": "100 mph",
},
],
),
],
)
def test_merge_intersection(input, expected):
a = input[0]
b = input[1]
target_id = "linked_to"
assert merge_collections_intersect(a, b, target_id) == expected
@pytest.mark.parametrize(
"input,expected,kwargs",
[
("$hi", r"\$hi", {}),
(
r"Website: https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf hi",
r"Website: \url{https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf} hi",
{},
),
(
r"Website: https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf hi",
r"Website: \href{https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf} hi",
{"wrapper": "href"},
),
(
r"Website: https://github.com/CJ-Wright/"
r"Masters_Thesis/raw/master/thesis.pdf hi",
r"Website: https://github.com/CJ-Wright/"
r"Masters\_Thesis/raw/master/thesis.pdf hi",
{"url_check": False},
),
],
)
def test_latex_safe(input, expected, kwargs):
output = latex_safe(input, **kwargs)
assert output == expected
DEFAULT_SCHEMA = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA0 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {"type": "dict",
"schema": {"day": {"required": False, }, }, },
},
},
}
EXPECTED_SCHEMA0 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": False,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA1 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {"type": "dict",
"schema": {"day": {"type": "string", }, }, },
},
},
}
EXPECTED_SCHEMA1 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "string",
},
},
},
},
},
}
USER_SCHEMA2 = {
"expenses": {
"begin_day": {
"description": "The first day of expense",
"required": True,
"type": "string",
}
},
}
EXPECTED_SCHEMA2 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
"begin_day": {
"description": "The first day of expense",
"required": True,
"type": "string",
},
},
}
USER_SCHEMA3 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {"day": {"description": "The date on the receipt"}, },
},
},
},
}
EXPECTED_SCHEMA3 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "The date on the receipt",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA4 = {
"expenses": {
"itemized_expenses": {
"schema": {
"schema": {
"prepaid_expense": {
"description": "Expense paid by the direct billing",
"required": True,
"type": "float",
},
},
},
},
},
}
EXPECTED_SCHEMA4 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
"prepaid_expense": {
"description": "Expense paid by the direct billing",
"required": True,
"type": "float",
},
},
},
},
},
}
USER_SCHEMA5 = {}
EXPECTED_SCHEMA5 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
USER_SCHEMA6 = {"expenses": {}}
EXPECTED_SCHEMA6 = {
"expenses": {
"itemized_expenses": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"day": {
"description": "Expense day",
"required": True,
"type": "integer",
},
},
},
},
},
}
@pytest.mark.parametrize(
"default_schema, user_schema, expected_schema",
[
(DEFAULT_SCHEMA, USER_SCHEMA0, EXPECTED_SCHEMA0),
(DEFAULT_SCHEMA, USER_SCHEMA1, EXPECTED_SCHEMA1),
(DEFAULT_SCHEMA, USER_SCHEMA2, EXPECTED_SCHEMA2),
(DEFAULT_SCHEMA, USER_SCHEMA3, EXPECTED_SCHEMA3),
(DEFAULT_SCHEMA, USER_SCHEMA4, EXPECTED_SCHEMA4),
(DEFAULT_SCHEMA, USER_SCHEMA5, EXPECTED_SCHEMA5),
(DEFAULT_SCHEMA, USER_SCHEMA6, EXPECTED_SCHEMA6),
],
)
def test_update_schemas(default_schema, user_schema, expected_schema):
updated_schema = update_schemas(default_schema, user_schema)
assert updated_schema == expected_schema
def test_group():
doc0 = {"k0": "v00", "k1": "v01"}
doc1 = {"k0": "v10", "k1": "v11"}
doc2 = {"k1": "v21"}
doc3 = {"k0": "v00", "k1": "v31"}
db = (doc for doc in (doc0, doc1, doc2, doc3))
by = "k0"
expect = {"v00": [doc0, doc3], "v10": [doc1]}
assert group(db, by) == expect
ppl_coll = [
{
"_id": "m1",
"name": "member1",
"education": [{
"group": "bg",
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
{
"_id": "nm1",
"name": "non-member1",
"education": [{
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
{
"_id": "m2",
"name": "member2",
"education": [{
"institution": "columbiau",
"degree": "PhD",
"department": "apam",
"begin_year": 2016
}],
"employment": [{
"begin_year": 2020,
"begin_month": 1,
"group": "bg",
"organization": "columbiau",
"position": "Undergraduate Researcher",
"advisor": "sbillinge",
"status": "undergrad"
}]
},
]
@pytest.mark.parametrize(
"input,expected",
[
(ppl_coll, set(["m1", "m2"])),
],
)
def test_group_member_ids(input, expected):
actual = group_member_ids(input, "bg")
assert actual == expected
d1 = {
'name': 'John',
'experience': [
{'company': 'Google', 'role': 'product manager'},
{'company': 'Amazon', 'role': 'QA'}
],
'school': {
'name': 'Columbia',
'location': 'NYC',
'year': 'senior'
}
}
d2 = {
'name': 'Sarah',
'experience': [
{'company': 'Verizon', 'role': 'sales'},
{'company': 'AT&T', 'role': 'software engineer'}
],
'school': {
'name': 'Columbia',
'location': 'NYC',
'year': 'junior'
},
'hobbies': ['swimming', 'hiking'],
'info': {
'stats': {
'code': 'a76',
'location': 'California'
},
'software': 'CAD'
}
}
d3 = {
'_id': 'abc',
'name': 'Example Lab',
'Members': [
{
'Name': 'Lisa',
'Experience': [
{
'company': 'Google',
'location': {'state': 'CA', 'zip code': '94043'}
},
{
'company': 'Amazon',
'location': {'state': 'VA', 'zip code': '20189'}
}
]
},
{
'Name': 'Stephen',
'Experience': [
{
'company': 'Goldman Sachs',
'location': {'state': 'NY', 'zip code': '10282'}
}
]
}
]
}
@pytest.mark.parametrize(
"input,expected",
[
(d1, ['John', 'Google', 'product manager', 'Amazon', 'QA', 'Columbia', 'NYC', 'senior']),
(d2, ['Sarah', 'Verizon', 'sales', 'AT&T', 'software engineer', 'Columbia', 'NYC', 'junior', 'swimming', 'hiking', 'a76', 'California', 'CAD']),
(d3, ['abc', 'Example Lab', 'Lisa', 'Google', 'CA', '94043', 'Amazon', 'VA', '20189', 'Stephen', 'Goldman Sachs', 'NY', '10282'])
]
)
def test_compound_dict(input, expected):
assert(compound_dict(input, []) == expected)
l1 = [
'hello',
{'name': 'Fred', 'status': 'active'},
{'name': 'Derf', 'status': 'inactive'},
'bye'
]
l2 = [
['a', 'b', 'c'],
{'name': 'Anthony', 'Status': 'active'},
[{'product': 'phone'}, {'product': 'laptop'}],
"end"
]
@pytest.mark.parametrize(
"input,expected",
[
(l1, ['hello', 'Fred', 'active', 'Derf', 'inactive', 'bye']),
(l2, ['a', 'b', 'c', 'Anthony', 'active', 'phone', 'laptop', 'end']),
]
)
def test_compound_list(input, expected):
assert(compound_list(input, []) == expected)
p1 = {
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"name": "Anthony Scopatz",
}
p2 = {
"_id": "abc",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "Anthony Bill Chris",
}
p3 = {
"_id": "Leonan",
"name": "Leonan Garea",
"company": {
"name": "Amazon",
"role": "Product Manager"
},
"projects": [
{'title': 'GUI Application', 'description': 'Make a GUI for the group'},
]
}
p4 = {
"_id": "cba",
"name": "Jackie",
"company": {},
"projects": [
{'title': 'PDF Maker', 'description': 'New PDF function'},
{'title': 'Write Paper', 'description': 'Draft the new paper'}
]
}
p5 = {
'_id': 'ghi',
'name': 'Carl',
'experience': [
{
'name': 'Google',
'roles': [
{
'position': 'software engineer',
'location': {'state': 'CA', 'zip code': '92551'}
},
{
'position': 'manager',
'location': {'state': 'VA', 'zip code': '20189'}
}
]
},
{
'name': 'Goldman Sachs',
'Experience': [
{
'position': 'junior associate',
'location': {'state': 'NY', 'zip code': '10282'}
}
]
}
]
}
@pytest.mark.parametrize(
"input, expected",
[
(([p1, p2], ["aka", "name", "_id"],
"Anth", False),[p1,p2]),
(([p1, p2], ["aka", "name", "_id"],
"scopatz, a", True),[]),
(([p1, p2], ["aka", "name", "_id"],
"scopatz, a", False),[p1]),
(([p1, p2], ["aka", "name", "_id"],
"ill", False),[p2]),
(([p3], ["company"], "Amazon", False), [p3]),
(([p3, p4], ["projects"], "PDF", False), [p4]),
(([p5], ['experience'], '20189', False), [p5]),
(([p5], ['experience'], 'hello', False), [])
],
)
def test_fragment_retrieval(input, expected):
assert(fragment_retrieval(input[0],input[1],input[2],case_sensitive = input[3]) == expected)
@pytest.mark.parametrize(
"input, expected",
[
((None, None), "present"),
((None, 2002), "2002"),
((5,2002), "May 2002"),
],
)
def test_month_and_year(input,expected):
assert(month_and_year(input[0],input[1]) == expected)
@pytest.mark.parametrize(
"appts,start,end,expected",
[
({"name": "Kurt Godel",
"_id": "kgodel",
"appointments": {
"A": {"begin_year": 2017, "begin_month": 6, "begin_day": 1, "end_year": 2017, "end_month": 6, "end_day": 30,
"grant": "grant1", "loading": 1.0, "type": "pd",}}},
"2017-06-01", "2017-07-01", False),
({"name": "MC Escher",
"_id": "mcescher",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 0.5, "type": "pd",},
"B": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant2", "loading": 0.5, "type": "pd",
}}},"2017-06-01", "2017-06-30", True),
({"name": "Johann Sebastian Bach",
"_id": "jsbach",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 0.5, "type": "pd",},
"B": {"begin_date": '2017-06-02', "end_date": '2017-06-29', "grant": "grant2", "loading": 0.5, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Evariste Galois",
"_id": "egalois",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-15', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-16', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",
}}},"2017-06-01", "2017-06-30", True),
({"name": "Ludwig Wittgenstein",
"_id": "lwittgenstein",
"appointments": {
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-15', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",},
"C": {"begin_date": '2017-07-01', "end_date": '2017-07-30', "grant": "grant3", "loading": 1.0, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Buckminster Fuller",
"_id": "bfuller",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 1.0, "type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",
}}}, "2017-06-01", "2017-06-30", False),
({"name": "Lorem Ipsum",
"_id": "lipsum",
"appointments":{
"A": {"begin_date": '2017-06-01', "end_date": '2017-06-30', "grant": "grant1", "loading": 1.0,"type": "pd",},
"B": {"begin_date": '2017-06-17', "end_date": '2017-06-30', "grant": "grant2", "loading": 1.0, "type": "pd",}
}}, "2017-06-01", "2017-06-30", False),
],
)
def test_is_fully_appointed(appts, start, end, expected):
actual = is_fully_appointed(appts, start, end)
assert actual == expected
@pytest.mark.parametrize(
"input, expected",
[
({'funding':[
{"name": "Omega Laser User's Group Travel Award",
"value": 1100,
"year": 2013},
{"name": "NIF User's Group Travel Award",
"value": 1150,
"year": 2013}]},
[{'description': "Omega Laser User's Group Travel Award (\\$1,100)",
'year': 2013,
'_key': 2013.0},
{'description':"NIF User's Group Travel Award (\\$1,150)",
'year': 2013,
'_key': 2013.0}]),
({'funding':[
{"name": "Omega Laser User's Group Travel Award",
"value": 1100,
"year": 2013}],
"service":[{"name": "International Steering Committee", "role": "chair",
"type": "profession", "year": 2020,
"month": 3, "notes": ["something"]}]},
[{"description":"International Steering Committee",
"year":2020,
"_key":2020.03},
{'description': "Omega Laser User's Group Travel Award (\\$1,100)",
'year': 2013,
'_key': 2013.0}]
)
],
)
def test_awards_grants_honors(input, expected):
    assert(awards_grants_honors(input) == expected)
@pytest.mark.parametrize(
"input, expected",
[
(([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'}], 'Simon'), None),
(([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'}], 'Anthony B Friend'),
'afriend'),
(([{'_id':'afriend','aka':['AB Friend','Tony Friend'], 'name': 'Anthony B Friend'},
{'_id':'aeinstein','aka':['Einstein'], 'name': 'Albert Einstein'}],
'Albert Einstein'),
'aeinstein')
],
)
def test_get_id_from_name(input,expected):
assert(get_id_from_name(input[0],input[1]) == expected)
@pytest.mark.parametrize(
"input, expected",
[
((2012, 'Jan', 18), 'Wed, 18 Jan 2012 00:00:00 -0000'),
((2020, 6, 22), 'Mon, 22 Jun 2020 00:00:00 -0000'),
],
)
def test_date_to_rfc822(input,expected):
assert(date_to_rfc822(input[0], input[1], input[2]) == expected)
person1 = {
"_id": "scopatz",
"aka": [
"Scopatz",
"Scopatz, A",
"Scopatz, A.",
"Scopatz, A M",
"Anthony Michael Scopatz",
],
"name": "Anthony Scopatz",
"position": "Professor"
}
person2 = {
"_id": "abc",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "Anthony Bill Chris",
"position": "Professor"
}
person3 = {
"_id": "jdoe",
"aka": [
"A. BC",
"BC, A",
"Anthony BC",
],
"name": "John Doe",
}
people = [person1, person2, person3]
@pytest.mark.parametrize(
"input, expected",
[
((people, ['name', 'Doe']), [person3]),
((people, ['name', 'Jerry']), []),
((people, ['position', 'Prof']), [person1, person2]),
((people, ['position', 'Prof', 'name', 'Chris']), [person2]),
],
)
def test_key_value_pair_filter(input, expected):
assert(key_value_pair_filter(input[0], input[1]) == expected)
@pytest.mark.parametrize(
"input, expected",
[
(([person3], None), "jdoe \n"),
(([], None), ''),
(([person1, person2], ['position']), "scopatz position: Professor \nabc position: Professor \n"),
(([person2], ['position']), "abc position: Professor \n"),
],
)
def test_collection_str(input, expected):
assert(collection_str(input[0], input[1]) == expected)
@pytest.mark.parametrize(
"input, expected",
[
((people, ['name', 'Doe'], None), "jdoe \n"),
((people, ['name', 'Jerry'], None), ""),
((people, ['position', 'Prof', 'name', 'Chris'], None), "abc \n"),
((people, ['position', 'prof', 'name', 'Chris'], ['position']), "abc position: Professor \n"),
],
)
def test_search_collection(input, expected):
assert(search_collection(input[0], input[1], input[2]) == expected)
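# shared fixture: people with appointments/employment records, reused by the
# appointment and grant tests that follow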
appointed_people = [
{'name': 'Kurt Godel', '_id': 'kgodel',
'appointments': {
"A": {"begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
"B": {'_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
"C": {'_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'}},
"employment": [
{'group': 'permutation', 'begin_date': '2014-06-01', 'end_date': '2015-06-01', 'status': 'phd'},
{'group': 'matrix', 'begin_year': '2020', 'end_day': '5', 'end_month': '12', 'end_year': '2020'},
{'group': 'permutation', 'begin_day': 4, 'begin_month': 9, 'begin_year': 2012, 'end_day': 5,
'end_month': 9, 'end_year': 2012, 'permanent': 'true'}
]},
{'name': 'MC Escher', '_id': 'mcescher',
'appointments':{
"A": {"begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
"B": {"begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant2', 'loading': 0.5, 'type': 'ss'},
"C": {"begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant3', 'loading': 0.5, 'type': 'ss'},},
'employment': [
{'group': 'transformation', 'begin_date': '2018-07-24', 'end_date': dt.date(2020, 8, 1), 'status': 'postdoc'},
{'group': 'abstract', 'begin_year': 2010, 'end_day': 5, 'end_month': 12, 'end_year': 2020},
{'group': 'abstract', 'begin_date': '2012-06-30', 'end_date': '2012-09-05'}
]},
{'name': 'Johann Sebastian Bach', '_id': 'jsbach',
'appointments': {
"A": {"begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
"B": {"begin_date": '2019-12-16', "end_date": '2020-12-31', 'grant': 'grant2', 'loading': 0.9, 'type': 'pd'},
"C": {"begin_date": '2019-12-01', "end_date": '2020-12-31', 'grant': 'grant3', 'loading': 0.1, 'type': 'pd'}},
'employment': [
{'group': 'bg', 'begin_date': '2019-02-03'}
]},
{'name': 'Ludwig Wittgenstein', '_id': 'lwittgenstein',
'appointments': {
"A": {'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}}},
{'name': 'Karl Popper', '_id': 'kpopper',
'appointments': {
"A": {'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}}},
{'name': 'GEM Anscombe', '_id': 'ganscombe', 'appointments': {}},
{'name': 'Sophie Germain', '_id': 'sgermain',
'appointments': {
"A": {'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'}}},
]
@pytest.mark.parametrize(
"people,key,value,start,end,expected",
[(appointed_people, 'grant', 'grant1', None, None,
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'mcescher', '_id': 'A', "begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
{'person': 'jsbach', '_id': 'A', "begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
]),
(appointed_people, None, None, '2019-09-01', '2019-09-30',
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'sgermain', '_id': 'A', 'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'} ,
]),
(appointed_people, ['loading', 'type'], [1.0, 'ss'], '2019-12-15', '2019-12-25',
[{'person': 'lwittgenstein', '_id': 'A', 'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'kpopper', '_id': 'A', 'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'}
]),
(appointed_people, ['loading', 'type', 'grant'], [0.9, 'pd', 'grant3'], None, None, []),
(appointed_people, None, None, None, None,
[{'person': 'kgodel', '_id': 'A', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.5, 'type': 'gra'},
{'person': 'kgodel', '_id': 'B', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'kgodel', '_id': 'C', "begin_date": '2019-09-01', "end_date": '2019-09-10', 'grant': 'grant1', 'loading': 0.25, 'type': 'gra'},
{'person': 'mcescher', '_id': 'A', "begin_date": '2019-10-01', "end_date": '2019-10-31', 'grant': 'grant1', 'loading': 1.0, 'type': 'ss'},
{'person': 'mcescher', '_id' :'B', "begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant2', 'loading': 0.5, 'type': 'ss'},
{'person': 'mcescher', '_id': 'C', "begin_date": '2019-11-01', "end_date": '2019-11-30', 'grant': 'grant3', 'loading': 0.5, 'type': 'ss'},
{'person': 'jsbach', '_id': 'A', "begin_date": '2019-12-01', "end_date": '2020-12-15', 'grant': 'grant1', 'loading': 0.9, 'type': 'pd'},
{'person': 'jsbach', '_id': 'B', "begin_date": '2019-12-16', "end_date": '2020-12-31', 'grant': 'grant2', 'loading': 0.9, 'type': 'pd'},
{'person': 'jsbach', '_id': 'C', "begin_date": '2019-12-01', "end_date": '2020-12-31', 'grant': 'grant3', 'loading': 0.1, 'type': 'pd'},
{'person': 'lwittgenstein', '_id': 'A', 'begin_date': '2019-12-10', 'end_date': '2019-12-20', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'kpopper', '_id': 'A', 'begin_date': '2019-12-25', 'end_date': '2019-12-31', 'grant': 'grant2', 'loading': 1.0, 'type': 'ss'},
{'person': 'sgermain', '_id': 'A', 'begin_date': '2019-09-02', 'end_date': '2019-09-06', 'grant': 'grant4', 'loading': 1.0, 'type': 'ss'},
]),
(appointed_people, 'type', 'ss', '2019-10-21', '2019-09-01', 'begin date is after end date'),
(appointed_people, ['type', 'loading'], None, None, None, 'number of filter keys and filter values do not match'),
(appointed_people, 'type', 'pd', '2019-12-10', None, 'please enter both begin date and end date or neither'),
([{'name': 'Magical Person', '_id': 'mperson', 'appointments': {"A": {'begin_date': '2019-09-01', 'end_date': '2019-09-05',
'loading': 1.0, 'grant': 'grant1', 'type': 'imaginary'}}}], None, None,
None, None, 'invalid type imaginary for appointment A of mperson'
),
]
)
def test_collect_appts(people, key, value, start, end, expected):
try:
actual = collect_appts(people, filter_key=key, filter_value=value, begin_date=start, end_date=end)
assert actual == expected
except ValueError:
with pytest.raises(ValueError) as excinfo:
actual = collect_appts(people, filter_key=key, filter_value=value, begin_date=start, end_date=end)
assert str(excinfo.value) == expected
except RuntimeError:
with pytest.raises(RuntimeError) as excinfo:
actual = collect_appts(people, filter_key=key, filter_value=value, begin_date=start, end_date=end)
assert str(excinfo.value) == expected
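# module-level appointment set built from the shared fixture above, reused by the
# grant_burn tests below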
appts = collect_appts(appointed_people)
grant1 = {'_id': 'grant1', 'alias': 'grant_one', 'budget': [
{'begin_date': '2019-09-01', 'end_date': '2019-09-03', 'student_months': 1, 'postdoc_months': 0.5, 'ss_months': 0},
{'begin_date': '2019-09-04', 'end_date': '2019-09-07', 'student_months': 1.5, 'postdoc_months': 0, 'ss_months': 0},
{'begin_date': '2019-09-08', 'end_date': '2019-09-10', 'student_months': 2, 'postdoc_months': 1.5, 'ss_months': 0},
]}
grant2 = {'_id': 'grant2', 'alias': 'grant_two', 'budget': [
{'begin_date': '2019-09-01', 'end_date': '2019-12-31', 'student_months': 4, 'postdoc_months': 2.5, 'ss_months': 1}
]}
grant3 = {'_id': 'grant3', 'budget': [
{'begin_date': '2019-09-01', 'end_date': '2019-10-31', 'student_months': 0, 'postdoc_months': 1, 'ss_months': 2},
{'begin_date': '2019-11-01', 'end_date': '2019-12-31', 'student_months': 2, 'postdoc_months': 0.5, 'ss_months': 0}
]}
grant4 = {'_id': 'grant4', 'alias': 'grant_four', 'budget': [
{'begin_date': '2019-09-01', 'end_date': '2019-09-07', 'student_months': 1, 'postdoc_months': 1, 'ss_months': 1}]}
@pytest.mark.parametrize(
"grant,appointments,start,end,expected",
[
(grant1, appts, None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 29.5},
dt.date(2019, 9, 2): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 28.5},
dt.date(2019, 9, 3): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 27.5},
dt.date(2019, 9, 4): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 72.25},
dt.date(2019, 9, 5): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 71.25},
dt.date(2019, 9, 6): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 70.25},
dt.date(2019, 9, 7): {'postdoc_days': 15.25, 'ss_days': 0.0, 'student_days': 69.25},
dt.date(2019, 9, 8): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 129.25},
dt.date(2019, 9, 9): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 128.25},
dt.date(2019, 9, 10): {'postdoc_days': 61.0, 'ss_days': 0.0, 'student_days': 127.25}}
),
(grant2, appts, '2019-12-15', '2019-12-31',
{dt.date(2019, 12, 15): {'postdoc_days': 76.25, 'ss_days': 9.5, 'student_days': 122.0},
dt.date(2019, 12, 16): {'postdoc_days': 75.35, 'ss_days': 8.5, 'student_days': 122.0},
dt.date(2019, 12, 17): {'postdoc_days': 74.45, 'ss_days': 7.5, 'student_days': 122.0},
dt.date(2019, 12, 18): {'postdoc_days': 73.55, 'ss_days': 6.5, 'student_days': 122.0},
dt.date(2019, 12, 19): {'postdoc_days': 72.65, 'ss_days': 5.5, 'student_days': 122.0},
dt.date(2019, 12, 20): {'postdoc_days': 71.75, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 21): {'postdoc_days': 70.85, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 22): {'postdoc_days': 69.95, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 23): {'postdoc_days': 69.05, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 24): {'postdoc_days': 68.15, 'ss_days': 4.5, 'student_days': 122.0},
dt.date(2019, 12, 25): {'postdoc_days': 67.25, 'ss_days': 3.5, 'student_days': 122.0},
dt.date(2019, 12, 26): {'postdoc_days': 66.35, 'ss_days': 2.5, 'student_days': 122.0},
dt.date(2019, 12, 27): {'postdoc_days': 65.45, 'ss_days': 1.5, 'student_days': 122.0},
dt.date(2019, 12, 28): {'postdoc_days': 64.55, 'ss_days': 0.5, 'student_days': 122.0},
dt.date(2019, 12, 29): {'postdoc_days': 63.65, 'ss_days': -0.5, 'student_days': 122.0},
dt.date(2019, 12, 30): {'postdoc_days': 62.75, 'ss_days': -1.5, 'student_days': 122.0},
dt.date(2019, 12, 31): {'postdoc_days': 61.85, 'ss_days': -2.5, 'student_days': 122.0}}
),
(grant3, appts, '2019-12-31', '2019-12-31',
{dt.date(2019, 12, 31): {'postdoc_days': 42.65, 'ss_days': 46.0, 'student_days': 61.0}}
),
(grant4, appts, None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 2): {'postdoc_days': 30.5, 'ss_days': 29.5, 'student_days': 30.5},
dt.date(2019, 9, 3): {'postdoc_days': 30.5, 'ss_days': 28.5, 'student_days': 30.5},
dt.date(2019, 9, 4): {'postdoc_days': 30.5, 'ss_days': 27.5, 'student_days': 30.5},
dt.date(2019, 9, 5): {'postdoc_days': 30.5, 'ss_days': 26.5, 'student_days': 30.5},
dt.date(2019, 9, 6): {'postdoc_days': 30.5, 'ss_days': 25.5, 'student_days': 30.5},
dt.date(2019, 9, 7): {'postdoc_days': 30.5, 'ss_days': 25.5, 'student_days': 30.5}}
),
({'_id': 'magical_grant', 'alias': 'very_magical_grant'}, appts,
'2012-12-23', '2013-01-24', 'magical_grant has no specified budget'
),
(grant4, appointed_people[0].get('appointments'), None, None,
{dt.date(2019, 9, 1): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 2): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 3): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 4): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 5): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 6): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5},
dt.date(2019, 9, 7): {'postdoc_days': 30.5, 'ss_days': 30.5, 'student_days': 30.5}}
)
]
)
def test_grant_burn(grant, appointments, start, end, expected):
try:
actual = grant_burn(grant, appointments, begin_date=start, end_date=end)
assert actual == expected
except ValueError:
with pytest.raises(ValueError) as excinfo:
actual = grant_burn(grant, appointments, begin_date=start, end_date=end)
assert str(excinfo.value) == expected
meeting1 = {'_id': 'grp2020-06-15', 'journal_club': {'doi': 'TBD'}}
meeting2 = {'_id': 'grp2020-06-22', 'presentation': {'link': 'TBD'}}
meeting3 = {'_id': 'grp2020-06-29', 'presentation': {'link': '2002ak_grmtg_presnetation', 'title': 'tbd'}}
@pytest.mark.parametrize(
"meeting,date,expected",
[
(meeting1, dt.date(2020, 8, 15), 'grp2020-06-15 does not have a journal club doi'),
(meeting1, dt.date(2020, 5, 15), None),
(meeting2, dt.date(2020, 8, 15), 'grp2020-06-22 does not have a presentation link'),
(meeting2, dt.date(2020, 5, 15), None),
(meeting3, dt.date(2020, 8, 15), 'grp2020-06-29 does not have a presentation title'),
(meeting3, dt.date(2020, 5, 15), None),
]
)
def test_validate_meeting(meeting, date, expected):
try:
actual = validate_meeting(meeting, date)
assert actual == expected
except ValueError:
with pytest.raises(ValueError) as excinfo:
actual = validate_meeting(meeting, date)
assert str(excinfo.value) == expected
@pytest.mark.parametrize(
"person,grpname,expected",
[
(appointed_people[0], 'permutation',
[{'_id': 'kgodel', 'begin_date': dt.date(2014, 6, 1),
'end_date': dt.date(2015, 6, 1), 'status': 'phd', 'permanent': None},
{'_id': 'kgodel', 'begin_date': dt.date(2012, 9, 4),
'end_date': dt.date(2012, 9, 5), 'status': None, 'permanent': 'true'}]
),
(appointed_people[1], 'transformation',
[{'_id': 'mcescher', 'begin_date': dt.date(2018, 7, 24),
'end_date': dt.date(2020, 8, 1), 'status': 'postdoc', 'permanent': None}]
),
(appointed_people[2], 'bg', "WARNING: jsbach has no end date in employment for bg starting 2019-02-03"
),
(appointed_people[3], 'abstract', [])
]
)
def test_group_member_employment_start_end(person, grpname, expected):
try:
actual = group_member_employment_start_end(person, grpname)
assert actual == expected
except:
with pytest.raises(RuntimeError) as excinfo:
actual = group_member_employment_start_end(person, grpname)
assert str(excinfo.value) == expected
@pytest.mark.parametrize(
"inp,expected",
[
([{"dupe_key": 1, "nond": 2}],
[{"dupe_key": 1, "nond": 2}]),
([{"dupe_key": 1, "nond": 2}, {"dupe_key": 1, "nond": 3}],
[{"dupe_key": 1, "nond": 2}]),
([{"no_dupe_key": 1, "nond": 2}],
"ERROR: Target key, dupe_key not found in {'no_dupe_key': 1, 'nond': 2}"),
]
)
def test_remove_duplicate_docs(inp, expected):
try:
actual = remove_duplicate_docs(inp, "dupe_key")
assert actual == expected
except:
with pytest.raises(RuntimeError) as excinfo:
actual = remove_duplicate_docs(inp, "dupe_key")
assert str(excinfo.value) == expected
@pytest.mark.parametrize(
"inp,expected",
[
([
[{"_id": "student", "name": "Lancelot", "employment": [
{"status": "ms",
"begin_date": "2020-05-05",
"end_date": "2020-10-10",
"advisor": "awesome",
"position": "masters researcher"
}]}],
"2022-01-01"
],
[]
),
([
[{"_id": "student", "name": "Lancelot", "employment": [
{"status": "ms",
"begin_date": "2020-05-05",
"end_date": "2020-10-10",
"advisor": "awesome",
"position": "masters researcher"
}]}],
"2019-01-01"
],
[{"_id": "student", "name": "Lancelot", "employment": [
{"status": "ms",
"begin_date": "2020-05-05",
"end_date": "2020-10-10",
"advisor": "awesome",
"position": "masters researcher"
}],
"role": "masters researcher",
"begin_year": 2020,
"end_year": 2020,
'end_date': dt.date(2020, 10, 10),
"status": "ms",
"position": "masters researcher"
}]
),
([
[{"_id": "student", "name": "Lancelot", "employment": [
{"status": "ms",
"advisor": "awesome",
"begin_date": "2020-05-05",
"position": "masters researcher"
}]}],
"2019-01-01"
],
[{"_id": "student", "name": "Lancelot",
"employment": [
{"status": "ms",
"advisor": "awesome",
"begin_date": "2020-05-05",
"position": "masters researcher"
}],
"role": "masters researcher",
"begin_year": 2020,
"end_year": "present",
'end_date': dt.date(2021, 6, 3),
"status": "ms",
"position": "masters researcher"
}]
)
])
def test_filter_employment_for_advisees(inp, expected):
actual = filter_employment_for_advisees(inp[0], inp[1], "ms", "awesome",
dt.date(2021,6,3))
assert actual == expected
person1 = {"_id":"tstark", "aka":"iron man", "name":"tony stark"}
person2 = {"_id":"nromanov", "aka":"black widow", "name":"natasha romanov"}
PEOPLE = [person1, person2]
presentation1 = {"_id":"abc", "authors":"tstark", "date":"2018-01-01", "department":"apam",
"institution":"columbiau", "status":"accepted", "type":"award"}
presentation2 = {"_id":"ghi", "authors":["tstark","nromanov"], "begin_date":"2019-01-02", "end_date":"2019-01-08",
"department":"physics", "institution":"rutgersu", "status":"cancelled", "type":"poster"}
presentation3 = {"_id":"jkl", "authors":["nromanov"], "begin_year":2020, "begin_month":2, "begin_day":2,
"end_year":2020, "end_month":12, "end_day":12, "department":"math", "institution":"rutgersu",
"status":"declined", "type":"webinar"}
PRESENTATIONS = [presentation1, presentation2, presentation3]
institution1 = {"_id":"columbiau", "city":"New York", "country":"USA", "name":"Columbia University", "state":"NY"}
institution2 = {"_id":"rutgersu", "city":"New Brunswick", "country":"USA", "name":"Rutgers University", "state":"NJ"}
institution3 = {"_id":"barnardc", "city":"New York", "country":"USA", "name":"Barnard College", "state":"NY", "departments": {"physics": {"name": "Department of Physics", "aka": "Phys"}}}
institution4 = {"_id":"nyu", "city":"New York", "country":"USA", "name":"New York University", "state":"NY", "street": "23rd", "zip": "10001", "aka": "purple"}
institution_overseas = {"_id":"overseasu", "city":"Toronto", "country":"Canada", "name":"Overseas University"}
organization1 = {"_id":"3m", "city":"Minneapolis", "country":"USA", "name":"3M", "state":"MN"}
INSTITUTIONS = [institution1, institution2, institution3, institution4, institution_overseas, organization1]
expected1 = {"_id": "abc",
"authors": "tony stark",
"begin_day_suffix": "st",
"begin_year": 2018,
"begin_month": 1,
"begin_day": 1,
"date": dt.date(2018, 1, 1),
"day_suffix": "st",
"department": {'name': 'apam'},
"institution": {'city': 'New York',
'country': 'USA',
'name': 'Columbia University',
'state': 'NY'},
"status": "accepted",
"type": "award"}
expected2 = {"_id": "ghi",
"authors": "tony stark, natasha romanov",
"begin_date": "2019-01-02",
"begin_day_suffix": "nd",
"begin_year": 2019,
"begin_month": 1,
"begin_day": 2,
"date": dt.date(2019, 1, 2),
"day_suffix": "nd",
"end_day": 8,
"end_day_suffix": "th",
"department": {'name': 'physics'},
"end_date": "2019-01-08",
"institution": {'city': 'New Brunswick',
'country': 'USA',
'name': 'Rutgers University',
'state': 'NJ'},
"status": "cancelled",
"type": "poster"}
expected3 = {'_id': 'jkl',
"authors": "natasha romanov",
"begin_day": 2,
"begin_day_suffix": "nd",
"begin_month": 2,
"begin_year": 2020,
"date": dt.date(2020, 2, 2),
"day_suffix": 'nd',
"department": {'name': 'math'},
"end_day": 12,
"end_day_suffix": 'th',
"end_month": 12,
"end_year": 2020,
"institution": {'city': 'New Brunswick',
'country': 'USA',
'name': 'Rutgers University',
'state': 'NJ'},
"status": "declined",
"type": "webinar"}
@pytest.mark.parametrize(
"input, expected, sysout", [
({"institution": "columbiau"}, {'department': 'unknown', "location": "New York, NY", "city": "New York", "country":"USA", "institution":"Columbia University", "organization":"Columbia University", "state":"NY"}, ""),
({"institution": "nyu"}, {'department': 'unknown',"location": "New York, NY", "city": "New York", "country":"USA", "institution":"New York University", "organization":"New York University", "state":"NY", "street": "23rd", "zip": "10001", "aka": "purple"}, ""),
({"institution": "barnardc", "department": "physics"}, {"location": "New York, NY", "city": "New York", "country":"USA", "institution":"Barnard College", "organization":"Barnard College", "state":"NY", "department": "Department of Physics"}, ""),
({"institution": "columbiau", "department": "physics"}, {"location": "New York, NY", "city": "New York", "country":"USA", "institution":"Columbia University", "organization":"Columbia University", "state":"NY", "department": "physics"}, "WARNING: no departments in columbiau. physics sought\n"),
({"organization": "3m"}, {'department': 'unknown',"location": "Minneapolis, MN", "city": "Minneapolis", "country":"USA", "institution":"3M", "organization":"3M", "state":"MN"}, ""),
({"institution": "notindbu"},
{"location": "unknown, unknown", "city": "unknown", "country": "unknown",
"institution": "notindbu", 'department': 'unknown',
"organization": "notindbu", "state": "unknown"}, "WARNING: notindbu not found in institutions\n"),
({"institution": "notindbu", "location": "Near, BY"},
{"location": "Near, BY", "city": "unknown", "country": "unknown",
"institution": "notindbu", 'department': 'unknown',
"organization": "notindbu", "state": "unknown"}, "WARNING: notindbu not found in institutions\n"),
({"institution": "notindbu", "city": "Near", "state": "BY"},
{"location": "Near, BY", "city": "Near", "country": "unknown",
"institution": "notindbu", 'department': 'unknown',
"organization": "notindbu", "state": "BY"}, "WARNING: notindbu not found in institutions\n"),
({"institution": "overseasu"},
{"location": "Toronto, Canada", "city": "Toronto",
"country": "Canada", 'department': 'unknown',
"institution": "Overseas University",
"organization": "Overseas University"}, ""),
({"degree": "phd"},
{"degree": "phd"}, "WARNING: no institution or organization in entry: {'degree': 'phd'}\n"),
]
)
def test_dereference_institution(input, expected, sysout, capsys):
dereference_institution(input, INSTITUTIONS, verbose=True)
assert expected == input
out, err = capsys.readouterr()
assert sysout == out
@pytest.mark.parametrize(
"args, kwargs, expected",[
#this tests no kwargs
([PEOPLE, PRESENTATIONS, INSTITUTIONS, "tstark"],
{}, [expected1]),
#this tests 'statuses' kwarg
([PEOPLE, PRESENTATIONS, INSTITUTIONS, "tstark"],
{"statuses" : ["all"]}, [expected2, expected1]),
#this tests 'statuses' and 'types' kwargs together
([PEOPLE, PRESENTATIONS, INSTITUTIONS, "tstark"],
{"statuses" : ["all"], "types" : ["poster"]}, [expected2]),
#this tests 'statuses' and 'since' kwargs together
([PEOPLE, PRESENTATIONS, INSTITUTIONS, "nromanov"],
{"statuses" : ["all"], "since" : dt.date(2019, 1, 1)}, [expected3, expected2]),
#this tests the 'statuses' and 'before' kwargs together
([PEOPLE, PRESENTATIONS, INSTITUTIONS, "tstark"],
{"statuses" : ["all"], "before" : dt.date(2018, 1, 2)}, [expected1])
]
)
def test_filter_presentations(args, kwargs, expected):
actual = filter_presentations(*args, **kwargs)
assert actual == expected
@pytest.mark.parametrize(
"coll, expected", [
([{"_id": "id", "name": "test"}], []),
([{"_id": "id", "tags": ""}], []),
([{"_id": "id", "tags": "thing1"}], ["thing1"]),
([{"_id": "id", "tags": "thing2,thing1"}], ["thing1", "thing2"]),
([{"_id": "id", "tags": "thing2 thing1"}], ["thing1", "thing2"]),
([{"_id": "id", "tags": "thing2,thing1 thing3"}], ["thing1", "thing2", "thing3"]),
]
)
def test_get_tags(coll, expected):
actual = get_tags(coll)
assert actual == expected
def test_get_tags_invalid():
coll = [{"_id": "id", "tags": ["test"]}]
with pytest.raises(TypeError) as e_info:
get_tags(coll)
assert e_info == 'ERROR: valid tags are comma or space separated strings of tag names'
| 37.356127 | 304 | 0.416676 |
73e2a262a92dde10c0a4c402338d30236552f731 | 3,963 | py | Python | src/sardana/macroserver/basetypes.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | src/sardana/macroserver/basetypes.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | src/sardana/macroserver/basetypes.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""This module contains the definition of the macroserver base types for
macros"""
__all__ = ["Integer", "Float", "Boolean", "String", "User", "Filename",
"File", "Macro", "MacroLibrary", "Env", "Motor", "MotorParam",
"MotorGroup", "ExpChannel", "MeasurementGroup", "ComChannel",
"IORegister", "Controller", "Instrument", "ControllerClass",
"TriggerGate"]
__docformat__ = 'restructuredtext'
from sardana import INTERFACES
from sardana.macroserver.msparameter import ParamType, AttrParamType, \
ElementParamInterface
# Basic types
class Any(ParamType):
type_class = lambda p: p
class Integer(ParamType):
type_class = int
class Float(ParamType):
type_class = float
class Boolean(ParamType):
type_class = bool
def getObj(self, str_repr):
str_repr = str_repr.lower()
if str_repr in ['true', '1']:
value = True
elif str_repr in ['false', '0']:
value = False
else:
raise ValueError('{0} is not a boolean'.format(str_repr))
return value
class String(ParamType):
type_class = str
class User(ParamType):
type_class = str
class Filename(ParamType):
type_class = str
class File(ParamType):
type_class = str
def __init__(self, macro_server, name):
ParamType.__init__(self, macro_server, name)
self.filename = None
# self.data is supposed to be an array.array object
self.data = None
def set(self, filename, data):
self.filename = filename
self.data = data
class JSON(ParamType):
def getObj(self, str_repr):
import json
return json.loads(str_repr)
class Env(ParamType):
type_class = str
class TangoDevice(ParamType):
def getObj(self, str_repr):
import PyTango
return PyTango.DeviceProxy(str_repr)
class Device(ParamType):
def getObj(self, str_repr):
import taurus
return taurus.Device(str_repr)
# Hardware types
class MotorParam(AttrParamType):
"""Class designed to represent a motor parameter name. Usual values
    are acceleration, deceleration, velocity, backlash, steps_per_unit, etc."""
def __init__(self, macro_server, name):
AttrParamType.__init__(self, macro_server, name)
self.attr_item_list = ["Acceleration", "Backlash", "Base_rate", "Step_per_unit",
"Deceleration", "Velocity", "Offset"]
self.non_attr_item_list = ["Controller"]
def getItemList(self):
return self.non_attr_item_list + self.attr_item_list
def getAttrItemList(self):
return self.attr_item_list
def getNonAttrItemList(self):
return self.non_attr_item_list
def __build_base_types():
for sardana_type, info in INTERFACES.items():
_, doc = info
class _I(ElementParamInterface):
__doc__ = doc
__name__ = sardana_type
globals()[sardana_type] = _I
__build_base_types()
| 25.901961 | 88 | 0.645218 |
73e2afe12920cb00a999e054608caceb109c692e | 3,946 | py | Python | nmgr/orm/base_model.py | mesalu/Viv2.NodeManager | 9c616e7c42cb5d543030c00f5e99b6aa9439d108 | [
"MIT"
] | null | null | null | nmgr/orm/base_model.py | mesalu/Viv2.NodeManager | 9c616e7c42cb5d543030c00f5e99b6aa9439d108 | [
"MIT"
] | null | null | null | nmgr/orm/base_model.py | mesalu/Viv2.NodeManager | 9c616e7c42cb5d543030c00f5e99b6aa9439d108 | [
"MIT"
] | null | null | null | """
"""
import enum
def column_info(*, name=None, relation=None, autogenerate=False):
"""
Associates the given method, property, or attribute
as column info within the table representing a
collection of entities.
The name can optionally be remapped by providing a name
parameter as a string. E.g. a property may be 'timestamp', where
the column in the relation has the name 'datetime_capture'.
Supplying the latter to `name` will cause the correct mapping
to occur.
:param str name: the actual column name in the database that corresponds to this column.
:param Relation relation: relation information about this column.
:param bool autogenerate: When inserting a new instance into the database, and this column is None,
should the database use the default value for this column?
"""
def decorator(attrib):
if relation and not isinstance(relation, Relation):
raise TypeError("relation parameter must be Relation instance.")
info = {
"name": name,
"relation": relation,
"autogenerate": autogenerate
}
return _ColInfo(attrib, **info)
return decorator
class Relation(object):
"""
Describes a relationship between entities via columns (e.g. foreign keys)
and a load-behavior
"""
class LoadBehavior(enum.Enum):
EAGER = enum.auto() # load when instantiated, if possible.
LAZY = enum.auto() # load once on first prop call
EXTRA_LAZY = enum.auto() # don't load until prop-call.
def __init__(self, behavior: LoadBehavior = LoadBehavior.LAZY, *, parent_cls = None, ent_col = None, parent_col = None):
"""
parent_cls: entity class associated to relation
ent_col: local column
parent_col: column in referenced entity.
"""
self._behavior = behavior
self._parent_cls = parent_cls
self._ent_col = ent_col
self._parent_col = parent_col
if not issubclass(parent_cls, Entity):
raise TypeError("Parent Entity must be a subclass of Entity")
if behavior not in Relation.LoadBehavior:
raise ValueError("Load behavior must be among Relation.LoadBehavior")
class _ColInfo(object):
"""
Some objects that need to be decorated by `column_info` can't
have attributes set on them (properties). This class is used
to temporarily wrap decorated items and then is stripped off
by the MetaEntity class initializer.
"""
def __init__(self, attrib, *, name=None, relation=None, autogenerate=False):
self._col_name = name
self._relation = relation
self._autogen = autogenerate
self._attr = attrib
def __getattr__(self, attr):
# there is some interim time between class setup and
# when the metaclass can come in and 'shuck' the _ColInfo
# instance back out, during which time the _ColInfo object
# needs to quack like a duck:
        # __getattr__ is only invoked after normal attribute lookup on self has
        # already failed, so delegate straight to the wrapped attribute;
        # calling getattr(self, attr) here would recurse back into __getattr__.
        return getattr(self._attr, attr)
class MetaEntity(type):
def __init__(cls, name, bases, dct):
cls._cols = {}
# Discover values annotated with `column_info` in dct.
for key, value in dct.items():
if isinstance(value, _ColInfo):
cls._cols[key] = value
# while we're here fill in the
# presumed column name if necessary
if value._col_name is None:
value._col_name = key
for key, value in cls._cols.items():
dct[key] = value._attr
setattr(cls, key, value._attr)
if not '_table' in dct:
cls._table = name
type.__init__(cls, name, bases, dct)
class Entity(object, metaclass=MetaEntity):
"""
Common base class for all ORM entity objects.
"""
| 34.614035 | 124 | 0.638368 |
73e2cdf9ab6475e3b5fa52b9c77da0577ca7110a | 3,926 | py | Python | openstackclient/compute/v2/host.py | redhat-openstack/python-openstackclient | 7dc2e1dc08b0692a3accb343c62451fb3d83f4cd | [
"Apache-2.0"
] | null | null | null | openstackclient/compute/v2/host.py | redhat-openstack/python-openstackclient | 7dc2e1dc08b0692a3accb343c62451fb3d83f4cd | [
"Apache-2.0"
] | null | null | null | openstackclient/compute/v2/host.py | redhat-openstack/python-openstackclient | 7dc2e1dc08b0692a3accb343c62451fb3d83f4cd | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Host action implementations"""
from openstackclient.common import command
from openstackclient.common import utils
from openstackclient.i18n import _
class ListHost(command.Lister):
"""List host command"""
def get_parser(self, prog_name):
parser = super(ListHost, self).get_parser(prog_name)
parser.add_argument(
"--zone",
metavar="<zone>",
help=_("Only return hosts in the availability zone")
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
columns = (
"Host Name",
"Service",
"Zone"
)
data = compute_client.hosts.list_all(parsed_args.zone)
return (columns,
(utils.get_item_properties(
s, columns,
) for s in data))
class SetHost(command.Command):
"""Set host properties"""
def get_parser(self, prog_name):
parser = super(SetHost, self).get_parser(prog_name)
parser.add_argument(
"host",
metavar="<host>",
help=_("The host to modify (name or ID)")
)
status = parser.add_mutually_exclusive_group()
status.add_argument(
'--enable',
action='store_true',
help=_("Enable the host")
)
status.add_argument(
'--disable',
action='store_true',
help=_("Disable the host")
)
maintenance = parser.add_mutually_exclusive_group()
maintenance.add_argument(
'--enable-maintenance',
action='store_true',
help=_("Enable maintenance mode for the host")
)
maintenance.add_argument(
'--disable-maintenance',
action='store_true',
help=_("Disable maintenance mode for the host")
)
return parser
def take_action(self, parsed_args):
kwargs = {}
if parsed_args.enable:
kwargs['status'] = True
if parsed_args.disable:
kwargs['status'] = False
if parsed_args.enable_maintenance:
kwargs['maintenance_mode'] = True
if parsed_args.disable_maintenance:
kwargs['maintenance_mode'] = False
compute_client = self.app.client_manager.compute
foundhost = utils.find_resource(
compute_client.hosts,
parsed_args.host
)
compute_client.hosts.update(
foundhost.id,
kwargs
)
class ShowHost(command.Lister):
"""Show host command"""
def get_parser(self, prog_name):
parser = super(ShowHost, self).get_parser(prog_name)
parser.add_argument(
"host",
metavar="<host>",
help=_("Name of host")
)
return parser
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
columns = (
"Host",
"Project",
"CPU",
"Memory MB",
"Disk GB"
)
data = compute_client.hosts.get(parsed_args.host)
return (columns,
(utils.get_item_properties(
s, columns,
) for s in data))
| 29.742424 | 77 | 0.579215 |
73e313d795a934476f76f2f3879d5418d5a01d3e | 4,795 | py | Python | API/src/main/resources/Lib/robot/model/tags.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | API/src/main/resources/Lib/robot/model/tags.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | API/src/main/resources/Lib/robot/model/tags.py | TagExpress/SikuliX1 | de9da11794dd94b3821eddc5c01b534d3f2fe828 | [
"MIT"
] | null | null | null | # Copyright (c) 2010-2020, sikuli.org, sikulix.com - MIT license
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import (Matcher, NormalizedDict, is_string, py2to3, setter,
unic)
@py2to3
class Tags(object):
def __init__(self, tags=None):
self._tags = tags
@setter
def _tags(self, tags):
if not tags:
return ()
if is_string(tags):
tags = (tags,)
return self._normalize(tags)
def _normalize(self, tags):
normalized = NormalizedDict(((unic(t), 1) for t in tags), ignore='_')
for removed in '', 'NONE':
if removed in normalized:
normalized.pop(removed)
return tuple(normalized)
def add(self, tags):
self._tags = tuple(self) + tuple(Tags(tags))
def remove(self, tags):
tags = TagPatterns(tags)
self._tags = [t for t in self if not tags.match(t)]
def match(self, tags):
return TagPatterns(tags).match(self)
def __contains__(self, tags):
return self.match(tags)
def __len__(self):
return len(self._tags)
def __iter__(self):
return iter(self._tags)
def __unicode__(self):
return u'[%s]' % ', '.join(self)
def __repr__(self):
return repr(list(self))
def __getitem__(self, index):
item = self._tags[index]
return item if not isinstance(index, slice) else Tags(item)
def __add__(self, other):
return Tags(tuple(self) + tuple(Tags(other)))
@py2to3
class TagPatterns(object):
def __init__(self, patterns):
self._patterns = tuple(TagPattern(p) for p in Tags(patterns))
def match(self, tags):
tags = tags if isinstance(tags, Tags) else Tags(tags)
return any(p.match(tags) for p in self._patterns)
def __contains__(self, tag):
return self.match(tag)
def __len__(self):
return len(self._patterns)
def __iter__(self):
return iter(self._patterns)
def __getitem__(self, index):
return self._patterns[index]
def __unicode__(self):
return u'[%s]' % u', '.join(pattern.__unicode__() for pattern in self)
def TagPattern(pattern):
pattern = pattern.replace(' ', '')
if 'NOT' in pattern:
return NotTagPattern(*pattern.split('NOT'))
if 'OR' in pattern:
return OrTagPattern(pattern.split('OR'))
if 'AND' in pattern or '&' in pattern:
return AndTagPattern(pattern.replace('&', 'AND').split('AND'))
return SingleTagPattern(pattern)
@py2to3
class SingleTagPattern(object):
def __init__(self, pattern):
self._matcher = Matcher(pattern, ignore='_')
def match(self, tags):
return self._matcher.match_any(tags)
def __iter__(self):
yield self
def __unicode__(self):
return self._matcher.pattern
def __nonzero__(self):
return bool(self._matcher)
@py2to3
class AndTagPattern(object):
def __init__(self, patterns):
self._patterns = tuple(TagPattern(p) for p in patterns)
def match(self, tags):
return all(p.match(tags) for p in self._patterns)
def __iter__(self):
return iter(self._patterns)
def __unicode__(self):
return ' AND '.join(pattern.__unicode__() for pattern in self)
@py2to3
class OrTagPattern(object):
def __init__(self, patterns):
self._patterns = tuple(TagPattern(p) for p in patterns)
def match(self, tags):
return any(p.match(tags) for p in self._patterns)
def __iter__(self):
return iter(self._patterns)
def __unicode__(self):
return ' OR '.join(pattern.__unicode__() for pattern in self)
@py2to3
class NotTagPattern(object):
def __init__(self, must_match, *must_not_match):
self._first = TagPattern(must_match)
self._rest = OrTagPattern(must_not_match)
def match(self, tags):
if not self._first:
return not self._rest.match(tags)
return self._first.match(tags) and not self._rest.match(tags)
def __iter__(self):
yield self._first
for pattern in self._rest:
yield pattern
def __unicode__(self):
return ' NOT '.join(pattern.__unicode__() for pattern in self).lstrip()
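# Example (illustrative sketch): how the pattern classes above combine. Exact matching
# semantics follow robot.utils.Matcher, so treat the results as indicative.
#
#   patterns = TagPatterns(['owner-*', 'smokeANDregression', 'critical NOT wip'])
#   patterns.match(['owner-alice'])          # True via the wildcard SingleTagPattern
#   patterns.match(['smoke', 'regression'])  # True via the AndTagPattern
#   patterns.match(['critical', 'wip'])      # False: the NotTagPattern excludes 'wip'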
| 26.638889 | 79 | 0.638999 |
73e348da3b419db4a042e1e3c57b42b2c474ecc2 | 38 | py | Python | gender_extractor/__init__.py | wwydmanski/gender-extractor | 6e328ba4ed1b08e7a874c36d68248540c2d8debc | [
"MIT"
] | null | null | null | gender_extractor/__init__.py | wwydmanski/gender-extractor | 6e328ba4ed1b08e7a874c36d68248540c2d8debc | [
"MIT"
] | null | null | null | gender_extractor/__init__.py | wwydmanski/gender-extractor | 6e328ba4ed1b08e7a874c36d68248540c2d8debc | [
"MIT"
] | null | null | null | from .extractor import GenderExtractor | 38 | 38 | 0.894737 |
73e36b27b76b7c046d43b723895723dd84ac9303 | 72,174 | py | Python | modspectra/cube.py | Deech08/modspectra | 4af177418f9ac3e1ff30bf99968251ac143a96bc | [
"BSD-3-Clause"
] | 2 | 2020-06-04T13:09:50.000Z | 2020-06-04T13:10:03.000Z | modspectra/cube.py | Deech08/modspectra | 4af177418f9ac3e1ff30bf99968251ac143a96bc | [
"BSD-3-Clause"
] | 1 | 2020-10-29T19:55:57.000Z | 2020-10-29T19:55:57.000Z | modspectra/cube.py | Deech08/modspectra | 4af177418f9ac3e1ff30bf99968251ac143a96bc | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import os
from astropy import units as u
from spectral_cube import SpectralCube
import numexpr as ne
import dask.array as da
from dask import delayed
from dask.diagnostics import ProgressBar
import logging
from astropy.coordinates.representation import CylindricalRepresentation, CartesianRepresentation, CartesianDifferential
import astropy.coordinates as coord
from astropy.coordinates import frame_transform_graph
from astropy import wcs
from astropy.io import fits
import scipy.interpolate
import scipy.integrate as integrate
import multiprocessing
from functools import partial
from .cubeMixin import EmissionCubeMixin
import datetime
# Helper Functions
def find_nearest_idx(array,value):
idx = (np.abs(array-value)).argmin()
return [idx]
def find_nannearest_idx(array,value):
idx = np.nanargmin(np.abs(array-value))
return [idx]
# Define TiltedDisk Coordinate Frame
class TiltedDisk(coord.BaseCoordinateFrame):
"""
A cartesian coordinate system in the frame of the tilted elliptical disk
    Requires three attributes - currently defaults to the values of Krishnarao, Benjamin, & Haffner (2019)
Three angles describing geometry of the structure
Attributes
----------
alpha: `~astropy.units.Quantity`, optional, must be keyword
Tilt Angle
beta: `~astropy.units.Quantity`, optional, must be keyword
90*u.deg - inclination
theta: `~astropy.units.Quantity`, optional, must be keyword
Major Axis Angle
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
x : `~astropy.units.Quantity`, optional, must be keyword
The x coordinate in the tilted disk coordinate system
y : `~astropy.units.Quantity`, optional, must be keyword
        The y coordinate in the tilted disk coordinate system
z : `~astropy.units.Quantity`, optional, must be keyword
The z coordinate in the tilted disk coordinate system
v_x : :class:`~astropy.units.Quantity`, optional, must be keyword
The x component of the velocity
v_y : :class:`~astropy.units.Quantity`, optional, must be keyword
The y component of the velocity
v_z : :class:`~astropy.units.Quantity`, optional, must be keyword
The z component of the velocity
"""
default_representation = coord.CartesianRepresentation
default_differential = coord.CartesianDifferential
frame_specific_representation_info = {
coord.representation.CartesianDifferential: [
coord.RepresentationMapping('d_x', 'v_x', u.km/u.s),
coord.RepresentationMapping('d_y', 'v_y', u.km/u.s),
coord.RepresentationMapping('d_z', 'v_z', u.km/u.s),
],
}
# Specify frame attributes required to fully specify the frame
# Rotation angles
alpha = coord.QuantityAttribute(default=13.5*u.deg, unit = u.rad)
beta = coord.QuantityAttribute(default=20.*u.deg, unit = u.rad)
theta = coord.QuantityAttribute(default=48.5*u.deg, unit = u.rad)
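# Example (illustrative sketch): constructing the frame directly; the angle values shown
# are simply the frame defaults defined above.
#
#   point = TiltedDisk(x = 0.5*u.kpc, y = 0.1*u.kpc, z = 0.0*u.kpc,
#                      alpha = 13.5*u.deg, beta = 20.*u.deg, theta = 48.5*u.deg)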
def get_transformation_matrix(tilteddisk_frame, inverse = False):
"""
Create coordinate transformation matrix for converting between the TiltedDisk frame and Galactocentric frame
Parameters
----------
tilteddisk_frame: TiltedDisk class Coordinate frame
inverse: 'bool', optional, must be keyword
if True, return the transposed matrix for converting from the Galactocentric frame to the TiltedDisk frame
"""
alpha = tilteddisk_frame.alpha.value
beta = tilteddisk_frame.beta.value
theta = tilteddisk_frame.theta.value
# Generate rotation matrix for coordinate transformation into coord.Galactocentric
R_matrix = np.array([np.cos(beta)*np.cos(theta), np.cos(beta)*np.sin(theta), -np.sin(beta),
-np.cos(theta)*np.sin(alpha)*-np.sin(beta) - np.cos(alpha)*np.sin(theta),
np.cos(alpha)*np.cos(theta) + np.sin(alpha)*np.sin(beta)*np.sin(theta),
np.cos(beta)*np.sin(alpha),
np.cos(alpha)*np.cos(theta)*np.sin(beta) + np.sin(alpha)*np.sin(theta),
-np.cos(theta)*np.sin(alpha) + np.cos(alpha)*np.sin(beta)*np.sin(theta),
np.cos(alpha)*np.cos(beta)]).reshape(3,3)
if inverse:
return R_matrix.transpose()
else:
return R_matrix
@frame_transform_graph.transform(coord.DynamicMatrixTransform, TiltedDisk, coord.Galactocentric)
def td_to_galactocentric(tilteddisk_coord, galactocentric_frame):
""" Compute the transformation matrix from the Tilted Disk
coordinates to Galactocentric coordinates.
"""
return get_transformation_matrix(tilteddisk_coord)
@frame_transform_graph.transform(coord.DynamicMatrixTransform, coord.Galactocentric, TiltedDisk)
def galactocentric_to_td(galactocentric_coord, tilteddisk_frame):
""" Compute the transformation matrix from Galactocentric coordinates to
Tilted Disk coordinates.
"""
return get_transformation_matrix(tilteddisk_frame, inverse = True)
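# Example (illustrative sketch): with the two transforms registered above, coordinates can
# be moved between the frames directly. The position and angles are arbitrary example values.
#
#   galcen_point = coord.Galactocentric(x = 1.*u.kpc, y = 0.*u.kpc, z = 0.*u.kpc)
#   disk_point = galcen_point.transform_to(TiltedDisk(alpha = 13.5*u.deg,
#                                                     beta = 20.*u.deg,
#                                                     theta = 48.5*u.deg))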
# Define some helper functions for the Elliptical Shape
def ellipse_equation(bd, el_constant1, el_constant2, bd_max, x_coord, y_coord):
"""
Equation for an ellipse in the form from Burton & Liszt (1978)
    Intended to be used with scipy.optimize.brenth to solve for bd
Parameters
----------
bd: 'number'
semi-minor axis of ellipse
el_constant1: 'number'
First parameter for defining ellipse
el_constant2: 'number'
second parameter for defining ellipse
bd_max: 'number'
Maximum semi-minor axis allowed within defined elliptical disk
x_coord: 'number, ndarray'
x-coordinate in ellipse
y_coord: 'number, ndarray'
y-coordinate in ellipse
"""
a = bd *el_constant1 + el_constant2 * bd**2 / bd_max
result = x_coord**2 / a**2 + y_coord**2 / bd**2 - 1.
return result
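# Note: ellipse_equation is zero when (x_coord, y_coord) lies on the orbit with semi-minor
# axis bd, so bd can be found by bracketing a sign change, as bd_solver does below.
# Example with arbitrary values:
#
#   bd = scipy.optimize.brenth(ellipse_equation, 1e-6, 1.,
#                              args = (1.1, 0.15, 0.6, 0.3, 0.2))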
# the semi-minor axis parameter, bd, cannot be solved for analytically
def bd_solver(ell, xyz, z_sigma_lim, Hz, bd_max, el_constant1, el_constant2):
"""
    Solves the ellipse equation (written in the form of ellipse_equation) for the semi-minor axis, bd
    Only solves the equation numerically when necessary, handling the special cases directly.
    Function is written in a form to use with multiprocessing.Pool and functools.partial
The defining characteristic of the Ellipse is set by the relation between the
semi-major and semi-minor axis with the following equation from Liszt & Burton (1982):
ad / bd = el_constant1 + el_constant2 * bd / bd_max
You can trick the "Ellipse" shape into a "Circle" by setting el_constant1 = 1., el_constant2 = 0.
Parameters
----------
ell: 'int'
element number to iterate over
xyz: 'ndarray with shape (3,N)'
xyz-coordinates
z_sigma_lim: 'number'
        sigma cutoff to stop solving the Ellipse equation for z above a specified scale height threshold
Hz: 'number'
Scale height along z axis
bd_max: 'number'
Maximum semi-minor axis allowed within defined elliptical disk
el_constant1: 'number'
First parameter for defining ellipse
el_constant2: 'number'
second parameter for defining ellipse
"""
x_coord = xyz[0,ell]
y_coord = xyz[1,ell]
z_coord = xyz[2,ell]
if z_coord > z_sigma_lim*Hz:
res = bd_max+1.
elif np.abs(y_coord) > bd_max:
res = bd_max+1.
elif np.abs(x_coord) > bd_max * (el_constant1 + el_constant2):
res = bd_max+1.
elif x_coord == 0.:
res = y_coord
elif y_coord == 0.:
        # |x_coord| = ad -> can directly solve for bd
res = (np.sqrt(bd_max) * np.sqrt(4.* np.abs(x_coord) * el_constant2 + bd_max * el_constant1**2) - bd_max * el_constant1) / (2.* el_constant2)
else:
res = scipy.optimize.brenth(ellipse_equation, 0.000001, 1.,
args = (el_constant1, el_constant2,bd_max, x_coord, y_coord))
if res<0:
print(el_constant1,el_constant2,bd_max, x_coord, y_coord )
return res
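# Example (illustrative sketch): bd_solver is meant to be mapped over grid indices with
# functools.partial, mirroring the pattern used inside EllipticalLBD below. The tiny
# zero-filled array is a stand-in for a real disk-frame coordinate grid; all numbers are
# arbitrary example values.
#
#   example_xyz = np.zeros((3, 4))
#   solver = partial(bd_solver, xyz = example_xyz, z_sigma_lim = 3., Hz = 0.1,
#                    bd_max = 0.6, el_constant1 = 1.1, el_constant2 = 0.15)
#   pool = multiprocessing.Pool()
#   bd_values = pool.map(solver, range(example_xyz.shape[1]))
#   pool.close()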
def EllipticalLBD(resolution, bd_max, Hz, z_sigma_lim, dens0,
velocity_factor, vel_0, el_constant1, el_constant2,
alpha, beta, theta, L_range, B_range, D_range, species = 'hi',
LSR_options={}, galcen_options = {}, visualize = False,
flaring = False, flaring_radial = False, min_bd = None,
memmap = False, da_chunks_xyz = 50, return_all = False, **kwargs):
"""
    Creates a kinematic disk following Elliptical Orbits of the form from Burton & Liszt (1982) or Krishnarao, Benjamin, Haffner (2019)
    Numerically solves the ellipse equation for every point within the disk space
    output is used directly to create a Longitude-Latitude-Velocity SpectralCube object using 'modspectra.cube.EmissionLBV'
Uses numexpr package to evaluate math
Uses multiprocessing to solve Ellipse Equation
Uses Dask for memory mapping
Parameters
----------
resolution: 'tuple, list'
Resolution to create grid
bd_max: 'number'
Maximum semi-minor axis allowed within defined elliptical disk
Hz: 'number'
Scale height along z axis
z_sigma_lim: 'number'
        sigma cutoff to stop solving the Ellipse equation for z above a specified scale height threshold
dens0: 'number'
Density at midplane of Elliptical Disk
velocity_factor: 'number'
Constant used to define velocity field in Burton & Liszt model
vel_0: 'number'
Max velocity of Elliptical orbit
Corresponds to velocity of outermost orbit on semi-minor axis
el_constant1: 'number'
First parameter for defining ellipse
el_constant2: 'number'
second parameter for defining ellipse
alpha: 'number'
Tilt angle alpha - see :class:'TiltedDisk'
beta: 'number'
Tilt angle Beta - see :class:'TiltedDisk'
90 - beta is the inclination
theta: 'number'
Tilt angle of major axis of Ellipse - see :class:'TiltedDisk'
L_range: :list:'number'
        Range of Longitude to create grid over
B_range: :list:'number'
Range of Latitude to create grid over
D_range: :list:'number'
Range of Distances to create grid over
species: 'str', optional, must be keyword
Can be 'hi' or 'ha' to set whether disk will be for neutral or ionized hydrogen (HI 21cm or H-Alpha)
Default is 'hi'
LSR_options: 'dictionary', optional, must be keyword
Dictionary of **kwargs to pass into :class:'~astropy.coordinates.GalacticLSR'
galcen_options: 'dictionary', optional, must be keyword
set galcen_options to be passed to coordinate frames
visualize: 'bool', optional, must be keyword
if using dask, returns dask visualization map
flaring: 'bool, number', optional, must be keyword
if False, disk has a constant scale height
if a number, then sets the flaring parameter, F_z, as described in Krishnarao, Benjamin, Haffner (2019)
flaring_radial: 'bool', optional, must be keyword
if flaring is present then
if False (default), flaring of scale height is a function of bd, the semi minor axis
        if True, flaring of scale height is a function of r, the cylindrical radius coordinate in the TiltedDisk Frame
min_bd: 'number', optional, must be keyword
sets the minimum value of the semi minor axis to allow
Used to make a ring, rather than a disk structure
memmap: 'bool', optional, must be keyword
if True, use dask for memory mapping when creating disk structure
useful for higher resolution computes
    da_chunks_xyz: 'number', optional, must be keyword
if memmap is True, sets the dask chunk size
Default to 50, likely too small for efficiency
return_all: 'bool', optional, must be keyword
if True, will return all output components
used for diagnostic purposes
**kwargs:
        currently not implemented
Returns
-------
lbd_coords_withvel: :class:'~astropy.coordinates.GalacticLSR'
astropy.coord array containing all coordinates corresponding to fabricated grid of points
dens_grid: 'numpy.ndarray'
ndarray with shape (resolution) containing density of points in Longitude-Latitude-Distance grid
axes order swapped to be ready for SpectralCube creation (Distance, Latitude, Longitude)
cdelt: 'numpy.ndarray'
ndarray with shape (3) containing the step size for Longitude, Latitude, and Distance used in the grid
Used for WCS object creation in later instances
disk_coordinates: :class:'TiltedDisk', optional, only if return_all == True
TiltedDisk coordinate class containing grid coordinates in original tilted disk space
galcen_coords_withvel: :class:'~astropy.coordinates.Galactocentric'
TiltedDisk class transformed to Galactocentric frame
bd_grid: 'numpy.ndarray'
ndarray with shape (resolution) containing solved values of bd from Ellipse Equation solutions
axes order swapped to match dens_grid
vel_magnitude_grid: 'numpy.ndarray'
        ndarray with shape (resolution) containing velocity vector magnitude at corresponding grid position
"""
# Extract resolution information
nx, ny, nz = resolution
if memmap:
# Populate a uniform grid in Longitude-Latitude-Distance space
lbd_grid = delayed(np.mgrid)[L_range[0]:L_range[1]:nx*1j,
B_range[0]:B_range[1]:ny*1j,
D_range[0]:D_range[1]:nz*1j]
# Transform grid into a 3 x N array for Longitude, Latitude, Distance axes
lbd = lbd_grid.T.reshape(-1,3, order = "F").transpose()
# Initiate astropy coordinates.Galactic object
lbd_coords = delayed(coord.Galactic)(l = lbd[0,:]*u.deg, b = lbd[1,:]*u.deg, distance = lbd[2,:]*u.kpc)
galcen_coords = lbd_coords.transform_to(coord.Galactocentric(**galcen_options))
disk_coords = galcen_coords.transform_to(TiltedDisk(alpha = alpha*u.deg,
beta = beta*u.deg, theta = theta*u.deg)) #Delayed
# Create standard numpy ndarray of disk_coords and reshape to grid, matching original lbd_grid object
disk_coords_arr = delayed(np.array)([disk_coords.x.value, disk_coords.y.value, disk_coords.z.value])
xyz_grid = delayed(da.from_array)(disk_coords_arr.reshape(-1,nx,ny,nz),
chunks = (-1,da_chunks_xyz,da_chunks_xyz,da_chunks_xyz))
# initiate partial object to solve for Ellipse Equation
partial_bd_solver = delayed(partial)(bd_solver, xyz=disk_coords_arr, z_sigma_lim = z_sigma_lim, Hz = Hz,
bd_max = bd_max, el_constant1 = el_constant1, el_constant2 = el_constant2)
# Solve for bd values
#print("Starting bd solver:")
#with ProgressBar():
pool = multiprocessing.Pool()
bd_vals = delayed(pool.map)(partial_bd_solver, range(nx*ny*nz))
# Create grid of bd values solved from Ellipse Equation, matching original lbd_grid object
#print("Computing semi_minor axis parameter, bd:")
#if visualize:
#bd_vals.visualize(filename = 'bdValsGraph.svg')
#with ProgressBar():
bd_vals_arr = delayed(da.from_array)(bd_vals, chunks = int(da_chunks_xyz * da_chunks_xyz * 0.125 * da_chunks_xyz))
#print("start bd_grid Calculation")
bd_grid = delayed(da.from_array)(bd_vals_arr.reshape(nx,ny,nz), chunks = da_chunks_xyz)
# Create grid of ad values (semi-major axis) derived from bd values
ad_grid = delayed(bd_grid * (el_constant1 + el_constant2 * bd_grid / bd_max))
        # Create grid of density values for the Elliptical Disk, matching original lbd_grid object
#print("Computing grid z-coordinates in Disk Frame:")
#with ProgressBar():
z_coor = xyz_grid[2,:,:,:].compute() #Delayed Dask Array of z coordinate values
if (flaring == False) & (flaring_radial == False):
def define_density_grid(z,z_sigma, bd, bdmax, H, density0, min_bd = min_bd):
#density[(np.abs(z)<(z_sigma * H)) & (bd<=bdmax)] = density0 * \
#np.exp(-0.5 * (z[(np.abs(z)<(z_sigma * H)) & (bd<=bdmax)] / H)**2)
outside = (z > z_sigma*H) | (bd > bdmax)
if min_bd != None:
outside |= bd < min_bd
density = density0 * np.exp(-0.5 * (z / H)**2)
density[outside] = 0.0
return density
dens_grid = delayed(define_density_grid)(z_coor, z_sigma_lim, bd_grid,
bd_max, Hz, dens0)
elif flaring_radial == False:
def define_density_grid(z, z_sigma, bd, bdmax, H, density0, min_bd = min_bd):
outside = (bd > bdmax)
if min_bd != None:
outside |= bd < min_bd
slope = H * (flaring - 1.) / bdmax
H_val = slope * bd + H
density = density0 * np.exp(-0.5 * (z / H_val)**2) / (H_val / H)
density[outside] = 0.0
return density
dens_grid = delayed(define_density_grid)(z_coor, z_sigma_lim, bd_grid,
bd_max, Hz, dens0)
else:
print('Computing Disk r-coordinate')
with ProgressBar():
r_coor = delayed(np.sqrt)(xyz_grid[0,:,:,:]**2 + xyz_grid[1,:,:,:]**2).compute()
def define_density_grid(z, z_sigma, bd, bdmax, H, density0, radial_coordinate, min_bd = min_bd):
outside = (bd > bdmax)
if min_bd != None:
outside |= bd < min_bd
admax = bdmax * (el_constant1 + el_constant2)
slope = H * (flaring - 1.) / admax
H_val = slope * r_coor + H
density = density0 * np.exp(-0.5 * (z / H_val)**2) / (H_val / H)
density[outside] = 0.0
return density
dens_grid = delayed(define_density_grid)(z_coor, z_sigma_lim, bd_grid,
bd_max, Hz, dens0, r_coor)
r_x = xyz_grid[0,:,:,:] #Delayed dask array
r_y = xyz_grid[1,:,:,:] #Delayed dask array
normalizer = 1. / delayed(da.sqrt)((r_x / ad_grid**2)**2 + (r_y / bd_grid**2)**2) #Delayed dask array
xtangent = r_y / bd_grid**2 * normalizer #Delayed dask array
ytangent = -r_x / ad_grid**2 * normalizer #Delayed dask array
# Angular momentum along the disk minor axis
Lz_minor_axis = 0. - bd_grid * vel_0 * (1. - delayed(da.exp)(-bd_grid / velocity_factor))
vel_magnitude_grid = delayed(da.fabs)(Lz_minor_axis / (r_x * ytangent - r_y * xtangent))
        # Create grid containing velocity vectors for orbits
vel_xyz = da.from_array(np.zeros(3*nx*ny*nz).reshape(3,nx,ny,nz),
chunks = (-1,da_chunks_xyz,da_chunks_xyz,da_chunks_xyz))
vx = xtangent * vel_magnitude_grid
vy = ytangent * vel_magnitude_grid
def assign_velocity(velocity_grid, velx, vely):
velocity_grid[0,:,:,:] = velx
velocity_grid[1,:,:,:] = vely
return velocity_grid
#Assign Velocities to array
vel_xyz = delayed(assign_velocity)(vel_xyz, vx, vy)
# vel_xyz[0,:,:,:] = vx.compute()
# vel_xyz[1,:,:,:] = vy.compute()
velocity_xyz = delayed(da.nan_to_num)(vel_xyz.T.reshape(-1,3, order = "F").transpose() * u.km/ u.s)
disk_coordinates = delayed(TiltedDisk)(x = disk_coords.x, y = disk_coords.y, z = disk_coords.z,
v_x = velocity_xyz[0,:], v_y = velocity_xyz[1,:], v_z = velocity_xyz[2,:],
alpha = alpha*u.deg, beta = beta*u.deg, theta = theta*u.deg)
# Transform to GalacticLSR frame
if return_all:
galcen_coords_withvel = disk_coordinates.transform_to(coord.Galactocentric(**galcen_options))
lbd_coords_withvel = galcen_coords_withvel.transform_to(coord.GalacticLSR(**LSR_options))
else:
lbd_coords_withvel = disk_coordinates.transform_to(coord.Galactocentric(**galcen_options)).transform_to(coord.GalacticLSR(**LSR_options))
# save Grid creation information for use in creating accurate WCS object associated with SpectralCube Object in future
dD = lbd_grid[2,0,0,1] - lbd_grid[2,0,0,0]
dB = lbd_grid[1,0,1,1] - lbd_grid[1,0,0,0]
dL = lbd_grid[0,1,0,0] - lbd_grid[0,0,0,0]
cdelt = np.array([dL.compute(), dB.compute(), dD.compute()])
print("Computing Coordinates with Disk Velocity information in GalacticLSR Frame:")
if visualize:
lbd_coords_withvel.visualize(filename = 'LBDCoordsWithVelGraph.svg')
with ProgressBar():
lbd_coords_withvel = lbd_coords_withvel.compute()
print("Computing Disk Density Grid:")
if visualize:
dens_grid.visualize(filename = 'DensGridGraph.svg')
with ProgressBar():
dens_grid = dens_grid.compute()
print("Computing Ellipse Parameters")
bd_grid = bd_grid.compute()
# dens_grid[(bd_grid > bd_max) | (np.abs(z_coor) >= z_sigma_lim * Hz)] = 0.0
pool.close()
if return_all:
return lbd_coords_withvel, np.swapaxes(dens_grid,0,2), cdelt, disk_coordinates, \
galcen_coords_withvel, np.swapaxes(bd_grid,0,2), vel_magnitude_grid.swapaxes(0,2)
#coordframe,dask array, np array, delayed, delayed, delayed dask array, delayed dask array
else:
return lbd_coords_withvel, np.swapaxes(dens_grid,0,2), cdelt
else:
# Populate a uniform grid in Longitude-Latitude-Distance space
lbd_grid = np.mgrid[L_range[0]:L_range[1]:nx*1j,
B_range[0]:B_range[1]:ny*1j,
D_range[0]:D_range[1]:nz*1j]
# Transform grid into a 3 x N array for Longitude, Latitude, Distance axes
lbd = lbd_grid.T.reshape(-1,3, order = "F").transpose()
# Initiate astropy coordinates.Galactic object
lbd_coords = coord.Galactic(l = lbd[0,:]*u.deg, b = lbd[1,:]*u.deg, distance = lbd[2,:]*u.kpc)
if return_all:
# Convert regularized grid points into Galactocentric frame
galcen_coords = lbd_coords.transform_to(coord.Galactocentric(**galcen_options))
disk_coords = galcen_coords.transform_to(TiltedDisk(alpha = alpha*u.deg,
beta = beta*u.deg, theta = theta*u.deg))
else:
disk_coords = lbd_coords.transform_to(coord.Galactocentric(**galcen_options)).transform_to(TiltedDisk(alpha = alpha*u.deg,
beta = beta*u.deg, theta = theta*u.deg))
# Create standard numpy ndarray of disk_coords and reshape to grid, matching original lbd_grid object
disk_coords_arr = np.array([disk_coords.x.value, disk_coords.y.value, disk_coords.z.value])
xyz_grid = disk_coords_arr.reshape(-1,nx,ny,nz)
# initiate partial object to solve for Ellipse Equation
partial_bd_solver = partial(bd_solver, xyz=disk_coords_arr, z_sigma_lim = z_sigma_lim, Hz = Hz,
bd_max = bd_max, el_constant1 = el_constant1, el_constant2 = el_constant2)
# Solve Ellipse Equation across multiple threads
pool = multiprocessing.Pool()
bd_vals = pool.map(partial_bd_solver, range(len(disk_coords.x.value)))
pool.close()
# Create grid of bd values solved from Ellipse Equation, matching original lbd_grid object
bd_grid = np.array(bd_vals).reshape(nx,ny,nz)
# Create grid of ad values (semi-major axis) derived from bd values
ad_grid = ne.evaluate("bd_grid * (el_constant1 + el_constant2 * bd_grid / bd_max)")
z_coor = xyz_grid[2,:,:,:]
        # Create grid of density values for the Elliptical Disk, matching original lbd_grid object
if (flaring == False) & (flaring_radial == False):
def define_density_grid(z,z_sigma, bd, bdmax, H, density0, min_bd = min_bd):
#density[(np.abs(z)<(z_sigma * H)) & (bd<=bdmax)] = density0 * \
#np.exp(-0.5 * (z[(np.abs(z)<(z_sigma * H)) & (bd<=bdmax)] / H)**2)
outside = (z > z_sigma*H) | (bd > bdmax)
if min_bd != None:
outside |= bd < min_bd
density = density0 * np.exp(-0.5 * (z / H)**2)
density[outside] = 0.0
return density
dens_grid = define_density_grid(z_coor, z_sigma_lim, bd_grid,
bd_max, Hz, dens0)
elif flaring_radial == False:
def define_density_grid(z, z_sigma, bd, bdmax, H, density0, min_bd = min_bd):
outside = (bd > bdmax)
if min_bd != None:
outside |= bd < min_bd
slope = H * (flaring - 1.) / bdmax
H_val = slope * bd + H
density = density0 * np.exp(-0.5 * (z / H_val)**2) / (H_val / H)
density[outside] = 0.0
return density
dens_grid = define_density_grid(z_coor, z_sigma_lim, bd_grid,
bd_max, Hz, dens0)
else:
r_coor = delayed(np.sqrt)(xyz_grid[0,:,:,:]**2 + xyz_grid[1,:,:,:]**2).compute()
def define_density_grid(z, z_sigma, bd, bdmax, H, density0, radial_coordinate, min_bd = min_bd):
outside = (bd > bdmax)
if min_bd != None:
outside |= bd < min_bd
admax = bdmax * (el_constant1 + el_constant2)
slope = H * (flaring - 1.) / admax
H_val = slope * r_coor + H
density = density0 * np.exp(-0.5 * (z / H_val)**2) / (H_val / H)
density[outside] = 0.0
return density
dens_grid = define_density_grid(z_coor, z_sigma_lim, bd_grid,
bd_max, Hz, dens0, r_coor)
# if flaring == False:
# def define_density_grid(z,z_sigma, bd, bdmax, H, density0):
# #density[(np.abs(z)<(z_sigma * H)) & (bd<=bdmax)] = density0 * \
# #np.exp(-0.5 * (z[(np.abs(z)<(z_sigma * H)) & (bd<=bdmax)] / H)**2)
# outside = (z > z_sigma) | (bd > bdmax)
# density = ne.evaluate("density0 * exp(-0.5 * (z / H)**2)")
# density[outside] = 0.0
# return density
# else:
# def define_density_grid(z, z_sigma, bd, bdmax, H, density0):
# outside = (z > z_sigma) | (bd > bdmax)
# H_val = H + H * (bd/bdmax * flaring)
# density = ne.evaluate("density0 * exp(-0.5 * (z / H_val)**2)")
# density[outside] = 0.0
# return density
# dens_grid = define_density_grid(z_coor, z_sigma_lim, bd_grid, bd_max, Hz, dens0)
# dens_grid[(np.abs(z_coor)<(z_sigma_lim * Hz)) & (bd_grid<=bd_max)] = dens0 * \
# np.exp(-0.5 * (z_coor[(np.abs(z_coor)<(z_sigma_lim * Hz)) & (bd_grid<=bd_max)] / Hz)**2)
        # Solve for velocity magnitude using Angular Momentum and Velocity field from Burton & Liszt
r_x = xyz_grid[0,:,:,:]
r_y = xyz_grid[1,:,:,:]
normalizer = ne.evaluate("1 / sqrt((r_x / ad_grid**2)**2 + (r_y / bd_grid**2)**2)")
xtangent = ne.evaluate("r_y / bd_grid**2 * normalizer")
ytangent = ne.evaluate("-r_x / ad_grid**2 * normalizer")
# Angular momentum along the disk minor axis
Lz_minor_axis = ne.evaluate("0. - bd_grid * vel_0 * (1. - exp(-bd_grid / velocity_factor))") #r x v
vel_magnitude_grid = ne.evaluate("abs(Lz_minor_axis / (r_x * ytangent - r_y * xtangent))")
        # Create grid containing velocity vectors for orbits
vel_xyz = np.zeros_like(xyz_grid)
vel_xyz[0,:,:,:] = ne.evaluate("xtangent * vel_magnitude_grid")
vel_xyz[1,:,:,:] = ne.evaluate("ytangent * vel_magnitude_grid")
np.nan_to_num(vel_xyz)
velocity_xyz = vel_xyz.T.reshape(-1,3, order = "F").transpose() * u.km/ u.s
vel_cartesian = CartesianRepresentation(velocity_xyz)
# Create new TiltedDisk object containing all newly calculated velocities
disk_coordinates = TiltedDisk(x = disk_coords.x, y = disk_coords.y, z = disk_coords.z,
v_x = vel_cartesian.x, v_y = vel_cartesian.y, v_z = vel_cartesian.z,
alpha = alpha*u.deg, beta = beta*u.deg, theta = theta*u.deg)
# Transform to GalacticLSR frame
if return_all:
galcen_coords_withvel = disk_coordinates.transform_to(coord.Galactocentric(**galcen_options))
lbd_coords_withvel = galcen_coords_withvel.transform_to(coord.GalacticLSR(**LSR_options))
else:
lbd_coords_withvel = disk_coordinates.transform_to(coord.Galactocentric(**galcen_options)).transform_to(coord.GalacticLSR(**LSR_options))
# save Grid creation information for use in creating accurate WCS object associated with SpectralCube Object in future
dD = lbd_grid[2,0,0,1] - lbd_grid[2,0,0,0]
dB = lbd_grid[1,0,1,1] - lbd_grid[1,0,0,0]
dL = lbd_grid[0,1,0,0] - lbd_grid[0,0,0,0]
cdelt = np.array([dL, dB, dD])
if return_all:
return lbd_coords_withvel, np.swapaxes(dens_grid,0,2), cdelt, disk_coordinates, \
galcen_coords_withvel, np.swapaxes(bd_grid,0,2), np.swapaxes(vel_magnitude_grid,0,2)
else:
return lbd_coords_withvel, np.swapaxes(dens_grid,0,2), cdelt
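# Example (illustrative sketch): a low-resolution call to EllipticalLBD. The tilt angles
# match the TiltedDisk defaults; every other number is an arbitrary example input, not a
# recommended setting.
#
#   lbd_coords, density, cdelt = EllipticalLBD((16, 16, 16), 0.6, 0.1, 3., 0.33,
#                                              0.1, 360., 1.1, 0.15,
#                                              13.5, 20., 48.5,
#                                              [-10., 10.], [-8., 8.], [6., 11.])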
def EmissionLBV(lbd_coords_withvel, density_gridin, cdelt, vel_disp, vmin, vmax,
vel_resolution, L_range, B_range, species = 'hi', visualize = False,
T_gas = 120. *u.K, memmap = False, da_chunks_xyz = 50, redden = None, case = 'B'):
"""
Creates a Longitude-Latitude-Velocity SpectralCube object of neutral (HI 21cm) or ionized (H-Alpha) gas emission
Uses output calculated from 'modspectra.cube.EllipticalLBD'
Uses numexpr package to evaluate math
Parameters
----------
lbd_coords_withvel: :class:'~astropy.coordinates.GalacticLSR'
astropy.coord array containing all coordinates corresponding to fabricated grid of points
    density_gridin: 'numpy.ndarray'
ndarray with shape (resolution) containing density of points in Longitude-Latitude-Distance grid
axes order swapped to be ready for SpectralCube creation
cdelt: 'numpy.ndarray'
ndarray with shape (3) containing the step size for Longitude, Latitude, and Distance used in the grid
Used for WCS object creation in later instances
    vel_disp: 'number, Quantity'
Velocity dispersion of the gas in units of km/s (if not Quantity)
vmin: 'number, Quantity'
Min Velocity to create in grid in units of km/s (if not Quantity)
vmax: 'number, Quantity'
Max Velocity to create in grid in units of km/s (if not Quantity)
vel_resolution: 'int'
Resolution to Create along velocity axis
L_range: :list:'number'
        Range of Longitude to create grid over
B_range: :list:'number'
Range of Latitude to create grid over
species: 'str', optional, must be keyword of either 'hi' or 'ha'
Specifies whether emission cube will be neutral (HI 21-cm) gas or ionized (H-Alpha) gas emission
        Defaults to HI neutral gas
visualize: 'bool', optional, must be keyword
if using dask, returns dask visualization map
T_gas: 'number, Quantity', optional, must be keyword
Temperature of neutral HI 21-cm emitting gas in Kelvin
Defaults to 120 K
memmap: 'bool', optional, must be keyword
if True, use dask for memory mapping when creating disk structure
useful for higher resolution computes
da_chunks_xyz: 'number', optional, must be keyword
if memmap is True, sets the dask chunk size
Default to 50, likely too small for efficiency
redden: 'bool', optional, must be keyword
if True, apply extinction corrections to emission using 3D dustmaps of Marshall et al. (2006)
implemented via the dustmaps and extinction python packages
case: 'str', optional, must be keyword
if species is 'ha', then sets the recombination case to use
Defaults to case B recombination
Returns
-------
emission_cube: :class:'numpy.ndarray'
Emission values in DBL cube
DBL_wcs: '~astropy.wcs.WCS'
WCS information associated with emission_cube
"""
# Check for units
if not isinstance(vel_disp, u.Quantity):
vel_disp = u.Quantity(vel_disp, unit = u.km / u.s)
logging.warning("No units specified for Velocity Dispersion, vel_disp, assuming"
"{}".format(vel_disp.unit))
elif not vel_disp.unit == u.km/u.s:
vel_disp = vel_disp.to(u.km / u.s)
if not isinstance(vmin, u.Quantity):
vmin = u.Quantity(vmin, unit = u.km / u.s)
logging.warning("No units specified for Min Velocity, vmin, assuming"
"{}".format(vmin.unit))
elif not vmin.unit == u.km/u.s:
vmin = vmin.to(u.km / u.s)
if not isinstance(vmax, u.Quantity):
vmax = u.Quantity(vmax, unit = u.km / u.s)
logging.warning("No units specified for Max Velocity, vmax, assuming"
"{}".format(vmax.unit))
elif not vmax.unit == u.km/u.s:
vmax = vmax.to(u.km / u.s)
if not isinstance(T_gas, u.Quantity):
T_gas = u.Quantity(T_gas, unit = u.K)
logging.warning("No units specified for T_gas, assuming"
"{}".format(T_gas.unit))
if redden:
from extinction import fm07 as extinction_law
from dustmaps.marshall import MarshallQuery
try:
marshall = MarshallQuery()
except OSError:
print("Local Copy of Marhsall et al (2006) dustmap is unavailable")
from dustmaps.config import config
print("Downloading local copy to {}".format(config['data_dir']))
from dustmaps.marshall import fetch as getMarshall
getMarshall()
finally:
marshall = MarshallQuery()
# Define the lookup table of values of Sigma
# First create my "lookup table" for the Gaussian evaluated at an array of sigma (5 sigma)
# Extract resolution information
nz, ny, nx = density_gridin.shape
# Define the velocity channels
VR, dv = np.linspace(vmin.value,vmax.value,vel_resolution, retstep=True)
vr_grid = np.swapaxes(lbd_coords_withvel.radial_velocity.value.reshape(nx,ny,nz),0,2)
# Calculate my sigma values
vr_grid_plus = vr_grid[:,:,:,None]
if memmap:
gaussian_cells = da.exp(-1/2. * ((da.from_array(vr_grid_plus, chunks = (da_chunks_xyz,da_chunks_xyz,da_chunks_xyz,1)) -
da.from_array(VR, chunks = -1)) / vel_disp.value)**2)
else:
gaussian_cells = ne.evaluate("exp(-1/2. * ((vr_grid_plus - VR) / vel_disp)**2)")
# Calculate emission in each grid cell in Longitude-Latitude-Velocity space
# Sums over Distance space
dist = cdelt[2]
if memmap:
if species == 'hi':
density_grid = density_gridin * 33.52 / (T_gas.value * vel_disp.value) * dist * 1000. / 50.
optical_depth = da.einsum('jkli, jkl->ijkl', gaussian_cells, density_grid).sum(axis = 1)
result_cube = T_gas * (1 - da.exp(-1.* optical_depth))
print("Computing Resulting Emission from Density Structure:")
if visualize:
result_cube.visualize(filename = 'EmissionCubeGraph.svg')
with ProgressBar():
emission_cube = delayed(result_cube.compute())
if species == 'ha':
if case == 'A': #Recombination case A
b_constant = -1.009
a_0_constant = 0.0938 * u.R / u.km * u.s
else: #Recombination case B
b_constant = -0.942 - 0.031 * np.log(T_gas.value/10.**4)
a_0_constant = 0.1442 * u.R / u.km * u.s
EM = da.from_array(density_gridin *density_gridin * dist * 1000., chunks = da_chunks_xyz)
if redden:
# from extinction import fm07 as extinction_law
# from dustmaps.marshall import MarshallQuery
# try:
# marshall = MarshallQuery()
# except OSError:
# print("Local Copy of Marhsall et al (2006) dustmap is unavailable")
# from dustmaps.config import config
# print("Downloading local copy to {}".format(config['data_dir']))
# from dustmaps.marshall import fetch as getMarshall
# getMarshall()
# finally:
# marshall = MarshallQuery()
wave_Ks = 2.17 *u.micron
A_KS_to_A_v = 1. / extinction_law(np.array([wave_Ks.to(u.AA).value]), 1.)
wave_ha = np.array([6562.8])
A_V_to_A_ha = extinction_law(wave_ha, 1.)
marshall_AK = da.from_array(marshall(coord.SkyCoord(lbd_coords_withvel)), chunks = da_chunks_xyz*100)
trans_ha = 10**(-0.4 * A_V_to_A_ha * A_KS_to_A_v * marshall_AK)
# print('Computed Av')
trans_grid = da.swapaxes(trans_ha.reshape(nx,ny,nz), 0,2)
EM = delayed(EM * trans_grid)
EM = delayed(EM * a_0_constant / vel_disp.value * (T_gas.value/10.**4)**(b_constant))
result_cube = delayed(da.einsum)('jkli, jkl->ijkl',
gaussian_cells,
EM)
result_cube = delayed(result_cube.sum)(axis=1)
print("Computing Resulting Emission from Density Structure:")
if visualize:
result_cube.visualize(filename = 'EmissionCubeGraph.svg')
with ProgressBar():
emission_cube = result_cube.compute()
else:
if species == 'hi':
density_grid = ne.evaluate("density_gridin *33.52 / (T_gas * vel_disp)* dist *1000. / 50.")
optical_depth = np.einsum('jkli, jkl->ijkl', gaussian_cells, density_grid).sum(axis = 1)
emission_cube = ne.evaluate("T_gas * (1 - exp(-1.* optical_depth))") * u.K
if species =='ha':
if case == 'A': #Recombination case A
b_constant = -1.009
a_0_constant = 0.0938 * u.R / u.km * u.s
else: #Recombination case B
b_constant = -0.942 - 0.031 * np.log(T_gas.value/10.**4)
a_0_constant = 0.1442 * u.R / u.km * u.s
EM = ne.evaluate("density_gridin**2 * dist * 1000.")
if redden:
# from extinction import fm07 as extinction_law
# from dustmaps.marshall import MarshallQuery
# try:
# marshall = MarshallQuery()
# except OSError:
# print("Local Copy of Marhsall et al (2006) dustmap is unavailable")
# from dustmaps.config import config
# print("Downloading local copy to {}".format(config['data_dir']))
# from dustmaps.marshall import fetch as getMarshall
# getMarshall()
# finally:
# marshall = MarshallQuery()
wave_Ks = 2.17 *u.micron
A_KS_to_A_v = 1. / extinction_law(np.array([wave_Ks.to(u.AA).value]), 1.)
wave_ha = np.array([6562.8])
A_V_to_A_ha = extinction_law(wave_ha, 1.)
marshall_AK = marshall(coord.SkyCoord(lbd_coords_withvel))
trans_ha = 10**(-0.4 * A_V_to_A_ha * A_KS_to_A_v * marshall_AK)
# print('Computed Av')
trans_grid = np.swapaxes(trans_ha.reshape(nx,ny,nz), 0,2)
EM *= trans_grid
emission_cube = np.einsum('jkli, jkl->ijkl', gaussian_cells,
a_0_constant * EM/ vel_disp.value * (T_gas.value/10.**4)**(b_constant)).sum(axis = 1)
# Create WCS Axes
DBL_wcs = wcs.WCS(naxis = 3)
DBL_wcs.wcs.crpix=[int(nx/2),int(ny/2),int(vel_resolution/2)]
DBL_wcs.wcs.crval=[np.sum(L_range)/2, np.sum(B_range)/2, (vmax.value+vmin.value)/2]
DBL_wcs.wcs.ctype=["GLON-CAR", "GLAT-CAR", "VRAD"]
DBL_wcs.wcs.cunit=["deg", "deg", "km/s"]
DBL_wcs.wcs.cdelt=np.array([cdelt[0], cdelt[1], dv])
# Return Emission cube and WCS info
return emission_cube, DBL_wcs
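# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how EllipticalLBD and
# EmissionLBV chain together. Every literal value below is an assumption taken
# from the LB80/default values defined further down in EmissionCube.
def _example_lbd_to_lbv_pipeline():
    # Tilted-disk density grid plus LSR coordinates on a coarse grid
    lbd_coords, dens_grid, cdelt = EllipticalLBD(
        (64, 64, 64),                 # (nL, nB, nD) resolution - assumed
        0.6, 0.1,                     # bd_max, Hz (kpc)
        3, 0.33,                      # z_sigma_lim, dens0 (cm^-3)
        0.1, 360.,                    # velocity_factor, vel_0 (km/s)
        1.6, 1.5,                     # el_constant1, el_constant2
        13.5, 20., 48.5,              # alpha, beta, theta (deg)
        [-10., 10.], [-8., 8.], [5., 13.])  # L, B (deg) and D (kpc) ranges
    # Convert the density grid into an HI 21-cm emission cube plus its WCS
    cube, cube_wcs = EmissionLBV(lbd_coords, dens_grid, cdelt,
                                 9. * u.km / u.s,     # vel_disp
                                 -325. * u.km / u.s,  # vmin
                                 325. * u.km / u.s,   # vmax
                                 550,                 # vel_resolution
                                 [-10., 10.], [-8., 8.],
                                 species='hi')
    return cube, cube_wcs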
class EmissionCube(EmissionCubeMixin, SpectralCube):
"""
Synthetic Emission cube container
Parameters
----------
data: 'ndarray', optional, if set, skips cube creation
Data cube values
wcs: 'astropy.wcs.WCS', optional, if set, skips cube creation
WCS info
meta: 'dict', optional, if set, skips cube creation
Metadata
mask: 'ndarray', optional, must be keyword
Masking info
fill_value: 'number', optional, must be keyword
Values to fill for missing data
header: 'dict'
FITS header
allow_huge_operations: 'bool'
from SpectralCube
beam: ''
from SpectralCube
wcs_tolerance: 'number'
from SpectralCube
resolution: 'tuple or list'
Defines LBD Resolution of grid, must be shape (3)
vel_resolution: 'int'
Velocity axis resolution
L_range: 'list, tuple, Quantity'
Range of longitude in degrees to create grid [low, high]
B_range: 'list, tuple, Quantity'
Range of latitude in degrees to create grid [low, high]
D_range: 'list,tuple,Quantity'
Range of Distances in kpc to create grid [near, far]
alpha: 'number, Quantity'
First Tilt Angle of Disk
beta: 'number, Quantity'
Second Tilt Angle of Disk
90 degrees - beta is the inclination
theta: 'number, Quantity'
Third tilt Angle of Disk for allignment of major axis with line of sight
bd_max: 'number, Quantity'
Max size of Ellipse along minor axis
Default in units of kpc
Hz: 'number, Quantity'
Vertical Scale height of Disk
Default in units of kpc
z_sigma_lim: 'number'
sigma cutoff to stop solving the ellipse equation for z above a specified scale height threshold
dens0: 'number, Quantity'
Density at midplane of Elliptical Disk
Default units of cm^-3
velocity_factor: 'number'
Constant used to define velocity field in Burton & Liszt model
vel_0: 'number'
Max velocity of Elliptical orbit
Corresponds to velocity of outermost orbit on semi-minor axis
Default unit of km/s
el_constant1: 'number'
First parameter for defining ellipse
el_constant2: 'number'
second parameter for defining ellipse
vel_disp: 'number, Quantity'
Velocity dispersion of the gas in units of km/s (if not Quantity)
vmin: 'number, Quantity'
Min Velocity to create in grid in units of km/s (if not Quantity)
vmax: 'number, Quantity'
Max Velocity to create in grid in units of km/s (if not Quantity)
visualize: 'bool', optional, must be keyword
if using dask, returns dask visualization map
redden: 'bool', optional, must be keyword
if True, apply extinction corrections to emission using 3D dustmaps of Marshall et al. (2006)
implemented via the dustmaps and extinction python packages
flaring: 'bool, number', optional, must be keyword
if False, disk has a constant scale height
if a number, then sets the flaring parameter, F_z, as described in Krishnarao, Benjamin, Haffner (2019)
flaring_radial: 'bool', optional, must be keyword
if flaring is present then
if False (default), flaring of scale height is a function of bd, the semi minor axis
if True, flaring of scale height is a function of r, the cylindrical radius coordinate in the TiltedDisk Frame
min_bd: 'number', optional, must be keyword
sets the minimum value of the semi minor axis to allow
Used to make a ring, rather than a disk structure
species: 'str', optional, must be keyword of either 'hi' or 'ha'
Specifies whether emission cube will be neutral (HI 21-cm) gas or ionized (H-Alpha) gas emission
Defaults to HI neutral gas
T_gas: 'number, Quantity', optional, must be keyword
Temperature of neutral HI 21-cm emitting gas in Kelvin
Defaults to 120 K
LSR_options: 'dictionary', optional, must be keyword
Dictionary of **kwargs to pass into :class:'~astropy.coordinates.GalacticLSR'
galcen_options: 'dictionary', optional, must be keyword
set galcen_options to be passed to coordinate frames
return_all: 'bool', optional, must be keyword
if True, will return all output components
used for diagnosing issues
most information can be determined / converted from initial 3 output elements
only other useful bit is bd_grid - may incorporate into default output in future
LB80: 'bool', optional, must be keyword
if True, will create a default cube with the exact parameters of Liszt & Burton (1982)
if any parameters are already set, they will be used instead
defaults: 'bool'
if True, will use default resolution and other information to create the object
create: 'bool'
if True, will create cube using provided parameters
***Important*** must be True to create a cube, otherwise will assume it is
loading/initiating cube from provided data
memmap: 'bool'
if true, will create cube using memory mapping via dask
Note: Currently does not work properly with species = 'ha'
da_chunks_xyz: 'number', optional, must be keyword
if memmap, this sets the default chunksize to be used for dask arrays
LBD_output_in: 'Dictionary', optional, must be keyword
provide LBD output information to save in EmissionCube class
only used if EmissionCube is from a model created by this package
LBD_output_keys_in: 'list, str', optional, must be keyword
provide keys to the entries of LBD_output_in
only used if EmissionCube is from a model created by this package
model_header: 'fits.header'
Provide header to gather model parameters and information
only used if EmissionCube is from a model created by this package
DK20: 'bool'
if True, will create a default H-Alpha cube with the parameters of Krishnarao et al. (2019)
if any parameters are already set, they will be used instead
Returns
-------
SpectralCube BaseClass with EmissionCube container Class
"""
def __init__(self, data = None, wcs = None, meta = None,
mask = None, fill_value=np.nan, header=None,
allow_huge_operations=False, beam=None, wcs_tolerance=0.0,
resolution = None, vel_resolution = None,
L_range = None, B_range = None, D_range = None,
alpha = None, beta = None, theta = None,
bd_max = None, Hz = None, z_sigma_lim = None, dens0 = None,
velocity_factor = None, vel_0 = None, el_constant1 = None, el_constant2 = None,
vel_disp = None, vmin = None, vmax = None, visualize = False, redden = None,
flaring = None, flaring_radial = None, min_bd = None,
species = None, T_gas = None, LSR_options = {}, galcen_options = {}, return_all = False,
LB80 = False, defaults = False, create = False, memmap = False, da_chunks_xyz = None,
LBD_output_in = None, LBD_output_keys_in = None, model_header = None,
DK20 = False, case = None, **kwargs):
if not meta:
meta = {}
# Define Burton & Liszt 1982 parameters if needed
if LB80:
galcen_distance_factor = 8.127 / 10.
if el_constant1 == None:
el_constant1 = 1.6
if el_constant2 == None:
el_constant2 = 1.5
if T_gas == None:
T_gas = 120. * u.K
if bd_max == None:
bd_max = 0.6 * u.kpc * galcen_distance_factor
if Hz == None:
Hz = 0.1 * u.kpc * galcen_distance_factor
if z_sigma_lim == None:
z_sigma_lim = 3
if dens0 == None:
dens0 = 0.33 * 1/u.cm/u.cm/u.cm
if vel_0 == None:
vel_0 = 360.*u.km/u.s
if velocity_factor == None:
velocity_factor = 0.1
if vel_disp == None:
vel_disp = 9 * u.km/u.s
if alpha == None:
alpha = 13.5 * u.deg
if beta == None:
beta = 20. * u.deg
if theta == None:
theta = 48.5 * u.deg
if species == None:
species = 'hi'
if flaring == None:
flaring = False
if flaring_radial == None:
flaring_radial = False
if DK20:
galcen_distance_factor = 8.127 / 10.
if el_constant1 == None:
el_constant1 = 1.6
if el_constant2 == None:
el_constant2 = 1.5
if T_gas == None:
T_gas = 8000. * u.K
if not bd_max:
bd_max = 0.6 * u.kpc * galcen_distance_factor
if Hz == None:
Hz = 0.26 * u.kpc
if z_sigma_lim == None:
z_sigma_lim = 3
if dens0 == None:
dens0 = 0.39 * 1/u.cm/u.cm/u.cm
if vel_0 == None:
vel_0 = 360.*u.km/u.s
if velocity_factor == None:
velocity_factor = 0.1
if vel_disp == None:
vel_disp = 12. * u.km/u.s
if alpha == None:
alpha = 13.5 * u.deg
if beta == None:
beta = 20. * u.deg
if theta == None:
theta = 48.5 * u.deg
if species == None:
species = 'ha'
if redden == None:
redden = True
if not case == 'A':
case = 'B'
if flaring == None:
flaring = 2.05
if flaring_radial == None:
flaring_radial = True
# Define default parameters if needed
if defaults:
if resolution == None:
resolution = (128,128,128)
if vel_resolution == None:
vel_resolution = 550
if L_range == None:
L_range = [-10,10]*u.deg
if B_range == None:
B_range = [-8,8] * u.deg
if D_range == None:
D_range = [5,13] * u.kpc
if species == None:
species = 'hi'
if vmin == None:
vmin = -325. * u.km/u.s
if vmax == None:
vmax = 325. * u.km/u.s
if create:
# Check units
if not isinstance(L_range, u.Quantity):
L_range = u.Quantity(L_range, unit = u.deg)
logging.warning("No units specified for Longitude Range, assuming "
"{}".format(L_range.unit))
elif not L_range.unit == u.deg:
L_range = L_range.to(u.deg)
if not isinstance(B_range, u.Quantity):
B_range = u.Quantity(B_range, unit = u.deg)
logging.warning("No units specified for Latitude Range, assuming "
"{}".format(B_range.unit))
elif not B_range.unit == u.deg:
B_range = B_range.to(u.deg)
if not isinstance(D_range, u.Quantity):
D_range = u.Quantity(D_range, unit = u.kpc)
logging.warning("No units specified for Distance Range, assuming "
"{}".format(D_range.unit))
elif not D_range.unit == u.kpc:
D_range = D_range.to(u.kpc)
if not isinstance(alpha, u.Quantity):
alpha = u.Quantity(alpha, unit = u.deg)
logging.warning("No units specified for Alpha, assuming "
"{}".format(alpha.unit))
elif not alpha.unit == u.deg:
alpha = alpha.to(u.deg)
if not isinstance(beta, u.Quantity):
beta = u.Quantity(beta, unit = u.deg)
logging.warning("No units specified for Beta, assuming "
"{}".format(beta.unit))
elif not beta.unit == u.deg:
beta = beta.to(u.deg)
if not isinstance(theta, u.Quantity):
theta = u.Quantity(theta, unit = u.deg)
logging.warning("No units specified for Theta, assuming "
"{}".format(theta.unit))
elif not theta.unit == u.deg:
theta = theta.to(u.deg)
if not isinstance(bd_max, u.Quantity):
bd_max = u.Quantity(bd_max, unit = u.kpc)
logging.warning("No units specified for Max Semi-minor axis, bd_max, assuming "
"{}".format(bd_max.unit))
elif not bd_max.unit == u.kpc:
bd_max = bd_max.to(u.kpc)
if not isinstance(Hz, u.Quantity):
Hz = u.Quantity(Hz, unit = u.kpc)
logging.warning("No units specified for Vertical Scale Height, Hz, assuming "
"{}".format(Hz.unit))
elif not Hz.unit == u.kpc:
Hz = Hz.to(u.kpc)
if not isinstance(dens0, u.Quantity):
dens0 = u.Quantity(dens0, unit = 1 / u.cm / u.cm / u.cm)
logging.warning("No units specified for Midplane Density, dens0, assuming "
"{}".format(dens0.unit))
elif not dens0.unit == 1 / u.cm / u.cm / u.cm:
dens0 = dens0.to(1 / u.cm / u.cm / u.cm)
if not isinstance(vel_0, u.Quantity):
vel_0 = u.Quantity(vel_0, unit = u.km / u.s)
logging.warning("No units specified for Max Velocity, vel_0, assuming "
"{}".format(vel_0.unit))
elif not vel_0.unit == u.km/u.s:
vel_0 = vel_0.to(u.km / u.s)
if not isinstance(vel_disp, u.Quantity):
vel_disp = u.Quantity(vel_disp, unit = u.km / u.s)
logging.warning("No units specified for Velocity Dispersion, vel_disp, assuming "
"{}".format(vel_disp.unit))
elif not vel_disp.unit == u.km/u.s:
vel_disp = vel_disp.to(u.km / u.s)
if not isinstance(vmin, u.Quantity):
vmin = u.Quantity(vmin, unit = u.km / u.s)
logging.warning("No units specified for Min Velocity, vmin, assuming "
"{}".format(vmin.unit))
elif not vmin.unit == u.km/u.s:
vmin = vmin.to(u.km / u.s)
if not isinstance(vmax, u.Quantity):
vmax = u.Quantity(vmax, unit = u.km / u.s)
logging.warning("No units specified for Max Velocity, vmax, assuming "
"{}".format(vmax.unit))
elif not vmax.unit == u.km/u.s:
vmax = vmax.to(u.km / u.s)
if (not da_chunks_xyz) and (memmap):
da_chunks_xyz = 50
logging.warning("Using a default chunksize of 50 per axis for memory mapping via dask")
# Assign attributes
self.bd_max = bd_max
self.Hz = Hz
self.z_sigma_lim = z_sigma_lim
self.dens0 = dens0
self.vel_0 = vel_0
self.velocity_factor = velocity_factor
self.vel_disp = vel_disp
self.el_constant1 = el_constant1
self.el_constant2 = el_constant2
self.T_gas = T_gas
self.alpha = alpha
self.beta = beta
self.theta = theta
self.resolution = resolution
self.vel_resolution = vel_resolution
self.L_range = L_range
self.B_range = B_range
self.D_range = D_range
self.species = species
self.vmin = vmin
self.vmax = vmax
# Get LBD Grid Created
self.LBD_output = EllipticalLBD(resolution, bd_max.value, Hz.value,
z_sigma_lim, dens0.value,
velocity_factor, vel_0.value, el_constant1, el_constant2,
alpha.value, beta.value, theta.value,
L_range.value, B_range.value, D_range.value,
memmap = memmap, da_chunks_xyz = da_chunks_xyz, visualize = visualize,
LSR_options = LSR_options, galcen_options = galcen_options,
flaring = flaring, flaring_radial = flaring_radial, min_bd = min_bd,
return_all = return_all, species = species, **kwargs)
if return_all:
self.LBD_output_keys = ['lbd_coords', 'disk_density', 'cdelt',
'disk_coordinate_frame', 'galcen_coords',
'bd_grid', 'vel_mag_grid']
else:
self.LBD_output_keys = ['lbd_coords', 'disk_density', 'cdelt']
# Create LBV Cube
data, wcs = EmissionLBV(self.LBD_output[0], self.LBD_output[1],
self.LBD_output[2], vel_disp, vmin, vmax,
vel_resolution, L_range.value, B_range.value, visualize = visualize, redden = redden,
species = species, T_gas = T_gas, memmap = memmap, da_chunks_xyz = da_chunks_xyz,
case = case)
if memmap:
print('Setting up Data Cube')
with ProgressBar():
data = data.compute()
# Metadata with unit info
if not isinstance(data, u.Quantity):
if species == 'hi':
data = data * u.K
elif species == 'ha':
data = data * u.R / u.km * u.s
meta['BUNIT'] = '{}'.format(data.unit)
# Add placeholder masks for v0.4.4
if mask is None:
from spectral_cube import BooleanArrayMask
mask_array = np.ones_like(data.value, dtype = bool)
mask = BooleanArrayMask(mask=mask_array, wcs=wcs)
# Initialize Spectral Cube Object
super().__init__(data = data, wcs = wcs, mask=mask, meta=meta, fill_value=fill_value,
header=header, allow_huge_operations=allow_huge_operations, beam=beam,
wcs_tolerance=wcs_tolerance, **kwargs)
if LBD_output_in:
self.LBD_output = LBD_output_in
if LBD_output_keys_in:
self.LBD_output_keys = LBD_output_keys_in
if model_header:
self.bd_max = model_header['BD_MAX'] * u.Unit(model_header['BD_MAX_U'])
self.Hz = model_header['HZ'] * u.Unit(model_header['HZ_U'])
self.z_sigma_lim = model_header['Z_SIGMA']
self.dens0 = model_header['DENS0'] * u.Unit(model_header['DENS0_U'])
self.vel_0 = model_header['VEL_0'] * u.Unit(model_header['VEL_0_U'])
self.velocity_factor = model_header['VEL_FACT']
self.vel_disp = model_header['VEL_DISP'] * u.Unit(model_header['VELDISPU'])
self.el_constant1 = model_header['EL_CONS1']
self.el_constant2 = model_header['EL_CONS2']
self.T_gas = model_header['T_GAS'] * u.Unit(model_header['T_GAS_U'])
self.alpha = model_header['ALPHA'] * u.Unit(model_header['ALPHA_U'])
self.alpha = self.alpha.decompose().scale * u.rad
self.alpha = self.alpha.to(u.deg)
self.beta = model_header['BETA'] * u.Unit(model_header['BETA_U'])
self.beta = self.beta.decompose().scale * u.rad
self.beta = self.beta.to(u.deg)
self.theta = model_header['THETA'] * u.Unit(model_header['THETA_U'])
self.theta = self.theta.decompose().scale * u.rad
self.theta = self.theta.to(u.deg)
self.resolution = model_header['RESO']
self.vel_resolution = model_header['VEL_RESO']
self.L_range = [model_header['L_RMIN'],model_header['L_RMAX']] * u.Unit(model_header['L_RANGEU'])
self.B_range = [model_header['B_RMIN'],model_header['B_RMAX']] * u.Unit(model_header['B_RANGEU'])
self.D_range = [model_header['D_RMIN'],model_header['D_RMAX']] * u.Unit(model_header['D_RANGEU'])
self.species = model_header['SPECIES']
self.vmin = model_header['VMIN'] * u.Unit(model_header['VMIN_U'])
self.vmax = model_header['VMAX'] * u.Unit(model_header['VMAX_U'])
@classmethod
def read(cls, file, model = False):
if model:
hdulist = fits.open(file)
v_bary_in = CartesianDifferential(hdulist[1].data["v_bary"] * u.km/u.s)
LBD_output_in = [coord.GalacticLSR(l = hdulist[1].data["lbd"][0,:]*u.deg, b = hdulist[1].data["lbd"][1,:]*u.deg,
distance = hdulist[1].data["lbd"][2,:]*u.kpc,
pm_l_cosb = hdulist[1].data["v"][0,:]*u.mas/u.yr,
pm_b = hdulist[1].data["v"][1,:]*u.mas/u.yr,
radial_velocity = hdulist[1].data["v"][2,:]*u.km/u.s,
v_bary = v_bary_in),
hdulist[1].data["disk_density_grid"],
hdulist[1].data["LBD_cdelt"]]
LBD_output_keys_in = ['lbd_coords', 'disk_density', 'cdelt']
model_header = hdulist[1].header
hdulist.close()
else:
model_header = None
LBD_output_keys_in = None
LBD_output_in = None
cube = super().read(file)
data = cube.unmasked_data[:]
wcs = cube.wcs
header = cube.header
meta = {}
if 'BUNIT' in header:
meta['BUNIT'] = header['BUNIT']
return cls(data = data, wcs = wcs, meta = meta, header = header,
LBD_output_in = LBD_output_in, LBD_output_keys_in = LBD_output_keys_in,
model_header = model_header)
def write(self, filename, overwrite = False, format = None, model = False):
if model:
hdulist = self.hdulist
if hasattr(self, 'LBD_output') and len(hdulist) == 1:
length = self.resolution[0]*self.resolution[1]*self.resolution[2]
c1 = fits.Column(name='lbd', unit='deg,deg,kpc', format = '{}D'.format(length))
c2 = fits.Column(name='v', unit='mas/yr,mas/yr,km/s', format = '{}D'.format(length))
c3 = fits.Column(name='disk_density_grid', unit='cm-3',
format = '{}D'.format(length), dim = '({},{},{})'.format(self.resolution[0],
self.resolution[1],
self.resolution[2]))
c4 = fits.Column(name='LBD_cdelt', array=self.LBD_output[2], unit='deg,deg,kpc', format = 'D')
c5 = fits.Column(name='v_bary', array=[self.LBD_output[0].v_bary.d_x.value,
self.LBD_output[0].v_bary.d_y.value,
self.LBD_output[0].v_bary.d_z.value],
format = 'D',
unit = '{}'.format(self.LBD_output[0].v_bary.d_z.unit))
table = fits.BinTableHDU.from_columns([c1,c2,c3,c4,c5])
table.data["lbd"][0,:] = self.LBD_output[0].l
table.data["lbd"][1,:] = self.LBD_output[0].b
table.data["lbd"][2,:] = self.LBD_output[0].distance
table.data["v"][0,:] = self.LBD_output[0].pm_l_cosb
table.data["v"][1,:] = self.LBD_output[0].pm_b
table.data["v"][2,:] = self.LBD_output[0].radial_velocity
table.data["disk_density_grid"] = self.LBD_output[1]
hdulist.append(table)
hdulist[1].header['BD_MAX'] = '{}'.format(self.bd_max.value)
hdulist[1].header['BD_MAX_U'] = '{}'.format(self.bd_max.unit)
hdulist[1].header['HZ'] = '{}'.format(self.Hz.value)
hdulist[1].header['HZ_U'] = '{}'.format(self.Hz.unit)
hdulist[1].header['Z_SIGMA'] = '{}'.format(self.z_sigma_lim)
hdulist[1].header['DENS0'] = '{}'.format(self.dens0.value)
hdulist[1].header['DENS0_U'] = '{}'.format(self.dens0.unit)
hdulist[1].header['VEL_0'] = '{}'.format(self.vel_0.value)
hdulist[1].header['VEL_0_U'] = '{}'.format(self.vel_0.unit)
hdulist[1].header['VEL_FACT'] = '{}'.format(self.velocity_factor)
hdulist[1].header['VEL_DISP'] = '{}'.format(self.vel_disp.value)
hdulist[1].header['VELDISPU'] = '{}'.format(self.vel_disp.unit)
hdulist[1].header['EL_CONS1'] = '{}'.format(self.el_constant1)
hdulist[1].header['EL_CONS2'] = '{}'.format(self.el_constant2)
hdulist[1].header['T_GAS'] = '{}'.format(self.T_gas.value)
hdulist[1].header['T_GAS_U'] = '{}'.format(self.T_gas.unit)
hdulist[1].header['ALPHA'] = '{}'.format(self.alpha.value)
hdulist[1].header['ALPHA_U'] = '{}'.format(self.alpha.unit)
hdulist[1].header['BETA'] = '{}'.format(self.beta.value)
hdulist[1].header['BETA_U'] = '{}'.format(self.beta.unit)
hdulist[1].header['THETA'] = '{}'.format(self.theta.value)
hdulist[1].header['THETA_U'] = '{}'.format(self.theta.unit)
hdulist[1].header['RESO'] = '{}'.format(self.resolution)
hdulist[1].header['VEL_RESO'] = '{}'.format(self.vel_resolution)
hdulist[1].header['L_RMIN'] = self.L_range[0].value
hdulist[1].header['L_RMAX'] = self.L_range[1].value
hdulist[1].header['L_RANGEU'] = '{}'.format(self.L_range.unit)
hdulist[1].header['B_RMIN'] = self.B_range[0].value
hdulist[1].header['B_RMAX'] = self.B_range[1].value
hdulist[1].header['B_RANGEU'] = '{}'.format(self.B_range.unit)
hdulist[1].header['D_RMIN'] = self.D_range[0].value
hdulist[1].header['D_RMAX'] = self.D_range[1].value
hdulist[1].header['D_RANGEU'] = '{}'.format(self.D_range.unit)
hdulist[1].header['SPECIES'] = '{}'.format(self.species)
hdulist[1].header['VMIN'] = self.vmin.value
hdulist[1].header['VMIN_U'] = '{}'.format(self.vmin.unit)
hdulist[1].header['VMAX'] = self.vmax.value
hdulist[1].header['VMAX_U'] = '{}'.format(self.vmax.unit)
now = datetime.datetime.strftime(datetime.datetime.now(),
"%Y/%m/%d-%H:%M:%S")
hdulist[0].header.add_history("Written by modspectra on "
"{date}".format(date=now))
try:
hdulist.writeto(filename, overwrite=overwrite)
except TypeError:
hdulist.writeto(filename, clobber=overwrite)
else:
super().write(filename, overwrite = overwrite, format = format)
# Convenience functions for quickly making DK20 Disk
def create_DK20(**kwargs):
"""
Quickly create a DK20 H-Alpha emission cube as described in Krishnarao, Benjamin, & Haffner (2019)
Parameters
----------
**kwargs: passed to EmissionCube.__init__
"""
return EmissionCube(create = True, DK20 = True, defaults = True, **kwargs)
# Convenience functions for quickly making LB80 Disk
def create_LB80(**kwargs):
"""
Quickly create an LB80 HI 21-cm emission cube with the exact parameters of Liszt & Burton (1982)
Parameters
----------
**kwargs: passed to EmissionCube.__init__
"""
return EmissionCube(create = True, LB80 = True, defaults = True, **kwargs)
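# Example usage of the two convenience constructors above (illustrative only;
# the file name is an assumption and both calls generate the full model cube):
#
#     ha_cube = EmissionCube.create_DK20()    # default DK20 H-Alpha cube
#     hi_cube = EmissionCube.create_LB80()    # default LB80 HI 21-cm cube
#     ha_cube.write('DK20_ha_cube.fits', model=True, overwrite=True)
#     restored = EmissionCube.read('DK20_ha_cube.fits', model=True)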
def create_DK20_spectrum(coordinate, radius,
l_resolution = 10,
b_resolution = 10,
distance_resolution = 200,
**kwargs):
"""
Quickly create a DK20 H-Alpha emission spectrum for a given SkyCoord direction and beam radius
Parameters
----------
coordinate: '`astropy.coordinates.SkyCoord'
central coordinate to compute spectrum
radius: 'Quantity or number'
Beam radius to compute spectrum over
assumes u.deg if number
l_resolution: 'number', optional, must be keyword
resolution across longitude dimension
b_resolution: 'number', optional, must be keyword
resolution across latitude dimension
distance_resolution: 'number', optional, must be keyword
resolution across distance dimension
"""
if not isinstance(coordinate, coord.SkyCoord):
raise TypeError("Input coordinate must be a SkyCoord object")
elif not isinstance(coordinate.galcen_distance, u.Quantity):
coordinate.galcen_distance = 8.127 * u.kpc
logging.warning("No galcen_distance attribute specified for SkyCoord, assuming galcen_distance = "
"{0}{1}".format(coordinate.galcen_distance.value, coordinate.galcen_distance.unit))
if not isinstance(radius, u.Quantity):
radius *= u.deg
logging.warning("No units specified for radius, assuming"
"{}".format(radius.unit))
resolution = (l_resolution, b_resolution, distance_resolution)
c_gal = coordinate.transform_to('galactic')
L_range = [c_gal.l.to(u.deg) - radius.to(u.deg)*1.2, c_gal.l.to(u.deg) + radius.to(u.deg)*1.2]
B_range = [c_gal.b.to(u.deg) - radius.to(u.deg)*1.2, c_gal.b.to(u.deg) + radius.to(u.deg)*1.2]
cube = EmissionCube.create_DK20(resolution = resolution, L_range = L_range, B_range = B_range, **kwargs)
print(cube)
return cube.extract_beam(coordinate = c_gal, radius = radius, reduce_cube = False)
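# Illustrative call of the beam-spectrum helper above (the coordinate, radius,
# and galcen_distance are assumed values; create_DK20_spectrum is accessed
# through EmissionCube in the same way as create_DK20):
#
#     target = coord.SkyCoord(l=2.*u.deg, b=-1.*u.deg, frame='galactic',
#                             galcen_distance=8.127*u.kpc)
#     spectrum = EmissionCube.create_DK20_spectrum(target, 0.5*u.deg)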
| 47.265226 | 149 | 0.586569 |
73e3a95266788e4ab3dd425d648d0fd3c527d917 | 4,162 | py | Python | examples/kernelml-enhanced-ridge-example.py | Freedomtowin/kernel_optimizer | 2676044e0f287cd8dda8f9f92a6d3813544965e4 | [
"MIT"
] | 9 | 2019-10-03T18:02:29.000Z | 2021-08-09T09:30:33.000Z | examples/kernelml-enhanced-ridge-example.py | Freedomtowin/kernel_optimizer | 2676044e0f287cd8dda8f9f92a6d3813544965e4 | [
"MIT"
] | 1 | 2019-12-11T09:46:09.000Z | 2021-06-17T00:45:16.000Z | examples/kernelml-enhanced-ridge-example.py | Freedomtowin/kernel_optimizer | 2676044e0f287cd8dda8f9f92a6d3813544965e4 | [
"MIT"
] | 3 | 2020-04-18T10:41:56.000Z | 2021-06-17T02:06:14.000Z |
import kernelml
from numba import jit,njit, prange, types
# import seaborn
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn import linear_model
import numpy
train=pd.read_csv("DATA/kc_house_train_data.csv",dtype = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int})
test=pd.read_csv("DATA/kc_house_test_data.csv",dtype = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int})
def sampler_uniform_distribution(kmldata):
random_samples = kmldata.prior_random_samples
variances = np.var(kmldata.update_history[:,:],axis=1).flatten()
means = kmldata.best_weight_vector.flatten()
# means = kmldata.update_history[:,-1].flatten()
return np.vstack([np.random.uniform(mu-np.sqrt(sigma*12)/2,mu+np.sqrt(sigma*12)/2,(random_samples)) for sigma,mu in zip(variances,means)])
@jit('float64(float64[:,:], float64[:,:], float64[:,:])',nopython=True)
def ridge_least_sqs_loss(x,y,w):
alpha,w = w[0][0],w[1:]
penalty = 0
value = 1
if alpha<value:
penalty = 3*abs(value-alpha)
if alpha<0:
alpha=0
hypothesis = x.dot(w)
loss = hypothesis-y
return np.sum(loss**2)/len(y) + alpha*np.sum(w[1:]**2) + penalty*np.sum(w[1:]**2)
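# Note on the parameter layout this script assumes: each candidate solution w
# is a column vector whose first entry is the ridge penalty alpha and whose
# remaining entries are the regression coefficients (the intercept comes first
# because a column of ones is prepended to X further down). For example, with
# made-up values:
#
#     w = np.array([[1.2], [1.0e4], [250.], [-2.0e3], [5.0e3]])
#     loss = ridge_least_sqs_loss(X_train, y_train, w)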
@njit('float64[:](float64[:,:], float64[:,:], float64[:,:])',parallel=True)
def map_losses(X,y,w_list):
N = w_list.shape[1]
resX = np.zeros(N)
for i in prange(N):
loss = ridge_least_sqs_loss(X,y,w_list[:,i:i+1])
resX[i] = loss
return resX
X_train = train[['sqft_living','bedrooms','bathrooms']].values
y_train = train[['price']].values
X_test = test[['sqft_living','bedrooms','bathrooms']].values
y_test = test[['price']].values
SST_train = np.sum((y_train-np.mean(y_train))**2)
SST_test = np.sum((y_test-np.mean(y_test))**2)
X_train = np.column_stack((np.ones(X_train.shape[0]),X_train))
X_test = np.column_stack((np.ones(X_test.shape[0]),X_test))
runs = 5
zscore = .09
simulation_factor = 100
volatility = 10
min_per_change=0.001
cycles = 100
volume = 100
kml = kernelml.KernelML(
prior_sampler_fcn=None,
posterior_sampler_fcn=sampler_uniform_distribution,
intermediate_sampler_fcn=None,
mini_batch_sampler_fcn=None,
parameter_transform_fcn=None,
loss_calculation_fcn=map_losses,
batch_size=None)
# args_list = [np.array([1,2,3],dtype=np.float64)]
args_list = []
kml.optimize(X_train,y_train,
args=args_list,
number_of_parameters=5,
number_of_realizations=runs,
number_of_random_simulations = simulation_factor,
number_of_cycles=cycles,
update_volatility = volatility,
update_volume=volume,
convergence_z_score=zscore,
prior_uniform_low=1,
prior_uniform_high=2,
print_feedback=True)
#Get model performance on validation data
w = kml.model.get_best_param()
alpha,w = w[0],w[1:].reshape(-1,1)
print('alpha:',alpha)
print('w:',w)
yp_train = X_train.dot(w)
SSE_train = np.sum((y_train-yp_train)**2)
yp_test = X_test.dot(w)
SSE_test = np.sum((y_test-yp_test)**2)
#Compare to sklearn.Ridge(alpha=1)
model = linear_model.Ridge(alpha=1)
model.fit(X_train,y_train)
print('kernelml validation r-squared:',1-SSE_test/SST_test)
print('scikit-learn validation r-squared:',model.score(X_test,y_test))
| 37.836364 | 403 | 0.646084 |
73e3ce5423e1dc84cd4febe940a90fee9f6ff775 | 1,531 | py | Python | Python/Games/rpcV3.py | erikhayton/Portfolio | ee1e649fe6340911968f27207ad7253d3ddc2cde | [
"MIT"
] | null | null | null | Python/Games/rpcV3.py | erikhayton/Portfolio | ee1e649fe6340911968f27207ad7253d3ddc2cde | [
"MIT"
] | null | null | null | Python/Games/rpcV3.py | erikhayton/Portfolio | ee1e649fe6340911968f27207ad7253d3ddc2cde | [
"MIT"
] | null | null | null | import random
# more specified imput
# from random import randint
human_wins=0
computer_wins=0
winning_score=2
while human_wins < winning_score and computer_wins < winning_score:
print(f"Puny Human Score: {human_wins} Computer Score: {computer_wins}")
# shorter
human = input("Puny human: Input rock, paper, or scissors! ").lower()
if human =="quit" or human == "q":
break
rand_num = random.randint(0,2)
# with the more specific import:
# rand_num = randint(0,2)
# computer
if rand_num == 0:
computer = "rock"
elif rand_num == 1:
computer = "paper"
else:
computer = "scissors"
print("computer played: " + computer)
# human
if human == computer:
print("Draw")
elif human == "rock" and computer == "scissors":
print("puny human wins")
human_wins += 1
elif human == "paper" and computer == "rock":
print("puny human wins")
human_wins += 1
elif human == "scissors" and computer == "paper":
print("puny human wins")
human_wins += 1
else:
print("computer wins")
computer_wins += 1
print(
"*\n"
"*\n"
"*")
print(f"FINAL SCORES: Puny Human: {human_wins} Computer: {computer_wins}")
print(
"*\n"
"*\n"
"*")
if human_wins > computer_wins:
print("Congrats Dumbass! Now you're on Technology's Hit List!")
elif human_wins == computer_wins:
print("You'll never amount to anything...Quitter!")
else:
print("You have pleased the Google overlords...Loser!")
print(
"\n"
)
| 25.516667 | 76 | 0.61855 |
73e3d3f4cc2f3697c5345fccbd0d3d02956e6f1e | 385 | py | Python | app/core/migrations/0010_auto_20201203_2152.py | aakash-sheth/django-rest-api | 5dcd5eb03c5efccd7011fd6417b50e2ceacff243 | [
"MIT"
] | null | null | null | app/core/migrations/0010_auto_20201203_2152.py | aakash-sheth/django-rest-api | 5dcd5eb03c5efccd7011fd6417b50e2ceacff243 | [
"MIT"
] | null | null | null | app/core/migrations/0010_auto_20201203_2152.py | aakash-sheth/django-rest-api | 5dcd5eb03c5efccd7011fd6417b50e2ceacff243 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-12-03 21:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20201202_2144'),
]
operations = [
migrations.RenameField(
model_name='unemploymentbyindustry',
old_name='indutry_id',
new_name='industry_id',
),
]
| 20.263158 | 48 | 0.607792 |
73e41ea9d55c6019ac5f31281c270fec65e9ea79 | 8,998 | py | Python | tests/test_validate.py | jeremylinlin/operator-courier | 9e53cee85e02e3ab54cfbef5770cfd58b4895c3b | [
"Apache-2.0"
] | 1 | 2019-04-09T04:52:16.000Z | 2019-04-09T04:52:16.000Z | tests/test_validate.py | jeremylinlin/operator-courier | 9e53cee85e02e3ab54cfbef5770cfd58b4895c3b | [
"Apache-2.0"
] | null | null | null | tests/test_validate.py | jeremylinlin/operator-courier | 9e53cee85e02e3ab54cfbef5770cfd58b4895c3b | [
"Apache-2.0"
] | null | null | null | import yaml
import pytest
from operatorcourier.validate import ValidateCmd
from operatorcourier.format import unformat_bundle
from testfixtures import LogCapture
@pytest.mark.parametrize('bundle,expected_validation_results_dict', [
("tests/test_files/bundles/verification/noicon.valid.bundle.yaml",
{'errors': [], 'warnings': [
'csv metadata.annotations.categories not defined',
'csv spec.icon not defined']}),
("tests/test_files/bundles/verification/nocrd.valid.bundle.yaml",
{'errors': [], 'warnings': [
'csv spec.icon not defined',
'csv spec.maturity not defined']}),
])
def test_valid_bundles(bundle, expected_validation_results_dict):
valid, validation_results_dict = get_validation_results(bundle)
assert valid is True
assert validation_results_dict == expected_validation_results_dict
@pytest.mark.parametrize('bundle,expected_validation_results_dict', [
("tests/test_files/bundles/verification/nopkg.invalid.bundle.yaml",
{'errors': ['Bundle does not contain any packages.'],
'warnings': [
'csv metadata.annotations.categories not defined',
'csv spec.icon not defined']}),
("tests/test_files/bundles/verification/no-data-key.bundle.yaml",
{'errors': ['Bundle does not contain any clusterServiceVersions.',
'Bundle does not contain any packages.'], 'warnings': []}),
("tests/test_files/bundles/verification/csvinstallspecnotlists.invalid.bundle.yaml",
{'errors': ['csv spec.install.spec.deployments should be a list',
'csv spec.install.spec.permissions should be a list',
'csv spec.install.spec.clusterPermissions should be a list'],
'warnings': ['csv spec.icon not defined']}),
("tests/test_files/bundles/verification/"
"csvmissinginstallattributes.invalid.bundle.yaml",
{'errors': ['csv spec.install.strategy not defined',
'csv spec.install.spec not defined'],
'warnings': ['csv spec.icon not defined']}),
("tests/test_files/bundles/verification/"
"csvinstallstrategywrongvalue.invalid.bundle.yaml",
{'errors': ["csv spec.install.strategy must be one of ['deployment']"],
'warnings': ['csv spec.icon not defined']}),
])
def test_invalid_bundle(bundle, expected_validation_results_dict):
valid, validation_results_dict = get_validation_results(bundle)
assert valid is False
assert validation_results_dict == expected_validation_results_dict
@pytest.mark.parametrize('bundle,expected_validation_results_dict', [
("tests/test_files/bundles/verification/valid.bundle.yaml",
{'errors': [], 'warnings': []}), ])
def test_ui_valid_bundle_io(bundle, expected_validation_results_dict):
valid, validation_results_dict = get_ui_validation_results(bundle)
assert valid is True
assert validation_results_dict == expected_validation_results_dict
@pytest.mark.parametrize('bundle,expected_validation_results_dict', [
("tests/test_files/bundles/verification/ui.invalid.bundle.yaml",
{'errors': [
"csv.spec.links must be a list of name & url pairs.",
"spec.version invalid is not a valid semver "
"(example of a valid semver is: 1.0.12)",
"metadata.annotations.capabilities invalid "
"is not a valid capabilities level",
"spec.icon[0].mediatype image/invalid is not "
"a valid mediatype. It must be one of \"image/gif\", "
"\"image/jpeg\", \"image/png\", \"image/svg+xml\"",
"category invalid is not a valid category",
"UI validation failed to verify that required fields "
"for operatorhub.io are properly formatted."
],
'warnings': []}), ])
def test_ui_invalid_bundle_io(bundle, expected_validation_results_dict):
valid, validation_results_dict = get_ui_validation_results(bundle)
assert valid is False
assert validation_results_dict == expected_validation_results_dict
@pytest.mark.parametrize('file_name,correct_repo_name', [
("tests/test_files/bundles/verification/valid.bundle.yaml", 'marketplace'),
("tests/test_files/bundles/verification/nocrd.valid.bundle.yaml", 'svcat'),
])
def test_valid_bundles_with_package_name_and_repo_name_match(file_name,
correct_repo_name):
with open(file_name) as f:
bundle = yaml.safe_load(f)
bundle = unformat_bundle(bundle)
valid, _ = ValidateCmd().validate(bundle, repository=correct_repo_name)
assert valid
@pytest.mark.parametrize('file_name,wrong_repo_name', [
("tests/test_files/bundles/verification/valid.bundle.yaml", 'wrong-repo-name'),
("tests/test_files/bundles/verification/nocrd.valid.bundle.yaml", 'wrong-repo-name'),
])
def test_valid_bundles_with_package_name_and_repo_name_mismatch(file_name,
wrong_repo_name):
with open(file_name) as f:
bundle = yaml.safe_load(f)
bundle = unformat_bundle(bundle)
valid, _ = ValidateCmd().validate(bundle, repository=wrong_repo_name)
assert not valid
@pytest.mark.parametrize('file_name', [
"tests/test_files/bundles/verification/multiplepkgs.invalid.bundle.yaml",
])
def test_valid_bundles_with_multiple_packages(file_name):
with open(file_name) as f:
bundle = yaml.safe_load(f)
bundle = unformat_bundle(bundle)
with LogCapture() as logs:
valid, _ = ValidateCmd().validate(bundle)
assert not valid
logs.check_present(('operatorcourier.validate',
'ERROR',
'Only 1 package is expected to exist per bundle, but got 2.'))
def get_ui_validation_results(bundle):
return ValidateCmd(ui_validate_io=True).validate(get_bundle(bundle))
def get_validation_results(bundle):
return ValidateCmd().validate(get_bundle(bundle))
def get_bundle(bundle):
with open(bundle) as f:
bundle = yaml.safe_load(f)
return unformat_bundle(bundle)
@pytest.mark.parametrize('bundleFile,logInfo', [
('tests/test_files/bundles/verification/nopkg.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR', 'Bundle does not contain any packages.')),
('tests/test_files/bundles/verification/crdmissingkindfield.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR', 'crd spec.names.kind not defined.')),
('tests/test_files/bundles/verification/crdmissingpluralfield.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR', 'crd spec.names.plural not defined.')),
('tests/test_files/bundles/verification/crdmissingversionfield.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR', 'crd spec.version not defined.')),
('tests/test_files/bundles/verification/csvmissingkindfield.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR',
'kind not defined for item in spec.customresourcedefinitions.')),
('tests/test_files/bundles/verification/csvmissingnamefield.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR',
'name not defined for item in spec.customresourcedefinitions.')),
('tests/test_files/bundles/verification/csvmissingversionfield.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR',
'version not defined for item in spec.customresourcedefinitions.')),
])
def test_invalid_bundle_missing_fields(bundleFile, logInfo):
_test_invalid_bundle_with_log(bundleFile, logInfo)
@pytest.mark.parametrize('bundleFile,logInfo', [
('tests/test_files/bundles/verification/csvcrdfieldmismatch1.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR',
'CRD.spec.names.kind does not match CSV.spec.crd.owned.kind')),
('tests/test_files/bundles/verification/csvcrdfieldmismatch2.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR',
'CRD.spec.version does not match CSV.spec.crd.owned.version')),
('tests/test_files/bundles/verification/csvcrdfieldmismatch3.invalid.bundle.yaml',
('operatorcourier.validate', 'ERROR',
'`CRD.spec.names.plural`.`CRD.spec.group` does not match CSV.spec.crd.owned.name')),
])
def test_invalid_bundle_crd_csv_fields_mismatch(bundleFile, logInfo):
_test_invalid_bundle_with_log(bundleFile, logInfo)
def _test_invalid_bundle_with_log(bundleFile, logInfo):
with open(bundleFile) as f:
bundle = yaml.safe_load(f)
bundle = unformat_bundle(bundle)
module, level, message = logInfo[0], logInfo[1], logInfo[2]
with LogCapture() as logs:
valid, _ = ValidateCmd().validate(bundle)
assert not valid
# check if the input log info is present among all logs captured
logs.check_present(
(module, level, message),
)
| 46.14359 | 90 | 0.689153 |
73e437db070b903661a65cf95b8cb3437f67e6ac | 6,340 | py | Python | pypiserver/__init__.py | Y-Ge/pypiserver | f6dce96ee7c82a5d4db6cb501f0cd5af2a6d858e | [
"MIT"
] | 1 | 2019-03-18T17:47:54.000Z | 2019-03-18T17:47:54.000Z | pypiserver/__init__.py | DalavanCloud/pypiserver | 537034eb4323f0a84e66cb8d73e3b152c9d08744 | [
"MIT"
] | null | null | null | pypiserver/__init__.py | DalavanCloud/pypiserver | 537034eb4323f0a84e66cb8d73e3b152c9d08744 | [
"MIT"
] | null | null | null | import os
import re as _re
import sys
version = __version__ = "1.2.7"
__version_info__ = tuple(_re.split('[.-]', __version__))
__updated__ = "2019-01-31 18:43:27"
__title__ = "pypiserver"
__summary__ = "A minimal PyPI server for use with pip/easy_install."
__uri__ = "https://github.com/pypiserver/pypiserver"
class Configuration(object):
"""
.. see:: config-options: :func:`pypiserver.configure()`
"""
def __init__(self, **kwds):
vars(self).update(kwds)
def __repr__(self, *args, **kwargs):
return 'Configuration(**%s)' % vars(self)
def __str__(self, *args, **kwargs):
return 'Configuration:\n%s' % '\n'.join('%20s = %s' % (k, v)
for k, v in sorted(vars(self).items()))
def update(self, props):
d = props if isinstance(props, dict) else vars(props)
vars(self).update(d)
DEFAULT_SERVER = "auto"
def default_config(
root=None,
host="0.0.0.0",
port=8080,
server=DEFAULT_SERVER,
redirect_to_fallback=True,
fallback_url=None,
authenticated=['update'],
password_file=None,
overwrite=False,
hash_algo='md5',
verbosity=1,
log_file=None,
log_frmt="%(asctime)s|%(name)s|%(levelname)s|%(thread)d|%(message)s",
log_req_frmt="%(bottle.request)s",
log_res_frmt="%(status)s",
log_err_frmt="%(body)s: %(exception)s \n%(traceback)s",
welcome_file=None,
cache_control=None,
auther=None,
VERSION=__version__):
"""
Fetch default-opts with overridden kwds, capable of starting-up pypiserver.
Does not validate overridden options.
Example usage::
kwds = pypiserver.default_config(<override_kwds> ...)
## More modifications on kwds.
pypiserver.app(**kwds)``.
Kwds correspond to same-named cmd-line opts, with '-' --> '_' substitution.
Non standard args are described below:
:param return_defaults_only:
When `True`, returns defaults, otherwise,
configures "runtime" attributes and returns also the "packages"
found in the roots.
:param root:
A list of paths, derived from the packages specified on cmd-line.
If `None`, defaults to '~/packages'.
:param redirect_to_fallback:
see :option:`--disable-fallback`
:param authenticated:
see :option:`--authenticate`
:param password_file:
see :option:`--passwords`
:param log_file:
see :option:`--log-file`
Not used, passed here for logging it.
:param log_frmt:
see :option:`--log-frmt`
Not used, passed here for logging it.
:param callable auther:
An API-only options that if it evaluates to a callable,
it is invoked to allow access to protected operations
(instead of htpaswd mechanism) like that::
auther(username, password): bool
When defined, `password_file` is ignored.
:param host:
see :option:`--interface`
Not used, passed here for logging it.
:param port:
see :option:`--port`
Not used, passed here for logging it.
:param server:
see :option:`--server`
Not used, passed here for logging it.
:param verbosity:
see :option:`-v`
Not used, passed here for logging it.
:param VERSION:
Not used, passed here for logging it.
:return: a dict of defaults
"""
return locals()
def app(**kwds):
"""
:param dict kwds: Any overrides for defaults, as fetched by
:func:`default_config()`. Check the docstring of this function
for supported kwds.
"""
from . import core
_app = __import__("_app", globals(), locals(), ["."], 1)
sys.modules.pop('pypiserver._app', None)
kwds = default_config(**kwds)
config, packages = core.configure(**kwds)
_app.config = config
_app.packages = packages
_app.app.module = _app # HACK for testing.
return _app.app
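# Illustrative sketch (not part of the package API docs): serving the WSGI app
# returned by app() with the stdlib server. The package root is an assumption.
#
#     from wsgiref.simple_server import make_server
#     application = app(root=['~/packages'])
#     make_server('0.0.0.0', 8080, application).serve_forever()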
def str2bool(s, default):
if s is not None and s != '':
return s.lower() not in ("no", "off", "0", "false")
return default
def _str_strip(string):
"""Provide a generic strip method to pass as a callback."""
return string.strip()
def paste_app_factory(global_config, **local_conf):
"""Parse a paste config and return an app."""
def upd_conf_with_bool_item(conf, attr, sdict):
conf[attr] = str2bool(sdict.pop(attr, None), conf[attr])
def upd_conf_with_str_item(conf, attr, sdict):
value = sdict.pop(attr, None)
if value is not None:
conf[attr] = value
def upd_conf_with_int_item(conf, attr, sdict):
value = sdict.pop(attr, None)
if value is not None:
conf[attr] = int(value)
def upd_conf_with_list_item(conf, attr, sdict, sep=' ', parse=_str_strip):
values = sdict.pop(attr, None)
if values:
conf[attr] = list(filter(None, map(parse, values.split(sep))))
def _make_root(root):
root = root.strip()
if root.startswith("~"):
return os.path.expanduser(root)
return root
c = default_config()
upd_conf_with_bool_item(c, 'overwrite', local_conf)
upd_conf_with_bool_item(c, 'redirect_to_fallback', local_conf)
upd_conf_with_list_item(c, 'authenticated', local_conf, sep=' ')
upd_conf_with_list_item(c, 'root', local_conf, sep='\n', parse=_make_root)
upd_conf_with_int_item(c, 'verbosity', local_conf)
str_items = [
'fallback_url',
'hash_algo',
'log_err_frmt',
'log_file',
'log_frmt',
'log_req_frmt',
'log_res_frmt',
'password_file',
'welcome_file'
]
for str_item in str_items:
upd_conf_with_str_item(c, str_item, local_conf)
# cache_control is undocumented; don't know what type is expected:
# upd_conf_with_str_item(c, 'cache_control', local_conf)
return app(**c)
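# Example paste-style configuration consumed by paste_app_factory. The values
# and the "call:" entry-point URI are illustrative assumptions, not shipped
# defaults:
#
#     [app:main]
#     use = call:pypiserver:paste_app_factory
#     root = ~/packages
#     overwrite = true
#     authenticated = update download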
def _logwrite(logger, level, msg):
if msg:
line_endings = ['\r\n', '\n\r', '\n']
for le in line_endings:
if msg.endswith(le):
msg = msg[:-len(le)]
if msg:
logger.log(level, msg)
| 30.047393 | 79 | 0.603943 |
73e459323254e7d423c487948b57b38b79861676 | 6,289 | py | Python | code/generation-model/progressive_transformer/vocabulary.py | Merterm/-Modeling-Intensification-for-SLG | 800fff3d3c7bacc86c1db8382f7c2e68d2f0c074 | [
"MIT"
] | 5 | 2022-03-14T15:52:09.000Z | 2022-03-30T11:42:47.000Z | code/generation-model/progressive_transformer/vocabulary.py | Merterm/Modeling-Intensification-for-SLG | 800fff3d3c7bacc86c1db8382f7c2e68d2f0c074 | [
"MIT"
] | null | null | null | code/generation-model/progressive_transformer/vocabulary.py | Merterm/Modeling-Intensification-for-SLG | 800fff3d3c7bacc86c1db8382f7c2e68d2f0c074 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Vocabulary module
"""
from collections import defaultdict, Counter
from typing import List
import numpy as np
from torchtext.data import Dataset
from constants import UNK_TOKEN, DEFAULT_UNK_ID, \
EOS_TOKEN, BOS_TOKEN, PAD_TOKEN
class Vocabulary:
""" Vocabulary represents mapping between tokens and indices. """
def __init__(self, tokens: List[str] = None, file: str = None) -> None:
# don't rename stoi and itos since needed for torchtext
# warning: stoi grows with unknown tokens, don't use for saving or size
# special symbols
self.specials = [UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN]
self.stoi = defaultdict(DEFAULT_UNK_ID)
self.itos = []
if tokens is not None:
self._from_list(tokens)
elif file is not None:
self._from_file(file)
def _from_list(self, tokens: List[str] = None) -> None:
"""
Make vocabulary from list of tokens.
Tokens are assumed to be unique and pre-selected.
Special symbols are added if not in list.
:param tokens: list of tokens
"""
self.add_tokens(tokens=self.specials+tokens)
assert len(self.stoi) == len(self.itos)
def _from_file(self, file: str) -> None:
"""
Make vocabulary from contents of file.
File format: token with index i is in line i.
:param file: path to file where the vocabulary is loaded from
"""
tokens = []
with open(file, "r") as open_file:
for line in open_file:
tokens.append(line.strip("\n"))
self._from_list(tokens)
def __str__(self) -> str:
return self.stoi.__str__()
def to_file(self, file: str) -> None:
"""
Save the vocabulary to a file, by writing token with index i in line i.
:param file: path to file where the vocabulary is written
"""
with open(file, "w") as open_file:
for t in self.itos:
open_file.write("{}\n".format(t))
def add_tokens(self, tokens: List[str]) -> None:
"""
Add list of tokens to vocabulary
:param tokens: list of tokens to add to the vocabulary
"""
for t in tokens:
new_index = len(self.itos)
# add to vocab if not already there
if t not in self.itos:
self.itos.append(t)
self.stoi[t] = new_index
def is_unk(self, token: str) -> bool:
"""
Check whether a token is covered by the vocabulary
:param token:
:return: True if covered, False otherwise
"""
return self.stoi[token] == DEFAULT_UNK_ID()
def __len__(self) -> int:
return len(self.itos)
def array_to_sentence(self, array: np.array, cut_at_eos=True) -> List[str]:
"""
Converts an array of IDs to a sentence, optionally cutting the result
off at the end-of-sequence token.
:param array: 1D array containing indices
:param cut_at_eos: cut the decoded sentences at the first <eos>
:return: list of strings (tokens)
"""
sentence = []
for i in array:
s = self.itos[i]
if cut_at_eos and s == EOS_TOKEN:
break
sentence.append(s)
return sentence
def arrays_to_sentences(self, arrays: np.array, cut_at_eos=True) \
-> List[List[str]]:
"""
Convert multiple arrays containing sequences of token IDs to their
sentences, optionally cutting them off at the end-of-sequence token.
:param arrays: 2D array containing indices
:param cut_at_eos: cut the decoded sentences at the first <eos>
:return: list of list of strings (tokens)
"""
sentences = []
for array in arrays:
sentences.append(
self.array_to_sentence(array=array, cut_at_eos=cut_at_eos))
return sentences
def build_vocab(field: str, max_size: int, min_freq: int, dataset: Dataset,
vocab_file: str = None) -> Vocabulary:
"""
Builds vocabulary for a torchtext `field` from given`dataset` or
`vocab_file`.
:param field: attribute e.g. "src"
:param max_size: maximum size of vocabulary
:param min_freq: minimum frequency for an item to be included
:param dataset: dataset to load data for field from
:param vocab_file: file to store the vocabulary,
if not None, load vocabulary from here
:return: Vocabulary created from either `dataset` or `vocab_file`
"""
if vocab_file is not None:
# load it from file
vocab = Vocabulary(file=vocab_file)
else:
# create newly
def filter_min(counter: Counter, min_freq: int):
""" Filter counter by min frequency """
filtered_counter = Counter({t: c for t, c in counter.items()
if c >= min_freq})
return filtered_counter
def sort_and_cut(counter: Counter, limit: int):
""" Cut counter to most frequent,
sorted numerically and alphabetically"""
# sort by frequency, then alphabetically
tokens_and_frequencies = sorted(counter.items(),
key=lambda tup: tup[0])
tokens_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
vocab_tokens = [i[0] for i in tokens_and_frequencies[:limit]]
return vocab_tokens
tokens = []
for i in dataset.examples:
if field == "src":
tokens.extend(i.src)
elif field == "trg":
tokens.extend(i.trg)
counter = Counter(tokens)
if min_freq > -1:
counter = filter_min(counter, min_freq)
vocab_tokens = sort_and_cut(counter, max_size)
assert len(vocab_tokens) <= max_size
vocab = Vocabulary(tokens=vocab_tokens)
assert len(vocab) <= max_size + len(vocab.specials)
assert vocab.itos[DEFAULT_UNK_ID()] == UNK_TOKEN
# check for all except for UNK token whether they are OOVs
for s in vocab.specials[1:]:
assert not vocab.is_unk(s)
return vocab
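# Illustrative usage sketch: the size limit, example tokens, and file name are
# assumptions, and `train_data` stands in for any torchtext Dataset whose
# examples expose .src/.trg token lists.
def _example_build_and_use_vocab(train_data):
    src_vocab = build_vocab("src", max_size=5000, min_freq=1, dataset=train_data)
    src_vocab.to_file("src_vocab.txt")  # one token per line
    ids = np.array([src_vocab.stoi[t] for t in ["hello", "world"]])
    return src_vocab.array_to_sentence(ids)  # map indices back to tokens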
| 33.452128 | 79 | 0.59771 |
73e46541472028aa4391bf5a949dc70d6afdbbb0 | 839 | py | Python | venv/lib/python3.6/site-packages/kubernetes/__init__.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/kubernetes/__init__.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/kubernetes/__init__.py | DiptoChakrabarty/Kube-Automate | 2072d1aadd58eb405c7308ff5cfecbf50300ead3 | [
"MIT"
] | null | null | null | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__project__ = 'kubernetes'
# The version is auto-updated. Please do not edit.
__version__ = "11.0.0"
import kubernetes.client
import kubernetes.config
import kubernetes.dynamic
import kubernetes.watch
import kubernetes.stream
import kubernetes.utils
| 33.56 | 74 | 0.779499 |
73e483775b429775c6f14af3bf69a26e36441403 | 13,729 | py | Python | appcli/configuration/configuration_dir_state.py | david-homley/appcli | 08e336082fb5f4b79169b9c2cd7561bf489e28ac | [
"MIT"
] | 2 | 2019-12-06T00:17:07.000Z | 2021-07-23T10:52:34.000Z | appcli/configuration/configuration_dir_state.py | david-homley/appcli | 08e336082fb5f4b79169b9c2cd7561bf489e28ac | [
"MIT"
] | 172 | 2019-10-10T04:04:31.000Z | 2022-03-28T03:37:52.000Z | appcli/configuration/configuration_dir_state.py | david-homley/appcli | 08e336082fb5f4b79169b9c2cd7561bf489e28ac | [
"MIT"
] | 6 | 2019-10-04T02:56:35.000Z | 2021-05-07T02:10:27.000Z | #!/usr/bin/env python3
# # -*- coding: utf-8 -*-
"""
States that the configuration directory can be in.
________________________________________________________________________________
Created by brightSPARK Labs
www.brightsparklabs.com
"""
# standard libraries
from collections import defaultdict
from pathlib import Path
from typing import Iterable
# vendor libraries
import git
# local libraries
from appcli.commands.appcli_command import AppcliCommand
from appcli.functions import error_and_exit
from appcli.git_repositories.git_repositories import (
ConfigurationGitRepository,
GeneratedConfigurationGitRepository,
)
from appcli.logger import logger
# ------------------------------------------------------------------------------
# CLASSES
# ------------------------------------------------------------------------------
class ConfigurationDirState:
"""The state of the configuration directory. This encapsulates both the 'conf' and 'generated' git-managed
directories.
This is the base class from which all the different 'state' classes of the configuration directory will inherit.
"""
def __init__(self, disallowed_command, disallowed_command_unless_forced) -> None:
self.disallowed_command = disallowed_command
self.disallowed_command_unless_forced = disallowed_command_unless_forced
def verify_command_allowed(self, command: AppcliCommand, force: bool = False):
if command in self.disallowed_command:
error_and_exit(self.disallowed_command[command])
if command in self.disallowed_command_unless_forced and not force:
error_and_exit(
f"{self.disallowed_command_unless_forced[command]}"
" If this command supports it, use '--force' to ignore error."
)
logger.debug(
f"Allowed command [{command}] with current configuration state [{self}], where force is [{force}]."
)
class ConfigurationDirStateFactory:
"""Factory class to get the current ConfigurationDirState state class"""
def get_state(
configuration_dir: Path,
generated_configuration_dir: Path,
app_version: str,
backup_dir: Path,
) -> ConfigurationDirState:
if configuration_dir is None:
return NoDirectoryProvidedConfigurationDirState()
if backup_dir is None:
return NoDirectoryProvidedBackupDirState()
if not backup_dir.exists():
return BackupDirectoryDoesNotExist()
if not ConfigurationDirStateFactory.__is_git_repo(configuration_dir):
return UninitialisedConfigurationDirState()
config_repo = ConfigurationGitRepository(configuration_dir)
conf_version = config_repo.get_repository_version()
if conf_version != app_version:
error_message = f"Application requires migration. Configuration version [{conf_version}], Application version [{app_version}]."
return RequiresMigrationConfigurationDirState(error_message)
if not ConfigurationDirStateFactory.__is_git_repo(generated_configuration_dir):
return UnappliedConfigurationDirState()
gen_config_repo = GeneratedConfigurationGitRepository(
generated_configuration_dir
)
if config_repo.is_dirty():
if gen_config_repo.is_dirty():
return DirtyConfAndGenConfigurationDirState()
return DirtyConfConfigurationDirState()
if gen_config_repo.is_dirty():
return DirtyGenConfigurationDirState()
if gen_config_repo.get_commit_count() > 1:
return InvalidConfigurationDirState(
f"Generated repository [{gen_config_repo.get_repo_path()}] has extra untracked git commits."
)
return CleanConfigurationDirState()
def __is_git_repo(path: Path):
"""Checks if the directory at the path is a git repository.
Args:
path (Path): Path to test.
Returns:
bool: True if the directory exists and is a git repo. Otherwise False.
"""
try:
# If this doesn't raise an Exception, a git repo exists at this path.
git.Repo(path)
return True
except (git.InvalidGitRepositoryError, git.exc.NoSuchPathError):
return False
class NoDirectoryProvidedConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where appcli doesn't know the path to configuration dir."""
def __init__(self) -> None:
default_error_message = (
"No configuration directory provided to appcli. Run 'install'."
)
disallowed_command = get_disallowed_command_from_allowed_commands(
[AppcliCommand.INSTALL],
default_error_message,
)
disallowed_command_unless_forced = {}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class NoDirectoryProvidedBackupDirState(ConfigurationDirState):
"""Represents the backup dir state where appcli doesn't know the path to backup dir."""
def __init__(self) -> None:
disallowed_command = {
AppcliCommand.BACKUP: "Cannot backup due to missing backup directory. Run 'install'.",
AppcliCommand.RESTORE: "Cannot restore due to missing backup directory. Run 'install'.",
AppcliCommand.VIEW_BACKUPS: "Cannot view backups due to missing backup directory. Run 'install'.",
}
disallowed_command_unless_forced = {}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class BackupDirectoryDoesNotExist(ConfigurationDirState):
"""Represents the backup dir state where the backup directory does not exist."""
def __init__(self) -> None:
disallowed_command = {
AppcliCommand.RESTORE: "Cannot restore due to missing backup directory. Run 'backup'.",
AppcliCommand.VIEW_BACKUPS: "Cannot view backups due to missing backup directory. Run 'backup'.",
}
disallowed_command_unless_forced = {}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class UninitialisedConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where config directory hasn't been initialised."""
def __init__(self) -> None:
default_error_message = "Cannot run command against uninitialised application. Run 'configure init'."
disallowed_command = get_disallowed_command_from_allowed_commands(
[
AppcliCommand.CONFIGURE_INIT,
AppcliCommand.LAUNCHER,
AppcliCommand.BACKUP,
AppcliCommand.RESTORE,
AppcliCommand.VIEW_BACKUPS,
],
default_error_message,
)
disallowed_command_unless_forced = {}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class UnappliedConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where configuration hasn't been applied yet, i.e. the generated
configuration doesn't exist."""
def __init__(self) -> None:
disallowed_command = {
AppcliCommand.CONFIGURE_INIT: "Cannot initialise an existing configuration.",
AppcliCommand.SERVICE_START: "Cannot start services due to missing generated configuration. Run 'configure apply'.",
AppcliCommand.SERVICE_SHUTDOWN: "Cannot stop services due to missing generated configuration. Run 'configure apply'.",
AppcliCommand.SERVICE_LOGS: "Cannot get service logs due to missing generated configuration. Run 'configure apply'.",
AppcliCommand.TASK_RUN: "Cannot run tasks due to missing generated configuration. Run 'configure apply'.",
AppcliCommand.ORCHESTRATOR: "Cannot run orchestrator commands due to missing generated configuration. Run 'configure apply'.",
}
disallowed_command_unless_forced = {}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class CleanConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where config and generated directories both exist and are in a clean
state."""
def __init__(self) -> None:
disallowed_command = {
AppcliCommand.CONFIGURE_INIT: "Cannot initialise an existing configuration.",
}
disallowed_command_unless_forced = {}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class DirtyConfConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where config directory is dirty."""
def __init__(self) -> None:
disallowed_command = {
AppcliCommand.CONFIGURE_INIT: "Cannot initialise an existing configuration.",
AppcliCommand.MIGRATE: "Cannot migrate with a dirty configuration. Run 'configure apply'.",
}
disallowed_command_unless_forced = {
AppcliCommand.SERVICE_START: "Cannot start with dirty configuration. Run 'configure apply'.",
AppcliCommand.TASK_RUN: "Cannot run task with dirty configuration. Run 'configure apply'.",
AppcliCommand.ORCHESTRATOR: "Cannot run orchestrator tasks with dirty configuration. Run 'configure apply'.",
}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class DirtyGenConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where generated directory is dirty."""
def __init__(self) -> None:
disallowed_command = {
AppcliCommand.CONFIGURE_INIT: "Cannot initialise an existing configuration.",
AppcliCommand.MIGRATE: "Cannot migrate with a dirty generated configuration. Run 'configure apply'.",
}
disallowed_command_unless_forced = {
AppcliCommand.CONFIGURE_APPLY: "Cannot 'configure apply' over a dirty generated directory as it will overwrite existing modifications.",
AppcliCommand.SERVICE_START: "Cannot start service with dirty generated configuration. Run 'configure apply'.",
AppcliCommand.TASK_RUN: "Cannot run task with dirty generated configuration. Run 'configure apply'.",
AppcliCommand.ORCHESTRATOR: "Cannot run orchestrator tasks with dirty generated configuration. Run 'configure apply'.",
}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class DirtyConfAndGenConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where both the conf and generated directory are dirty."""
def __init__(self) -> None:
disallowed_command = {
AppcliCommand.CONFIGURE_INIT: "Cannot initialise an existing configuration.",
AppcliCommand.MIGRATE: "Cannot migrate with a dirty generated configuration. Run 'configure apply'.",
}
disallowed_command_unless_forced = {
AppcliCommand.CONFIGURE_APPLY: "Cannot 'configure apply' over a dirty generated directory as it will overwrite existing modifications.",
AppcliCommand.SERVICE_START: "Cannot start service with dirty generated configuration. Run 'configure apply'.",
AppcliCommand.TASK_RUN: "Cannot run task with dirty generated configuration. Run 'configure apply'.",
AppcliCommand.ORCHESTRATOR: "Cannot run orchestrator tasks with dirty generated configuration. Run 'configure apply'.",
}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class RequiresMigrationConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where configuration and application versions are misaligned."""
def __init__(self, error: str) -> None:
disallowed_command = get_disallowed_command_from_allowed_commands(
[AppcliCommand.MIGRATE], error
)
disallowed_command_unless_forced = {}
super().__init__(disallowed_command, disallowed_command_unless_forced)
class InvalidConfigurationDirState(ConfigurationDirState):
"""Represents the configuration dir state where configuration is invalid and incompatible with appcli."""
def __init__(self, error: str) -> None:
default_error_message = f"Invalid configuration state, this error must be rectified before continuing. {error}"
# Remove the 'VIEW_BACKUPS' and 'RESTORE' commands from the set of 'disallowed' commands so that we can add them
# as 'disallowed unless forced' commands
disallowed_command = get_disallowed_command_from_allowed_commands(
[AppcliCommand.VIEW_BACKUPS, AppcliCommand.RESTORE], default_error_message
)
disallowed_command_unless_forced = {
AppcliCommand.VIEW_BACKUPS: default_error_message,
AppcliCommand.RESTORE: default_error_message,
}
super().__init__(disallowed_command, disallowed_command_unless_forced)
def get_disallowed_command_from_allowed_commands(
allowed_commands: Iterable[AppcliCommand], error_message: str
) -> dict:
"""Given an Iterable of allowed appcli commands, generates the dict of disallowed commands.
Args:
allowed_commands (Iterable[AppcliCommand]): Allowed commands.
error_message (str): Error message for disallowed commands.
Returns:
        dict: Mapping from each disallowed AppcliCommand to the given error message.
"""
disallowed_commands = dict(defaultdict.fromkeys(list(AppcliCommand), error_message))
for command in allowed_commands:
disallowed_commands.pop(command, None)
return disallowed_commands
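# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical example of how a state object gates commands, using
# only AppcliCommand members already referenced above.
def _example_state_check():
    state = UninitialisedConfigurationDirState()
    # CONFIGURE_INIT is in this state's allowed list, so this simply logs and returns.
    state.verify_command_allowed(AppcliCommand.CONFIGURE_INIT)
    # A disallowed command such as SERVICE_START would instead call error_and_exit().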
| 41.35241 | 148 | 0.706169 |
73e49410c113212cc0d6966e28424954d6c36c5c | 1,503 | py | Python | tests/script_crawl.py | dimichxp/grab | 5fedee3009c18c8a1139b0f82736ebe473021b46 | [
"MIT"
] | 2,266 | 2015-01-01T08:47:33.000Z | 2022-03-21T05:02:55.000Z | tests/script_crawl.py | dimichxp/grab | 5fedee3009c18c8a1139b0f82736ebe473021b46 | [
"MIT"
] | 221 | 2015-01-13T01:55:57.000Z | 2022-02-25T23:23:35.000Z | tests/script_crawl.py | dimichxp/grab | 5fedee3009c18c8a1139b0f82736ebe473021b46 | [
"MIT"
] | 315 | 2015-01-23T06:38:55.000Z | 2022-03-27T08:33:33.000Z | from grab.spider import Spider, Task
from grab.script import crawl
from grab.util.module import SPIDER_REGISTRY
from tests.util import BaseGrabTestCase, only_grab_transport
class TestSpider(Spider):
url = None
points = []
def prepare(self):
#from grab.spider.base import logger_verbose
#logger_verbose.setLevel(logging.DEBUG)
del self.points[:]
def task_generator(self):
#print('A')
yield Task('page', url=self.url)
def task_page(self, grab, unused_task):
#print('B')
self.points.append(grab.doc.body)
class FailSpider(Spider):
def task_generator(self):
yield Task('page', url=self.url)
def task_page(self, unused_grab, unused_task):
raise Exception('Shit happens!')
class ScriptCrawlTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
@only_grab_transport('never')
def test_crawl(self):
TestSpider.url = self.server.get_url()
self.server.response['data'] = b'1'
SPIDER_REGISTRY.clear()
crawl.main('test_spider', settings_module='tests.files.crawl_settings',
disable_report=True)
self.assertEqual(TestSpider.points, [b'1'])
@only_grab_transport('never')
def test_crawl_save_lists(self):
FailSpider.url = self.server.get_url()
self.server.response['data'] = b'1'
SPIDER_REGISTRY.clear()
crawl.main('fail_spider', settings_module='tests.files.crawl_settings')
| 28.358491 | 79 | 0.665336 |
73e497c04c2fcbb36025b03b2393f753c6477b25 | 9,375 | py | Python | src/build/android/generate_jacoco_report.py | lazymartin/naiveproxy | 696e8714278e85e67e56a2eaea11f26c53116f0c | [
"BSD-3-Clause"
] | 2,219 | 2018-03-26T02:57:34.000Z | 2022-03-31T00:27:59.000Z | src/build/android/generate_jacoco_report.py | uszhen/naiveproxy | 0aa27e8bd37428f2124a891be1e5e793928cd726 | [
"BSD-3-Clause"
] | 250 | 2018-02-02T23:16:57.000Z | 2022-03-21T06:09:53.000Z | src/build/android/generate_jacoco_report.py | uszhen/naiveproxy | 0aa27e8bd37428f2124a891be1e5e793928cd726 | [
"BSD-3-Clause"
] | 473 | 2019-03-24T16:34:23.000Z | 2022-03-31T02:01:05.000Z | #!/usr/bin/env vpython3
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Aggregates Jacoco coverage files to produce output."""
from __future__ import print_function
import argparse
import fnmatch
import json
import os
import sys
import devil_chromium
from devil.utils import cmd_helper
from pylib.constants import host_paths
# Source paths should be passed to Jacoco in a way that the relative file paths
# reflect the class package name.
_PARTIAL_PACKAGE_NAMES = ['com/google', 'org/chromium']
# The sources_json_file is generated by jacoco_instr.py with source directories
# and input path to non-instrumented jars.
# e.g.
# 'source_dirs': [
# "chrome/android/java/src/org/chromium/chrome/browser/toolbar/bottom",
# "chrome/android/java/src/org/chromium/chrome/browser/ui/system",
# ...]
# 'input_path':
# '$CHROMIUM_OUTPUT_DIR/\
# obj/chrome/android/features/tab_ui/java__process_prebuilt-filtered.jar'
_SOURCES_JSON_FILES_SUFFIX = '__jacoco_sources.json'
# These should match the jar class files generated in internal_rules.gni
_DEVICE_CLASS_EXCLUDE_SUFFIX = 'host_filter.jar'
_HOST_CLASS_EXCLUDE_SUFFIX = 'device_filter.jar'
def _CreateClassfileArgs(class_files, exclude_suffix=None, include_substr=None):
"""Returns a filtered list of files with classfile option.
Args:
class_files: A list of class files.
exclude_suffix: Suffix to look for to exclude.
include_substr: A substring that must be present to include the file.
exclude_suffix takes precedence over this.
Returns:
A list of files that don't use the suffix.
"""
result_class_files = []
for f in class_files:
include_file = True
if exclude_suffix and f.endswith(exclude_suffix):
include_file = False
# Exclude overrides include.
if include_file and include_substr and include_substr not in f:
include_file = False
if include_file:
result_class_files += ['--classfiles', f]
return result_class_files
def _GenerateReportOutputArgs(args, class_files, report_type):
class_jar_exclude = None
if report_type == 'device':
class_jar_exclude = _DEVICE_CLASS_EXCLUDE_SUFFIX
elif report_type == 'host':
class_jar_exclude = _HOST_CLASS_EXCLUDE_SUFFIX
cmd = _CreateClassfileArgs(class_files, class_jar_exclude,
args.include_substr_filter)
if args.format == 'html':
report_dir = os.path.join(args.output_dir, report_type)
if not os.path.exists(report_dir):
os.makedirs(report_dir)
cmd += ['--html', report_dir]
elif args.format == 'xml':
cmd += ['--xml', args.output_file]
elif args.format == 'csv':
cmd += ['--csv', args.output_file]
return cmd
def _GetFilesWithSuffix(root_dir, suffix):
"""Gets all files with a given suffix.
Args:
root_dir: Directory in which to search for files.
suffix: Suffix to look for.
Returns:
A list of absolute paths to files that match.
"""
files = []
for root, _, filenames in os.walk(root_dir):
basenames = fnmatch.filter(filenames, '*' + suffix)
files.extend([os.path.join(root, basename) for basename in basenames])
return files
def _GetExecFiles(root_dir, exclude_substr=None):
""" Gets all .exec files
Args:
root_dir: Root directory in which to search for files.
exclude_substr: Substring which should be absent in filename. If None, all
files are selected.
Returns:
A list of absolute paths to .exec files
"""
all_exec_files = _GetFilesWithSuffix(root_dir, ".exec")
valid_exec_files = []
for exec_file in all_exec_files:
if not exclude_substr or exclude_substr not in exec_file:
valid_exec_files.append(exec_file)
return valid_exec_files
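# --- Illustrative sketch (not part of the original script) ---
# How the helpers above combine; the directory and jar names are hypothetical.
def _example_collect_inputs():
    exec_files = _GetExecFiles('/tmp/coverage', exclude_substr='unit_tests')
    # For a host report, jars ending in 'device_filter.jar' are filtered out.
    classfile_args = _CreateClassfileArgs(
        ['chrome_java__device_filter.jar', 'base_java__host_filter.jar'],
        exclude_suffix=_HOST_CLASS_EXCLUDE_SUFFIX)
    return exec_files, classfile_args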
def _ParseArguments(parser):
"""Parses the command line arguments.
Args:
parser: ArgumentParser object.
Returns:
The parsed arguments.
"""
parser.add_argument(
'--format',
required=True,
choices=['html', 'xml', 'csv'],
help='Output report format. Choose one from html, xml and csv.')
parser.add_argument(
'--device-or-host',
choices=['device', 'host'],
help='Selection on whether to use the device classpath files or the '
'host classpath files. Host would typically be used for junit tests '
' and device for tests that run on the device. Only used for xml and csv'
' reports.')
parser.add_argument('--include-substr-filter',
help='Substring that must be included in classjars.',
type=str,
default='')
parser.add_argument('--output-dir', help='html report output directory.')
parser.add_argument('--output-file',
help='xml file to write device coverage results.')
parser.add_argument(
'--coverage-dir',
required=True,
help='Root of the directory in which to search for '
'coverage data (.exec) files.')
parser.add_argument('--exec-filename-excludes',
required=False,
help='Excludes .exec files which contain a particular '
'substring in their name')
parser.add_argument(
'--sources-json-dir',
help='Root of the directory in which to search for '
'*__jacoco_sources.json files.')
parser.add_argument(
'--class-files',
nargs='+',
help='Location of Java non-instrumented class files. '
'Use non-instrumented jars instead of instrumented jars. '
'e.g. use chrome_java__process_prebuilt_(host/device)_filter.jar instead'
'of chrome_java__process_prebuilt-instrumented.jar')
parser.add_argument(
'--sources',
nargs='+',
help='Location of the source files. '
'Specified source folders must be the direct parent of the folders '
'that define the Java packages.'
'e.g. <src_dir>/chrome/android/java/src/')
parser.add_argument(
'--cleanup',
action='store_true',
help='If set, removes coverage files generated at '
'runtime.')
args = parser.parse_args()
if args.format == 'html' and not args.output_dir:
parser.error('--output-dir needed for report.')
if args.format in ('csv', 'xml'):
if not args.output_file:
parser.error('--output-file needed for xml/csv reports.')
if not args.device_or_host and args.sources_json_dir:
parser.error('--device-or-host selection needed with --sources-json-dir')
if not (args.sources_json_dir or args.class_files):
parser.error('At least either --sources-json-dir or --class-files needed.')
return args
def main():
parser = argparse.ArgumentParser()
args = _ParseArguments(parser)
devil_chromium.Initialize()
coverage_files = _GetExecFiles(args.coverage_dir, args.exec_filename_excludes)
if not coverage_files:
parser.error('No coverage file found under %s' % args.coverage_dir)
print('Found coverage files: %s' % str(coverage_files))
class_files = []
source_dirs = []
if args.sources_json_dir:
sources_json_files = _GetFilesWithSuffix(args.sources_json_dir,
_SOURCES_JSON_FILES_SUFFIX)
for f in sources_json_files:
with open(f, 'r') as json_file:
data = json.load(json_file)
class_files.extend(data['input_path'])
source_dirs.extend(data['source_dirs'])
# Fix source directories as direct parent of Java packages.
fixed_source_dirs = set()
for path in source_dirs:
for partial in _PARTIAL_PACKAGE_NAMES:
if partial in path:
fixed_dir = os.path.join(host_paths.DIR_SOURCE_ROOT,
path[:path.index(partial)])
fixed_source_dirs.add(fixed_dir)
break
if args.class_files:
class_files += args.class_files
if args.sources:
fixed_source_dirs.update(args.sources)
cmd = [
'java', '-jar',
os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party', 'jacoco', 'lib',
'jacococli.jar'), 'report'
] + coverage_files
for source in fixed_source_dirs:
cmd += ['--sourcefiles', source]
if args.format == 'html':
# Both reports are generated for html as the cq bot generates an html
# report and we wouldn't know which one a developer needed.
device_cmd = cmd + _GenerateReportOutputArgs(args, class_files, 'device')
host_cmd = cmd + _GenerateReportOutputArgs(args, class_files, 'host')
device_exit_code = cmd_helper.RunCmd(device_cmd)
host_exit_code = cmd_helper.RunCmd(host_cmd)
exit_code = device_exit_code or host_exit_code
else:
cmd = cmd + _GenerateReportOutputArgs(args, class_files,
args.device_or_host)
exit_code = cmd_helper.RunCmd(cmd)
if args.cleanup:
for f in coverage_files:
os.remove(f)
# Command tends to exit with status 0 when it actually failed.
if not exit_code:
if args.format == 'html':
if not os.path.isdir(args.output_dir) or not os.listdir(args.output_dir):
print('No report generated at %s' % args.output_dir)
exit_code = 1
elif not os.path.isfile(args.output_file):
print('No device coverage report generated at %s' % args.output_file)
exit_code = 1
return exit_code
if __name__ == '__main__':
sys.exit(main())
| 33.244681 | 80 | 0.686187 |
73e49bd316a0ad43366584823044db8639b86236 | 4,191 | py | Python | python/src/cryptopals/set5/challenge35.py | ocirne/cryptopals | 3835bbeef3e521d48814795b94d426139918b9cd | [
"Unlicense"
] | 1 | 2021-02-20T16:38:25.000Z | 2021-02-20T16:38:25.000Z | python/src/cryptopals/set5/challenge35.py | ocirne/cryptopals | 3835bbeef3e521d48814795b94d426139918b9cd | [
"Unlicense"
] | null | null | null | python/src/cryptopals/set5/challenge35.py | ocirne/cryptopals | 3835bbeef3e521d48814795b94d426139918b9cd | [
"Unlicense"
] | null | null | null | import random
import secrets
from abc import abstractmethod, ABC
from typing import Union
from Crypto.Cipher import AES
from cryptopals.basics import padding, strip_padding, InvalidPaddingException
from cryptopals.digests import SHA1
def aes_cbc_encrypt(priv_key: int, pt: bytes) -> bytes:
s = priv_key.to_bytes(8, byteorder="big")
key = SHA1().digest(s)[:16]
iv = secrets.token_bytes(16)
aes = AES.new(key, AES.MODE_CBC, iv)
ct = aes.encrypt(padding(pt))
return ct + iv
def aes_cbc_decrypt(priv_key: int, ct: bytes, iv: bytes) -> bytes:
s = priv_key.to_bytes(8, byteorder="big")
key = SHA1().digest(s)[:16]
aes = AES.new(key, AES.MODE_CBC, iv)
pt = aes.decrypt(ct)
return strip_padding(pt)
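# --- Illustrative sketch (not part of the original challenge code) ---
# Round-trip check for the helpers above, showing the ct + iv packing convention
# (the IV is appended as the last 16 bytes). Assumes padding/strip_padding are
# inverses, as the Alice/Bob exchange below relies on.
def _example_roundtrip():
    shared_secret = 12345  # hypothetical DH shared secret
    blob = aes_cbc_encrypt(shared_secret, b"Ice Ice Baby")
    ct, iv = blob[:-16], blob[-16:]
    assert aes_cbc_decrypt(shared_secret, ct, iv) == b"Ice Ice Baby"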
class Bob:
def request_groups(self, p, g):
self.p = p
self.g = g
return "ACK"
def request_pub_key(self, A):
b = random.randrange(0, self.p)
B = pow(self.g, b, self.p)
self.s = pow(A, b, self.p)
return B
def request_message(self, ciphertext_a: bytes) -> bytes:
return aes_cbc_encrypt(self.s, ciphertext_a)
class AbstractMallory(ABC):
def __init__(self, bob: Bob):
self.bob = bob
@abstractmethod
def request_groups(self, p, g):
...
def request_pub_key(self, A):
return self.bob.request_pub_key(A)
def request_message(self, ciphertext_a: bytes) -> bytes:
ct_b = self.bob.request_message(ciphertext_a)
ct, iv = ciphertext_a[:-16], ciphertext_a[-16:]
self.easy_decrypt(ct, iv)
return ct_b
@abstractmethod
def easy_decrypt(self, ct: bytes, iv: bytes):
...
class Mallory1(AbstractMallory):
""" g = 1 """
def request_groups(self, p, g):
self.bob.request_groups(p, 1)
def easy_decrypt(self, ct: bytes, iv: bytes):
"""
s = B**a = (g**b)**a = (1**b)**a = 1**a = 1 mod p
"""
recovered_pt = aes_cbc_decrypt(1, ct, iv)
print("recovered pt (g = 1):", recovered_pt)
class Mallory2(AbstractMallory):
""" g = p """
def request_groups(self, p, g):
self.bob.request_groups(p, p)
def easy_decrypt(self, ct: bytes, iv: bytes) -> bytes:
"""
s = B**a = (g**b)**a = (p**b)**a = 0 mod p
"""
recovered_pt = aes_cbc_decrypt(0, ct, iv)
print("recovered pt (g = p):", recovered_pt)
class Mallory3(AbstractMallory):
""" g = p - 1 """
def request_groups(self, p, g):
self.p = p
self.bob.request_groups(p, p - 1)
def easy_decrypt(self, ct: bytes, iv: bytes):
"""
s = B**a = (g**b)**a = ((p-1)**b)**a = ((-1)**b)**a = {p-1, 1} mod p
"""
try:
recovered_pt = aes_cbc_decrypt(1, ct, iv)
except InvalidPaddingException:
recovered_pt = aes_cbc_decrypt(self.p - 1, ct, iv)
print("recovered pt (g = p - 1):", recovered_pt)
class Alice:
def __init__(self, bob: Union[Bob, Mallory1, Mallory2, Mallory3]):
self.bob = bob
self.pt = b"Ice Ice Baby"
def request_groups(self):
# should be a prime, good enough
self.p = random.randint(0, 2 ** 31)
self.g = random.randint(0, 100)
self.a = random.randrange(0, self.p)
self.bob.request_groups(self.p, self.g)
def request_pub_key(self):
"""
A -> M
Send "p", "g", "A"
"""
A = pow(self.g, self.a, self.p)
B = self.bob.request_pub_key(A)
self.s = pow(B, self.a, self.p)
def request_message(self):
"""
A -> M
Send AES-CBC(SHA1(s)[0:16], iv=random(16), msg) + iv
"""
ct = aes_cbc_encrypt(self.s, self.pt)
self.bob.request_message(ct)
def challenge34(mallory_class):
"""
>>> challenge34(Mallory1)
recovered pt (g = 1): b'Ice Ice Baby'
>>> challenge34(Mallory2)
recovered pt (g = p): b'Ice Ice Baby'
>>> challenge34(Mallory3)
recovered pt (g = p - 1): b'Ice Ice Baby'
"""
bob = Bob()
mallory = mallory_class(bob)
alice = Alice(mallory)
alice.request_groups()
alice.request_pub_key()
alice.request_message()
| 26.358491 | 77 | 0.575758 |
73e4b6f68858a7c98eb2dac179bf2cd9c1e8ce32 | 4,786 | py | Python | auv_control_pi/components/remote_proxy.py | adrienemery/auv-control-pi | 633fe89b652b07eb6ebe03c0550daa211b122297 | [
"MIT"
] | 9 | 2016-10-02T06:59:37.000Z | 2020-09-24T15:36:10.000Z | auv_control_pi/components/remote_proxy.py | adrienemery/auv-control-pi | 633fe89b652b07eb6ebe03c0550daa211b122297 | [
"MIT"
] | null | null | null | auv_control_pi/components/remote_proxy.py | adrienemery/auv-control-pi | 633fe89b652b07eb6ebe03c0550daa211b122297 | [
"MIT"
] | 4 | 2019-01-12T23:09:34.000Z | 2020-11-05T14:52:42.000Z | import logging
from autobahn.asyncio.component import Component, run
from ..config import config
from .ahrs import AHRS
from .auv_control import AUV
from .gps import GPSComponent
from .navigation import Navitgator
logger = logging.getLogger(__name__)
class RouterProxy:
"""A Proxy connect the remoute WAMP router to the local WAMP router
This allows us to expose RPC + Pub/Sub to the internet for remote control,
while still keeping a simple local interface where all local components
just connect to the local WAMP router.
When new RPC methods are added they need to be registered here.
Similarly when new topics are published we need to add the topics here to
"re-publish" them to the remote WAMP router.
"""
rpc_proxy_classes = [
AHRS,
GPSComponent,
AUV,
Navitgator
]
published_topics_proxy = [
'auv.update',
'nav.update',
'ahrs.update',
'rc_control.update',
'gps.update',
# add topics here to expose them to remote router
]
def __init__(self, remote_component, local_component):
self.remote_wamp = remote_component
self.local_wamp = local_component
self.remote_session = None # None while we're disconnected from WAMP router
self.local_session = None # None while we're disconnected from WAMP router
# associate ourselves with each WAMP session lifecycle
self.remote_wamp.on('join', self.join_remote)
# self.remote_wamp.on('challenge', self.on_remote_challenge)
self.local_wamp.on('join', self.join_local)
@property
def rpc_proxy_list(self):
rpcs = []
for cls in self.rpc_proxy_classes:
rpcs.extend(cls.rpc_uris())
return rpcs
async def register_rpc_proxies(self):
"""Register RPC methods on remote router that mirror RPC's on the local router
This allows a remote component to call RPC's on the local WAMP router.
"""
for rpc_name in self.rpc_proxy_list:
logger.debug('Registering RPC to Proxy: {}'.format(rpc_name))
class RPCProxy:
def __init__(self, local_session, rpc_name):
self._local_session = local_session
self._rpc_name = rpc_name
async def __call__(self, *args, **kwargs):
logger.debug('Proxying RPC {}, with args {}, kwargs {}'.format(self._rpc_name, args, kwargs))
return await self._local_session.call(self._rpc_name, *args, **kwargs)
await self.remote_session.register(RPCProxy(self.local_session, rpc_name), rpc_name)
async def register_pub_sub_proxies(self):
"""Publish data to remote router on topics that we are subscribed to locally
This allows remote components to subscribe to topics that are published
locally since they are being "re-published".
"""
for topic in self.published_topics_proxy:
class PubSubProxy:
def __init__(self, remote_session, topic):
self._remote_session = remote_session
self._topic = topic
async def __call__(self, *args, **kwargs):
self._remote_session.publish(self._topic, *args, **kwargs)
# subscribe to the local topics published so they can be "re-published" on the remote session
await self.local_session.subscribe(PubSubProxy(self.remote_session, topic), topic)
async def register_proxies(self):
# ensure we have both remote and local sessions setup before
# registering proxy rpc's and pub/sub's
if self.remote_session and self.local_session:
await self.register_pub_sub_proxies()
await self.register_rpc_proxies()
async def join_remote(self, session, details):
"""Handle setup when we join the remote router
"""
logger.info("Connected to Remote WAMP router")
self.remote_session = session
await self.register_proxies()
async def join_local(self, session, details):
"""Handle setup when we join the local router
"""
logger.info("Connected to Local WAMP router")
self.local_session = session
await self.register_proxies()
remote_comp = Component(
transports=config.crossbar_url,
realm=config.crossbar_realm,
authentication={
'ticket': {
'ticket': config.auth_token,
'authid': 'auv'
}
},
)
local_comp = Component(
transports="ws://crossbar:8080/ws",
realm="realm1",
)
def main():
RouterProxy(remote_comp, local_comp)
run([local_comp, remote_comp])
if __name__ == "__main__":
main()
| 32.780822 | 113 | 0.648767 |
73e4bbeff1d7383061fde873163072fc28067bb2 | 7,511 | py | Python | src/menten_gcn/decorators/base.py | MentenAI/menten_gcn | bcc7642cb32ab4e60a97687de17c1aa2dc4b5421 | [
"MIT"
] | 11 | 2020-12-15T15:36:47.000Z | 2022-03-10T19:23:36.000Z | src/menten_gcn/decorators/base.py | MentenAI/menten_gcn | bcc7642cb32ab4e60a97687de17c1aa2dc4b5421 | [
"MIT"
] | 2 | 2021-01-14T15:04:59.000Z | 2021-01-14T19:24:00.000Z | src/menten_gcn/decorators/base.py | MentenAI/menten_gcn | bcc7642cb32ab4e60a97687de17c1aa2dc4b5421 | [
"MIT"
] | null | null | null | from menten_gcn.wrappers import WrappedPose
from typing import List, Tuple
class Decorator:
#########
# BASIC #
#########
def __init__(self):
pass
def get_version_name(self) -> str:
"""
        Get a unique, versioned name of this decorator for maximal reproducibility
"""
raise NotImplementedError
def cache_data(self, wrapped_pose: WrappedPose, dict_cache: dict):
"""
Some decorators can save time by precomputing arbitrary data
and storing it in this cache.
For example, the RosettaHBondDecorator recomputes and caches
all hydrogen bonds so they become a simple lookup when decorating
individual nodes and edges.
Parameters
---------
wrapped_pose: WrappedPose
Each pose will be given its own cache.
This pose is the one we are currently caching
dict_cache: dict
Destination for your data.
Please use a unique key that won't overlap with other decorators'.
"""
pass
#########
# NODES #
#########
def n_node_features(self) -> int:
"""
How many features will this decorator add to node tensors (X)?
"""
return 0
def calc_node_features(self, wrapped_pose: WrappedPose,
resid: int, dict_cache: dict = None) -> List[float]:
"""
This does all of the business logic of calculating
the values to be added for each node.
Parameters
---------
wrapped_pose: WrappedPose
The pose we are currently generating data for
resid: int
The residue ID we are currently generating data for
dict_cache: dict
The same cache that was populated in "cache_data".
The user might not have created a cache so don't assume this is not None.
See the RosettaHBondDecorator for an example of how to use this
Returns
---------
features: list
The length of this list will be the same value as self.n_node_features().
These are the values to represent this decorator's
contribution to X for this resid.
"""
features = []
return features
def describe_node_features(self) -> List[str]:
"""
Returns descriptions of how each value is computed.
Our goal is for these descriptions to be relatively concise but
also have enough detail to fully reproduce these calculations.
Returns
---------
features: list
The length of this list will be the same value as self.n_node_features().
These are descriptions of the values to represent this decorator's
contribution to X for any arbitrary resid.
"""
return []
#########
# EDGES #
#########
def n_edge_features(self) -> int:
"""
How many features will this decorator add to edge tensors (E)?
"""
return 0
def calc_edge_features(self, wrapped_pose: WrappedPose, resid1: int, resid2: int,
dict_cache: dict = None) -> Tuple[List[float], List[float]]:
"""
This does all of the business logic of calculating
the values to be added for each edge.
This function will never be called in the reverse order
(with resid1 and resid2 swapped).
Instead, we just create both edges at once.
Parameters
---------
wrapped_pose: WrappedPose
The pose we are currently generating data for
resid1: int
The first residue ID we are currently generating data for
        resid2: int
The second residue ID we are currently generating data for
dict_cache: dict
The same cache that was populated in "cache_data".
The user might not have created a cache so don't assume this is not None.
See the RosettaHBondDecorator for an example of how to use this
Returns
---------
features: list
The length of this list will be the same value as self.n_edge_features().
These are the values to represent this decorator's
contribution to E for the edge going from resid1 -> resid2.
inv_features: list
The length of this list will be the same value as self.n_edge_features().
These are the values to represent this decorator's
contribution to E for the edge going from resid2 -> resid1.
"""
features = [] # 1 -> 2
inv_features = [] # 2 -> 1
return features, inv_features
def describe_edge_features(self) -> List[str]:
"""
Returns descriptions of how each value is computed.
Our goal is for these descriptions to be relatively concise but
also have enough detail to fully reproduce these calculations.
Returns
---------
features: list
The length of this list will be the same value as self.n_edge_features().
These are descriptions of the values to represent this decorator's
contribution to E for any arbitrary resid pair.
"""
return []
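# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical concrete Decorator showing how the interface above is
# meant to be implemented; the constant features are placeholders only.
class _ExampleConstantDecorator(Decorator):
    def get_version_name(self) -> str:
        return "_ExampleConstantDecorator_v0"
    def n_node_features(self) -> int:
        return 1
    def calc_node_features(self, wrapped_pose, resid, dict_cache=None):
        return [1.0]
    def describe_node_features(self):
        return ["Constant 1.0 placeholder value for every node"]
    def n_edge_features(self) -> int:
        return 1
    def calc_edge_features(self, wrapped_pose, resid1, resid2, dict_cache=None):
        return [0.0], [0.0]
    def describe_edge_features(self):
        return ["Constant 0.0 placeholder value for every edge"]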
class CombinedDecorator(Decorator):
#########
# BASIC #
#########
def __init__(self, decorators: list = []):
self.decorators = decorators
def get_version_name(self):
name = "CombinedDecorator("
for d in self.decorators:
name += d.get_version_name() + ","
name += ")"
return name
def cache_data(self, wrapped_pose: WrappedPose, dict_cache: dict):
for d in self.decorators:
d.cache_data(wrapped_pose, dict_cache)
#########
# NODES #
#########
def n_node_features(self):
return sum(d.n_node_features() for d in self.decorators)
def calc_node_features(self, wrapped_pose: WrappedPose, resid: int, dict_cache: dict = None):
features = []
for d in self.decorators:
features.extend(d.calc_node_features(wrapped_pose, resid=resid, dict_cache=dict_cache))
assert(len(features) == self.n_node_features())
return features
def describe_node_features(self):
features = []
for d in self.decorators:
features.extend(d.describe_node_features())
assert(len(features) == self.n_node_features())
return features
#########
# EDGES #
#########
def n_edge_features(self):
return sum(d.n_edge_features() for d in self.decorators)
def calc_edge_features(self, wrapped_pose: WrappedPose,
resid1: int, resid2: int, dict_cache: dict = None):
features = [] # 1 -> 2
inv_features = [] # 2 -> 1
for d in self.decorators:
f12, f21 = d.calc_edge_features(wrapped_pose,
resid1=resid1, resid2=resid2, dict_cache=dict_cache)
features.extend(f12)
inv_features.extend(f21)
assert(len(features) == self.n_edge_features())
assert(len(features) == len(inv_features))
return features, inv_features
def describe_edge_features(self):
features = []
for d in self.decorators:
features.extend(d.describe_edge_features())
assert(len(features) == self.n_edge_features())
return features
| 33.53125 | 99 | 0.595926 |
73e4c63042c95f6f403b43246352a6981def574a | 2,700 | py | Python | kitchen/chef.py | TrasgoGroup/Cell-Viewer | 4f2421e278b22926cd34e0600d0ecb12026d58c9 | [
"Apache-2.0"
] | null | null | null | kitchen/chef.py | TrasgoGroup/Cell-Viewer | 4f2421e278b22926cd34e0600d0ecb12026d58c9 | [
"Apache-2.0"
] | 2 | 2021-02-04T10:32:13.000Z | 2021-02-12T10:11:46.000Z | kitchen/chef.py | MCruces-fz/Cell-Viewer | 4f2421e278b22926cd34e0600d0ecb12026d58c9 | [
"Apache-2.0"
] | 1 | 2021-04-30T11:37:37.000Z | 2021-04-30T11:37:37.000Z | """
A P A C H E L I C E N S E
------------
Version 2.0, January 2004
Copyright 2021 Miguel Cruces Fernández
Licensed under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific
language governing permissions and limitations under the
License.
miguel.cruces.fernandez@usc.es
mcsquared.fz@gmail.com
"""
import numpy as np
from typing import List
class Chef:
def __init__(self, data_dir: str):
"""
Constructor for the parent class CHEF
Chef is the object whom cooks data.
:param data_dir: Prent directory with all data.
"""
self.main_data_dir = data_dir
# DECLARE VARIABLES
self.from_date = None
self.to_date = None
self.plane_name = None
self.all_data = None
self._option_list_var: List[str] = []
self.plane_event = None
@property
def option_list_var(self):
return self._option_list_var
def read_data(self) -> np.array:
"""
EDIT IS NEEDED:
Method to read all the hit data from wherever it is
stored, in whatever format is needed
:return: 3D Numpy array with all data.
"""
raise Exception("Method read_data must be override")
def update(self, from_date=None, to_date=None,
plane_name: str = "T1", var_to_update: str = None):
"""
Method to update all the self variables needed for the GUI.
:param from_date: Starting date in datetime format.
:param to_date: Ending date in datetime format.
:param plane_name: Name of the plane to get values.
:param var_to_update:
:return: Void function, It only updates self variables.
"""
raise Exception("Please, override this method.")
# # Update all data only if necessary
# if from_date != self.from_date or to_date != self.to_date or \
# plane_name != self.plane_name:
# self.from_date = from_date
# self.to_date = to_date
# self.plane_name = plane_name
# # self.mean = MEAN
# # self.std = STD
# # self.kurtosis = KURTOSIS
# # self.skewness = SKEWNESS
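# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical Chef subclass showing how read_data/update are meant to
# be overridden; the zero-filled array stands in for real detector data.
class _ExampleChef(Chef):
    def read_data(self) -> np.array:
        return np.zeros((1, 1, 1))
    def update(self, from_date=None, to_date=None,
               plane_name: str = "T1", var_to_update: str = None):
        self.from_date = from_date
        self.to_date = to_date
        self.plane_name = plane_name
        self.all_data = self.read_data()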
| 28.723404 | 72 | 0.61 |
73e4ce32efe6a5aa49530beaaec8b6bef590a48a | 2,352 | py | Python | Python/model/net.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | 1 | 2022-02-11T18:49:34.000Z | 2022-02-11T18:49:34.000Z | Python/model/net.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | null | null | null | Python/model/net.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | null | null | null | import tensorflow as tf
SCALE = 1.0
def conv_net_V7a_3d(features):
with tf.variable_scope('NET'):
input_layer = features["x"] / SCALE
output_layer = features["y"]
is_train = features["is_train"]
data = tf.reshape(input_layer, [-1, 256, 256, 256, 1])
conv1 = tf.layers.conv3d(
inputs=data,
filters=144,
kernel_size=[11, 11, 11],
strides=[3, 3, 3],
padding='same',
use_bias=True,
activation=tf.nn.relu,
name='layer1_conv')
print(conv1.shape)
pool1 = tf.layers.max_pooling3d(inputs=conv1, pool_size=[3, 3, 3], strides=2, name='layer1_pool')
print(pool1.shape)
conv2 = tf.layers.conv3d(
inputs=pool1,
filters=192,
kernel_size=[5, 5, 5],
strides=[2, 2, 2],
padding='same',
use_bias=False,
activation=tf.nn.relu,
name='layer2_conv')
print(conv2.shape)
pool2 = tf.layers.max_pooling3d(inputs=conv2, pool_size=[3, 3, 3], strides=2, name='layer2_pool')
print(pool2.shape)
conv3 = tf.layers.conv3d(
inputs=pool2,
filters=192,
kernel_size=[3, 3, 3],
padding='same',
use_bias=False,
activation=tf.nn.relu,
            name='layer3_conv')
print(conv3.shape)
pool3 = tf.layers.max_pooling3d(inputs=conv3, pool_size=[3, 3, 3], strides=2, name='layer3_pool')
print(pool3.shape)
pool3_flat = tf.reshape(pool3, [-1, 4 * 4 * 4 * 192])
dropout_pool3 = tf.layers.dropout(pool3_flat, rate=0.4, training=is_train)
fc1 = tf.layers.dense(inputs=dropout_pool3, units=6*64, use_bias=True, activation=tf.nn.relu, name='layer4_fc')
print(fc1.shape)
dropout_fc1 = tf.layers.dropout(fc1, rate=0.4, training=is_train)
fc2 = tf.layers.dense(inputs=dropout_fc1, units=6*32, use_bias=True, activation=None, name='layer5_fc')
print(fc2.shape)
dropout_fc2 = tf.layers.dropout(fc2, rate=0, training=is_train)
fc3 = tf.layers.dense(inputs=dropout_fc2, units=output_layer.shape[1], use_bias=True, activation=None, name='layer6_fc')
print(fc3.shape)
ret = tf.identity(fc3, name='model')
return ret
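# --- Illustrative sketch (not part of the original module) ---
# Building the graph with TF1-style placeholders of the shapes the network above
# expects; the output width of 6 regression targets is an assumption made purely
# for illustration.
def _example_build_graph():
    x = tf.placeholder(tf.float32, [None, 256, 256, 256, 1], name='x')
    y = tf.placeholder(tf.float32, [None, 6], name='y')
    is_train = tf.placeholder_with_default(False, shape=(), name='is_train')
    return conv_net_V7a_3d({"x": x, "y": y, "is_train": is_train})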
| 38.557377 | 128 | 0.581207 |
73e4e35ebe538050aef86fdf0b70584528a62e78 | 6,052 | py | Python | tests/wallet/test_singleton_lifecycle.py | hulatang/skynet-blockchain | d7d6f7ec84731c13b9d6d307bb171cf0e266be82 | [
"Apache-2.0"
] | 7 | 2021-09-07T02:14:15.000Z | 2022-03-27T06:42:35.000Z | tests/wallet/test_singleton_lifecycle.py | hulatang/skynet-blockchain | d7d6f7ec84731c13b9d6d307bb171cf0e266be82 | [
"Apache-2.0"
] | 1 | 2021-10-21T16:38:56.000Z | 2021-11-15T13:03:15.000Z | tests/wallet/test_singleton_lifecycle.py | hulatang/skynet-blockchain | d7d6f7ec84731c13b9d6d307bb171cf0e266be82 | [
"Apache-2.0"
] | 3 | 2021-10-21T07:17:40.000Z | 2022-03-16T12:57:09.000Z | import asyncio
from typing import List, Tuple
from blspy import G2Element
from clvm_tools import binutils
from skynet.types.blockchain_format.program import Program, INFINITE_COST
from skynet.types.announcement import Announcement
from skynet.types.blockchain_format.coin import Coin
from skynet.types.blockchain_format.sized_bytes import bytes32
from skynet.types.coin_spend import CoinSpend
from skynet.types.spend_bundle import SpendBundle
from skynet.util.condition_tools import ConditionOpcode
from skynet.util.ints import uint64
from skynet.wallet.puzzles.load_clvm import load_clvm
from tests.core.full_node.test_conditions import bt, check_spend_bundle_validity, initial_blocks
SINGLETON_MOD = load_clvm("singleton_top_layer.clvm")
LAUNCHER_PUZZLE = load_clvm("singleton_launcher.clvm")
P2_SINGLETON_MOD = load_clvm("p2_singleton.clvm")
POOL_MEMBER_MOD = load_clvm("pool_member_innerpuz.clvm")
POOL_WAITINGROOM_MOD = load_clvm("pool_waitingroom_innerpuz.clvm")
LAUNCHER_PUZZLE_HASH = LAUNCHER_PUZZLE.get_tree_hash()
SINGLETON_MOD_HASH = SINGLETON_MOD.get_tree_hash()
POOL_REWARD_PREFIX_MAINNET = bytes32.fromhex("ccd5bb71183532bff220ba46c268991a00000000000000000000000000000000")
def check_coin_spend(coin_spend: CoinSpend):
try:
cost, result = coin_spend.puzzle_reveal.run_with_cost(INFINITE_COST, coin_spend.solution)
except Exception as ex:
print(ex)
def adaptor_for_singleton_inner_puzzle(puzzle: Program) -> Program:
    # this is pretty slow
return Program.to(binutils.assemble("(a (q . %s) 3)" % binutils.disassemble(puzzle)))
def launcher_conditions_and_spend_bundle(
parent_coin_id: bytes32,
launcher_amount: uint64,
initial_singleton_inner_puzzle: Program,
metadata: List[Tuple[str, str]],
launcher_puzzle: Program = LAUNCHER_PUZZLE,
) -> Tuple[Program, bytes32, List[Program], SpendBundle]:
launcher_puzzle_hash = launcher_puzzle.get_tree_hash()
launcher_coin = Coin(parent_coin_id, launcher_puzzle_hash, launcher_amount)
singleton_full_puzzle = SINGLETON_MOD.curry(
SINGLETON_MOD_HASH, launcher_coin.name(), launcher_puzzle_hash, initial_singleton_inner_puzzle
)
singleton_full_puzzle_hash = singleton_full_puzzle.get_tree_hash()
message_program = Program.to([singleton_full_puzzle_hash, launcher_amount, metadata])
expected_announcement = Announcement(launcher_coin.name(), message_program.get_tree_hash())
expected_conditions = []
expected_conditions.append(
Program.to(
binutils.assemble(f"(0x{ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT.hex()} 0x{expected_announcement.name()})")
)
)
expected_conditions.append(
Program.to(
binutils.assemble(f"(0x{ConditionOpcode.CREATE_COIN.hex()} 0x{launcher_puzzle_hash} {launcher_amount})")
)
)
launcher_solution = Program.to([singleton_full_puzzle_hash, launcher_amount, metadata])
coin_spend = CoinSpend(launcher_coin, launcher_puzzle, launcher_solution)
spend_bundle = SpendBundle([coin_spend], G2Element())
lineage_proof = Program.to([parent_coin_id, launcher_amount])
return lineage_proof, launcher_coin.name(), expected_conditions, spend_bundle
def singleton_puzzle(launcher_id: Program, launcher_puzzle_hash: bytes32, inner_puzzle: Program) -> Program:
return SINGLETON_MOD.curry(SINGLETON_MOD_HASH, launcher_id, launcher_puzzle_hash, inner_puzzle)
def singleton_puzzle_hash(launcher_id: Program, launcher_puzzle_hash: bytes32, inner_puzzle: Program) -> bytes32:
return singleton_puzzle(launcher_id, launcher_puzzle_hash, inner_puzzle).get_tree_hash()
def solution_for_singleton_puzzle(lineage_proof: Program, my_amount: int, inner_solution: Program) -> Program:
return Program.to([lineage_proof, my_amount, inner_solution])
def p2_singleton_puzzle(launcher_id: Program, launcher_puzzle_hash: bytes32) -> Program:
return P2_SINGLETON_MOD.curry(SINGLETON_MOD_HASH, launcher_id, launcher_puzzle_hash)
def p2_singleton_puzzle_hash(launcher_id: Program, launcher_puzzle_hash: bytes32) -> bytes32:
return p2_singleton_puzzle(launcher_id, launcher_puzzle_hash).get_tree_hash()
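# --- Illustrative sketch (not part of the original test) ---
# How the puzzle helpers above are typically combined: adapt an inner puzzle,
# curry it into the singleton wrapper and hash it. The launcher id below is a
# made-up placeholder value.
def _example_singleton_hash():
    inner = adaptor_for_singleton_inner_puzzle(Program.to(1))
    fake_launcher_id = bytes32(b"\x01" * 32)
    return singleton_puzzle_hash(fake_launcher_id, LAUNCHER_PUZZLE_HASH, inner)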
def test_only_odd_coins_0():
blocks = initial_blocks()
farmed_coin = list(blocks[-1].get_included_reward_coins())[0]
metadata = [("foo", "bar")]
ANYONE_CAN_SPEND_PUZZLE = Program.to(1)
launcher_amount = uint64(1)
launcher_puzzle = LAUNCHER_PUZZLE
launcher_puzzle_hash = launcher_puzzle.get_tree_hash()
initial_singleton_puzzle = adaptor_for_singleton_inner_puzzle(ANYONE_CAN_SPEND_PUZZLE)
lineage_proof, launcher_id, condition_list, launcher_spend_bundle = launcher_conditions_and_spend_bundle(
farmed_coin.name(), launcher_amount, initial_singleton_puzzle, metadata, launcher_puzzle
)
conditions = Program.to(condition_list)
coin_spend = CoinSpend(farmed_coin, ANYONE_CAN_SPEND_PUZZLE, conditions)
spend_bundle = SpendBundle.aggregate([launcher_spend_bundle, SpendBundle([coin_spend], G2Element())])
run = asyncio.get_event_loop().run_until_complete
coins_added, coins_removed = run(check_spend_bundle_validity(bt.constants, blocks, spend_bundle))
coin_set_added = set([_.coin for _ in coins_added])
coin_set_removed = set([_.coin for _ in coins_removed])
launcher_coin = launcher_spend_bundle.coin_spends[0].coin
assert launcher_coin in coin_set_added
assert launcher_coin in coin_set_removed
assert farmed_coin in coin_set_removed
singleton_expected_puzzle_hash = singleton_puzzle_hash(launcher_id, launcher_puzzle_hash, initial_singleton_puzzle)
expected_singleton_coin = Coin(launcher_coin.name(), singleton_expected_puzzle_hash, launcher_amount)
assert expected_singleton_coin in coin_set_added
# next up: spend the expected_singleton_coin
# it's an adapted `ANYONE_CAN_SPEND_PUZZLE`
# then try a bad lineage proof
# then try writing two odd coins
# then try writing zero odd coins
# then, destroy the singleton with the -113 hack
return 0
| 42.321678 | 119 | 0.791474 |
73e507f783025453646e323a102459079c087cc4 | 1,500 | py | Python | tests/test_util.py | nelson-liu/LSTMs-exploit-linguistic-attributes | 227ef83654f7db4510e18f44fcc994bdc4d6c54a | [
"MIT"
] | 5 | 2018-06-13T19:44:02.000Z | 2019-09-02T21:33:41.000Z | tests/test_util.py | nelson-liu/LSTMs-exploit-linguistic-attributes | 227ef83654f7db4510e18f44fcc994bdc4d6c54a | [
"MIT"
] | null | null | null | tests/test_util.py | nelson-liu/LSTMs-exploit-linguistic-attributes | 227ef83654f7db4510e18f44fcc994bdc4d6c54a | [
"MIT"
] | 3 | 2018-08-29T21:48:26.000Z | 2018-11-12T21:59:17.000Z | from __future__ import unicode_literals
from numpy.testing import assert_array_equal
from lstms_exploit_linguistic_attributes.utils import sort_batch_by_length
import torch
from .common.test_case import ReproducibleTestCase
class TestUtils(ReproducibleTestCase):
def test_sort_tensor_by_length(self):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
tensor = torch.rand([5, 7, 9])
tensor[0, 3:, :] = 0
tensor[1, 4:, :] = 0
tensor[2, 1:, :] = 0
tensor[3, 5:, :] = 0
tensor.to(device)
sequence_lengths = torch.LongTensor([3, 4, 1, 5, 7])
sequence_lengths.to(device)
(sorted_tensor, sorted_lengths,
reverse_indices, _) = sort_batch_by_length(
tensor, sequence_lengths)
# Test sorted indices are padded correctly.
assert_array_equal(
sorted_tensor[1, 5:, :].to("cpu").detach().numpy(), 0.0)
assert_array_equal(
sorted_tensor[2, 4:, :].to("cpu").detach().numpy(), 0.0)
assert_array_equal(
sorted_tensor[3, 3:, :].to("cpu").detach().numpy(), 0.0)
assert_array_equal(
sorted_tensor[4, 1:, :].to("cpu").detach().numpy(), 0.0)
assert sorted_lengths.detach().equal(torch.LongTensor([7, 5, 4, 3, 1]))
# Test restoration indices correctly recover the original tensor.
assert sorted_tensor.index_select(0, reverse_indices).data.equal(
tensor.detach())
| 36.585366 | 79 | 0.631333 |
73e51e4211af1463f3d96979a261a45c4429319e | 6,228 | py | Python | examples/morphing/flight_conditions/morphed/range_constant_aoa.py | leal26/pyXFOIL | 88ff224be25cdb51eb821315f6e094f68fb13247 | [
"MIT"
] | 50 | 2016-03-15T17:24:55.000Z | 2021-12-28T07:32:45.000Z | examples/morphing/flight_conditions/morphed/range_constant_aoa.py | carsecond/AeroPy | 81685f364abd9536fc62dce114f14bef191dab8c | [
"MIT"
] | 22 | 2017-04-20T11:27:28.000Z | 2022-02-09T05:57:06.000Z | examples/morphing/flight_conditions/morphed/range_constant_aoa.py | carsecond/AeroPy | 81685f364abd9536fc62dce114f14bef191dab8c | [
"MIT"
] | 34 | 2016-03-04T15:57:37.000Z | 2022-02-15T20:06:54.000Z | import aeropy.xfoil_module as xf
from aeropy.geometry.airfoil import CST, create_x
from aeropy.morphing.camber_2D import *
from aeropy.aero_module import air_properties, Reynolds, LLT_calculator
from scipy.interpolate import griddata, RegularGridInterpolator
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import scipy
def aircraft_range_varying_V(f_L, f_LD, AOA):
def to_integrate(weight):
# velocity = 0.514444*108 # m/s (113 KTAS)
def calculate_velocity(AOA):
def residual(V):
CL = f_L([V, AOA])[0]
span = 11
chord_root = span/16.2
return abs(V - math.sqrt(weight/(.5*density*(span*chord_root))))
res = scipy.optimize.minimize(residual, 30, bounds = [[20, 65],])#, options={'ftol':1e-9})
return res.x[0]
velocity = calculate_velocity(AOA)
lift_to_drag = f_LD([velocity, AOA])
span = 10.9728
RPM = 1800
a = 0.3089 # (lb/hr)/BTU
b = 0.008*RPM+19.607 # lb/hr
lbhr_to_kgs = 0.000125998
BHP_to_watt = 745.7
eta = 0.85
thrust = weight/lift_to_drag
power_SI = thrust*velocity/eta
power_BHP = power_SI/BHP_to_watt
mass_flow = (a*power_BHP + b)
mass_flow_SI = mass_flow*lbhr_to_kgs
SFC = mass_flow_SI/thrust
dR = velocity/g/SFC*lift_to_drag/weight
return dR*0.001 # *0.0005399
AOA_list = []
    g = 9.81  # m/s^2 (standard gravitational acceleration)
fuel = 56*6.01*0.4535*g
initial_weight = 1111*g
final_weight = initial_weight-fuel
x = np.linspace(final_weight, initial_weight, 100)
y = []
for x_i in x:
y.append(to_integrate(x_i)[0])
range = scipy.integrate.simps(y, x)
return range
# ==============================================================================
# Inputs
# ==============================================================================
altitude = 10000 # ft
air_props = air_properties(altitude, unit='feet')
density = air_props['Density']
# data = pandas.read_csv('performance_grid.csv')
# psi_spars = [0.1, 0.3, 0.6, 0.8]
# c_P = 1.0
# ranges = []
# for i in range(len(data.values)):
# AC = data.values[i,0:4]
# velocity = data.values[i,-4]
# AOA = data.values[i,-5]
# cl= data.values[i,-3]
# cd = data.values[i,-2]
# CL, CD = coefficient_LLT(AC, velocity, AOA)
# data.values[i, -3] = CL
# data.values[i, -2] = CD
# data.values[i, -1] = CL/CD
# print(i, CL, CD)
# data = data.drop_duplicates()
import pickle
# f = open('wing.p', 'wb')
# pickle.dump(data, f)
# f.close()
state = 'morphed'
concepts = ['NACA0012', 'NACA4415', 'NACA641212', 'glider']
#
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# # print(aoa)
# # print(velocity)
# # print(cl)
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = concept)
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('cl')
# plt.show()
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,100)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = concept)
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('Lift-to-drag ratio')
# plt.show()
range_data = {}
plt.figure()
for concept in concepts:
data = np.loadtxt('./'+state + '_' + concept + '.txt')
aoa = np.unique(data[:,0])
velocity = np.unique(data[:,1])
cl = data[:,2].reshape([200,200])
LD_ratio = data[:,3].reshape([200,200])
f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = np.linspace(20, 65, 7)
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = velocity[i])
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = velocity[i])
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
ranges = []
# velocity = np.linspace(20, 60, 5)
for i in range(len(aoa)):
range_i = aircraft_range_varying_V(f_L, f_LD, aoa[i])
# plt.plot(np.arange(len(AOA_i)), AOA_i, label=velocity[i])
# plt.scatter(np.arange(len(AOA_i)),AOA_i)
print(i, aoa[i], range_i)
ranges.append(range_i)
# print(velocity[36])
range_data[concept] = ranges
plt.plot(aoa, ranges, lw=2, label=concept)
f = open('ranges_aoa.p', 'wb')
pickle.dump(range_data, f)
f.close()
# plt.xlim(min(velocity), max(velocity))
# plt.ylim(min(ranges), max(ranges))
plt.xlabel('Angle of Attack')
plt.ylabel('Range (km)')
plt.legend()
plt.show()
| 31.938462 | 102 | 0.595857 |
73e543796beab2fcacd8193c4d9787822e2162b4 | 3,526 | py | Python | ygoprodeck/constants.py | RonaldTheodoro/ygoprodeck-api-client | 2a64411e08f1b7e32cc60aca5ccd5b5369a4b380 | [
"MIT"
] | 1 | 2020-04-09T22:55:05.000Z | 2020-04-09T22:55:05.000Z | ygoprodeck/constants.py | RonaldTheodoro/ygoprodeck-api-client | 2a64411e08f1b7e32cc60aca5ccd5b5369a4b380 | [
"MIT"
] | 10 | 2018-12-08T02:48:47.000Z | 2021-10-02T20:18:10.000Z | ygoprodeck/constants.py | RonaldTheodoro/ygoprodeck-api-client | 2a64411e08f1b7e32cc60aca5ccd5b5369a4b380 | [
"MIT"
] | 2 | 2018-12-08T05:46:54.000Z | 2021-05-24T23:25:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ygoprodeck.constants
~~~~~~~~~~~~~~~~~~~~
Constant values.
"""
import enum
class EnumMixin(enum.Enum):
@classmethod
def is_valid_value(cls, value):
return any(item.value == value for item in cls)
@classmethod
def valid_values(cls):
return [v.value for n, v in cls.__members__.items()]
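# Minimal usage sketch of the EnumMixin helpers (illustrative only):
#
#   Race.is_valid_value('dragon')   # True
#   Race.valid_values()[:3]         # ['aqua', 'beast', 'beast-warrior']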
class Race(EnumMixin):
AQUA = 'aqua'
BEAST = 'beast'
BEAST_WARRIOR = 'beast-warrior'
CREATOR_GOD = 'creator-god'
CYBERSE = 'cyberse'
DINOSAUR = 'dinosaur'
DIVINE_BEAST = 'divine-beast'
DRAGON = 'dragon'
FAIRY = 'fairy'
FIEND = 'fiend'
FISH = 'fish'
INSECT = 'insect'
MACHINE = 'machine'
PLANT = 'plant'
PSYCHIC = 'psychic'
PYRO = 'pyro'
REPTILE = 'reptile'
ROCK = 'rock'
SEA_SERPENT = 'sea serpent'
SPELLCASTER = 'spellcaster'
THUNDER = 'thunder'
WARRIOR = 'warrior'
WINGED_BEAST = 'winged beast'
NORMAL = 'normal'
FIELD = 'field'
EQUIP = 'equip'
CONTINUOUS = 'continuous'
QUICK_PLAY = 'quick-play'
RITUAL = 'ritual'
COUNTER = 'counter'
class CardTypes(EnumMixin):
EFFECT_MONSTER = 'effect monster'
FLIP_EFFECT_MONSTER = 'flip effect monster'
FLIP_TUNER_EFFECT_MONSTER = 'flip tuner effect monster'
GEMINI_MONSTER = 'gemini monster'
NORMAL_MONSTER = 'normal monster'
NORMAL_TUNER_MONSTER = 'normal tuner monster'
PENDULUM_EFFECT_FUSION_MONSTER = 'pendulum effect fusion monster'
PENDULUM_EFFECT_MONSTER = 'pendulum effect monster'
PENDULUM_FLIP_EFFECT_MONSTER = 'pendulum flip effect monster'
PENDULUM_NORMAL_MONSTER = 'pendulum normal monster'
PENDULUM_TUNER_EFFECT_MONSTER = 'pendulum tuner effect monster'
RITUAL_EFFECT_MONSTER = 'ritual effect monster'
RITUAL_MONSTER = 'ritual monster'
SKILL_CARD = 'skill card'
SPELL_CARD = 'spell card'
SPIRIT_MONSTER = 'spirit monster'
TOON_MONSTER = 'toon monster'
TRAP_CARD = 'trap card'
TUNER_MONSTER = 'tuner monster'
UNION_EFFECT_MONSTER = 'union effect monster'
UNION_TUNER_EFFECT_MONSTER = 'union tuner effect monster'
FUSION_MONSTER = 'fusion monster'
LINK_MONSTER = 'link monster'
SYNCHRO_MONSTER = 'synchro monster'
SYNCHRO_PENDULUM_EFFECT_MONSTER = 'synchro pendulum effect monster'
SYNCHRO_TUNER_MONSTER = 'synchro tuner monster'
XYZ_MONSTER = 'xyz monster'
XYZ_PENDULUM_EFFECT_MONSTER = 'xyz pendulum effect monster'
class Attributes(EnumMixin):
DARK = 'dark'
DIVINE = 'divine'
EARTH = 'earth'
FIRE = 'fire'
LIGHT = 'light'
WATER = 'water'
WIND = 'wind'
class Language(EnumMixin):
FRENCH = 'fr'
GERMAN = 'de'
PORTUGUESE = 'pt'
ITALIAN = 'it'
class LinkMarkers(EnumMixin):
TOP = 'top'
TOP_RIGHT = 'top-right'
TOP_LEFT = 'top-left'
LEFT = 'left'
RIGHT = 'right'
BOTTOM = 'bottom'
BOTTOM_RIGHT = 'bottom-right'
BOTTOM_LEFT = 'bottom-left'
class Banlist(EnumMixin):
TCG = 'tcg'
OCG = 'ocg'
GOAT = 'goat'
class BanlistStatus(EnumMixin):
BANNED = 'banned'
LIMITED = 'limited'
SEMI_LIMITED = 'semi-limited'
UNLIMITED = 'unlimited'
class SortParams(EnumMixin):
ATK = 'atk'
DEF = 'def'
NAME = 'name'
TYPE = 'type'
LEVEL = 'level'
ID = 'id'
NEW = 'new'
class Format(EnumMixin):
TCG = 'tcg'
GOAT = 'goat'
OCG_GOAT = 'ocg goat'
SPEED_DUEL = 'speed duel'
RUSH_DUEL = 'rush duel'
DUEL_LINKS = 'duel links'
| 23.986395 | 71 | 0.647476 |
73e54f3d9b66b22d44d7d7e8f1269a8983eb9339 | 839 | py | Python | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo/rootwrap/__init__.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo/vmware/__init__.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo/vmware/__init__.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
def deprecated():
new_name = __name__.replace('.', '_')
warnings.warn(
('The oslo namespace package is deprecated. Please use %s instead.' %
new_name),
DeprecationWarning,
stacklevel=3,
)
deprecated()
| 31.074074 | 78 | 0.688915 |
73e5660045f3586616bf85a8d339a4367b6343e9 | 10,015 | py | Python | doc/conf.py | ilumsden/signac-flow | 1f1e11ce2fb298a7fe0fd6af2ee3a742ede72607 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | ilumsden/signac-flow | 1f1e11ce2fb298a7fe0fd6af2ee3a742ede72607 | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | ilumsden/signac-flow | 1f1e11ce2fb298a7fe0fd6af2ee3a742ede72607 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# signac-flow documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 16 14:21:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
if name == '_mock_methods':
return []
return Mock()
MOCK_MODULES = []
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'signac-flow'
copyright = 'The Regents of the University of Michigan'
author = u'Carl S. Adorf, Vyas Ramasubramani, Bradley D. Dice, Michael M. Henry, Paul M. Dodd, Sharon C. Glotzer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8'
# The full version, including alpha/beta/rc tags.
release = '0.8.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'signac-flowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'signac-flow.tex', 'signac-flow Documentation',
'Carl Simon Adorf, Paul Dodd', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'signac-flow', 'signac-flow Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'signac-flow', 'signac-flow Documentation',
author, 'signac-flow', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/': None,
'signac': ('https://signac.readthedocs.io/projects/core/en/latest/', None),
}
| 32.411003 | 113 | 0.716525 |
73e571f9b5d1e907543b01cf8d0cee822c5e2643 | 1,261 | py | Python | Python/ex113.py | maurovasconcelos/Ola-Mundo | 526c6c271fbe916c4f9f22153828e4d8c726a544 | [
"MIT"
] | 1 | 2021-02-16T17:36:53.000Z | 2021-02-16T17:36:53.000Z | Python/ex113.py | maurovasconcelos/Ola-Mundo | 526c6c271fbe916c4f9f22153828e4d8c726a544 | [
"MIT"
] | null | null | null | Python/ex113.py | maurovasconcelos/Ola-Mundo | 526c6c271fbe916c4f9f22153828e4d8c726a544 | [
"MIT"
] | null | null | null | def leiaint(msg):
    while True:  # repeat forever
        try:  # try to read the value
            n = int(input(msg))
        except (ValueError, TypeError):  # if the error is a ValueError or TypeError, then:
            print('\033[31mERROR: Please enter a valid integer.\033[m ')
            continue  # ...
        except KeyboardInterrupt:
            print('\033[31mInput interrupted by the user.\033[m')
            return 0
        else:  # otherwise (no error occurred)...
            return n  # return n, the value that was typed
def leiafloat(msg):
    while True:  # repeat forever
        try:  # try to read the value
            n = float(input(msg))
        except (ValueError, TypeError):  # if the error is a ValueError or TypeError, then:
            print('\033[31mERROR: Please enter a valid real number.\033[m ')
            continue  # ...
        except KeyboardInterrupt:
            print('\033[31mInput interrupted by the user.\033[m')
            return 0
        else:  # otherwise (no error occurred)...
            return n  # return n, the value that was typed
# main program
n1 = leiaint('Enter an integer value: ')
n2 = leiafloat('Enter a real value: ')
print(f'The integer value entered was {n1} and the real value was {n2}') | 38.212121 | 86 | 0.599524 |
73e5a34199b931b8efbfd3f3bfcf4f6c676d45a5 | 4,815 | py | Python | hint/01/Miner-Training-Local-CodeSample-Approach1/MinerEnv.py | phunc20/rlcomp2020 | c37f8f05cc86d55fca2648bf5491d6a2218c2cad | [
"MIT"
] | null | null | null | hint/01/Miner-Training-Local-CodeSample-Approach1/MinerEnv.py | phunc20/rlcomp2020 | c37f8f05cc86d55fca2648bf5491d6a2218c2cad | [
"MIT"
] | 1 | 2022-02-10T02:27:10.000Z | 2022-02-10T02:27:10.000Z | hint/01/Miner-Training-Local-CodeSample-Approach1/MinerEnv.py | phunc20/rlcomp2020 | c37f8f05cc86d55fca2648bf5491d6a2218c2cad | [
"MIT"
] | null | null | null | import sys
import numpy as np
from GAME_SOCKET_DUMMY import GameSocket #in testing version, please use GameSocket instead of GAME_SOCKET_DUMMY
from MINER_STATE import State
TreeID = 1
TrapID = 2
SwampID = 3
class MinerEnv:
def __init__(self, host, port):
self.socket = GameSocket(host, port)
self.state = State()
self.score_pre = self.state.score#Storing the last score for designing the reward function
self.pos_x_pre = self.state.x
self.pos_y_pre = self.state.y
def start(self): #connect to server
self.socket.connect()
def end(self): #disconnect server
self.socket.close()
def send_map_info(self, request):#tell server which map to run
self.socket.send(request)
def reset(self): #start new game
try:
message = self.socket.receive() #receive game info from server
self.state.init_state(message) #init state
self.score_pre = self.state.score#Storing the last score for designing the reward function
self.pos_x_pre = self.state.x
self.pos_y_pre = self.state.y
except Exception as e:
import traceback
traceback.print_exc()
def step(self, action): #step process
self.socket.send(action) #send action to server
try:
message = self.socket.receive() #receive new state from server
self.state.update_state(message) #update to local state
except Exception as e:
import traceback
traceback.print_exc()
# Functions are customized by client
def get_state(self):
#Local view
view = np.zeros([5,5])
for i in range(-2,3):
for j in range(-2,3):
index_x = self.state.x + i
index_y = self.state.y + j
if index_x < 0 or index_y < 0 or index_x >= self.state.mapInfo.max_x or index_y >= self.state.mapInfo.max_y:
view[2+i,2+j] = -1
else:
if self.state.mapInfo.get_obstacle(index_x, index_y) == TreeID:
view[2+i,2+j] = -1
if self.state.mapInfo.get_obstacle(index_x, index_y) == TrapID:
view[2+i,2+j] = -1
if self.state.mapInfo.get_obstacle(index_x, index_y) == SwampID:
view[2+i,2+j] = -1
#Create the state
DQNState = view.flatten().tolist()
self.pos_x_gold_first =self.state.x
self.pos_y_gold_first = self.state.y
if len(self.state.mapInfo.golds) > 0:
self.pos_x_gold_first = self.state.mapInfo.golds[0]["posx"]
self.pos_y_gold_first = self.state.mapInfo.golds[0]["posy"]
DQNState.append(self.pos_x_gold_first-self.state.x)
DQNState.append(self.pos_y_gold_first-self.state.y)
#Convert the DQNState from list to array for training
DQNState = np.array(DQNState)
return DQNState
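    # Note on the state layout (descriptive only): the vector returned by get_state has
    # 5*5 + 2 = 27 entries -- the flattened 5x5 local view (with -1 marking obstacles and
    # out-of-map cells) followed by the (dx, dy) offset from the agent to the first gold
    # deposit listed in the map.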
def get_reward(self):
# Calculate reward
reward = 0
goldamount = self.state.mapInfo.gold_amount(self.state.x, self.state.y)
if goldamount> 0:
reward += 10#goldamount
#remove the gold
for g in self.socket.stepState.golds:
if g.posx == self.state.x and g.posy == self.state.y:
self.socket.stepState.golds.remove(g)
#If the DQN agent crashs into obstacels (Tree, Trap, Swamp), then it should be punished by a negative reward
if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TreeID: # Tree
reward -= 0.2
if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == TrapID: # Trap
reward -= 0.2
if self.state.mapInfo.get_obstacle(self.state.x, self.state.y) == SwampID: # Swamp
reward -= 0.2
dis_pre = np.sqrt((self.pos_x_pre- self.pos_x_gold_first)**2 + (self.pos_y_pre-self.pos_y_gold_first)**2)
dis_curr = np.sqrt((self.state.x- self.pos_x_gold_first)**2 + (self.state.y-self.pos_y_gold_first)**2)
if (dis_curr - dis_pre) <= 0:#Reducing the distance , reward ++
reward += 0.1
else:
reward -= 0.1
# If out of the map, then the DQN agent should be punished by a larger nagative reward.
if self.state.status == State.STATUS_ELIMINATED_WENT_OUT_MAP:
reward += -10
return reward
def check_terminate(self):
#Checking the status of the game
#it indicates the game ends or is playing
return self.state.status != State.STATUS_PLAYING
| 41.153846 | 124 | 0.585047 |
73e5b173b2e19f5826c87f6f2796eb99f44bbbae | 20,304 | py | Python | intersight/models/iam_user.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | intersight/models/iam_user.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | intersight/models/iam_user.py | gumpcraca/intersight-python | 780e6703c739f329084beacbbf2ad7a6a2e59b2b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class IamUser(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'api_keys': 'list[IamApiKeyRef]',
'client_ip_address': 'str',
'email': 'str',
'first_name': 'str',
'idp': 'IamIdpRef',
'idpreference': 'IamIdpReferenceRef',
'last_login_time': 'datetime',
'last_name': 'str',
'name': 'str',
'permissions': 'list[IamPermissionRef]',
'sessions': 'list[IamSessionRef]',
'user_type': 'str'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'version_context': 'VersionContext',
'api_keys': 'ApiKeys',
'client_ip_address': 'ClientIpAddress',
'email': 'Email',
'first_name': 'FirstName',
'idp': 'Idp',
'idpreference': 'Idpreference',
'last_login_time': 'LastLoginTime',
'last_name': 'LastName',
'name': 'Name',
'permissions': 'Permissions',
'sessions': 'Sessions',
'user_type': 'UserType'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, version_context=None, api_keys=None, client_ip_address=None, email=None, first_name=None, idp=None, idpreference=None, last_login_time=None, last_name=None, name=None, permissions=None, sessions=None, user_type=None):
"""
IamUser - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._version_context = None
self._api_keys = None
self._client_ip_address = None
self._email = None
self._first_name = None
self._idp = None
self._idpreference = None
self._last_login_time = None
self._last_name = None
self._name = None
self._permissions = None
self._sessions = None
self._user_type = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if api_keys is not None:
self.api_keys = api_keys
if client_ip_address is not None:
self.client_ip_address = client_ip_address
if email is not None:
self.email = email
if first_name is not None:
self.first_name = first_name
if idp is not None:
self.idp = idp
if idpreference is not None:
self.idpreference = idpreference
if last_login_time is not None:
self.last_login_time = last_login_time
if last_name is not None:
self.last_name = last_name
if name is not None:
self.name = name
if permissions is not None:
self.permissions = permissions
if sessions is not None:
self.sessions = sessions
if user_type is not None:
self.user_type = user_type
@property
def account_moid(self):
"""
Gets the account_moid of this IamUser.
The Account ID for this managed object.
:return: The account_moid of this IamUser.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this IamUser.
The Account ID for this managed object.
:param account_moid: The account_moid of this IamUser.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this IamUser.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this IamUser.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this IamUser.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this IamUser.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this IamUser.
The time when this managed object was created.
:return: The create_time of this IamUser.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this IamUser.
The time when this managed object was created.
:param create_time: The create_time of this IamUser.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this IamUser.
The time when this managed object was last modified.
:return: The mod_time of this IamUser.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this IamUser.
The time when this managed object was last modified.
:param mod_time: The mod_time of this IamUser.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this IamUser.
A unique identifier of this Managed Object instance.
:return: The moid of this IamUser.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this IamUser.
A unique identifier of this Managed Object instance.
:param moid: The moid of this IamUser.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this IamUser.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this IamUser.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this IamUser.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this IamUser.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this IamUser.
An array of owners which represent effective ownership of this object.
:return: The owners of this IamUser.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this IamUser.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this IamUser.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this IamUser.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this IamUser.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this IamUser.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this IamUser.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this IamUser.
An array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this IamUser.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this IamUser.
An array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this IamUser.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this IamUser.
The versioning info for this managed object
:return: The version_context of this IamUser.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this IamUser.
The versioning info for this managed object
:param version_context: The version_context of this IamUser.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def api_keys(self):
"""
Gets the api_keys of this IamUser.
Current API keys of the user. API keys are used to programatically perform API calls.
:return: The api_keys of this IamUser.
:rtype: list[IamApiKeyRef]
"""
return self._api_keys
@api_keys.setter
def api_keys(self, api_keys):
"""
Sets the api_keys of this IamUser.
Current API keys of the user. API keys are used to programatically perform API calls.
:param api_keys: The api_keys of this IamUser.
:type: list[IamApiKeyRef]
"""
self._api_keys = api_keys
@property
def client_ip_address(self):
"""
Gets the client_ip_address of this IamUser.
Specifies the IP address from which the user logged in the last time.
:return: The client_ip_address of this IamUser.
:rtype: str
"""
return self._client_ip_address
@client_ip_address.setter
def client_ip_address(self, client_ip_address):
"""
Sets the client_ip_address of this IamUser.
Specifies the IP address from which the user logged in the last time.
:param client_ip_address: The client_ip_address of this IamUser.
:type: str
"""
self._client_ip_address = client_ip_address
@property
def email(self):
"""
Gets the email of this IamUser.
Email of the user. Users are added to Intersight using the email configured in the IdP.
:return: The email of this IamUser.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""
Sets the email of this IamUser.
Email of the user. Users are added to Intersight using the email configured in the IdP.
:param email: The email of this IamUser.
:type: str
"""
self._email = email
@property
def first_name(self):
"""
Gets the first_name of this IamUser.
First name of the user. This field is populated from the IdP attributes received after authentication.
:return: The first_name of this IamUser.
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""
Sets the first_name of this IamUser.
First name of the user. This field is populated from the IdP attributes received after authentication.
:param first_name: The first_name of this IamUser.
:type: str
"""
self._first_name = first_name
@property
def idp(self):
"""
Gets the idp of this IamUser.
:return: The idp of this IamUser.
:rtype: IamIdpRef
"""
return self._idp
@idp.setter
def idp(self, idp):
"""
Sets the idp of this IamUser.
:param idp: The idp of this IamUser.
:type: IamIdpRef
"""
self._idp = idp
@property
def idpreference(self):
"""
Gets the idpreference of this IamUser.
:return: The idpreference of this IamUser.
:rtype: IamIdpReferenceRef
"""
return self._idpreference
@idpreference.setter
def idpreference(self, idpreference):
"""
Sets the idpreference of this IamUser.
:param idpreference: The idpreference of this IamUser.
:type: IamIdpReferenceRef
"""
self._idpreference = idpreference
@property
def last_login_time(self):
"""
Gets the last_login_time of this IamUser.
Specifies the last login time for user.
:return: The last_login_time of this IamUser.
:rtype: datetime
"""
return self._last_login_time
@last_login_time.setter
def last_login_time(self, last_login_time):
"""
Sets the last_login_time of this IamUser.
Specifies the last login time for user.
:param last_login_time: The last_login_time of this IamUser.
:type: datetime
"""
self._last_login_time = last_login_time
@property
def last_name(self):
"""
Gets the last_name of this IamUser.
Last name of the user. This field is populated from the IdP attributes received after authentication.
:return: The last_name of this IamUser.
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""
Sets the last_name of this IamUser.
Last name of the user. This field is populated from the IdP attributes received after authentication.
:param last_name: The last_name of this IamUser.
:type: str
"""
self._last_name = last_name
@property
def name(self):
"""
Gets the name of this IamUser.
UserID as configured in the IdP.
:return: The name of this IamUser.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this IamUser.
UserID as configured in the IdP.
:param name: The name of this IamUser.
:type: str
"""
self._name = name
@property
def permissions(self):
"""
Gets the permissions of this IamUser.
Permissions assigned to the user. Permission provides a way to assign roles to a user or user group to perform operations on object hierarchy.
:return: The permissions of this IamUser.
:rtype: list[IamPermissionRef]
"""
return self._permissions
@permissions.setter
def permissions(self, permissions):
"""
Sets the permissions of this IamUser.
Permissions assigned to the user. Permission provides a way to assign roles to a user or user group to perform operations on object hierarchy.
:param permissions: The permissions of this IamUser.
:type: list[IamPermissionRef]
"""
self._permissions = permissions
@property
def sessions(self):
"""
Gets the sessions of this IamUser.
Current web sessions of the user. After a user logs into Intersight, a session object is created. This session object is deleted upon logout, idle timeout, expiry timeout, or manual deletion.
:return: The sessions of this IamUser.
:rtype: list[IamSessionRef]
"""
return self._sessions
@sessions.setter
def sessions(self, sessions):
"""
Sets the sessions of this IamUser.
Current web sessions of the user. After a user logs into Intersight, a session object is created. This session object is deleted upon logout, idle timeout, expiry timeout, or manual deletion.
:param sessions: The sessions of this IamUser.
:type: list[IamSessionRef]
"""
self._sessions = sessions
@property
def user_type(self):
"""
Gets the user_type of this IamUser.
Specifies if the user is added manually by specifying email, or has logged in using groups, based on IdP attributes received during authentication. If added manually, the user type will be static, otherwise dynamic.
:return: The user_type of this IamUser.
:rtype: str
"""
return self._user_type
@user_type.setter
def user_type(self, user_type):
"""
Sets the user_type of this IamUser.
Specifies if the user is added manually by specifying email, or has logged in using groups, based on IdP attributes received during authentication. If added manually, the user type will be static, otherwise dynamic.
:param user_type: The user_type of this IamUser.
:type: str
"""
self._user_type = user_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, IamUser):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
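# Minimal construction sketch (illustrative only; the field values are placeholders,
# not taken from a real Intersight account):
#
#   user = IamUser(name='jdoe', email='jdoe@example.com', user_type='static')
#   user.to_dict()   # dict keyed by the snake_case attribute names defined in swagger_types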
| 28.597183 | 374 | 0.593627 |
73e5b7ad4bf19b12c479c7fa20e280455f9be5f9 | 7,713 | py | Python | code/figures/supplement/figS8_schmidt_correction_approaches.py | gchure/quantitative_proteome | 90d15dd208647332aeb06cf7e16ab71a6b210820 | [
"MIT"
] | 2 | 2020-05-18T23:03:10.000Z | 2020-10-27T19:31:51.000Z | code/figures/supplement/figS8_schmidt_correction_approaches.py | gchure/quantitative_proteome | 90d15dd208647332aeb06cf7e16ab71a6b210820 | [
"MIT"
] | 1 | 2020-02-14T23:03:21.000Z | 2020-02-14T23:03:21.000Z | code/figures/supplement/figS8_schmidt_correction_approaches.py | RPGroup-PBoC/growth_limits | 90d15dd208647332aeb06cf7e16ab71a6b210820 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from scipy import stats
import glob
import matplotlib.pyplot as plt
import matplotlib.ticker
import prot.viz
import prot.size
colors, palette = prot.viz.bokeh_theme()
# dataset_colors = prot.viz.dataset_colors()
prot.viz.plotting_style()
# Exponential fit function
from scipy.optimize import curve_fit
def func(x, a, c, d):
return a*np.exp(-c*x)+d
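# Descriptive note: `func` is the three-parameter exponential model M(x) = a*exp(-c*x) + d;
# it is fit below to protein mass per cell (popt_fg) and later to DNA mass per cell
# (popt_dna) as a function of growth rate, with curve_fit returning the (a, c, d) estimates.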
basan_df = pd.read_csv('../../data/basan2015_raw_data/basan2015_data.csv')
##############################################
##############################################
# Perform exponential fit of Basan
##############################################
##############################################
popt_fg, pcov_fg = curve_fit(func, basan_df.growth_rate_hr.values, basan_df.protein_fg.values, p0=(1, 1e-6, 1))
##############################################
##############################################
# Now plot!
##############################################
##############################################
fig, ax = plt.subplots(1, 2, figsize=(5, 2.5))
# plot Schmidt and Li values
##############################################
# Load the original dataset with absolute measurements
data_orig = pd.read_csv('../../data/compiled_datasets.csv')
data_orig = data_orig[data_orig.dataset == 'li_2014']
for d, df in data_orig.groupby(['dataset', 'dataset_name', 'condition', 'growth_rate_hr']):
mass = df.reported_fg_per_cell.sum()
ax[0].plot(df.growth_rate_hr.unique(), mass, 'o', ms=4, color=colors['purple'],
markeredgewidth=0.5, markeredgecolor='k', label=d[1])
data_orig = pd.read_csv('../../data/compiled_datasets.csv')
data_orig = data_orig[data_orig.dataset == 'schmidt_2016']
for d, df in data_orig.groupby(['dataset', 'dataset_name', 'condition', 'growth_rate_hr']):
mass = df.reported_fg_per_cell.sum()
ax[0].plot(df.growth_rate_hr.unique(), mass, 'o', ms=4, color=colors['light_blue'],
markeredgewidth=0.5, markeredgecolor='k', label=d[1])
handles, labels = ax[0].get_legend_handles_labels()
by_label = dict(zip(labels, handles))
ax[0].legend(by_label.values(), by_label.keys(), loc = 'upper left', fontsize = 6)
# approach 1 - use cell volumes from Si et al. 2017; maintain constant cellular protein concentation
##############################################
vol_glu = prot.size.lambda2size(0.58)
mass_glu = data_orig[data_orig.condition == 'glucose'].reported_fg_per_cell.sum()
for d, df in data_orig.groupby(['dataset', 'dataset_name', 'condition', 'growth_rate_hr']):
vol = prot.size.lambda2size(d[3])
ax[1].plot(df.growth_rate_hr.unique(), (mass_glu/vol_glu) * vol , 'o', ms=4, color=colors['light_red'],
markeredgewidth=0.5, markeredgecolor='k', label=d[1])
# plot smooth curve with data
x = np.linspace(0,2,100)
vol = prot.size.lambda2size(x)
ax[1].plot(x, (mass_glu/vol_glu) * vol, '-', alpha = 0.6,
lw = 0.5, color = colors['light_red'], zorder = 0)
# approach 2 - use cell volumes from Si et al. 2017; maintain constant cellular protein concentation
##############################################
# gather RNA/protein and DNA data from literature
##############################################
##############################################
# perform linear fit of RNA-ro-protein ratios
# pairwise for < 0.7 hr-1 and >= 0.7 hr-1
dai_df = pd.read_csv('../../data/dai2016_raw_data/dai2016_summary.csv')
dai_df_slow = dai_df[dai_df.growth_rate_hr < 0.7]
dai_df_fast = dai_df[dai_df.growth_rate_hr >= 0.7]
slope_dia_RP_A, intercept_dia_RP_A, r_value, p_value, std_err = stats.linregress(dai_df_slow.growth_rate_hr.values,
dai_df_slow.RNA_P_ratio.values)
slope_dia_RP_B, intercept_dia_RP_B, r_value, p_value, std_err = stats.linregress(dai_df_fast.growth_rate_hr.values,
dai_df_fast.RNA_P_ratio.values)
basan_df = pd.read_csv('../../data/basan2015_raw_data/basan2015_data.csv')
popt_dna, pcov_dna = curve_fit(func, basan_df.growth_rate_hr.values, basan_df.dna_fg.values, p0=(1, 1e-6, 1))
schmidt_gr = data_orig.sort_values(by='growth_rate_hr', ascending = True).growth_rate_hr.unique()
pred_vol_Si = prot.size.lambda2size(schmidt_gr)
# assume 1.1 g/ml mass density in cell, 30 % dry mass
# calculate total dry mass in fg
pred_drymass_Si = ((1.1*0.30)*(pred_vol_Si*1E-12)*1E15)
# calculate total DNA mass in fg
pred_dnamass_Si = func(schmidt_gr, *popt_dna)
# Calculate total RNA + protein mass, assume DNA + RNA + protein == 90% dry mass
pred_RNA_protein_mass_Si = pred_drymass_Si*0.90 - pred_dnamass_Si
# predict RNA/Protein ratio:
pred_RNA_protein_ratio = np.append(\
(slope_dia_RP_A*schmidt_gr[:-2] + intercept_dia_RP_A),
(slope_dia_RP_B*schmidt_gr[-2:] + intercept_dia_RP_B))
# estimate total RNA mass per cell
pred_RNAmass_Si = (pred_RNA_protein_ratio * pred_RNA_protein_mass_Si) / (1+ pred_RNA_protein_ratio)
# estimate total protein mass per cell
pred_proteinmass_Si = (pred_RNA_protein_mass_Si/ (1+pred_RNA_protein_ratio))
# Plot predictions of protein along with 'data points' for Schmidt
x = np.linspace(0,2,100)
vol = prot.size.lambda2size(schmidt_gr)
ax[1].plot(schmidt_gr, pred_proteinmass_Si, 'o', ms=4, color=colors['light_green'],
markeredgewidth=0.5, markeredgecolor='k', label = 'protein')
# repeat calculations for smooth curve, predict RNA/Protein ratio <= 0.7 hr-1:
x = np.linspace(0,0.7,100)
vol = prot.size.lambda2size(x)
pred_drymass_Si = ((1.1*0.30)*(vol*1E-12)*1E15)
pred_dnamass_Si = func(x, *popt_dna)
# Calculate total RNA + protein mass, assume DNA + RNA + protein == 90% dry mass
pred_RNA_protein_mass_Si = pred_drymass_Si*0.90 - pred_dnamass_Si
# estimate R:P ratio
pred_RNA_protein_ratio = slope_dia_RP_A*x + intercept_dia_RP_A
# estimate total protein mass per cell
pred_proteinmass_Si = (pred_RNA_protein_mass_Si/ (1+pred_RNA_protein_ratio))
# fg per cell
ax[1].plot(x, pred_proteinmass_Si, '-', alpha = 0.6,
lw = 0.5, color = colors['light_green'], zorder = 0)
# repeat calculations for smooth curve, predict RNA/Protein ratio >= 0.7 hr-1:
x = np.linspace(0.7, 2.0, 100)
vol = prot.size.lambda2size(x)
pred_drymass_Si = ((1.1*0.30)*(vol*1E-12)*1E15)
pred_dnamass_Si = func(x, *popt_dna)
# Calculate total RNA + protein mass, assume DNA + RNA + protein == 90% dry mass
pred_RNA_protein_mass_Si = pred_drymass_Si*0.90 - pred_dnamass_Si
# estimate R:P ratio
pred_RNA_protein_ratio = slope_dia_RP_B*x + intercept_dia_RP_B
# estimate total protein mass per cell
pred_proteinmass_Si = (pred_RNA_protein_mass_Si/ (1+pred_RNA_protein_ratio))
# fg per cell
ax[1].plot(x, pred_proteinmass_Si, '-', alpha = 0.8,
lw = 0.9, color = colors['light_green'], zorder = 0)
# approach 3 - use measurements of protein per cell from Basan 2015
##############################################
x = np.linspace(0,2,100)
ax[1].plot(basan_df.growth_rate_hr.values, basan_df.protein_fg.values, 'o', ms=4, color=colors['light_purple'],
markeredgewidth=0.5, markeredgecolor='k', label = 'Approach 3')
ax[1].plot(x, func(x, *popt_fg), '-', alpha = 0.6,
lw = 0.5, color=colors['light_purple'], zorder = 0)
for ax_ in ax:
ax_.set_ylabel('protein mass per cell [fg]', fontsize=6)
# ax.legend(loc = 'upper left', fontsize=6)
ax_.xaxis.set_tick_params(labelsize=5)
ax_.yaxis.set_tick_params(labelsize=5)
ax_.set_xlabel('growth rate [hr$^{-1}$]', fontsize=6)
ax_.set_xlim([0, 2])
ax_.set_ylim([0, 1300])
plt.tight_layout()
fig.savefig('../../figures/figS8_schmidt_corrections_approaches.pdf', bbox_inches='tight')
| 40.171875 | 115 | 0.654868 |
73e5cdc6600030cfe462b4484058f2a16f1ca0f1 | 4,111 | py | Python | aviation_weather/forecast.py | StephenOrJames/aviation-weather | 35594cc1ba448acdea9123380cc47ac96d2107c8 | [
"MIT"
] | 2 | 2017-03-14T23:28:28.000Z | 2020-01-10T23:04:30.000Z | aviation_weather/forecast.py | aviationweather/aviation_weather | 35594cc1ba448acdea9123380cc47ac96d2107c8 | [
"MIT"
] | null | null | null | aviation_weather/forecast.py | aviationweather/aviation_weather | 35594cc1ba448acdea9123380cc47ac96d2107c8 | [
"MIT"
] | null | null | null | import re
import aviation_weather
from aviation_weather import exceptions
class Forecast(object):
"""The Forecast class represents a weather forecast.
Attributes:
type (MessageType): The type of forecast (e.g. 'TAF' or 'TAF AMD').
location (Location): The location associated with the forecast.
time (Time): The issuance time of the forecast.
valid_period (str): The time period for which the forecast is valid.
wind (Wind): The forecast winds.
visibility (Visibility): The forecast prevailing visibility.
weather_groups (tuple(WeatherGroup)): The forecast weather groups.
sky_conditions (tuple(SkyConditions)): The forecast sky conditions.
wind_shear (WindShear): The forecast wind shear.
changes (list(ChangeGroup)): A list of the change groups (BECMG, FM, PROB, or TEMPO).
"""
def __init__(self, raw):
parts = re.split(r"\s(?=(?:BECMG|FM\d{6}|PROB\d{2}|TEMPO)\s)", raw) # separate the major parts
try:
self._parse(raw, parts)
except (exceptions.ComponentDecodeError, IndexError) as e:
raise exceptions.ForecastDecodeError("Forecast(%r) could not be parsed" % raw) from e
def _parse(self, raw, parts):
part = parts[0]
t = re.match(r"TAF(?: AMD)?\s+", raw)
if t is None:
self.type = None
r = part.split()
else:
tg = t.group()
self.type = aviation_weather.MessageType(tg.rstrip())
            # take the substring after the matched "TAF"/"TAF AMD" prefix;
            # str.lstrip(tg) would strip characters, not the prefix itself
            r = part[len(tg):].split()
self.location = aviation_weather.Location(r[0])
self.time = aviation_weather.Time(r[1])
self.valid_period = tuple(aviation_weather.Time("%s00Z" % period) for period in r[2].split("/"))
self.wind = aviation_weather.Wind(r[3])
self.visibility = aviation_weather.Visibility(r[4])
# Weather groups
t = list()
i = 5
try:
while True:
t.append(aviation_weather.WeatherGroup(r[i]))
i += 1
except (exceptions.WeatherGroupDecodeError, IndexError):
r = r[i:]
self.weather_groups = tuple(t)
# Sky conditions
t = list()
i = 0
try:
while True:
t.append(aviation_weather.SkyCondition(r[i]))
i += 1
except (exceptions.SkyConditionDecodeError, IndexError):
r = r[i:]
self.sky_conditions = tuple(t)
if r:
self.wind_shear = aviation_weather.WindShear(r[0])
else:
self.wind_shear = None
# Changes
self.changes = list()
for part in parts[1:]:
if part.startswith("BECMG"):
p = aviation_weather.BecomingGroup(part)
elif part.startswith("FM"):
p = aviation_weather.FromGroup(part)
elif part.startswith("PROB"):
p = aviation_weather.ProbabilityGroup(part)
elif part.startswith("TEMPO"):
p = aviation_weather.TemporaryGroup(part)
else:
p = None
if p:
self.changes.append(p)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.raw)
def __str__(self):
return self.raw # TODO: change to self.decoded (if and when it is implemented)
@property
def raw(self):
raw = ""
if self.type:
raw += " %s" % self.type.raw
raw += " %s" % self.location.raw
raw += " %s" % self.time.raw
raw += " %s/%s" % (self.valid_period[0].raw[:-3], self.valid_period[1].raw[:-3])
raw += " %s" % self.wind.raw
raw += " %s" % self.visibility.raw
for weather_group in self.weather_groups:
raw += " %s" % weather_group.raw
for sky_condition in self.sky_conditions:
raw += " %s" % sky_condition.raw
if self.wind_shear:
raw += " %s" % self.wind_shear.raw
for change in self.changes:
raw += " %s" % change.raw
return raw[1:]
| 35.747826 | 104 | 0.564096 |
73e5f2882cf433bca0fa8fb76f1eb89b9f97dbff | 13,955 | py | Python | dl/step1/create_onegoods_tf_record.py | huachao2017/goodsdl | 3616d53b90696a97a5d56a064e2a14d484b821d7 | [
"Apache-2.0"
] | 3 | 2018-10-16T09:36:12.000Z | 2019-04-15T03:12:49.000Z | dl/step1/create_onegoods_tf_record.py | huachao2017/goodsdl | 3616d53b90696a97a5d56a064e2a14d484b821d7 | [
"Apache-2.0"
] | null | null | null | dl/step1/create_onegoods_tf_record.py | huachao2017/goodsdl | 3616d53b90696a97a5d56a064e2a14d484b821d7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert the Oxford good dataset to TFRecord for object_detection.
See: O. M. Parkhi, A. Vedaldi, A. Zisserman, C. V. Jawahar
Cats and Dogs
IEEE Conference on Computer Vision and Pattern Recognition, 2012
http://www.robots.ox.ac.uk/~vgg/data/goods/
Example usage:
./create_good_tf_record --data_dir=/home/user/good \
--output_dir=/home/user/good/output
"""
import hashlib
import io
import logging
import os
import random
import re
from lxml import etree
import PIL.Image
import tensorflow as tf
import shutil
from object_detection.utils import dataset_util
logger = logging.getLogger("dataset")
def get_class_name_from_filename(file_name):
"""Gets the class name from a file.
Args:
file_name: The file name to get the class name from.
ie. "american_pit_bull_terrier_105.jpg"
Returns:
A string of the class name.
"""
return file_name.split('_')[0]
# match = re.match(r'([A-Za-z0-9_]+)(_[0-9]+\.jpg)', file_name, re.I)
# return match.groups()[0]
def dict_to_tf_example(data,
label_map_dict,
example,
index,
ignore_difficult_instances=False):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
label_map_dict: A map from string label names to integers ids.
    example: Full path to the JPEG image for this record.
    index: Integer index used as the image's source_id.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = example
with tf.gfile.GFile(img_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
class_name = '1'
classes_text.append(class_name.encode('utf8'))
classes.append(label_map_dict[class_name])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(str(index).encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
return example
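# Note (descriptive only): dict_to_tf_example normalizes every bounding box by the image
# width/height, so xmin/xmax/ymin/ymax are stored in [0, 1], and it maps every annotated
# object to the single class '1' -- this script builds a one-class ("one goods") dataset.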
def recursive_parse_xml_to_dict(xml):
"""Recursively parses XML contents to python dict.
We assume that `object` tags are the only ones that can appear
multiple times at the same level of a tree.
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
if not xml:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = recursive_parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
def create_tf_record(output_filename,
label_map_dict,
examples):
"""Creates a TFRecord file from examples.
Args:
output_filename: Path to where output file is saved.
label_map_dict: The label map dictionary.
examples: Examples to parse and save to tf record.
"""
writer = tf.python_io.TFRecordWriter(output_filename)
for idx, example in enumerate(examples):
if idx % 100 == 0:
logger.info('On image %d of %d', idx, len(examples))
path = example + '.xml'
if not os.path.exists(path):
logger.warning('Could not find %s, ignoring example.', path)
continue
with tf.gfile.GFile(path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = recursive_parse_xml_to_dict(xml)['annotation']
        # TODO: the image must be a file with a .jpg extension
tf_example = dict_to_tf_example(data, label_map_dict, example + '.jpg', idx)
writer.write(tf_example.SerializeToString())
writer.close()
def create_label_map_file(output_filename,
label_map_dict):
with open(output_filename, 'w') as output:
for key in label_map_dict:
output.write('\nitem {\n')
output.write(" id: {}\n".format(label_map_dict[key]))
output.write(" name: '{}'\n".format(key))
output.write("}\n")
def update_config_file(train_dir,
train_name,
num_classes,
num_steps=200000,
fine_tune_checkpoint_dir=None,
eval_num=100):
file_path, _ = os.path.split(os.path.realpath(__file__))
config_template_file_path = os.path.join(file_path, 'faster_rcnn_nas_goods.config.template')
output_filename = os.path.join(train_dir, train_name, 'faster_rcnn_nas_goods.config')
with open(config_template_file_path, 'r') as file:
data = file.read()
# p = re.compile(r'num_classes: \d+')
    output = re.sub(r'num_classes: \d+', 'num_classes: '+str(num_classes), data)
    output = re.sub(r'# num_steps: \d+', 'num_steps: '+str(num_steps), output)
    output = re.sub(r'num_visualizations: \d+', 'num_visualizations: '+str(eval_num), output)
    output = re.sub(r'num_examples: \d+', 'num_examples: '+str(eval_num), output)
if fine_tune_checkpoint_dir is not None:
# restore from tensorflow model or pre train model
output = re.sub('fine_tune_checkpoint: ""', 'fine_tune_checkpoint:"'+fine_tune_checkpoint_dir+'/model.ckpt"', output)
output = re.sub('PATH_TO_BE_CONFIGURED_TRAIN', os.path.join(train_dir, train_name), output)
with open(output_filename, 'w') as file:
file.write(output)
def read_examples_list_and_label_map_and_classnames(path, additional_path=None):
"""返回所有图片文件路径"""
logger.info('dataset path:{},{}'.format(path,additional_path))
    dirlist = os.listdir(path)  # list all directories and files under the folder
examples = []
addition_examples = []
class_names = []
for i in range(0, len(dirlist)):
class_dir = os.path.join(path, dirlist[i])
if os.path.isdir(class_dir):
class_names.append(dirlist[i])
filelist = os.listdir(class_dir)
for j in range(0, len(filelist)):
image_path = os.path.join(class_dir, filelist[j])
example, ext = os.path.splitext(image_path)
if ext == ".jpg" and os.path.isfile(example + '.xml'):
examples.append(example)
if additional_path is not None:
additional_dirlist = os.listdir(additional_path)
for i in range(0, len(additional_dirlist)):
class_dir = os.path.join(additional_path, additional_dirlist[i])
if os.path.isdir(class_dir):
filelist = os.listdir(class_dir)
for j in range(0, len(filelist)):
image_path = os.path.join(class_dir, filelist[j])
example, ext = os.path.splitext(image_path)
if ext == ".jpg" and os.path.isfile(example + '.xml'):
addition_examples.append(example)
    return examples, addition_examples, {'1': 1}, sorted(class_names)
def prepare_train(data_dir, train_dir, train_name, fine_tune_checkpoint_dir, local_fineture, additional_data_dir=None):
normal_examples_list, addition_examples, label_map_dict, class_names = read_examples_list_and_label_map_and_classnames(data_dir,additional_data_dir)
logger.info(label_map_dict)
# Test images are not included in the downloaded data set, so we shall perform
# our own split.
if additional_data_dir is not None:
        # evaluation must always include addition_examples
val_examples = addition_examples
else:
val_examples = normal_examples_list
normal_examples_list.extend(addition_examples)
# normal_examples_list.extend(addition_examples)
train_examples = normal_examples_list
logger.info('%d training and %d validation examples.',
len(train_examples), len(val_examples))
random.seed(42)
random.shuffle(train_examples)
output_dir = os.path.join(train_dir, train_name)
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
train_output_path = os.path.join(output_dir, 'goods_train.record')
val_output_path = os.path.join(output_dir, 'goods_val.record')
label_map_file_path = os.path.join(output_dir, 'goods_label_map.pbtxt')
create_tf_record(train_output_path, label_map_dict, train_examples)
create_tf_record(val_output_path, label_map_dict, val_examples)
create_label_map_file(label_map_file_path, label_map_dict)
# class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
from datasets import dataset_utils
dataset_utils.write_label_file(labels_to_class_names, output_dir)
    # set the number of training steps per image
if additional_data_dir is not None:
per_pic_train_counts = 100
else:
per_pic_train_counts = 1000
if local_fineture:
per_pic_train_counts = 20
update_config_file(train_dir, train_name, len(label_map_dict), num_steps=len(train_examples)*per_pic_train_counts, fine_tune_checkpoint_dir=fine_tune_checkpoint_dir, eval_num=len(val_examples))
return label_map_dict
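# Hypothetical usage sketch (paths and the run name below are illustrative only):
#   prepare_train('/data/goods', '/data/train', 'run1',
#                 fine_tune_checkpoint_dir=None, local_fineture=False)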
def prepare_rawdata_update_train(data_dir, train_dir, train_name):
# @remove: this function should be removed
normal_examples_list, addition_examles_list, label_map_dict, _ = read_examples_list_and_label_map_and_classnames(data_dir)
logger.info(label_map_dict)
# Test images are not included in the downloaded data set, so we shall perform
# our own split.
normal_examples_list.extend(addition_examles_list)
random.seed(42)
random.shuffle(normal_examples_list)
train_examples = normal_examples_list
logger.info('%d training examples.',
len(train_examples))
output_dir = os.path.join(train_dir, train_name)
train_output_path = os.path.join(output_dir, 'goods_train.record')
import datetime
now = datetime.datetime.now()
shutil.move(train_output_path, train_output_path+'.'+ str(now.time()))
create_tf_record(train_output_path, label_map_dict, train_examples)
# class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# Finally, write the labels file:
    # set the number of additional training steps (100 per image)
config_file_path = os.path.join(output_dir, 'faster_rcnn_nas_goods.config')
checkpoint = tf.train.latest_checkpoint(output_dir)
with open(config_file_path, 'r') as file:
data = file.read()
# p = re.compile(r'num_classes: \d+')
    output = re.sub(r'# num_steps: \d+', 'num_steps: '+str(106300+len(train_examples)*100), data)
shutil.move(config_file_path, config_file_path+'.'+ str(now.time()))
with open(config_file_path, 'w') as file:
file.write(output)
| 40.449275 | 197 | 0.670799 |
73e5f4bb5d6e7aa955d952943cdd700d42dec584 | 7,817 | py | Python | NGram/prediction_2104/oyster_reader.py | mbc96325/IOHMM-for-individual-mobility-prediction | b2d346a12b902581641a0afa8e694ee8ef158195 | [
"MIT"
] | 1 | 2021-09-02T14:16:14.000Z | 2021-09-02T14:16:14.000Z | NGram/prediction_2104/oyster_reader.py | mbc96325/IOHMM-for-individual-mobility-prediction | b2d346a12b902581641a0afa8e694ee8ef158195 | [
"MIT"
] | null | null | null | NGram/prediction_2104/oyster_reader.py | mbc96325/IOHMM-for-individual-mobility-prediction | b2d346a12b902581641a0afa8e694ee8ef158195 | [
"MIT"
] | 1 | 2021-09-20T02:59:47.000Z | 2021-09-20T02:59:47.000Z | import csv
from trip import trip
from user import user
class panelDataReader:
def __init__(self, file):
self.reader = csv.reader(open(file), delimiter=",")
self.header = self.reader.next()
self.lastRecord = None
def nextRecord(self):
try:
line = next(self.reader)
except StopIteration:
line = None
self.lastRecord = line
return line
def nextUserRecords(self):
userIndex = self.header.index("prestigeid")
records = []
if self.lastRecord is None:
if self.nextRecord() is None:
return None
firstRecord = self.lastRecord
records.append(firstRecord)
while True:
prevID = self.lastRecord[userIndex]
nextRecord = self.nextRecord()
if nextRecord is not None and prevID == nextRecord[userIndex]:
records.append(nextRecord)
else:
break
if len(records) > 0:
return records
else:
return None
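# Illustrative usage (the file path comes from the __main__ block below):
#   reader = panelDataReader('../data/oysterdata.csv')
#   records = reader.nextUserRecords()  # all raw transactions for one card holder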
def readPanelData(file, stationDictPath=None):
print 'Importing users...'
if stationDictPath is not None:
stationDict = stationNameDict(stationDictPath)
else:
stationDict = None
panelReader = panelDataReader(file)
# indices for later use
headers = panelReader.header
userIndex = headers.index("prestigeid")
dayIndex = headers.index("daykey")
transIndex = headers.index("transactiontype")
timeIndex = headers.index("transactiontime")
entryIndex = headers.index("stationoffirstentrykey")
exitIndex = headers.index("stationofexitkey")
X = []
counter = 0
userRecords = panelReader.nextUserRecords()
# If there is more data to be processed
while userRecords is not None:
userID = userRecords[0][userIndex] # new user ID
userTripList = [] # new user tripList
        prevTap = None
        newTrip = None  # initialize; it is checked below before the first assignment
for i in xrange(len(userRecords)):
tap = userRecords[i]
# convert day and time to integer
tap[dayIndex] = int(tap[dayIndex])
tap[timeIndex] = int(tap[timeIndex])
            # If transaction time earlier than 3am, count it as previous day
if tap[timeIndex] < 60 * 3:
tap[dayIndex] -= 1
tap[timeIndex] += 60 * 24
# Add new trip
if tap[transIndex] == "61":
if prevTap is None:
prevTap = tap
continue # go to nex iteration to see if it is a complete trip
else:
# a new trip missing tap-out
newTrip = trip(day=prevTap[dayIndex],
o=prevTap[entryIndex],
d=prevTap[exitIndex], # outStation="-1"
ot=prevTap[timeIndex],
dt=-1)
prevTap = tap
elif tap[transIndex] == "62":
if prevTap is None:
                    # a new trip missing tap-in
newTrip = trip(day=tap[dayIndex],
o=tap[entryIndex],
d=tap[exitIndex],
ot=-1,
dt=tap[timeIndex])
elif prevTap[dayIndex] == tap[dayIndex] and \
prevTap[entryIndex] == tap[entryIndex]:
# a complete new trip
newTrip = trip(day=tap[dayIndex],
o=tap[entryIndex],
d=tap[exitIndex],
ot=prevTap[timeIndex],
dt=tap[timeIndex])
prevTap = None
if newTrip is not None:
# Exclude trips with same entry and exit stations
if newTrip.o == newTrip.d:
continue
                # Exclude trips with invalid hours
if newTrip.getHour() > 26 or newTrip.getHour() < 3:
continue
# Exclude repetitive records
if len(userTripList) > 0:
if newTrip.day == userTripList[-1].day and \
newTrip.getTime() == userTripList[-1].getTime():
continue
# Convert station id to station name
if stationDict is not None:
newTrip.o = stationDict[newTrip.o]
newTrip.d = stationDict[newTrip.d]
# Exclude incomplete trips with unknown OD pairs
if newTrip.incomplete() is True or\
newTrip.o == "Unknown" or newTrip.d == "Unknown":
newTrip = None
continue
userTripList.append(newTrip) # Add the new trip to the user's list
newTrip = None
# Define a new user and add to a user list
newUser = user(userID, tripList=userTripList)
X.append(newUser)
# Print progress (number of users processed...)
counter += 1
if counter % 100 == 0:
print counter
if counter == 1000:
return X
# Get next user's transactions, and start over
userRecords = panelReader.nextUserRecords()
return X
def readPanelData2(file, stationDictPath=None, limit=None):
print 'Importing users...'
if stationDictPath is not None:
stationDict = stationNameDict(stationDictPath)
else:
stationDict = None
panelReader = panelDataReader(file)
# indices for later use
headers = panelReader.header
userIndex = headers.index("prestigeid")
dayIndex = headers.index("daykey")
transIndex = headers.index("transactiontype")
entryTimeIndex = headers.index("timeoffirstentry")
exitTimeIndex = headers.index("transactiontime")
entryIndex = headers.index("stationoffirstentry")
exitIndex = headers.index("nlc")
X = []
counter = 0
userRecords = panelReader.nextUserRecords()
# If there is more data to be processed
while userRecords is not None:
userID = userRecords[0][userIndex] # new user ID
userTripList = [] # new user tripList
for i in xrange(len(userRecords)):
tap = userRecords[i]
if tap[transIndex] == "62":
# convert day and time to integer
daykey = int(tap[dayIndex])
inTime = int(tap[entryTimeIndex])
outTime = int(tap[exitTimeIndex])
inStation = tap[entryIndex]
outStation = tap[exitIndex]
# Add new trip
if inStation != "-1" and outStation != "-1":
                    # If transaction time earlier than 3am, count it as previous day
# if inTime < 60 * 3:
# daykey -= 1
# inTime += 60 * 24
# outTime += 60 * 24
# define a new trip object
newTrip = trip(day=daykey,
o=inStation,
d=outStation,
ot=inTime,
dt=outTime)
# Exclude trips with same entry and exit stations
if newTrip.o == newTrip.d:
continue
                    # Exclude trips with invalid hours
if newTrip.getHour() > 26 or newTrip.getHour() < 3:
continue
# Convert station id to station name
if stationDict is not None:
newTrip.o = stationDict[newTrip.o]
newTrip.d = stationDict[newTrip.d]
# Exclude incomplete trips with unknown OD pairs
if newTrip.incomplete() is True or\
newTrip.o == "Unknown" or newTrip.d == "Unknown":
newTrip = None
continue
# If two trips with same starting time, use the latest one
if len(userTripList) > 0:
if newTrip.day == userTripList[-1].day and \
newTrip.getAbsTime() == userTripList[-1].getAbsTime():
userTripList[-1] = newTrip
continue
# Data errors
if len(userTripList) > 0:
if newTrip.day == userTripList[-1].day and \
newTrip.getAbsTime() < userTripList[-1].getAbsTime():
continue
userTripList.append(newTrip) # Add the new trip to the user's list
# Define a new user and add to a user list
        # the original bare 'userTripList.sort' was a no-op; chronological order is assumed here
        userTripList.sort(key=lambda t: (t.day, t.getAbsTime()))
newUser = user(userID, tripList=userTripList)
X.append(newUser)
# Print progress (number of users processed...)
counter += 1
if counter % 10000 == 0:
print counter
if limit:
if counter >= limit:
return X
# Get next user's transactions, and start over
userRecords = panelReader.nextUserRecords()
return X
def stationNameDict(filepath):
rd = csv.reader(open(filepath, 'rU'), delimiter=",")
Dict = {}
for s in rd:
Dict[s[0]] = s[-1]
# Special situations
# Dict["-1"] = "Unknown"
# Dict["0"] = "Unknown"
return Dict
def filter_by_userid(filepath, ids):
rd = csv.reader(open(filepath, 'rU'))
headers = rd.next()
userIndex = headers.index("prestigeid")
filename = '../output/sampleUsers.csv'
wt = csv.writer(open(filename, 'wt'))
wt.writerow(headers)
for row in rd:
userid = row[userIndex]
if userid in ids:
wt.writerow(row)
if __name__ == '__main__':
dataFile = "../data/oysterdata.csv"
# ids = ['1797223601', '1537232329']
ids = ['1837931289', '1297299286']
filter_by_userid(dataFile, ids)
| 29.277154 | 72 | 0.667008 |
73e6159ad96bf86c318bb44aefabc8909dc235a4 | 9,734 | py | Python | dbt/adapters/mysql/impl.py | avaitla/dbt-mysql | d5f09a4b75441710503819ebac31c52e67118ad2 | [
"Apache-2.0"
] | null | null | null | dbt/adapters/mysql/impl.py | avaitla/dbt-mysql | d5f09a4b75441710503819ebac31c52e67118ad2 | [
"Apache-2.0"
] | null | null | null | dbt/adapters/mysql/impl.py | avaitla/dbt-mysql | d5f09a4b75441710503819ebac31c52e67118ad2 | [
"Apache-2.0"
] | null | null | null | from concurrent.futures import Future
from dataclasses import asdict
from typing import Optional, List, Dict, Any, Iterable
import agate
import dbt
import dbt.exceptions
from dbt.adapters.base.impl import catch_as_completed
from dbt.adapters.sql import SQLAdapter
from dbt.adapters.mysql import MySQLConnectionManager
from dbt.adapters.mysql import MySQLRelation
from dbt.adapters.mysql import MySQLColumn
from dbt.adapters.base import BaseRelation
from dbt.clients.agate_helper import DEFAULT_TYPE_TESTER
from dbt.events import AdapterLogger
from dbt.utils import executor
logger = AdapterLogger("mysql")
LIST_SCHEMAS_MACRO_NAME = 'list_schemas'
LIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'
class MySQLAdapter(SQLAdapter):
Relation = MySQLRelation
Column = MySQLColumn
ConnectionManager = MySQLConnectionManager
@classmethod
def date_function(cls):
return 'current_date()'
@classmethod
def convert_datetime_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
return "timestamp"
def quote(self, identifier):
return '`{}`'.format(identifier)
def list_relations_without_caching(
self, schema_relation: MySQLRelation
) -> List[MySQLRelation]:
kwargs = {'schema_relation': schema_relation}
try:
results = self.execute_macro(
LIST_RELATIONS_MACRO_NAME,
kwargs=kwargs
)
except dbt.exceptions.RuntimeException as e:
errmsg = getattr(e, 'msg', '')
if f"MySQL database '{schema_relation}' not found" in errmsg:
return []
else:
description = "Error while retrieving information about"
logger.debug(f"{description} {schema_relation}: {e.msg}")
return []
relations = []
for row in results:
if len(row) != 4:
raise dbt.exceptions.RuntimeException(
"Invalid value from "
f'"mysql__list_relations_without_caching({kwargs})", '
f'got {len(row)} values, expected 4'
)
_, name, _schema, relation_type = row
relation = self.Relation.create(
schema=_schema,
identifier=name,
type=relation_type
)
relations.append(relation)
return relations
def get_columns_in_relation(self, relation: Relation) -> List[MySQLColumn]:
rows: List[agate.Row] = super().get_columns_in_relation(relation)
return self.parse_show_columns(relation, rows)
def _get_columns_for_catalog(
self, relation: MySQLRelation
) -> Iterable[Dict[str, Any]]:
columns = self.get_columns_in_relation(relation)
for column in columns:
# convert MySQLColumns into catalog dicts
as_dict = asdict(column)
as_dict['column_name'] = as_dict.pop('column', None)
as_dict['column_type'] = as_dict.pop('dtype')
as_dict['table_database'] = None
yield as_dict
def get_relation(
self, database: str, schema: str, identifier: str
) -> Optional[BaseRelation]:
if not self.Relation.include_policy.database:
database = None
return super().get_relation(database, schema, identifier)
def parse_show_columns(
self,
relation: Relation,
raw_rows: List[agate.Row]
) -> List[MySQLColumn]:
return [MySQLColumn(
table_database=None,
table_schema=relation.schema,
table_name=relation.name,
table_type=relation.type,
table_owner=None,
table_stats=None,
column=column.column,
column_index=idx,
dtype=column.dtype,
) for idx, column in enumerate(raw_rows)]
def get_catalog(self, manifest):
schema_map = self._get_catalog_schemas(manifest)
if len(schema_map) > 1:
dbt.exceptions.raise_compiler_error(
f'Expected only one database in get_catalog, found '
f'{list(schema_map)}'
)
with executor(self.config) as tpe:
futures: List[Future[agate.Table]] = []
for info, schemas in schema_map.items():
for schema in schemas:
futures.append(tpe.submit_connected(
self, schema,
self._get_one_catalog, info, [schema], manifest
))
catalogs, exceptions = catch_as_completed(futures)
return catalogs, exceptions
def _get_one_catalog(
self, information_schema, schemas, manifest,
) -> agate.Table:
if len(schemas) != 1:
dbt.exceptions.raise_compiler_error(
f'Expected only one schema in mysql _get_one_catalog, found '
f'{schemas}'
)
database = information_schema.database
schema = list(schemas)[0]
columns: List[Dict[str, Any]] = []
for relation in self.list_relations(database, schema):
logger.debug("Getting table schema for relation {}", relation)
columns.extend(self._get_columns_for_catalog(relation))
return agate.Table.from_object(
columns, column_types=DEFAULT_TYPE_TESTER
)
def check_schema_exists(self, database, schema):
results = self.execute_macro(
LIST_SCHEMAS_MACRO_NAME,
kwargs={'database': database}
)
exists = True if schema in [row[0] for row in results] else False
return exists
# Methods used in adapter tests
def update_column_sql(
self,
dst_name: str,
dst_column: str,
clause: str,
where_clause: Optional[str] = None,
) -> str:
clause = f'update {dst_name} set {dst_column} = {clause}'
if where_clause is not None:
clause += f' where {where_clause}'
return clause
def timestamp_add_sql(
self, add_to: str, number: int = 1, interval: str = 'hour'
) -> str:
# for backwards compatibility, we're compelled to set some sort of
        # default. A lot of searching has led me to believe that the
# '+ interval' syntax used in postgres/redshift is relatively common
# and might even be the SQL standard's intention.
return f"date_add({add_to}, interval {number} {interval})"
def string_add_sql(
self, add_to: str, value: str, location='append',
) -> str:
if location == 'append':
return f"concat({add_to}, '{value}')"
elif location == 'prepend':
return f"concat({value}, '{add_to}')"
else:
raise dbt.exceptions.RuntimeException(
f'Got an unexpected location value of "{location}"'
)
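        # Illustrative: string_add_sql("col", "_x") -> "concat(col, '_x')";
        # with location='prepend' the two arguments are swapped.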
def get_rows_different_sql(
self,
relation_a: MySQLRelation,
relation_b: MySQLRelation,
column_names: Optional[List[str]] = None,
) -> str:
# This method only really exists for test reasons
names: List[str]
if column_names is None:
columns = self.get_columns_in_relation(relation_a)
names = sorted((self.quote(c.name) for c in columns))
else:
names = sorted((self.quote(n) for n in column_names))
alias_a = "A"
alias_b = "B"
columns_csv_a = ', '.join([f"{alias_a}.{name}" for name in names])
columns_csv_b = ', '.join([f"{alias_b}.{name}" for name in names])
join_condition = " AND ".join(
[f"{alias_a}.{name} = {alias_b}.{name}" for name in names]
)
first_column = names[0]
# MySQL doesn't have an EXCEPT or MINUS operator, so we need to simulate it
COLUMNS_EQUAL_SQL = '''
WITH
a_except_b as (
SELECT
{columns_a}
FROM {relation_a} as {alias_a}
LEFT OUTER JOIN {relation_b} as {alias_b}
ON {join_condition}
WHERE {alias_b}.{first_column} is null
),
b_except_a as (
SELECT
{columns_b}
FROM {relation_b} as {alias_b}
LEFT OUTER JOIN {relation_a} as {alias_a}
ON {join_condition}
WHERE {alias_a}.{first_column} is null
),
diff_count as (
SELECT
1 as id,
COUNT(*) as num_missing FROM (
SELECT * FROM a_except_b
UNION ALL
SELECT * FROM b_except_a
) as missing
),
table_a as (
SELECT COUNT(*) as num_rows FROM {relation_a}
),
table_b as (
SELECT COUNT(*) as num_rows FROM {relation_b}
),
row_count_diff as (
SELECT
1 as id,
table_a.num_rows - table_b.num_rows as difference
FROM table_a, table_b
)
SELECT
row_count_diff.difference as row_count_difference,
diff_count.num_missing as num_mismatched
FROM row_count_diff
INNER JOIN diff_count ON row_count_diff.id = diff_count.id
'''.strip()
sql = COLUMNS_EQUAL_SQL.format(
alias_a=alias_a,
alias_b=alias_b,
first_column=first_column,
columns_a=columns_csv_a,
columns_b=columns_csv_b,
join_condition=join_condition,
relation_a=str(relation_a),
relation_b=str(relation_b),
)
return sql
| 34.034965 | 83 | 0.585576 |
73e6164ab2fcaaa33308e50702e8d93b8741e5f4 | 1,374 | py | Python | src/visitpy/visit_utils/src/moab.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 226 | 2018-12-29T01:13:49.000Z | 2022-03-30T19:16:31.000Z | src/visitpy/visit_utils/src/moab.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 5,100 | 2019-01-14T18:19:25.000Z | 2022-03-31T23:08:36.000Z | src/visitpy/visit_utils/src/moab.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | [
"BSD-3-Clause"
] | 84 | 2019-01-24T17:41:50.000Z | 2022-03-10T10:01:46.000Z | #!/usr/bin/env python
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
#*****************************************************************************
"""
file: moab.py
description:
Provides a python command for msub submission.
"""
import os
from visit_utils.common import *
class MsubError(Exception):
def __init__(self,emsg):
self.msg = "<msub error:> " + emsg
def msub(cmd,rmin,nnodes=1,mach=None,part="pbatch",bank="bdivp",rdir=None,obase=None,depend=None):
if mach is None:
mach = hostname(False)
# create output file name
if obase is None:
ctoks = cmd.split()
sname = os.path.split(ctoks[0])[1]
obase = sname
ofile = "out.moab.%s.%s.%s.txt" % (obase,hostname(),timestamp())
xcmd = "msub -o %s -l nodes=%d -l walltime=%s:00 " % (ofile,nnodes, str(rmin))
if not rdir is None:
xcmd += " -d %s " % (os.path.abspath(rdir))
if not depend is None:
xcmd += "-l depend=%s " % depend
xcmd += "-q %s -A %s %s" % (part,bank,cmd)
ret,out = sexe(xcmd,ret_output=True,echo=True)
if ret == 0:
jid = int(out.split(" ")[-1].strip())
return jid, ofile
else:
raise MsubError(out) | 34.35 | 98 | 0.586608 |
73e617742938fe8fab4f5cd358b896a4f5b41dbd | 209 | py | Python | ashbee/ashbee/doctype/gratuity_claim/test_gratuity_claim.py | iRaySpace/ashbee | c848bfa19c0ac6d2cdc1b31ca4488a2c98e573ef | [
"MIT"
] | 1 | 2019-05-15T12:55:06.000Z | 2019-05-15T12:55:06.000Z | ashbee/ashbee/doctype/gratuity_claim/test_gratuity_claim.py | iRaySpace/ashbee | c848bfa19c0ac6d2cdc1b31ca4488a2c98e573ef | [
"MIT"
] | null | null | null | ashbee/ashbee/doctype/gratuity_claim/test_gratuity_claim.py | iRaySpace/ashbee | c848bfa19c0ac6d2cdc1b31ca4488a2c98e573ef | [
"MIT"
] | 3 | 2019-05-15T12:55:26.000Z | 2019-12-31T14:34:49.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, 9t9IT and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestGratuityClaim(unittest.TestCase):
pass
| 19 | 44 | 0.770335 |
73e61db53a4e78c4849b1e3a4f9b05e0f83da4b4 | 1,348 | py | Python | portal/apps/dashboard/management/commands/subscriber_data.py | Artis-Physis/utopia-cms | 5cb8d941d0b2df53fddc566a52e9d3baee4a007e | [
"BSD-3-Clause"
] | 8 | 2020-12-15T17:11:08.000Z | 2021-12-13T22:08:33.000Z | portal/apps/dashboard/management/commands/subscriber_data.py | Artis-Physis/utopia-cms | 5cb8d941d0b2df53fddc566a52e9d3baee4a007e | [
"BSD-3-Clause"
] | 28 | 2020-12-15T17:34:03.000Z | 2022-02-01T04:09:10.000Z | portal/apps/dashboard/management/commands/subscriber_data.py | Artis-Physis/utopia-cms | 5cb8d941d0b2df53fddc566a52e9d3baee4a007e | [
"BSD-3-Clause"
] | 7 | 2020-12-15T19:59:17.000Z | 2021-11-24T16:47:06.000Z | # -*- coding: utf-8 -*-
from os.path import join
from unicodecsv import writer
from django.core.management.base import BaseCommand
from django.conf import settings
from core.models import Article
from thedaily.models import Subscriber
from automatic_mail import latest_activity
class Command(BaseCommand):
help = 'Generates a CSV containing some data for all subscribers'
def handle(self, *args, **options):
w = writer(open(
join(settings.DASHBOARD_REPORTS_PATH, 'subscriber_data.csv'), 'w'))
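        # Row layout written below: id, contact_id, full name, email,
        # date_joined, is_active, viewed sections, latest activity, newsletters.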
for s in Subscriber.objects.select_related(
'user', 'newsletters').filter(
user__isnull=False, user__is_staff=False).iterator():
try:
viewed_articles = s.user.viewed_articles_core
viewed_sections = u', '.join(set.union(*[
set(a.sections.distinct().values_list('name', flat=True))
for a in viewed_articles.all()]))
except Article.DoesNotExist:
viewed_sections = u''
w.writerow([
s.id, s.contact_id, s.user.get_full_name() or s.user.username,
s.user.email, s.user.date_joined, s.user.is_active,
viewed_sections, latest_activity(s.user),
u', '.join(s.newsletters.values_list('name', flat=True))])
| 39.647059 | 79 | 0.626113 |
73e62338d35911e63f20d83a015a99ee32198695 | 910 | py | Python | sapp/pipeline/trim_trace_graph.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 74 | 2020-12-18T20:04:30.000Z | 2022-03-22T22:26:02.000Z | sapp/pipeline/trim_trace_graph.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 61 | 2020-12-21T21:33:05.000Z | 2022-01-27T21:22:20.000Z | sapp/pipeline/trim_trace_graph.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 20 | 2021-04-08T01:28:53.000Z | 2022-03-22T22:26:05.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
from ..trace_graph import TraceGraph
from ..trimmed_trace_graph import TrimmedTraceGraph
from . import PipelineStep, Summary
log: logging.Logger = logging.getLogger("sapp")
class TrimTraceGraph(PipelineStep[TraceGraph, TraceGraph]):
def run(self, input: TraceGraph, summary: Summary) -> Tuple[TraceGraph, Summary]:
if summary.get("affected_files") is None:
return input, summary
log.info("Trimming graph to affected files.")
trimmed_graph = TrimmedTraceGraph(
summary["affected_files"], summary.get("affected_issues_only", False)
)
trimmed_graph.populate_from_trace_graph(input)
return trimmed_graph, summary
| 33.703704 | 85 | 0.72967 |
73e6245b4a279b327060a5be38a9264498866096 | 8,029 | py | Python | crypten/communicator/communicator.py | knarflin/CrypTen | 6a06dc8cd52200f40a9fc520be0066bd0dea6b14 | [
"MIT"
] | 1 | 2019-12-06T06:08:40.000Z | 2019-12-06T06:08:40.000Z | crypten/communicator/communicator.py | knarflin/CrypTen | 6a06dc8cd52200f40a9fc520be0066bd0dea6b14 | [
"MIT"
] | 10 | 2021-02-03T16:45:53.000Z | 2021-04-07T16:24:17.000Z | crypten/communicator/communicator.py | knarflin/CrypTen | 6a06dc8cd52200f40a9fc520be0066bd0dea6b14 | [
"MIT"
] | 1 | 2021-02-10T06:15:02.000Z | 2021-02-10T06:15:02.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import timeit
import torch
class Communicator:
"""
Abstract class defining the functions that a Communicator should implement.
"""
# Determines whether communicators log communication stats
__verbosity = False
@classmethod
def is_verbose(cls):
return cls.__verbosity
@classmethod
def set_verbosity(cls, verbosity):
assert isinstance(verbosity, bool), "Verbosity must be a boolean value"
cls.__verbosity = verbosity
@classmethod
def is_initialized(cls):
"""Returns whether the communicator has been initialized"""
raise NotImplementedError("is_initialized is not implemented")
@classmethod
def get(cls):
"""Returns an instance of the communicator"""
raise NotImplementedError("get is not implemented")
@classmethod
def initialize(cls, **kwargs):
"""Initializes the communicator. Call this function before using it."""
raise NotImplementedError("initialize is not implemented")
@classmethod
def shutdown(cls):
raise NotImplementedError("shutdown is not implemented")
def send(self, tensor, dst):
"""Sends the specified tensor to the destination dst."""
raise NotImplementedError("send is not implemented")
def recv(self, tensor, src=None):
"""Receives a tensor from an (optional) source src."""
raise NotImplementedError("recv is not implemented")
def scatter(self, scatter_list, src, size=None, async_op=False):
"""Scatters a list of tensors to all parties."""
raise NotImplementedError("scatter is not implemented")
def reduce(self, tensor, op=None, async_op=False):
"""Reduces the tensor data across all parties."""
raise NotImplementedError("tensor is not implemented")
def all_reduce(self, tensor, op=None, async_op=False):
"""Reduces the tensor data across all parties; all get the final result."""
raise NotImplementedError("tensor is not implemented")
def gather(self, tensor, dst, async_op=False):
"""Gathers a list of tensors in a single party."""
raise NotImplementedError("gather is not implemented")
def all_gather(self, tensor, async_op=False):
"""Gathers tensors from all parties in a list."""
raise NotImplementedError("all_gather is not implemented")
def broadcast(self, tensor, src, async_op=False):
"""Broadcasts the tensor to all parties."""
raise NotImplementedError("broadcast is not implemented")
def barrier(self):
"""Synchronizes all processes.
This collective blocks processes until the whole group enters this
function.
"""
raise NotImplementedError("barrier is not implemented")
def send_obj(self, obj, dst):
"""Sends the specified object to the destination `dst`."""
raise NotImplementedError("send_obj is not implemented")
def recv_obj(self, src):
"""Receives a tensor from a source src."""
raise NotImplementedError("recv_obj is not implemented")
def broadcast_obj(self, obj, src):
"""Broadcasts a given object to all parties."""
raise NotImplementedError("broadcast_obj is not implemented")
def get_world_size(self):
"""Returns the size of the world."""
raise NotImplementedError("get_world_size is not implemented")
def get_rank(self):
"""Returns the rank of the current process."""
raise NotImplementedError("get_rank is not implemented")
def set_name(self):
"""Sets the party name of the current process."""
raise NotImplementedError("set_name is not implemented")
def get_name(self):
"""Returns the party name of the current process."""
raise NotImplementedError("get_name is not implemented")
def reset_communication_stats(self):
"""Resets communication statistics."""
raise NotImplementedError("reset_communication_stats is not implemented")
def print_communication_stats(self):
"""Prints communication statistics."""
raise NotImplementedError("print_communication_stats is not implemented")
def _log_communication(self, nelement):
"""Updates log of communication statistics."""
raise NotImplementedError("_log_communication is not implemented")
def reset_communication_stats(self):
"""Resets communication statistics."""
self.comm_rounds = 0
self.comm_bytes = 0
self.comm_time = 0
def print_communication_stats(self):
"""Prints communication statistics."""
logging.info("====Communication Stats====")
logging.info("Rounds: {}".format(self.comm_rounds))
logging.info("Bytes : {}".format(self.comm_bytes))
logging.info("Comm time: {}".format(self.comm_time))
def _log_communication(self, nelement):
"""Updates log of communication statistics."""
self.comm_rounds += 1
self.comm_bytes += nelement * self.BYTES_PER_ELEMENT
def _log_communication_time(self, comm_time):
self.comm_time += comm_time
def get_generator(self, idx, device=None):
"""
Get the corresponding RNG generator, as specified by its index and device
Args:
idx: The index of the generator, can be either 0 or 1
device: The device that the generator lives in.
"""
if device is None:
device = torch.device("cpu")
else:
device = torch.device(device)
if idx == 0:
if device.type == "cuda":
assert hasattr(
self, "g0_cuda"
), "Generator g0_cuda is not initialized, call crypten.init() first"
return self.g0_cuda
else:
assert hasattr(
self, "g0"
), "Generator g0 is not initialized, call crypten.init() first"
return self.g0
elif idx == 1:
if device.type == "cuda":
assert hasattr(
self, "g1_cuda"
), "Generator g1_cuda is not initialized, call crypten.init() first"
return self.g1_cuda
else:
assert hasattr(
self, "g1"
), "Generator g1 is not initialized, call crypten.init() first"
return self.g1
else:
raise RuntimeError(f"Generator idx {idx} out of bounds.")
def _logging(func):
"""Decorator that performs logging of communication statistics."""
def logging_wrapper(self, *args, **kwargs):
# TODO: Replace this
# - hacks the inputs into some of the functions for world_size 1:
if self.get_world_size() < 2:
if func.__name__ in ["gather", "all_gather"]:
return [args[0]]
elif len(args) > 0:
return args[0]
# only log if needed:
if self.is_verbose():
if func.__name__ == "barrier":
                self._log_communication(0)  # barrier communicates no tensor elements
elif func.__name__ == "scatter": # N - 1 tensors communicated
self._log_communication(args[0][0].nelement() * (len(args[0]) - 1))
elif "batched" in kwargs and kwargs["batched"]:
nbytes = sum(x.nelement() for x in args[0])
self._log_communication(nbytes)
else: # one tensor communicated
self._log_communication(args[0].nelement())
            tic = timeit.default_timer()
            result = func(self, *args, **kwargs)
            toc = timeit.default_timer()
self._log_communication_time(toc - tic)
return result
return func(self, *args, **kwargs)
return logging_wrapper
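# Illustrative only: concrete communicators are expected to apply @_logging to
# their collectives so that verbose mode records rounds, bytes and time, e.g.
#
#   class MyCommunicator(Communicator):
#       @_logging
#       def broadcast(self, tensor, src, async_op=False):
#           ...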
| 35.526549 | 84 | 0.626977 |
73e62d052e7c8c432946e1da0cce302625659ce7 | 434 | py | Python | src/clusto/drivers/devices/appliance/basicappliance.py | thekad/clusto | c141ea3ef4931c6a21fdf42845c6e9de5ee08caa | [
"BSD-3-Clause"
] | 216 | 2015-01-10T17:03:25.000Z | 2022-03-24T07:23:41.000Z | src/clusto/drivers/devices/appliance/basicappliance.py | thekad/clusto | c141ea3ef4931c6a21fdf42845c6e9de5ee08caa | [
"BSD-3-Clause"
] | 23 | 2015-01-08T16:51:22.000Z | 2021-03-13T12:56:04.000Z | src/clusto/drivers/devices/appliance/basicappliance.py | thekad/clusto | c141ea3ef4931c6a21fdf42845c6e9de5ee08caa | [
"BSD-3-Clause"
] | 49 | 2015-01-08T00:13:17.000Z | 2021-09-22T02:01:20.000Z |
from clusto.drivers import Device
from clusto.drivers.devices import PortMixin, IPMixin
class BasicAppliance(IPMixin, PortMixin, Device):
"""
Basic appliance Driver
"""
_clusto_type = 'appliance'
_driver_name = 'basicappliance'
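    # Port layout declared below: two NEMA-5 power inlets, one ethernet NIC,
    # and one serial console port.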
_portmeta = { 'pwr-nema-5' : { 'numports':2, },
'nic-eth' : { 'numports':1, },
'console-serial' : { 'numports':1, },
}
| 24.111111 | 55 | 0.56682 |