hexsha stringlengths 40 40 | size int64 2 1.05M | ext stringclasses 9 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 193 | max_stars_repo_name stringlengths 6 109 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses sequence | max_stars_count int64 1 36.6k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 193 | max_issues_repo_name stringlengths 6 109 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses sequence | max_issues_count int64 1 29.8k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 193 | max_forks_repo_name stringlengths 6 109 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses sequence | max_forks_count int64 1 11.2k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.05M | avg_line_length float64 1 404k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72a1840be2b7b6a8d90b8ea7f88260d3394c345 | 390 | py | Python | api/ver1/offices/strings.py | pcf26536/politico-api | 1c9b8755ddad2baf0bfdeab4aa0674e4197a0d7c | ["MIT"] | 1 | 2019-02-22T19:34:32.000Z | 2019-02-22T19:34:32.000Z | api/ver1/offices/strings.py | pcf26536/politico-api | 1c9b8755ddad2baf0bfdeab4aa0674e4197a0d7c | ["MIT"] | null | null | null | api/ver1/offices/strings.py | pcf26536/politico-api | 1c9b8755ddad2baf0bfdeab4aa0674e4197a0d7c | ["MIT"] | 1 | 2019-02-07T22:12:25.000Z | 2019-02-07T22:12:25.000Z |
""" string constants for office module """
fed_type = 'federal'
leg_type = 'legislative'
state_type = 'state'
loc_gov_type = 'local government'
office_type_list = [fed_type, leg_type, state_type, loc_gov_type]
office_id_str = 'Office ID '
office_key = 'office'
prezzo = 'President'
mca = 'Member of County Assembly'
wr = 'Women Representative'
gov = 'Governor'
sen = 'Senator'
| 27.857143 | 66 | 0.715385 |
f72a5a03842d93e765331b1fe8130fe4969fd3ea | 3,462 | py | Python | temperature.py | balena-io-playground/balena-edison-monitoring-artik | d02ba51cda8edcbc0decc80d0dab7f724dc46014 | ["Apache-2.0"] | 2 | 2020-04-25T08:46:41.000Z | 2021-02-11T17:36:27.000Z | temperature.py | balena-io-playground/balena-edison-monitoring-artik | d02ba51cda8edcbc0decc80d0dab7f724dc46014 | ["Apache-2.0"] | null | null | null | temperature.py | balena-io-playground/balena-edison-monitoring-artik | d02ba51cda8edcbc0decc80d0dab7f724dc46014 | ["Apache-2.0"] | null | null | null |
"""
Temperature monitoring with Intel Edison and Samsung ARTIK Cloud
"""
import sys
import os
import time
from math import log
import statistics
from collections import deque
import artikcloud
from artikcloud.rest import ApiException
import pyupm_grove as grove
import mraa
import requests
# Setting credentials from the environmental variables
DEVICE_ID = os.getenv('ARTIKCLOUD_DEVICE_ID')
DEVICE_TOKEN = os.getenv('ARTIKCLOUD_DEVICE_TOKEN')
try:
AVERAGE = int(os.getenv('AVERAGE', 5))
except ValueError:
AVERAGE = 5
finally:
print("INFO: averaging over {} readings".format(AVERAGE))
PERIOD = 1
# Setting up ARTIK Cloud connection
artikcloud.configuration.access_token = DEVICE_TOKEN
# Setting up messaging
messages_api = artikcloud.MessagesApi()
# Create the temperature sensor object using AIO pin 0
temp = grove.GroveTemp(0)
print(temp.name())
led = mraa.Gpio(4)
led.dir(mraa.DIR_OUT)
def reboot_device():
"""Restart application through the resin Supervisor
"""
params = {'apikey': os.getenv('RESIN_SUPERVISOR_API_KEY')}
payload = {'appId': os.getenv('RESIN_APP_ID')}
supervisor_address = os.getenv('RESIN_SUPERVISOR_ADDRESS')
print("Restarting Application")
r = requests.post("{}/v1/reboot".format(supervisor_address), params=params, json=payload)
if r.status_code == 200:
sys.exit(0)
def temp_convert(sensor):
"""Adapted from UPM source code
https://github.com/intel-iot-devkit/upm/blob/4faa71d239f3549556a61df1a9c6f81c3d06bda2/src/grove/grovetemp.cxx#L54-L63
"""
a = sensor.raw_value()
if a < 0:
return -300
m_scale, m_r0, m_b = 1.0, 100000.0, 4275.0
# Apply scale factor after error check
a *= m_scale
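# Convert the raw 10-bit ADC reading to thermistor resistance, then apply the
# B-parameter equation (B = 4275, R0 = 100 kOhm at 298.15 K) to obtain Celsius.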
r = (1023.0-a)*m_r0/a
t = 1.0/(log(r/m_r0)/m_b + 1.0/298.15)-273.15
return t
# Throw away readings to settle down
print("Throw-away readings to settle")
for i in range(5):
celsius = temp_convert(temp)
print("Current temperature: {0:.2f}".format(celsius))
time.sleep(1)
print("Starting proper readings")
i = 0
error_count = 0
readings = deque(maxlen=AVERAGE)
while True:
loopstart = time.time()
celsius = temp_convert(temp)
readings.append(celsius)
meancelsius = statistics.mean(readings)
print("Current temperature: {0:.2f} (mean: {1:.2f})".format(celsius, meancelsius))
if i % 600 == 0:
# Send a new message
message = artikcloud.Message()
message.type = "message"
message.sdid = "{}".format(DEVICE_ID)
message.ts = int(round(time.time() * 1000)) # timestamp, required
message.data = {'Temperature': meancelsius}
try:
response = messages_api.send_message(message)
print(response)
except ApiException as error:
print("API ERROR: {}".format(str(error)))
error_count += 1
except Exception:
error = sys.exc_info()[0]
print("ERROR: {}".format(error))
error_count += 1
else:
error_count = 0
finally:
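# After more than five consecutive failed uploads, reboot the device
# through the Supervisor to try to recover.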
if error_count > 5:
reboot_device()
i = 0
led.write(1)
time.sleep(0.1)
led.write(0)
i += 1
newsleep = (loopstart + PERIOD) - time.time()
if newsleep < 0:
print("WARNING: loop took {}s while period is {}!".format(PERIOD - newsleep, PERIOD))
else:
time.sleep(newsleep)
| 29.589744 | 121 | 0.644136 |
f72a71fdb4445df86e6a963308c53723cb7372ed | 43 | py | Python | src/util/__init__.py | megemini/DataCastle2017 | 261134f760d8c1bbfc3e65e1362b7710e601947d | ["MIT"] | null | null | null | src/util/__init__.py | megemini/DataCastle2017 | 261134f760d8c1bbfc3e65e1362b7710e601947d | ["MIT"] | null | null | null | src/util/__init__.py | megemini/DataCastle2017 | 261134f760d8c1bbfc3e65e1362b7710e601947d | ["MIT"] | null | null | null |
# import pandas as pd
# import numpy as np
| 14.333333 | 21 | 0.72093 |
f72ad5b39fcaee399cd011abf25e5fda0c0342a6 | 24,914 | py | Python | jina/flow/mixin/async_crud.py | liushuigs/jina | b3550e901b2a340924330b5ba2801603e493c933 | ["Apache-2.0"] | null | null | null | jina/flow/mixin/async_crud.py | liushuigs/jina | b3550e901b2a340924330b5ba2801603e493c933 | ["Apache-2.0"] | 2 | 2021-02-15T01:40:38.000Z | 2021-02-15T02:00:21.000Z | jina/flow/mixin/async_crud.py | liushuigs/jina | b3550e901b2a340924330b5ba2801603e493c933 | ["Apache-2.0"] | null | null | null |
import warnings
from typing import Union, Iterable, TextIO, Dict, Optional
import numpy as np
from ...clients.base import InputType, CallbackFnType
from ...enums import DataInputType
from ...helper import deprecated_alias
class AsyncCRUDFlowMixin:
"""The asynchronous version of the Mixin for CRUD in Flow"""
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def train(
self,
inputs: InputType,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do training on the current Flow
:param inputs: An iterator of bytes. If not given, then you have to specify it in **kwargs**.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
warnings.warn(f'{self.train} is under heavy refactoring', FutureWarning)
async for r in self._get_client(**kwargs).train(
inputs, on_done, on_error, on_always, **kwargs
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def index_ndarray(
self,
array: 'np.ndarray',
axis: int = 0,
size: Optional[int] = None,
shuffle: bool = False,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Using numpy ndarray as the index source for the current Flow
:param array: the numpy ndarray data source
:param axis: iterate over that axis
:param size: the maximum number of the sub arrays
:param shuffle: shuffle the numpy data source beforehand
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_ndarray
async for r in self._get_client(**kwargs).index(
_input_ndarray(array, axis, size, shuffle),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def search_ndarray(
self,
array: 'np.ndarray',
axis: int = 0,
size: Optional[int] = None,
shuffle: bool = False,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a numpy ndarray as the query source for searching on the current Flow
:param array: the numpy ndarray data source
:param axis: iterate over that axis
:param size: the maximum number of the sub arrays
:param shuffle: shuffle the numpy data source beforehand
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_ndarray
async for r in self._get_client(**kwargs).search(
_input_ndarray(array, axis, size, shuffle),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def index_lines(
self,
lines: Optional[Union[Iterable[str], TextIO]] = None,
filepath: Optional[str] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
read_mode: str = 'r',
line_format: str = 'json',
field_resolver: Optional[Dict[str, str]] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of lines as the index source for indexing on the current Flow
:param lines: a list of strings, each is considered as a document
:param filepath: a text file in which each line contains a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param read_mode: specifies the mode in which the file
is opened. 'r' for reading in text mode, 'rb' for reading in binary
:param line_format: the format of each line: ``json`` or ``csv``
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_lines
async for r in self._get_client(**kwargs).index(
_input_lines(
lines,
filepath,
size=size,
sampling_rate=sampling_rate,
read_mode=read_mode,
line_format=line_format,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
async def index_csv(
self,
lines: Union[Iterable[str], TextIO],
field_resolver: Dict[str, str] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of lines as the index source for indexing on the current Flow
:param lines: a list of strings, each is considered as a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_csv
async for r in self._get_client(**kwargs).index(
_input_csv(
lines,
size=size,
sampling_rate=sampling_rate,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
async def index_ndjson(
self,
lines: Union[Iterable[str], TextIO],
field_resolver: Optional[Dict[str, str]] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of lines as the index source for indexing on the current Flow
:param lines: a list of strings, each is considered as a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_ndjson
async for r in self._get_client(**kwargs).index(
_input_ndjson(
lines,
size=size,
sampling_rate=sampling_rate,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def index_files(
self,
patterns: Union[str, Iterable[str]],
recursive: bool = True,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
read_mode: Optional[str] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a set of files as the index source for indexing on the current Flow
:param patterns: The pattern may contain simple shell-style wildcards, e.g. '\*.py', '[\*.zip, \*.gz]'
:param recursive: If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
:param size: the maximum number of the files
:param sampling_rate: the sampling rate between [0, 1]
:param read_mode: specifies the mode in which the file
is opened. 'r' for reading in text mode, 'rb' for reading in binary mode
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_files
async for r in self._get_client(**kwargs).index(
_input_files(patterns, recursive, size, sampling_rate, read_mode),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def search_files(
self,
patterns: Union[str, Iterable[str]],
recursive: bool = True,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
read_mode: Optional[str] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a set of files as the query source for searching on the current Flow
:param patterns: The pattern may contain simple shell-style wildcards, e.g. '\*.py', '[\*.zip, \*.gz]'
:param recursive: If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
:param size: the maximum number of the files
:param sampling_rate: the sampling rate between [0, 1]
:param read_mode: specifies the mode in which the file
is opened. 'r' for reading in text mode, 'rb' for reading in
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_files
async for r in self._get_client(**kwargs).search(
_input_files(patterns, recursive, size, sampling_rate, read_mode),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
async def search_ndjson(
self,
lines: Union[Iterable[str], TextIO],
field_resolver: Optional[Dict[str, str]] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of files as the query source for searching on the current Flow
:param lines: a list of strings, each is considered as d document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_ndjson
async for r in self._get_client(**kwargs).search(
_input_ndjson(
lines,
size=size,
sampling_rate=sampling_rate,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
async def search_csv(
self,
lines: Union[Iterable[str], TextIO],
field_resolver: Optional[Dict[str, str]] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of lines as the index source for indexing on the current Flow
:param lines: a list of strings, each is considered as d document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_csv
async for r in self._get_client(**kwargs).search(
_input_csv(
lines,
size=size,
sampling_rate=sampling_rate,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def search_lines(
self,
lines: Optional[Union[Iterable[str], TextIO]] = None,
filepath: Optional[str] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
read_mode: str = 'r',
line_format: str = 'json',
field_resolver: Optional[Dict[str, str]] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of files as the query source for searching on the current Flow
:param filepath: a text file that each line contains a document
:param lines: a list of strings, each is considered as d document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param read_mode: specifies the mode in which the file
is opened. 'r' for reading in text mode, 'rb' for reading in binary
:param line_format: the format of each line: ``json`` or ``csv``
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_lines
async for r in self._get_client(**kwargs).search(
_input_lines(
lines,
filepath,
size=size,
sampling_rate=sampling_rate,
read_mode=read_mode,
line_format=line_format,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def index(
self,
inputs: InputType,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do indexing on the current Flow
It will start a :py:class:`CLIClient` and call :py:func:`index`.
:param inputs: An iterator of bytes. If not given, then you have to specify it in **kwargs**.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
async for r in self._get_client(**kwargs).index(
inputs, on_done, on_error, on_always, **kwargs
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def update(
self,
inputs: InputType,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do updates on the current Flow
It will start a :py:class:`CLIClient` and call :py:func:`index`.
:param inputs: An iterator of bytes. If not given, then you have to specify it in **kwargs**.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
async for r in self._get_client(**kwargs).update(
inputs, on_done, on_error, on_always, **kwargs
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def delete(
self,
ids: Iterable[str],
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do deletion on the current Flow
:param ids: An iterable of ids
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
async for r in self._get_client(**kwargs).delete(
ids, on_done, on_error, on_always, **kwargs
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def search(
self,
inputs: InputType,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do searching on the current Flow
It will start a :py:class:`CLIClient` and call :py:func:`search`.
:param inputs: An iterator of bytes. If not given, then you have to specify it in **kwargs**.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
async for r in self._get_client(**kwargs).search(
inputs, on_done, on_error, on_always, **kwargs
):
yield r
| 40.70915 | 120 | 0.603837 |
f72adc47d855b9bd8cfb880f4445828ea9fe2109 | 9,267 | py | Python | pysot/datasets/dataset_template.py | wattanapong/DFA | c05851beca2f8739f80531eb4de2f61639715cab | ["Apache-2.0"] | null | null | null | pysot/datasets/dataset_template.py | wattanapong/DFA | c05851beca2f8739f80531eb4de2f61639715cab | ["Apache-2.0"] | null | null | null | pysot/datasets/dataset_template.py | wattanapong/DFA | c05851beca2f8739f80531eb4de2f61639715cab | ["Apache-2.0"] | null | null | null |
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import sys
import os
import cv2
import numpy as np
from torch.utils.data import Dataset
from pysot.utils.bbox import center2corner, Center
from pysot.datasets.anchor_target import AnchorTarget
from pysot.datasets.augmentation import Augmentation
from pysot.core.config import cfg
logger = logging.getLogger("global")
# setting opencv
pyv = sys.version[0]
if pyv[0] == '3':
cv2.ocl.setUseOpenCL(False)
class SubDataset(object):
def __init__(self, name, root, anno, frame_range, num_use, start_idx):
cur_path = os.path.dirname(os.path.realpath(__file__))
self.name = name
self.root = os.path.join(cur_path, '../../', root)
self.anno = os.path.join(cur_path, '../../', anno)
self.frame_range = frame_range
self.num_use = num_use
self.start_idx = start_idx
logger.info("loading " + name)
with open(self.anno, 'r') as f:
meta_data = json.load(f)
meta_data = self._filter_zero(meta_data)
for video in list(meta_data.keys()):
for track in meta_data[video]:
frames = meta_data[video][track]
frames = list(map(int,
filter(lambda x: x.isdigit(), frames.keys())))
frames.sort()
meta_data[video][track]['frames'] = frames
if len(frames) <= 0:
logger.warning("{}/{} has no frames".format(video, track))
del meta_data[video][track]
for video in list(meta_data.keys()):
if len(meta_data[video]) <= 0:
logger.warning("{} has no tracks".format(video))
del meta_data[video]
self.labels = meta_data
self.num = len(self.labels)
self.num_use = self.num if self.num_use == -1 else self.num_use
self.videos = list(meta_data.keys())
logger.info("{} loaded".format(self.name))
self.path_format = '{}.{}.{}.jpg'
self.pick = self.shuffle()
def _filter_zero(self, meta_data):
meta_data_new = {}
for video, tracks in meta_data.items():
new_tracks = {}
for trk, frames in tracks.items():
new_frames = {}
for frm, bbox in frames.items():
if not isinstance(bbox, dict):
if len(bbox) == 4:
x1, y1, x2, y2 = bbox
w, h = x2 - x1, y2 - y1
else:
w, h = bbox
if w <= 0 or h <= 0:
continue
new_frames[frm] = bbox
if len(new_frames) > 0:
new_tracks[trk] = new_frames
if len(new_tracks) > 0:
meta_data_new[video] = new_tracks
return meta_data_new
def log(self):
logger.info("{} start-index {} select [{}/{}] path_format {}".format(
self.name, self.start_idx, self.num_use,
self.num, self.path_format))
def shuffle(self):
lists = list(range(self.start_idx, self.start_idx + self.num))
pick = []
while len(pick) < self.num_use:
np.random.shuffle(lists)
pick += lists
return pick[:self.num_use]
def get_image_anno(self, video, track, frame):
frame = "{:06d}".format(frame)
image_path = os.path.join(self.root, video,
self.path_format.format(frame, track, 'x'))
image_anno = self.labels[video][track][frame]
return image_path, image_anno
# track: one tracked object within a video
# video: a subfolder under ILSVRC2015_VID_train_000{0-3}, for example ILSVRC2015_train_00004000
def get_positive_pair(self, index):
video_name = self.videos[index]
video = self.labels[video_name]
track = np.random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
template_frame = np.random.randint(0, len(frames))
template_frame = frames[template_frame]
return self.get_image_anno(video_name, track, template_frame)
def get_random_target(self, index=-1):
if index == -1:
index = np.random.randint(0, self.num)
video_name = self.videos[index]
video = self.labels[video_name]
track = np.random.choice(list(video.keys()))
track_info = video[track]
frames = track_info['frames']
frame = np.random.choice(frames)
return self.get_image_anno(video_name, track, frame)
def __len__(self):
return self.num
class TrkDataset(Dataset):
def __init__(self,):
super(TrkDataset, self).__init__()
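# Sanity check: the score-map size implied by the search/exemplar sizes,
# anchor stride and base size must match the configured OUTPUT_SIZE.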
desired_size = (cfg.TRAIN.SEARCH_SIZE - cfg.TRAIN.EXEMPLAR_SIZE) / \
cfg.ANCHOR.STRIDE + 1 + cfg.TRAIN.BASE_SIZE
if desired_size != cfg.TRAIN.OUTPUT_SIZE:
raise Exception('size not match!')
# create anchor target
self.anchor_target = AnchorTarget()
# create sub dataset
self.all_dataset = []
start = 0
self.num = 0
for name in cfg.DATASET.NAMES:
subdata_cfg = getattr(cfg.DATASET, name)
sub_dataset = SubDataset(
name,
subdata_cfg.ROOT,
subdata_cfg.ANNO,
subdata_cfg.FRAME_RANGE,
subdata_cfg.NUM_USE,
start
)
start += sub_dataset.num
self.num += sub_dataset.num_use
sub_dataset.log()
self.all_dataset.append(sub_dataset)
# data augmentation
self.template_aug = Augmentation(
cfg.DATASET.TEMPLATE.SHIFT,
cfg.DATASET.TEMPLATE.SCALE,
cfg.DATASET.TEMPLATE.BLUR,
cfg.DATASET.TEMPLATE.FLIP,
cfg.DATASET.TEMPLATE.COLOR
)
self.search_aug = Augmentation(
cfg.DATASET.SEARCH.SHIFT,
cfg.DATASET.SEARCH.SCALE,
cfg.DATASET.SEARCH.BLUR,
cfg.DATASET.SEARCH.FLIP,
cfg.DATASET.SEARCH.COLOR
)
videos_per_epoch = cfg.DATASET.VIDEOS_PER_EPOCH
self.num = videos_per_epoch if videos_per_epoch > 0 else self.num
self.num *= cfg.TRAIN.EPOCH
self.pick = self.shuffle()
def shuffle(self):
pick = []
m = 0
while m < self.num:
p = []
for sub_dataset in self.all_dataset:
sub_p = sub_dataset.pick
p += sub_p
np.random.shuffle(p)
pick += p
m = len(pick)
logger.info("shuffle done!")
logger.info("dataset length {}".format(self.num))
return pick[:self.num]
def _find_dataset(self, index):
for dataset in self.all_dataset:
if dataset.start_idx + dataset.num > index:
return dataset, index - dataset.start_idx
def _get_bbox(self, image, shape):
imh, imw = image.shape[:2]
if len(shape) == 4:
w, h = shape[2]-shape[0], shape[3]-shape[1]
else:
w, h = shape
context_amount = 0.5
exemplar_size = cfg.TRAIN.EXEMPLAR_SIZE
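# Context padding in the style of SiamFC/SiamRPN: pad width and height by
# half of (w + h), take the geometric mean as the crop size s_z, and rescale
# the box so that the crop maps onto the exemplar size.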
wc_z = w + context_amount * (w+h)
hc_z = h + context_amount * (w+h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = exemplar_size / s_z
w = w*scale_z
h = h*scale_z
cx, cy = imw//2, imh//2
bbox = center2corner(Center(cx, cy, w, h))
return bbox
def __len__(self):
return self.num
def __getitem__(self, index):
index = self.pick[index]
dataset, index = self._find_dataset(index)
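# Randomly decide whether this sample is converted to grayscale and whether
# it should be a negative pair (template and search drawn from different videos).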
gray = cfg.DATASET.GRAY and cfg.DATASET.GRAY > np.random.random()
neg = cfg.DATASET.NEG and cfg.DATASET.NEG > np.random.random()
# get one dataset
if neg:
print('please check this code path: negative (distractor) pair sampling was removed')
import pdb
pdb.set_trace()
template = dataset.get_random_target(index)
search = np.random.choice(self.all_dataset).get_random_target()
else:
template = dataset.get_positive_pair(index)
if not os.path.exists(template[0]):
print(template[0])
# get image
template_image = cv2.imread(template[0])
# get bounding box
template_box = self._get_bbox(template_image, template[1])
# augmentation
template, _ = self.template_aug(template_image,
template_box,
cfg.TRAIN.EXEMPLAR_SIZE,
gray=gray)
template = template.transpose((2, 0, 1)).astype(np.float32)
return {
'template': template,
'gt': template_box
}
| 34.449814 | 107 | 0.555627 |
f72b094590d5184ffbaf3cd4a122b4c8a53db388 | 7,097 | py | Python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/_container_registry_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/_container_registry_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/_container_registry_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ConnectedRegistriesOperations, ExportPipelinesOperations, ImportPipelinesOperations, Operations, PipelineRunsOperations, PrivateEndpointConnectionsOperations, RegistriesOperations, ReplicationsOperations, ScopeMapsOperations, TokensOperations, WebhooksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ContainerRegistryManagementClient:
"""ContainerRegistryManagementClient.
:ivar connected_registries: ConnectedRegistriesOperations operations
:vartype connected_registries:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.ConnectedRegistriesOperations
:ivar export_pipelines: ExportPipelinesOperations operations
:vartype export_pipelines:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.ExportPipelinesOperations
:ivar registries: RegistriesOperations operations
:vartype registries:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.RegistriesOperations
:ivar import_pipelines: ImportPipelinesOperations operations
:vartype import_pipelines:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.ImportPipelinesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.containerregistry.v2020_11_01_preview.operations.Operations
:ivar pipeline_runs: PipelineRunsOperations operations
:vartype pipeline_runs:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.PipelineRunsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.PrivateEndpointConnectionsOperations
:ivar replications: ReplicationsOperations operations
:vartype replications:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.ReplicationsOperations
:ivar scope_maps: ScopeMapsOperations operations
:vartype scope_maps:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.ScopeMapsOperations
:ivar tokens: TokensOperations operations
:vartype tokens: azure.mgmt.containerregistry.v2020_11_01_preview.operations.TokensOperations
:ivar webhooks: WebhooksOperations operations
:vartype webhooks:
azure.mgmt.containerregistry.v2020_11_01_preview.operations.WebhooksOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ContainerRegistryManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.connected_registries = ConnectedRegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.export_pipelines = ExportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.import_pipelines = ImportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.pipeline_runs = PipelineRunsOperations(self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.replications = ReplicationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.scope_maps = ScopeMapsOperations(self._client, self._config, self._serialize, self._deserialize)
self.tokens = TokensOperations(self._client, self._config, self._serialize, self._deserialize)
self.webhooks = WebhooksOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs: Any
) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ContainerRegistryManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| 53.360902 | 286 | 0.748908 |
f72b319c6f56785827dd2160e2b9d041dde23ada | 5,281 | py | Python | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | null | null | null | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | null | null | null | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | null | null | null |
"""
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
variant = dict(
num_epochs=5001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
# num_gaussians=1,
),
qf_kwargs=dict(
hidden_sizes=[256, 256, ],
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
brac=True,
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
save_video=True,
image_env_kwargs=dict(
imsize=84,
init_camera=None, # the environment initializes the camera already
transpose=True,
normalize=True,
recompute_reward=False,
non_presampled_goal_img_is_garbage=True, # do not set_to_goal
),
dump_video_kwargs=dict(
exploration_goal_image_key="image_observation",
evaluation_goal_image_key="image_observation",
image_format="CWH",
),
# renderer_kwargs=dict(
# # width=84,
# # height=84,
# init_camera=None, # the environment initializes the camera already
# # transpose=True,
# create_image_format="HWC",
# output_image_format="CHW",
# # normalize=True,
# ),
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
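# Hyperparameter sweep: every combination of the values below is expanded
# into one experiment variant and launched through run_variants.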
search_space = {
'env': ["relocate-binary-old-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.1, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
'trainer_kwargs.clip_score': [2, ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0, ],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
'qf_kwargs.output_activation': [Clamp(max=0)],
'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [1, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
| 30.883041 | 80 | 0.566938 |
f72bac46a7bf89e92fd86af932bfe8a7135e61d2 | 13,197 | py | Python | src/sardana/util/funcgenerator.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | ["CC-BY-3.0"] | null | null | null | src/sardana/util/funcgenerator.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | ["CC-BY-3.0"] | null | null | null | src/sardana/util/funcgenerator.py | schooft/sardana | 76287b416650f40da79871ee3849340d0ff31f1d | ["CC-BY-3.0"] | null | null | null |
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
import time
import threading
import math
import copy
import numpy
import traceback
from sardana import State
from sardana.sardanaevent import EventGenerator, EventType
from sardana.pool.pooldefs import SynchParam, SynchDomain
from taurus.core.util.log import Logger
def strictly_increasing(l):
"""Check whether list l has strictly increasing values"""
return all(x < y for x, y in zip(l, l[1:]))
def strictly_decreasing(l):
"""Check whether list l has strictly deacreasing values"""
return all(x > y for x, y in zip(l, l[1:]))
class FunctionGenerator(EventGenerator, Logger):
"""Generator of active and passive events describing a rectangular
function.
.. note::
The FunctionGenerator class has been included in Sardana
on a provisional basis. Backwards incompatible changes
(up to and including removal of the module) may occur if
deemed necessary by the core developers.
"""
MAX_NAP_TIME = 0.1
def __init__(self, name="FunctionGenerator"):
EventGenerator.__init__(self)
Logger.__init__(self, name)
self._name = name
self._initial_domain = None
self._active_domain = None
self._position_event = threading.Event()
self._position = None
self._initial_domain_in_use = None
self._active_domain_in_use = None
self._active_events = list()
self._passive_events = list()
self._started = False
self._stopped = False
self._running = False
self._start_time = None
self._direction = None
self._condition = None
self._id = None
self._start_fired = False
def get_name(self):
return self._name
name = property(get_name)
def set_initial_domain(self, domain):
self._initial_domain = domain
def get_initial_domain(self):
return self._initial_domain
initial_domain = property(get_initial_domain, set_initial_domain)
def set_active_domain(self, domain):
self._active_domain = domain
def get_active_domain(self):
return self._active_domain
active_domain = property(get_active_domain, set_active_domain)
def set_initial_domain_in_use(self, domain):
self._initial_domain_in_use = domain
def get_initial_domain_in_use(self):
return self._initial_domain_in_use
initial_domain_in_use = property(get_initial_domain_in_use,
set_initial_domain_in_use)
def set_active_domain_in_use(self, domain):
self._active_domain_in_use = domain
def get_active_domain_in_use(self):
return self._active_domain_in_use
active_domain_in_use = property(get_active_domain_in_use,
set_active_domain_in_use)
def add_active_event(self, event):
self._active_events.append(event)
def set_active_events(self, events):
self._active_events = events
def get_active_events(self):
return self._active_events
active_events = property(get_active_events, set_active_events)
def add_passive_event(self, event):
self._passive_events.append(event)
def set_passive_events(self, events):
self._passive_events = events
def get_passive_events(self):
return self._passive_events
passive_events = property(get_passive_events, set_passive_events)
def set_direction(self, direction):
self._direction = direction
if direction == 1:
self._condition = numpy.greater_equal
elif direction == -1:
self._condition = numpy.less_equal
else:
raise ValueError("direction can be -1 or 1 (negative or positive)")
def get_direction(self):
return self._direction
direction = property(get_direction, set_direction)
def event_received(self, *args, **kwargs):
_, _, v = args
if v.error:
exc_info = v.exc_info
self.error("Synchronization base attribute in error")
msg = "Details: " + "".join(traceback.format_exception(*exc_info))
self.debug(msg)
return
self._position = v.value
self._position_event.set()
def start(self):
self._start_time = time.time()
self._stopped = False
self._started = True
self._position = None
self._start_fired = False
self._position_event.clear()
self._id = 0
self.fire_event(EventType("state"), State.Moving)
def stop(self):
self._stopped = True
def is_started(self):
return self._started
def is_stopped(self):
return self._stopped
def is_running(self):
return self._running
def run(self):
self._running = True
try:
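# Main generation loop: for every configured event, wait until its active
# point (in time or position), fire the active event, then wait for and
# fire the matching passive event.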
while len(self.active_events) > 0 and not self.is_stopped():
self.wait_active()
self.fire_active()
self.wait_passive()
self.fire_passive()
self._id += 1
finally:
self._started = False
self._running = False
self._stopped = False
self.fire_event(EventType("state"), State.On)
def sleep(self, period):
if period <= 0:
return
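# Sleep in short naps (at most MAX_NAP_TIME each) so that a stop request
# is noticed without waiting for the whole period to elapse.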
necessary_naps = int(math.ceil(period / self.MAX_NAP_TIME))
if necessary_naps == 0: # avoid zero ZeroDivisionError
nap = 0
else:
nap = period / necessary_naps
for _ in xrange(necessary_naps):
if self.is_stopped():
break
time.sleep(nap)
def fire_start(self):
self.fire_event(EventType("start"), self._id)
self._start_fired = True
if self._id > 0:
msg = "start was fired with {0} delay".format(self._id)
self.warning(msg)
def wait_active(self):
candidate = self.active_events[0]
if self.initial_domain_in_use == SynchDomain.Time:
now = time.time()
candidate += self._start_time
self.sleep(candidate - now)
else:
while True:
if self.is_stopped():
break
if self._position_event.isSet():
self._position_event.clear()
now = self._position
if self._condition(now, candidate):
break
else:
self._position_event.wait(self.MAX_NAP_TIME)
def fire_active(self):
# check if some events need to be skipped
i = 0
while i < len(self.active_events) - 1:
candidate = self.active_events[i + 1]
if self.initial_domain_in_use is SynchDomain.Time:
candidate += self._start_time
now = time.time()
elif self.initial_domain_in_use is SynchDomain.Position:
now = self._position
if self._condition(now, candidate):
i += 1
else:
break
self._id += i
if not self._start_fired:
self.fire_start()
self.fire_event(EventType("active"), self._id)
self.active_events = self.active_events[i + 1:]
self.passive_events = self.passive_events[i:]
def wait_passive(self):
if self.active_domain_in_use == SynchDomain.Time:
now = time.time()
candidate = self._start_time + self.passive_events[0]
self.sleep(candidate - now)
else:
while True:
if self._position_event.isSet():
self._position_event.clear()
if self._condition(self._position, self.passive_events[0]):
break
else:
self._position_event.wait(self.MAX_NAP_TIME)
if self.is_stopped():
break
def fire_passive(self):
self.fire_event(EventType("passive"), self._id)
self.set_passive_events(self.passive_events[1:])
if len(self.passive_events) == 0:
self.fire_end()
def fire_end(self):
self.fire_event(EventType("end"), self._id)
def set_configuration(self, configuration):
# make a copy since we may inject the initial time
configuration = copy.deepcopy(configuration)
active_events = []
passive_events = []
self._direction = None
# create short variables for commodity
Time = SynchDomain.Time
Position = SynchDomain.Position
Initial = SynchParam.Initial
Delay = SynchParam.Delay
Active = SynchParam.Active
Total = SynchParam.Total
Repeats = SynchParam.Repeats
for i, group in enumerate(configuration):
# inject delay as initial time - generation will be
# relative to the start time
initial_param = group.get(Initial)
if initial_param is None:
initial_param = dict()
if Time not in initial_param:
delay_param = group.get(Delay)
if Time in delay_param:
initial_param[Time] = delay_param[Time]
group[Initial] = initial_param
# determine active domain in use
msg = "no initial value in group %d" % i
if self.initial_domain in initial_param:
self.initial_domain_in_use = self.initial_domain
elif Position in initial_param:
self.initial_domain_in_use = Position
elif Time in initial_param:
self.initial_domain_in_use = Time
else:
raise ValueError(msg)
# determine passive domain in use
active_param = group.get(Active)
msg = "no active value in group %d" % i
if self.active_domain is None:
if Time in active_param:
self.active_domain_in_use = Time
elif Position in active_param:
self.active_domain_in_use = Position
else:
raise ValueError(msg)
elif self.active_domain in active_param:
self.active_domain_in_use = self.active_domain
else:
raise ValueError(msg)
# create short variables for commodity
initial_domain_in_use = self.initial_domain_in_use
active_domain_in_use = self.active_domain_in_use
repeats = group.get(Repeats, 1)
active = active_param[active_domain_in_use]
initial_in_initial_domain = initial_param[initial_domain_in_use]
initial_in_active_domain = initial_param[active_domain_in_use]
active_event_in_initial_domain = initial_in_initial_domain
active_event_in_active_domain = initial_in_active_domain
if repeats > 1:
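# Total gives the spacing between consecutive repeats, i.e. how far the
# active event advances in both domains from one repeat to the next.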
total_param = group[Total]
total_in_initial_domain = total_param[initial_domain_in_use]
total_in_active_domain = total_param[active_domain_in_use]
for _ in xrange(repeats):
passive_event = active_event_in_active_domain + active
active_events.append(active_event_in_initial_domain)
passive_events.append(passive_event)
active_event_in_initial_domain += total_in_initial_domain
active_event_in_active_domain += total_in_active_domain
else:
active_events.append(active_event_in_initial_domain)
passive_event = active_event_in_active_domain + active
passive_events.append(passive_event)
# determine direction
if self.direction is None:
if strictly_increasing(active_events):
self.direction = 1
elif strictly_decreasing(active_events):
self.direction = -1
else:
msg = "active values indicate contradictory directions"
raise ValueError(msg)
self.active_events = active_events
self.passive_events = passive_events
| 35.286096 | 79 | 0.608169 |
f72badfdcc7c9b8294a786b553d4648ee517ad6c | 12,634 | py | Python | tests/python/pants_test/util/test_contextutil.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | ["Apache-2.0"] | null | null | null | tests/python/pants_test/util/test_contextutil.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | ["Apache-2.0"] | null | null | null | tests/python/pants_test/util/test_contextutil.py | ghthor/pants | 450de702414f87f563081ddefaefd8a554de07a3 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import pstats
import shutil
import signal
import sys
import unittest
import uuid
import zipfile
from builtins import next, object, range, str
from contextlib import contextmanager
import mock
from pants.util.contextutil import (HardSystemExit, InvalidZipPath, Timer, environment_as,
exception_logging, hard_exit_handler, hermetic_environment_as,
maybe_profiled, open_zip, pushd, signal_handler_as, stdio_as,
temporary_dir, temporary_file)
from pants.util.process_handler import subprocess
PATCH_OPTS = dict(autospec=True, spec_set=True)
class ContextutilTest(unittest.TestCase):
def test_empty_environment(self):
with environment_as():
pass
def test_override_single_variable(self):
with temporary_file() as output:
# test that the override takes place
with environment_as(HORK='BORK'):
subprocess.Popen([sys.executable, '-c', 'import os; print(os.environ["HORK"])'],
stdout=output).wait()
output.seek(0)
self.assertEquals('BORK\n', output.read())
# test that the variable is cleared
with temporary_file() as new_output:
subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
stdout=new_output).wait()
new_output.seek(0)
self.assertEquals('False\n', new_output.read())
def test_environment_negation(self):
with temporary_file() as output:
with environment_as(HORK='BORK'):
with environment_as(HORK=None):
# test that the variable is cleared
subprocess.Popen([sys.executable, '-c', 'import os; print("HORK" in os.environ)'],
stdout=output).wait()
output.seek(0)
self.assertEquals('False\n', output.read())
def test_hermetic_environment(self):
self.assertIn('USER', os.environ)
with hermetic_environment_as(**{}):
self.assertNotIn('USER', os.environ)
def test_hermetic_environment_subprocesses(self):
self.assertIn('USER', os.environ)
with hermetic_environment_as(**dict(AAA='333')):
output = subprocess.check_output('env', shell=True)
self.assertNotIn('USER=', output)
self.assertIn('AAA', os.environ)
self.assertEquals(os.environ['AAA'], '333')
self.assertIn('USER', os.environ)
self.assertNotIn('AAA', os.environ)
def test_hermetic_environment_unicode(self):
UNICODE_CHAR = '¡'
ENCODED_CHAR = UNICODE_CHAR.encode('utf-8')
with environment_as(**dict(XXX=UNICODE_CHAR)):
self.assertEquals(os.environ['XXX'], ENCODED_CHAR)
with hermetic_environment_as(**dict(AAA=UNICODE_CHAR)):
self.assertIn('AAA', os.environ)
self.assertEquals(os.environ['AAA'], ENCODED_CHAR)
self.assertEquals(os.environ['XXX'], ENCODED_CHAR)
def test_simple_pushd(self):
pre_cwd = os.getcwd()
with temporary_dir() as tempdir:
with pushd(tempdir) as path:
self.assertEquals(tempdir, path)
self.assertEquals(os.path.realpath(tempdir), os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
def test_nested_pushd(self):
pre_cwd = os.getcwd()
with temporary_dir() as tempdir1:
with pushd(tempdir1):
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
with temporary_dir(root_dir=tempdir1) as tempdir2:
with pushd(tempdir2):
self.assertEquals(os.path.realpath(tempdir2), os.getcwd())
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
self.assertEquals(os.path.realpath(tempdir1), os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
self.assertEquals(pre_cwd, os.getcwd())
def test_temporary_file_no_args(self):
with temporary_file() as fp:
self.assertTrue(os.path.exists(fp.name), 'Temporary file should exist within the context.')
self.assertTrue(os.path.exists(fp.name) == False,
'Temporary file should not exist outside of the context.')
def test_temporary_file_without_cleanup(self):
with temporary_file(cleanup=False) as fp:
self.assertTrue(os.path.exists(fp.name), 'Temporary file should exist within the context.')
self.assertTrue(os.path.exists(fp.name),
'Temporary file should exist outside of context if cleanup=False.')
os.unlink(fp.name)
def test_temporary_file_within_other_dir(self):
with temporary_dir() as path:
with temporary_file(root_dir=path) as f:
self.assertTrue(os.path.realpath(f.name).startswith(os.path.realpath(path)),
'file should be created in root_dir if specified.')
def test_temporary_dir_no_args(self):
with temporary_dir() as path:
self.assertTrue(os.path.exists(path), 'Temporary dir should exist within the context.')
self.assertTrue(os.path.isdir(path), 'Temporary dir should be a dir and not a file.')
self.assertFalse(os.path.exists(path), 'Temporary dir should not exist outside of the context.')
def test_temporary_dir_without_cleanup(self):
with temporary_dir(cleanup=False) as path:
self.assertTrue(os.path.exists(path), 'Temporary dir should exist within the context.')
self.assertTrue(os.path.exists(path),
'Temporary dir should exist outside of context if cleanup=False.')
shutil.rmtree(path)
def test_temporary_dir_with_root_dir(self):
with temporary_dir() as path1:
with temporary_dir(root_dir=path1) as path2:
self.assertTrue(os.path.realpath(path2).startswith(os.path.realpath(path1)),
'Nested temporary dir should be created within outer dir.')
def test_timer(self):
class FakeClock(object):
def __init__(self):
self._time = 0.0
def time(self):
ret = self._time
self._time += 0.0001 # Force a little time to elapse.
return ret
def sleep(self, duration):
self._time += duration
clock = FakeClock()
# Note: to test with the real system clock, use this instead:
# import time
# clock = time
with Timer(clock=clock) as t:
self.assertLess(t.start, clock.time())
self.assertGreater(t.elapsed, 0)
clock.sleep(0.1)
self.assertGreater(t.elapsed, 0.1)
clock.sleep(0.1)
self.assertTrue(t.finish is None)
self.assertGreater(t.elapsed, 0.2)
self.assertLess(t.finish, clock.time())
def test_open_zipDefault(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w') as zf:
self.assertTrue(zf._allowZip64)
def test_open_zipTrue(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w', allowZip64=True) as zf:
self.assertTrue(zf._allowZip64)
def test_open_zipFalse(self):
with temporary_dir() as tempdir:
with open_zip(os.path.join(tempdir, 'test'), 'w', allowZip64=False) as zf:
self.assertFalse(zf._allowZip64)
def test_open_zip_raises_exception_on_falsey_paths(self):
falsey = (None, '', False)
for invalid in falsey:
with self.assertRaises(InvalidZipPath):
next(open_zip(invalid).gen)
def test_open_zip_returns_realpath_on_badzipfile(self):
# In case of file corruption, deleting a Pants-constructed symlink would not resolve the error.
with temporary_file() as not_zip:
with temporary_dir() as tempdir:
file_symlink = os.path.join(tempdir, 'foo')
os.symlink(not_zip.name, file_symlink)
self.assertEquals(os.path.realpath(file_symlink), os.path.realpath(not_zip.name))
with self.assertRaisesRegexp(zipfile.BadZipfile, r'{}'.format(not_zip.name)):
next(open_zip(file_symlink).gen)
@contextmanager
def _stdio_as_tempfiles(self):
"""Harness to replace `sys.std*` with tempfiles.
Validates that all files are read/written/flushed correctly, and acts as a
contextmanager to allow for recursive tests.
"""
# Prefix contents written within this instance with a unique string to differentiate
# them from other instances.
uuid_str = str(uuid.uuid4())
def u(string):
return '{}#{}'.format(uuid_str, string)
stdin_data = u('stdio')
stdout_data = u('stdout')
stderr_data = u('stderr')
with temporary_file() as tmp_stdin,\
temporary_file() as tmp_stdout,\
temporary_file() as tmp_stderr:
print(stdin_data, file=tmp_stdin)
tmp_stdin.seek(0)
# Read prepared content from stdin, and write content to stdout/stderr.
with stdio_as(stdout_fd=tmp_stdout.fileno(),
stderr_fd=tmp_stderr.fileno(),
stdin_fd=tmp_stdin.fileno()):
self.assertEquals(sys.stdin.fileno(), 0)
self.assertEquals(sys.stdout.fileno(), 1)
self.assertEquals(sys.stderr.fileno(), 2)
self.assertEquals(stdin_data, sys.stdin.read().strip())
print(stdout_data, file=sys.stdout)
yield
print(stderr_data, file=sys.stderr)
tmp_stdout.seek(0)
tmp_stderr.seek(0)
self.assertEquals(stdout_data, tmp_stdout.read().strip())
self.assertEquals(stderr_data, tmp_stderr.read().strip())
def test_stdio_as(self):
self.assertTrue(sys.stderr.fileno() > 2,
"Expected a pseudofile as stderr, got: {}".format(sys.stderr))
old_stdout, old_stderr, old_stdin = sys.stdout, sys.stderr, sys.stdin
# The first level tests that when `sys.std*` are file-likes (in particular, the ones set up in
# pytest's harness) rather than actual files, we stash and restore them properly.
with self._stdio_as_tempfiles():
# The second level stashes the first level's actual file objects and then re-opens them.
with self._stdio_as_tempfiles():
pass
# Validate that after the second level completes, the first level still sees valid
# fds on `sys.std*`.
self.assertEquals(sys.stdin.fileno(), 0)
self.assertEquals(sys.stdout.fileno(), 1)
self.assertEquals(sys.stderr.fileno(), 2)
self.assertEquals(sys.stdout, old_stdout)
self.assertEquals(sys.stderr, old_stderr)
self.assertEquals(sys.stdin, old_stdin)
def test_stdio_as_dev_null(self):
# Capture output to tempfiles.
with self._stdio_as_tempfiles():
# Read/write from/to `/dev/null`, which will be validated by the harness as not
# affecting the tempfiles.
with stdio_as(stdout_fd=-1, stderr_fd=-1, stdin_fd=-1):
self.assertEquals(b'', sys.stdin.read())
print('garbage', file=sys.stdout)
print('garbage', file=sys.stderr)
def test_signal_handler_as(self):
mock_initial_handler = 1
mock_new_handler = 2
with mock.patch('signal.signal', **PATCH_OPTS) as mock_signal:
mock_signal.return_value = mock_initial_handler
try:
with signal_handler_as(signal.SIGUSR2, mock_new_handler):
raise NotImplementedError('blah')
except NotImplementedError:
pass
self.assertEquals(mock_signal.call_count, 2)
mock_signal.assert_has_calls([
mock.call(signal.SIGUSR2, mock_new_handler),
mock.call(signal.SIGUSR2, mock_initial_handler)
])
def test_permissions(self):
with temporary_file(permissions=0o700) as f:
self.assertEquals(0o700, os.stat(f.name)[0] & 0o777)
with temporary_dir(permissions=0o644) as path:
self.assertEquals(0o644, os.stat(path)[0] & 0o777)
def test_exception_logging(self):
fake_logger = mock.Mock()
with self.assertRaises(AssertionError):
with exception_logging(fake_logger, 'error!'):
assert True is False
fake_logger.exception.assert_called_once_with('error!')
def test_maybe_profiled(self):
with temporary_dir() as td:
profile_path = os.path.join(td, 'profile.prof')
with maybe_profiled(profile_path):
for _ in range(5):
print('test')
# Ensure the profile data was written.
self.assertTrue(os.path.exists(profile_path))
# Ensure the profile data is valid.
pstats.Stats(profile_path).print_stats()
def test_hard_exit_handler(self):
with mock.patch('os._exit', **PATCH_OPTS) as mock_exit:
with hard_exit_handler():
raise HardSystemExit()
mock_exit.assert_called_once_with(0)
| 37.93994 | 100 | 0.679516 |
f72bca982790961f6bbe8bc31f1a60032438e1a2 | 154 | py | Python | chat_assistant/chat_assistant_app/urls.py | mrhegemon/Rasa_zero_rpc_XR_bot | a468cc1f2b1a4e935ce18e97dcb7a11070bbea0b | [
"MIT"
] | 1 | 2021-06-21T10:44:51.000Z | 2021-06-21T10:44:51.000Z | chat_assistant/chat_assistant_app/urls.py | mrhegemon/Rasa_zero_rpc_XR_bot | a468cc1f2b1a4e935ce18e97dcb7a11070bbea0b | [
"MIT"
] | null | null | null | chat_assistant/chat_assistant_app/urls.py | mrhegemon/Rasa_zero_rpc_XR_bot | a468cc1f2b1a4e935ce18e97dcb7a11070bbea0b | [
"MIT"
] | 1 | 2021-06-07T23:09:30.000Z | 2021-06-07T23:09:30.000Z | from django.conf.urls import url
from . import views
urlpatterns = [
url('', views.chat, name='chat'),
url('^chat/', views.chat, name='chat'),
]
| 19.25 | 43 | 0.62987 |
f72bd257412e17141e60758b6a2232418acfb73b | 895 | py | Python | sdks/python/test/test_PurgeResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/test/test_PurgeResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/test/test_PurgeResponse.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from PurgeResponse.clsPurgeResponse import PurgeResponse # noqa: E501
from appcenter_sdk.rest import ApiException
class TestPurgeResponse(unittest.TestCase):
"""PurgeResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPurgeResponse(self):
"""Test PurgeResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsPurgeResponse.PurgeResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.375 | 85 | 0.709497 |
f72bd93c4d17e05aa0d2db88c1b2ffb816c8ad18 | 2,936 | py | Python | sdk/monitor/azure-monitor-query/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/monitor/azure-monitor-query/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/monitor/azure-monitor-query/setup.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-monitor-query"
PACKAGE_PPRINT_NAME = "Azure Monitor Query"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
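def _example_version_match():
    # Usage sketch (not part of the original setup script): the same pattern
    # as above applied to a sample line; returns '1.0.0'.
    return re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                     'VERSION = "1.0.0"', re.MULTILINE).group(1)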
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
"Development Status :: 5 - Production/Stable",
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
python_requires=">=3.6",
zip_safe=False,
packages=find_packages(exclude=[
'tests',
'samples',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.monitor',
]),
install_requires=[
'msrest>=0.6.19',
'azure-core<2.0.0,>=1.12.0',
]
)
| 33.363636 | 85 | 0.609673 |
f72c09ac05b9052d87975715d4ccabff57f26a58 | 12,756 | py | Python | LaserRender.py | gsboylan/meerk40t | 7607b034368a428dfc5cab56629032d6074c756d | [
"MIT"
] | null | null | null | LaserRender.py | gsboylan/meerk40t | 7607b034368a428dfc5cab56629032d6074c756d | [
"MIT"
] | null | null | null | LaserRender.py | gsboylan/meerk40t | 7607b034368a428dfc5cab56629032d6074c756d | [
"MIT"
] | null | null | null | from math import floor
import wx
from PIL import Image
from ZMatrix import ZMatrix
from svgelements import *
"""
Laser Render provides GUI relevant methods of displaying the given project.
"""
DRAW_MODE_FILLS = 0x000001
DRAW_MODE_GUIDES = 0x000002
DRAW_MODE_GRID = 0x000004
DRAW_MODE_LASERPATH = 0x000008
DRAW_MODE_RETICLE = 0x000010
DRAW_MODE_SELECTION = 0x000020
DRAW_MODE_STROKES = 0x000040
DRAW_MODE_CACHE = 0x000080 # Set means do not cache.
DRAW_MODE_REFRESH = 0x000100
DRAW_MODE_ANIMATE = 0x000200
DRAW_MODE_PATH = 0x000400
DRAW_MODE_IMAGE = 0x000800
DRAW_MODE_TEXT = 0x001000
DRAW_MODE_BACKGROUND = 0x002000
DRAW_MODE_ICONS = 0x0040000
DRAW_MODE_TREE = 0x0080000
DRAW_MODE_INVERT = 0x400000
DRAW_MODE_FLIPXY = 0x800000
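def _example_draw_mode_flags():
    # Usage sketch (not part of the original module): draw modes are bit
    # flags, combined with bitwise OR and queried with bitwise AND. A *set*
    # bit suppresses the corresponding feature (e.g. draw_path below only
    # fills a path when draw_mode & DRAW_MODE_FILLS == 0).
    draw_mode = DRAW_MODE_FILLS | DRAW_MODE_GRID
    return draw_mode & DRAW_MODE_FILLS != 0  # True: fills are suppressed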
def swizzlecolor(c):
if c is None:
return None
if isinstance(c, int):
c = Color(c)
return c.blue << 16 | c.green << 8 | c.red
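def _example_swizzlecolor():
    # Usage sketch (not part of the original module): wx packs colours as
    # blue-green-red, so pure red (#FF0000) swizzles to 0x0000FF. Assumes
    # svgelements' Color accepts a hex string.
    return swizzlecolor(Color('#FF0000'))  # 0x0000FF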
class LaserRender:
def __init__(self, device):
self.device = device
self.cache = None
self.pen = wx.Pen()
self.brush = wx.Brush()
self.color = wx.Colour()
def render(self, elements, gc, draw_mode=None, zoomscale=1):
"""
Render scene information.
:param gc:
:param draw_mode:
:return:
"""
if draw_mode is None:
draw_mode = self.device.draw_mode
if draw_mode & (DRAW_MODE_TEXT | DRAW_MODE_IMAGE | DRAW_MODE_PATH) != 0:
types = []
if draw_mode & DRAW_MODE_PATH == 0:
types.append(Path)
if draw_mode & DRAW_MODE_IMAGE == 0:
types.append(SVGImage)
if draw_mode & DRAW_MODE_TEXT == 0:
types.append(SVGText)
elements = [e for e in elements if type(e) in types]
for element in elements:
try:
element.draw(element, gc, draw_mode, zoomscale=zoomscale)
except AttributeError:
if isinstance(element, Path):
element.draw = self.draw_path
elif isinstance(element, SVGImage):
element.draw = self.draw_image
elif isinstance(element, SVGText):
element.draw = self.draw_text
elif isinstance(element, Group):
element.draw = self.draw_group
else:
continue
element.draw(element, gc, draw_mode, zoomscale=zoomscale)
def make_path(self, gc, path):
p = gc.CreatePath()
first_point = path.first_point
if first_point is not None:
p.MoveToPoint(first_point[0], first_point[1])
for e in path:
if isinstance(e, Move):
p.MoveToPoint(e.end[0], e.end[1])
elif isinstance(e, Line):
p.AddLineToPoint(e.end[0], e.end[1])
elif isinstance(e, Close):
p.CloseSubpath()
elif isinstance(e, QuadraticBezier):
p.AddQuadCurveToPoint(e.control[0], e.control[1],
e.end[0], e.end[1])
elif isinstance(e, CubicBezier):
p.AddCurveToPoint(e.control1[0], e.control1[1],
e.control2[0], e.control2[1],
e.end[0], e.end[1])
elif isinstance(e, Arc):
for curve in e.as_cubic_curves():
p.AddCurveToPoint(curve.control1[0], curve.control1[1],
curve.control2[0], curve.control2[1],
curve.end[0], curve.end[1])
return p
def set_pen(self, gc, stroke, width=1.0):
if width < 1.0:
width = 1.0
c = stroke
if c is not None and c != 'none':
swizzle_color = swizzlecolor(c)
self.color.SetRGBA(swizzle_color | c.alpha << 24) # wx has BBGGRR
self.pen.SetColour(self.color)
self.pen.SetWidth(width)
gc.SetPen(self.pen)
else:
gc.SetPen(wx.TRANSPARENT_PEN)
def set_brush(self, gc, fill):
c = fill
if c is not None and c != 'none':
swizzle_color = swizzlecolor(c)
self.color.SetRGBA(swizzle_color | c.alpha << 24) # wx has BBGGRR
self.brush.SetColour(self.color)
gc.SetBrush(self.brush)
else:
gc.SetBrush(wx.TRANSPARENT_BRUSH)
def set_element_pen(self, gc, element, zoomscale=1.0):
try:
sw = Length(element.stroke_width).value(ppi=96.0)
# if sw < 3.0:
# sw = 3.0
except AttributeError:
sw = 1.0
if sw is None:
sw = 1.0
limit = zoomscale**.5
if sw < limit:
sw = limit
self.set_pen(gc, element.stroke, width=sw)
def set_element_brush(self, gc, element):
self.set_brush(gc, element.fill)
def draw_path(self, element, gc, draw_mode, zoomscale=1.0):
"""Default draw routine for the laser path element."""
try:
matrix = element.transform
except AttributeError:
matrix = Matrix()
if not hasattr(element, 'cache') or element.cache is None:
cache = self.make_path(gc, element)
element.cache = cache
gc.PushState()
gc.ConcatTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
self.set_element_pen(gc, element, zoomscale=zoomscale)
self.set_element_brush(gc, element)
if draw_mode & DRAW_MODE_FILLS == 0 and element.fill is not None:
gc.FillPath(element.cache)
if draw_mode & DRAW_MODE_STROKES == 0 and element.stroke is not None:
gc.StrokePath(element.cache)
gc.PopState()
def draw_text(self, element, gc, draw_mode, zoomscale=1.0):
try:
matrix = element.transform
except AttributeError:
matrix = Matrix()
if hasattr(element, 'wxfont'):
font = element.wxfont
else:
if element.font_size < 1:
if element.font_size > 0:
element.transform.pre_scale(element.font_size,
element.font_size,
element.x,
element.y)
element.font_size = 1 # No zero sized fonts.
font = wx.Font(element.font_size, wx.SWISS, wx.NORMAL, wx.BOLD)
try:
f = []
if element.font_family is not None:
f.append(str(element.font_family))
if element.font_face is not None:
f.append(str(element.font_face))
if element.font_weight is not None:
f.append(str(element.font_weight))
f.append("%d" % element.font_size)
font.SetNativeFontInfoUserDesc(' '.join(f))
except:
pass
element.wxfont = font
gc.PushState()
gc.ConcatTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
self.set_element_pen(gc, element, zoomscale=zoomscale)
self.set_element_brush(gc, element)
if element.fill is None or element.fill == 'none':
gc.SetFont(font, wx.BLACK)
else:
gc.SetFont(font, wx.Colour(swizzlecolor(element.fill)))
text = element.text
x = element.x
y = element.y
if text is not None:
w, h = element.width, element.height
element.width, element.height = gc.GetTextExtent(element.text)
if w != element.width and h != element.height:
element.modified()
if not hasattr(element, 'anchor') or element.anchor == 'start':
y -= element.height
elif element.anchor == 'middle':
x -= (element.width / 2)
y -= element.height
elif element.anchor == 'end':
x -= element.width
y -= element.height
gc.DrawText(text, x, y)
gc.PopState()
def draw_image(self, node, gc, draw_mode, zoomscale=1.0):
try:
matrix = node.transform
except AttributeError:
matrix = Matrix()
gc.PushState()
gc.ConcatTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
if draw_mode & DRAW_MODE_CACHE == 0:
cache = None
try:
cache = node.cache
except AttributeError:
pass
if cache is None:
try:
max_allowed = node.max_allowed
except AttributeError:
max_allowed = 2048
node.c_width, node.c_height = node.image.size
node.cache = self.make_thumbnail(node.image, maximum=max_allowed)
gc.DrawBitmap(node.cache, 0, 0, node.c_width, node.c_height)
else:
node.c_width, node.c_height = node.image.size
cache = self.make_thumbnail(node.image)
gc.DrawBitmap(cache, 0, 0, node.c_width, node.c_height)
gc.PopState()
def draw_group(self, element, gc, draw_mode, zoomscale=1.0):
pass
def make_raster(self, elements, bounds, width=None, height=None, bitmap=False, step=1):
if bounds is None:
return None
xmin, ymin, xmax, ymax = bounds
xmax = ceil(xmax)
ymax = ceil(ymax)
xmin = floor(xmin)
ymin = floor(ymin)
image_width = int(xmax - xmin)
if image_width == 0:
image_width = 1
image_height = int(ymax - ymin)
if image_height == 0:
image_height = 1
if width is None:
width = image_width
if height is None:
height = image_height
width /= float(step)
height /= float(step)
width = int(width)
height = int(height)
bmp = wx.Bitmap(width, height, 32)
dc = wx.MemoryDC()
dc.SelectObject(bmp)
dc.SetBackground(wx.WHITE_BRUSH)
dc.Clear()
matrix = Matrix()
matrix.post_translate(-xmin, -ymin)
scale_x = width / float(image_width)
scale_y = height / float(image_height)
scale = min(scale_x, scale_y)
matrix.post_scale(scale)
gc = wx.GraphicsContext.Create(dc)
gc.SetInterpolationQuality(wx.INTERPOLATION_BEST)
gc.PushState()
gc.ConcatTransform(wx.GraphicsContext.CreateMatrix(gc, ZMatrix(matrix)))
gc.SetBrush(wx.WHITE_BRUSH)
gc.DrawRectangle(xmin - 1, ymin - 1, xmax + 1, ymax + 1)
if not isinstance(elements, (list, tuple)):
elements = [elements]
self.render(elements, gc, draw_mode=DRAW_MODE_CACHE)
gc.PopState()
img = bmp.ConvertToImage()
buf = img.GetData()
image = Image.frombuffer("RGB", tuple(bmp.GetSize()), bytes(buf), "raw", "RGB", 0, 1)
gc.Destroy()
del dc
if bitmap:
return bmp
return image
def make_thumbnail(self, pil_data, maximum=None, width=None, height=None):
"""Resizes the given pil image into wx.Bitmap object that fits the constraints."""
image_width, image_height = pil_data.size
if width is not None and height is None:
height = width * image_height / float(image_width)
if width is None and height is not None:
width = height * image_width / float(image_height)
if width is None and height is None:
width = image_width
height = image_height
if maximum is not None and (width > maximum or height > maximum):
scale_x = maximum / width
scale_y = maximum / height
scale = min(scale_x, scale_y)
width = int(round(width * scale))
height = int(round(height * scale))
if image_width != width or image_height != height:
pil_data = pil_data.copy().resize((width, height))
else:
pil_data = pil_data.copy()
if pil_data.mode != "RGBA":
pil_data = pil_data.convert('RGBA')
pil_bytes = pil_data.tobytes()
return wx.Bitmap.FromBufferRGBA(width, height, pil_bytes)
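def _example_make_thumbnail(renderer, pil_image):
    # Usage sketch (not part of the original module): `renderer` is assumed
    # to be a LaserRender instance and `pil_image` a PIL.Image. The longest
    # side is capped at 256 pixels and a wx.Bitmap is returned.
    return renderer.make_thumbnail(pil_image, maximum=256)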
| 37.189504 | 94 | 0.541863 |
f72c0ea16de779f16beca6532d9f0bc2298b0a63 | 214 | py | Python | embiggen/sequences/generic_sequences/__init__.py | monarch-initiative/N2V | 8ae02ca125f1d24ca158c2849f2d9bb1711920b9 | [
"BSD-3-Clause"
] | 2 | 2020-01-30T11:57:37.000Z | 2020-05-02T00:05:49.000Z | embiggen/sequences/generic_sequences/__init__.py | monarch-initiative/N2V | 8ae02ca125f1d24ca158c2849f2d9bb1711920b9 | [
"BSD-3-Clause"
] | 93 | 2020-01-26T00:43:51.000Z | 2020-05-10T03:29:54.000Z | embiggen/sequences/generic_sequences/__init__.py | monarch-initiative/N2V | 8ae02ca125f1d24ca158c2849f2d9bb1711920b9 | [
"BSD-3-Clause"
] | 5 | 2020-02-13T07:18:11.000Z | 2020-03-19T08:03:34.000Z | """Module providing generic sequences that are used throught Embiggen."""
from embiggen.sequences.generic_sequences.edge_prediction_sequence import EdgePredictionSequence
__all__ = [
"EdgePredictionSequence"
] | 35.666667 | 96 | 0.827103 |
f72c1952b0e4622f4b26e6cbf2935e7858d86d51 | 3,061 | py | Python | BioNetGen-2.3.0/source_Atomizer/SBMLparser/atomizer/analyzeRDF.py | joseph-hellerstein/RuleBasedProgramming | fb88118ab764035979dc7c2bf8c89a7b484e4472 | [
"MIT"
] | null | null | null | BioNetGen-2.3.0/source_Atomizer/SBMLparser/atomizer/analyzeRDF.py | joseph-hellerstein/RuleBasedProgramming | fb88118ab764035979dc7c2bf8c89a7b484e4472 | [
"MIT"
] | null | null | null | BioNetGen-2.3.0/source_Atomizer/SBMLparser/atomizer/analyzeRDF.py | joseph-hellerstein/RuleBasedProgramming | fb88118ab764035979dc7c2bf8c89a7b484e4472 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 14:45:24 2012
@author: proto
"""
'''
This method classifies reactants according to the RDF information, and tells
us which reactants are the same and how they differ (e.g. by compartment).
'''
from sbml2bngl import SBML2BNGL
import libsbml
import collections
def getAnnotations(parser,stringKey=None):
annotation = parser.getSpeciesAnnotation()
annotationDictionary = collections.defaultdict(set)
for key,value in annotation.items():
annotationList = []
if annotation[key] != None:
for element in annotation[key]:
for index in range(0,element.getNumAttributes()):
if not stringKey or stringKey in element.getValue(index):
annotationList.append(element.getValue(index))
if annotationList == []:
continue
annotationDictionary[key].add(tuple(annotationList))
#annotationDictionary[frozenset(annotationList)].sort(lambda x,y: cmp(len(x), len(y)))
return annotationDictionary
def getSpeciesAnnotationStructure(parser):
model = parser.model
for species in model.getListOfSpecies():
name = species.getName()
speciesId = species.getId()
annotation = species.getAnnotation()
lista = libsbml.CVTermList()
libsbml.RDFAnnotationParser.parseRDFAnnotation(annotation,lista)
for idx in range(0,lista.getSize()):
for idx2 in range(0, lista.get(idx).getResources().getLength()):
resource = lista.get(idx).getResources().getValue(idx2)
qualifierType = lista.get(idx).getQualifierType()
qualifierDescription= bioqual[lista.get(idx).getBiologicalQualifierType()] if qualifierType \
else modqual[lista.get(idx).getModelQualifierType()]
#resource = resolveAnnotation(resource)
def getEquivalence(species,rdf_database):
'''
*species* is the species whose equivalence we will go and search
This method will search through the RDF database and look if param 'species'
is equal to any other element in the species database
'''
for element in rdf_database:
if species in rdf_database[element]:
if rdf_database[element].index(species) == 0:
return []
#return [x for x in rdf_database[element] if x != species]
#well only return the first one by default
return [rdf_database[element][0]]
return []
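def _example_get_equivalence():
    # Usage sketch with hypothetical data (not from the original script): the
    # RDF database maps an annotation key to an ordered list of species
    # names, and the first entry is treated as the canonical species.
    example_db = {'ann1': ['Epo', 'Epo_cyt']}
    assert getEquivalence('Epo', example_db) == []
    assert getEquivalence('Epo_cyt', example_db) == ['Epo']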
if __name__ == "__main__":
reader = libsbml.SBMLReader()
#BIOMD0000000272
document = reader.readSBMLFromFile('XMLExamples/curated/BIOMD0000000219.xml')
#document = reader.readSBMLFromFile('XMLExamples/simple4.xml')
model = document.getModel()
parser = SBML2BNGL(model)
annotationDictionary = getAnnotations(parser)
print annotationDictionary
#print getEquivalence('SAv_EpoR',annotationDictionary)
#print annotation
#print rules
| 37.790123 | 107 | 0.662202 |
f72c19b9cb3d05c83471ebe6d0f7ab647fcf3c80 | 5,478 | py | Python | saas/api/backend.py | markcstansberry/djaodjin-saas | 0749dac36c3b039334fe9f115d92b3167eea03d6 | [
"BSD-2-Clause"
] | null | null | null | saas/api/backend.py | markcstansberry/djaodjin-saas | 0749dac36c3b039334fe9f115d92b3167eea03d6 | [
"BSD-2-Clause"
] | 4 | 2021-04-08T21:56:58.000Z | 2022-02-10T13:26:56.000Z | saas/api/backend.py | markcstansberry/djaodjin-saas | 0749dac36c3b039334fe9f115d92b3167eea03d6 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2019, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint:disable=useless-super-delegation
from rest_framework import status
from rest_framework.exceptions import ValidationError
from rest_framework.generics import (RetrieveAPIView,
RetrieveUpdateDestroyAPIView)
from rest_framework.response import Response
from ..backends import ProcessorError
from ..docs import swagger_auto_schema
from ..mixins import OrganizationMixin
from .serializers import (BankSerializer, CardSerializer,
CardTokenSerializer)
#pylint: disable=no-init
class RetrieveBankAPIView(OrganizationMixin, RetrieveAPIView):
"""
Retrieves a payout account
Pass through that calls the processor API to retrieve some details about
the deposit account associated to a provider (if that information is
available through the :doc:`payment processor backend<backends>` API).
This API does not trigger payment of a subscriber to a provider. Checkout
of a subscription cart is done either through the
:ref:`HTML page<pages_cart>` or :ref:`API end point<api_checkout>`.
**Tags**: billing
**Examples**
.. code-block:: http
GET /api/billing/cowork/bank/ HTTP/1.1
responds
.. code-block:: json
{
"bank_name": "Stripe Test Bank",
"last4": "***-htrTZ",
"balance_amount": 0,
"balance_unit": "usd"
}
"""
serializer_class = BankSerializer
def retrieve(self, request, *args, **kwargs):
#pylint: disable=unused-argument
return Response(
self.organization.retrieve_bank())
class PaymentMethodDetailAPIView(OrganizationMixin,
RetrieveUpdateDestroyAPIView):
"""
Retrieves a payment method
Pass through to the processor to retrieve some details about
the payment method (ex: credit card) associated to a subscriber.
**Tags**: billing
**Examples**
.. code-block:: http
GET /api/billing/cowork/card/ HTTP/1.1
responds
.. code-block:: json
{
"last4": "1234",
"exp_date": "12/2019"
}
"""
serializer_class = CardSerializer
def delete(self, request, *args, **kwargs):
"""
Deletes a payment method
Pass through to the processor to remove the payment method (ex: credit
card) associated to a subscriber.
**Tags**: billing
**Examples**
.. code-block:: http
DELETE /api/billing/cowork/card/ HTTP/1.1
"""
return super(PaymentMethodDetailAPIView, self).delete(
request, *args, **kwargs)
@swagger_auto_schema(request_body=CardTokenSerializer)
def put(self, request, *args, **kwargs):
"""
Updates a payment method
Pass through to the processor to update some details about
the payment method (ex: credit card) associated to a subscriber.
**Tags**: billing
**Examples**
.. code-block:: http
PUT /api/billing/cowork/card/ HTTP/1.1
.. code-block:: json
{
"token": "xyz"
}
responds
.. code-block:: json
{
"last4": "1234",
"exp_date": "12/2019"
}
"""
return super(PaymentMethodDetailAPIView, self).put(
request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
self.organization.delete_card()
return Response(status=status.HTTP_204_NO_CONTENT)
def retrieve(self, request, *args, **kwargs):
#pylint: disable=unused-argument
return Response(self.organization.retrieve_card())
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
serializer = CardTokenSerializer(data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
token = serializer.validated_data['token']
try:
self.organization.update_card(token, self.request.user)
except ProcessorError as err:
raise ValidationError(err)
return self.retrieve(request, *args, **kwargs)
| 30.949153 | 78 | 0.667579 |
f72c2077893d50552e9dcf77f984503151020b67 | 2,825 | py | Python | Testing/Alignment.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 5 | 2020-06-20T22:01:23.000Z | 2021-08-06T04:39:50.000Z | Testing/Alignment.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 5 | 2020-05-17T09:32:27.000Z | 2021-03-15T19:45:52.000Z | Testing/Alignment.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 2 | 2021-02-25T19:07:45.000Z | 2022-01-09T21:14:06.000Z | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# Copyright (c) 2017 Thom Janssen <https://github.com/thomgb>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# TODO: Floating on second line does not seem to work currently
from pagebot.toolbox.color import color
from pagebot import getContext
EXPORT_PATH = '_export/Start'
# Export in _export folder that does not commit in Git.
# Force to export to a few file formats:
EXPORT_PATH_PDF = EXPORT_PATH + '.pdf'
EXPORT_PATH_JPG = EXPORT_PATH + '.jpg'
EXPORT_PATH_PNG = EXPORT_PATH + '.png'
EXPORT_PATH_SVG = EXPORT_PATH + '.svg'
# Document is the main instance holding all information about the document
# together (pages, styles, etc.)
from pagebot.document import Document
from pagebot.elements import *
from pagebot.conditions import *
from pagebot.toolbox.color import Color
W, H = 500, 400
def makeDocument(context):
# Creates the publication/document that holds the pages.
doc = Document(w=W, h=H, context=context)
print(doc.view)
print(doc.pages)
doc.view.padding = 0 # Don't show cropmarks in this example.
#doc.margin =
doc.view.showPadding = True
c1 = [Right2Right(), Float2Top(), Float2Left()]
c2 = [Right2Right(), Float2Top()]
c3 = [Left2Left()]
c4 = [Left2Left(), Float2TopLeft()]
c5 = [Right2Right(), Float2TopLeft()]
c6 = [Left2Left(), Float2TopRight()]
    conditions = [c1]  # other options: c2, c3, c4, c5, c6
page = doc[1]
for c in conditions:
makePage(doc, page, c)
#page = page.next
testCoordinates(context)
rectSets = []
def makePage(doc, page, conditions):
# Gets page by pageNumber, first in row (at this point there is only one in
# this row).
page.padding = 1
page.showPadding = True
numberOfSquares = 8
ratio = 1 / numberOfSquares
rects = []
for n in range(numberOfSquares):
r = newRect(w=40, h=42, mr=4, mt=4, parent=page,
fill=color(1 - n*ratio, 0, 0.5),
conditions=conditions, margin=0)
rects.append(r)
rectSets.append(rects)
score = doc.solve()
doc.build()
def testCoordinates(context):
context.fill((0, 1, 0))
context.stroke(None)
for rects in rectSets:
i = 0
for r in rects:
i +=1
x = r.getFloatSideLeft()
y = r.getFloatSideTop()
#print('%d %d' % (x, y))
context.circle(x, y, 2)
context.text('%d' % i, (x + 5, y - 5))
context = getContext()
makeDocument(context)
| 27.163462 | 79 | 0.593982 |
f72c27667d2dfd2d0ef541566592c06f430e047b | 147 | py | Python | scene/__init__.py | cloose/ray-tracer-challenge | 5e9dd56fb67c5cba47172986a963fc22a8cbcaa2 | [
"MIT"
] | null | null | null | scene/__init__.py | cloose/ray-tracer-challenge | 5e9dd56fb67c5cba47172986a963fc22a8cbcaa2 | [
"MIT"
] | null | null | null | scene/__init__.py | cloose/ray-tracer-challenge | 5e9dd56fb67c5cba47172986a963fc22a8cbcaa2 | [
"MIT"
] | null | null | null | from .camera import *
from .obj_file import *
from .obj_parser import *
from .ray_tracer import *
from .scene_parser import *
from .world import *
| 21 | 27 | 0.755102 |
f72c3198aa025287611b7e30ff0d1259ec7834b8 | 8,969 | py | Python | cache_dependencies/tests/test_transaction.py | Tusky/cache-dependencies | 6c19d0c2adfce19c3fdc53ad5704eddc6d84e106 | [
"BSD-3-Clause"
] | 3 | 2017-08-08T20:06:56.000Z | 2018-09-19T03:16:20.000Z | cache_dependencies/tests/test_transaction.py | Tusky/cache-dependencies | 6c19d0c2adfce19c3fdc53ad5704eddc6d84e106 | [
"BSD-3-Clause"
] | 1 | 2017-10-24T23:11:32.000Z | 2017-10-24T23:11:32.000Z | cache_dependencies/tests/test_transaction.py | Tusky/cache-dependencies | 6c19d0c2adfce19c3fdc53ad5704eddc6d84e106 | [
"BSD-3-Clause"
] | 8 | 2017-10-24T07:43:56.000Z | 2021-06-17T07:03:02.000Z | import time
import unittest
from cache_dependencies import interfaces, transaction
try:
from unittest import mock
except ImportError:
import mock
try:
str = unicode # Python 2.* compatible
string_types = (basestring,)
integer_types = (int, long)
except NameError:
string_types = (str,)
integer_types = (int,)
class AbstractTransactionTestCase(unittest.TestCase):
def setUp(self):
self.current_time = mock.Mock(return_value=time.time())
self.lock = mock.Mock(spec=interfaces.IDependency)
self.parent = mock.Mock(spec=interfaces.ITransaction)
self.transaction = self._make_transaction(self.lock, self.parent, self.current_time)
self.dependency = self._make_dependency()
def _make_transaction(self, lock, parent, current_time_accessor):
raise NotImplementedError
@staticmethod
def _make_dependency():
instance = mock.Mock(spec=interfaces.IDependency)
instance.extend = mock.Mock(return_value=True)
return instance
def test_evaluate(self):
self.transaction.evaluate(self.dependency, 1)
self.lock.evaluate.assert_called_once_with(self.dependency, self.transaction, 1)
def test_get_session_id(self):
session1_id = self.transaction.get_session_id()
session2_id = self.transaction.get_session_id()
self.assertEqual(session1_id, session2_id)
self.assertIsInstance(session2_id, string_types)
def run(self, result=None):
if self.__class__.__name__.startswith('Abstract'):
return
super(AbstractTransactionTestCase, self).run(result)
class TransactionTestCase(AbstractTransactionTestCase):
def _make_transaction(self, lock, parent, current_time_accessor):
class Transaction(transaction.Transaction):
_current_time = current_time_accessor
return Transaction(lock)
def test_parent(self):
self.assertIsNone(self.transaction.parent())
def test_get_start_time(self):
self.current_time.reset_mock()
self.assertAlmostEqual(self.transaction.get_start_time(), self.current_time.return_value)
initial_return_value = self.current_time.return_value
self.current_time.return_value += 1
time.sleep(1)
self.assertAlmostEqual(self.transaction.get_start_time(), initial_return_value)
self.current_time.assert_not_called()
def test_get_end_time(self):
self.current_time.reset_mock()
with self.assertRaises(RuntimeError):
self.transaction.get_end_time()
self.current_time.return_value += 1
self.transaction.finish()
self.assertAlmostEqual(self.transaction.get_end_time(), self.current_time.return_value)
initial_return_value = self.current_time.return_value
self.current_time.return_value += 1
time.sleep(1)
self.assertAlmostEqual(self.transaction.get_end_time(), initial_return_value)
self.current_time.assert_called_once_with()
def test_add_dependency_and_finish(self):
dependency1 = self._make_dependency()
dependency1.id = 1
dependency2 = self._make_dependency()
dependency2.id = 2
dependency3 = self._make_dependency()
dependency3.id = 3
self.transaction.add_dependency(dependency1, None)
self.lock.acquire.assert_called_once_with(dependency1, self.transaction, None)
self.lock.reset_mock()
self.transaction.add_dependency(dependency2, None)
self.lock.acquire.assert_called_once_with(dependency2, self.transaction, None)
self.lock.reset_mock()
dependency1.extend.assert_called_once_with(dependency2)
dependency1.reset_mock()
self.transaction.add_dependency(dependency3, 1)
self.lock.acquire.assert_called_once_with(dependency3, self.transaction, 1)
self.lock.reset_mock()
dependency1.extend.assert_not_called()
self.transaction.finish()
self.assertEqual(self.lock.release.call_count, 2)
args, kwargs = self.lock.release.call_args_list[-1]
self.assertEqual(len(args[0].delegates), 1)
self.assertEqual(args[0].delegates[0].id, 1)
self.assertIs(args[1], self.transaction)
self.assertIsNone(args[2])
args, kwargs = self.lock.release.call_args_list[-2]
self.assertEqual(len(args[0].delegates), 1)
self.assertEqual(args[0].delegates[0].id, 3)
self.assertIs(args[1], self.transaction)
self.assertEqual(args[2], 1)
def test_bool(self):
self.assertTrue(self.transaction)
class SavePointTestCase(AbstractTransactionTestCase):
def _make_transaction(self, lock, parent, current_time_accessor):
return transaction.SavePoint(lock, parent)
def test_parent(self):
self.assertIs(self.transaction.parent(), self.parent)
def test_get_start_time(self):
self.transaction.get_start_time()
self.parent.get_start_time.assert_called_once_with()
def test_get_end_time(self):
self.transaction.get_end_time()
self.parent.get_end_time.assert_called_once_with()
def test_add_dependency(self):
self.transaction.add_dependency(self.dependency, 1)
self.lock.acquire.assert_called_once_with(self.dependency, self.transaction, 1)
self.parent.add_dependency.assert_called_once_with(self.dependency, 1)
def test_bool(self):
self.assertTrue(self.transaction)
class DummyTransactionTestCase(AbstractTransactionTestCase):
def _make_transaction(self, lock, parent, current_time_accessor):
class DummyTransaction(transaction.DummyTransaction):
_current_time = current_time_accessor
return DummyTransaction(lock)
def test_parent(self):
self.assertIsNone(self.transaction.parent())
def test_get_start_time(self):
self.current_time.reset_mock()
self.assertAlmostEqual(self.transaction.get_start_time(), self.current_time.return_value)
initial_return_value = self.current_time.return_value
self.current_time.return_value += 1
time.sleep(1)
self.assertAlmostEqual(self.transaction.get_start_time(), self.current_time.return_value)
self.assertEqual(self.current_time.call_count, 2)
def test_get_end_time(self):
self.current_time.reset_mock()
self.current_time.return_value += 1
self.assertAlmostEqual(self.transaction.get_end_time(), self.current_time.return_value)
self.current_time.return_value += 1
time.sleep(1)
self.assertAlmostEqual(self.transaction.get_end_time(), self.current_time.return_value)
self.assertEqual(self.current_time.call_count, 2)
def test_bool(self):
self.assertFalse(self.transaction)
class TransactionManagerTestCase(unittest.TestCase):
def setUp(self):
self.transaction = mock.Mock(spec=interfaces.ITransaction)
self.lock = mock.Mock(spec=interfaces.IDependency)
self.transaction_manager = transaction.TransactionManager(self.lock)
def test_current_none(self):
current_transaction = self.transaction_manager.current()
self.assertIsInstance(current_transaction, transaction.DummyTransaction)
def test_current_set_get(self):
self.transaction_manager.current(self.transaction)
self.assertIs(self.transaction_manager.current(), self.transaction)
def test_begin(self):
initial_transaction = self.transaction_manager.begin()
self.assertIsInstance(initial_transaction, transaction.Transaction)
save_point = self.transaction_manager.begin()
self.assertIsInstance(save_point, transaction.SavePoint)
self.assertIs(save_point.parent(), initial_transaction)
nested_save_point = self.transaction_manager.begin()
self.assertIsInstance(nested_save_point, transaction.SavePoint)
self.assertIs(nested_save_point.parent(), save_point)
def test_finish_delegate_transaction(self):
self.transaction_manager.current(self.transaction)
self.transaction_manager.finish()
self.transaction.finish.assert_called_once_with()
def test_finish_transaction(self):
self.transaction_manager.begin()
self.transaction_manager.finish()
self.assertIsInstance(self.transaction_manager.current(), transaction.DummyTransaction)
def test_finish_savepoint(self):
initial_transaction = self.transaction_manager.begin()
self.transaction_manager.begin()
self.transaction_manager.finish()
self.assertIs(self.transaction_manager.current(), initial_transaction)
def test_flush(self):
self.transaction_manager.begin()
self.transaction_manager.begin()
self.transaction_manager.begin()
self.transaction_manager.flush()
self.assertIsInstance(self.transaction_manager.current(), transaction.DummyTransaction)
| 38.659483 | 97 | 0.721485 |
f72c5ca5cbb2721d967ad9ef9dfa896f7ccce240 | 2,924 | py | Python | tensorflow/python/estimator/canned/optimizers.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 522 | 2016-06-08T02:15:50.000Z | 2022-03-02T05:30:36.000Z | tensorflow/python/estimator/canned/optimizers.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/python/estimator/canned/optimizers.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 108 | 2016-06-16T15:34:05.000Z | 2022-03-12T13:23:11.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods related to optimizers used in canned_estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import rmsprop
_OPTIMIZER_CLS_NAMES = {
'Adagrad': adagrad.AdagradOptimizer,
'Adam': adam.AdamOptimizer,
'Ftrl': ftrl.FtrlOptimizer,
'RMSProp': rmsprop.RMSPropOptimizer,
'SGD': gradient_descent.GradientDescentOptimizer,
}
def get_optimizer_instance(opt, learning_rate=None):
"""Returns an optimizer instance.
Supports the following types for the given `opt`:
* An `Optimizer` instance: Returns the given `opt`.
* A string: Creates an `Optimizer` subclass with the given `learning_rate`.
Supported strings:
* 'Adagrad': Returns an `AdagradOptimizer`.
* 'Adam': Returns an `AdamOptimizer`.
* 'Ftrl': Returns an `FtrlOptimizer`.
* 'RMSProp': Returns an `RMSPropOptimizer`.
* 'SGD': Returns a `GradientDescentOptimizer`.
Args:
opt: An `Optimizer` instance, or string, as discussed above.
learning_rate: A float. Only used if `opt` is a string.
Returns:
An `Optimizer` instance.
Raises:
ValueError: If `opt` is an unsupported string.
ValueError: If `opt` is a supported string but `learning_rate` was not
specified.
ValueError: If `opt` is none of the above types.
"""
if isinstance(opt, six.string_types):
if opt in six.iterkeys(_OPTIMIZER_CLS_NAMES):
if not learning_rate:
raise ValueError('learning_rate must be specified when opt is string.')
return _OPTIMIZER_CLS_NAMES[opt](learning_rate=learning_rate)
raise ValueError(
'Unsupported optimizer name: {}. Supported names are: {}'.format(
opt, tuple(sorted(six.iterkeys(_OPTIMIZER_CLS_NAMES)))))
if not isinstance(opt, optimizer_lib.Optimizer):
raise ValueError(
'The given object is not an Optimizer instance. Given: {}'.format(opt))
return opt
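def _example_get_optimizer_instance():
  # Usage sketch (not part of the original module): resolving an optimizer
  # from a string name, as documented in get_optimizer_instance above.
  opt = get_optimizer_instance('Adagrad', learning_rate=0.1)
  assert isinstance(opt, adagrad.AdagradOptimizer)
  return opt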
| 37.012658 | 80 | 0.719904 |
f72c705b69032d729b976d015ba6742b4d314c78 | 17,180 | py | Python | src/olympia/files/tests/test_file_viewer.py | shashwatsingh/addons-server | 8fce98901104349055a828b5a47865f5e8f4120b | [
"BSD-3-Clause"
] | null | null | null | src/olympia/files/tests/test_file_viewer.py | shashwatsingh/addons-server | 8fce98901104349055a828b5a47865f5e8f4120b | [
"BSD-3-Clause"
] | null | null | null | src/olympia/files/tests/test_file_viewer.py | shashwatsingh/addons-server | 8fce98901104349055a828b5a47865f5e8f4120b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import mimetypes
import os
import shutil
import zipfile
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.core.files.storage import default_storage as storage
import pytest
from freezegun import freeze_time
from unittest.mock import Mock, patch
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.files.file_viewer import DiffHelper, FileViewer, extract_file
from olympia.files.models import File
from olympia.files.utils import SafeZip, get_all_files
from olympia.files.tests.test_utils import _run_lock_holding_process
root = os.path.join(settings.ROOT, 'src/olympia/files/fixtures/files')
def get_file(filename):
return os.path.join(root, filename)
def make_file(pk, file_path, **kwargs):
obj = Mock()
obj.id = obj.pk = pk
for k, v in kwargs.items():
setattr(obj, k, v)
obj.file_path = file_path
obj.current_file_path = file_path
obj.__str__ = lambda x: x.pk
obj.version = Mock()
obj.version.version = 1
return obj
class TestFileViewer(TestCase):
def setUp(self):
super(TestFileViewer, self).setUp()
self.viewer = FileViewer(make_file(1, get_file('dictionary-test.xpi')))
def tearDown(self):
self.viewer.cleanup()
super(TestFileViewer, self).tearDown()
def test_files_not_extracted(self):
assert not self.viewer.is_extracted()
def test_files_extracted(self):
self.viewer.extract()
assert self.viewer.is_extracted()
def test_recurse_extract(self):
self.viewer.src = get_file('recurse.xpi')
self.viewer.extract()
assert self.viewer.is_extracted()
def test_recurse_contents(self):
self.viewer.src = get_file('recurse.xpi')
self.viewer.extract()
files = self.viewer.get_files()
# We do not extract nested .zip or .xpi files anymore
assert list(files.keys()) == [
u'recurse',
u'recurse/chrome',
u'recurse/chrome/test-root.txt',
u'recurse/chrome/test.jar',
u'recurse/notazip.jar',
u'recurse/recurse.xpi',
u'recurse/somejar.jar']
def test_locked(self):
self.viewer.src = get_file('dictionary-test.xpi')
# Lock was successfully attained
assert self.viewer.extract()
lock_name = f'file-viewer-{self.viewer.file.pk}'
with _run_lock_holding_process(lock_name, sleep=3):
# Not extracting, the viewer is locked, lock could not be attained
assert not self.viewer.extract()
def test_extract_file_locked_message(self):
self.viewer.src = get_file('dictionary-test.xpi')
assert not self.viewer.is_extracted()
lock_name = f'file-viewer-{self.viewer.file.pk}'
with _run_lock_holding_process(lock_name, sleep=3):
msg = extract_file(self.viewer)
assert str(msg.get()).startswith(u'File viewer is locked')
msg.delete()
def test_cleanup(self):
self.viewer.extract()
self.viewer.cleanup()
assert not self.viewer.is_extracted()
@freeze_time('2017-01-08 02:01:00')
def test_dest(self):
viewer = FileViewer(make_file(1, get_file('webextension.xpi')))
assert viewer.dest == os.path.join(
settings.TMP_PATH, 'file_viewer',
'0108', str(self.viewer.file.pk))
def test_isbinary(self):
binary = self.viewer._is_binary
        for f in ['foo.rdf', 'foo.xml', 'foo.js', 'foo.py',
'foo.html', 'foo.txt', 'foo.dtd', 'foo.xul', 'foo.sh',
'foo.properties', 'foo.json', 'foo.src', 'CHANGELOG']:
m, encoding = mimetypes.guess_type(f)
assert not binary(m, f), '%s should not be binary' % f
for f in ['foo.png', 'foo.gif', 'foo.exe', 'foo.swf']:
m, encoding = mimetypes.guess_type(f)
assert binary(m, f), '%s should be binary' % f
filename = os.path.join(settings.TMP_PATH, 'test_isbinary')
for txt in ['#!/usr/bin/python', '#python', u'\0x2']:
open(filename, 'w').write(txt)
m, encoding = mimetypes.guess_type(filename)
assert not binary(m, filename), '%s should not be binary' % txt
for txt in ['MZ']:
open(filename, 'w').write(txt)
m, encoding = mimetypes.guess_type(filename)
assert binary(m, filename), '%s should be binary' % txt
os.remove(filename)
def test_truncate(self):
truncate = self.viewer.truncate
for x, y in (['foo.rdf', 'foo.rdf'],
['somelongfilename.rdf', 'somelongfilenam...rdf'],
[u'unicode삮.txt', u'unicode\uc0ae.txt'],
[u'unicodesomelong삮.txt', u'unicodesomelong...txt'],
['somelongfilename.somelongextension',
'somelongfilenam...somelonge..'],):
assert truncate(x) == y
def test_get_files_not_extracted_runs_extraction(self):
self.viewer.src = get_file('dictionary-test.xpi')
assert not self.viewer.is_extracted()
self.viewer.get_files()
assert self.viewer.is_extracted()
def test_get_files_size(self):
self.viewer.extract()
files = self.viewer.get_files()
assert len(files) == 14
def test_get_files_directory(self):
self.viewer.extract()
files = self.viewer.get_files()
assert not files['install.js']['directory']
assert not files['install.js']['binary']
assert files['__MACOSX']['directory']
assert not files['__MACOSX']['binary']
def test_get_files_depth(self):
self.viewer.extract()
files = self.viewer.get_files()
assert files['dictionaries/license.txt']['depth'] == 1
def test_bom(self):
dest = os.path.join(settings.TMP_PATH, 'test_bom')
with open(dest, 'wb') as f:
f.write('foo'.encode('utf-16'))
self.viewer.select('foo')
self.viewer.selected = {'full': dest, 'size': 1}
assert self.viewer.read_file() == u'foo'
os.remove(dest)
def test_syntax(self):
for filename, syntax in [('foo.rdf', 'xml'),
('foo.xul', 'xml'),
('foo.json', 'js'),
('foo.jsm', 'js'),
('foo.htm', 'html'),
('foo.bar', 'plain'),
('foo.diff', 'plain')]:
assert self.viewer.get_syntax(filename) == syntax
def test_file_order(self):
self.viewer.extract()
dest = self.viewer.dest
open(os.path.join(dest, 'chrome.manifest'), 'w')
subdir = os.path.join(dest, 'chrome')
os.mkdir(subdir)
open(os.path.join(subdir, 'foo'), 'w')
cache.delete(self.viewer._cache_key())
files = list(self.viewer.get_files().keys())
rt = files.index(u'chrome')
assert files[rt:rt + 3] == [u'chrome', u'chrome/foo', u'dictionaries']
@patch.object(settings, 'FILE_VIEWER_SIZE_LIMIT', 5)
def test_file_size(self):
self.viewer.extract()
self.viewer.get_files()
self.viewer.select('install.js')
res = self.viewer.read_file()
assert res == ''
assert self.viewer.selected['msg'].startswith('File size is')
@pytest.mark.needs_locales_compilation
@patch.object(settings, 'FILE_VIEWER_SIZE_LIMIT', 5)
def test_file_size_unicode(self):
with self.activate(locale='he'):
self.viewer.extract()
self.viewer.get_files()
self.viewer.select('install.js')
res = self.viewer.read_file()
assert res == ''
assert (
self.viewer.selected['msg'].startswith(u'גודל הקובץ חורג'))
@patch.object(settings, 'FILE_UNZIP_SIZE_LIMIT', 5)
def test_contents_size(self):
self.assertRaises(forms.ValidationError, self.viewer.extract)
def test_default(self):
self.viewer.extract()
assert self.viewer.get_default(None) == 'install.rdf'
def test_default_webextension(self):
viewer = FileViewer(make_file(2, get_file('webextension.xpi')))
viewer.extract()
assert viewer.get_default(None) == 'manifest.json'
def test_default_webextension_zip(self):
viewer = FileViewer(make_file(2, get_file('webextension_no_id.zip')))
viewer.extract()
assert viewer.get_default(None) == 'manifest.json'
def test_default_webextension_crx(self):
viewer = FileViewer(make_file(2, get_file('webextension.crx')))
viewer.extract()
assert viewer.get_default(None) == 'manifest.json'
def test_default_package_json(self):
viewer = FileViewer(make_file(3, get_file('new-format-0.0.1.xpi')))
viewer.extract()
assert viewer.get_default(None) == 'package.json'
def test_delete_mid_read(self):
self.viewer.extract()
self.viewer.select('install.js')
os.remove(os.path.join(self.viewer.dest, 'install.js'))
res = self.viewer.read_file()
assert res == ''
assert self.viewer.selected['msg'].startswith('That file no')
@patch('olympia.files.file_viewer.get_sha256')
def test_delete_mid_tree(self, get_sha256):
get_sha256.side_effect = IOError('ow')
self.viewer.extract()
with self.assertRaises(IOError):
self.viewer.get_files()
@patch('olympia.files.file_viewer.os.fsync')
def test_verify_files_doesnt_call_fsync_regularly(self, fsync):
self.viewer.extract()
assert not fsync.called
@patch('olympia.files.file_viewer.os.fsync')
def test_verify_files_calls_fsync_on_differences(self, fsync):
self.viewer.extract()
assert not fsync.called
files_to_verify = get_all_files(self.viewer.dest)
files_to_verify.pop()
module_path = 'olympia.files.file_viewer.get_all_files'
with patch(module_path) as get_all_files_mck:
get_all_files_mck.return_value = files_to_verify
with pytest.raises(ValueError):
# We don't put things back into place after fsync
# so a `ValueError` is raised
self.viewer._verify_files(files_to_verify)
assert len(fsync.call_args_list) == len(files_to_verify) + 1
class TestSearchEngineHelper(TestCase):
fixtures = ['base/addon_4594_a9']
def setUp(self):
super(TestSearchEngineHelper, self).setUp()
self.left = File.objects.get(pk=25753)
self.viewer = FileViewer(self.left)
if not os.path.exists(os.path.dirname(self.viewer.src)):
os.makedirs(os.path.dirname(self.viewer.src))
with storage.open(self.viewer.src, 'w') as f:
f.write('some data\n')
def tearDown(self):
self.viewer.cleanup()
super(TestSearchEngineHelper, self).tearDown()
def test_is_search_engine(self):
assert self.viewer.is_search_engine()
def test_extract_search_engine(self):
self.viewer.extract()
assert os.path.exists(self.viewer.dest)
def test_default(self):
self.viewer.extract()
assert self.viewer.get_default(None) == 'a9.xml'
def test_default_no_files(self):
self.viewer.extract()
os.remove(os.path.join(self.viewer.dest, 'a9.xml'))
assert self.viewer.get_default(None) is None
class TestDiffSearchEngine(TestCase):
def setUp(self):
super(TestDiffSearchEngine, self).setUp()
src = os.path.join(settings.ROOT, get_file('search.xml'))
if not storage.exists(src):
with storage.open(src, 'w') as f:
f.write(open(src).read())
self.helper = DiffHelper(make_file(1, src, filename='search.xml'),
make_file(2, src, filename='search.xml'))
def tearDown(self):
self.helper.cleanup()
super(TestDiffSearchEngine, self).tearDown()
@patch(
'olympia.files.file_viewer.FileViewer.is_search_engine')
def test_diff_search(self, is_search_engine):
is_search_engine.return_value = True
self.helper.extract()
shutil.copyfile(os.path.join(self.helper.left.dest, 'search.xml'),
os.path.join(self.helper.right.dest, 's-20010101.xml'))
assert self.helper.select('search.xml')
assert len(self.helper.get_deleted_files()) == 0
class TestDiffHelper(TestCase):
def setUp(self):
super(TestDiffHelper, self).setUp()
src = os.path.join(settings.ROOT, get_file('dictionary-test.xpi'))
self.helper = DiffHelper(make_file(1, src), make_file(2, src))
def tearDown(self):
self.helper.cleanup()
super(TestDiffHelper, self).tearDown()
def clear_cache(self):
cache.delete(self.helper.left._cache_key())
cache.delete(self.helper.right._cache_key())
def test_files_not_extracted(self):
assert not self.helper.is_extracted()
def test_files_extracted(self):
self.helper.extract()
assert self.helper.is_extracted()
def test_get_files(self):
assert self.helper.left.get_files() == (
self.helper.get_files())
def test_diffable(self):
self.helper.extract()
self.helper.select('install.js')
assert self.helper.is_diffable()
def test_diffable_one_missing(self):
self.helper.extract()
os.remove(os.path.join(self.helper.right.dest, 'install.js'))
self.helper.select('install.js')
assert self.helper.is_diffable()
def test_diffable_allow_empty(self):
self.helper.extract()
self.assertRaises(AssertionError, self.helper.right.read_file)
assert self.helper.right.read_file(allow_empty=True) == ''
def test_diffable_both_missing(self):
self.helper.extract()
self.helper.select('foo.js')
assert not self.helper.is_diffable()
def test_diffable_deleted_files(self):
self.helper.extract()
os.remove(os.path.join(self.helper.left.dest, 'install.js'))
assert 'install.js' in self.helper.get_deleted_files()
def test_diffable_one_binary_same(self):
self.helper.extract()
self.helper.select('install.js')
self.helper.left.selected['binary'] = True
assert self.helper.is_binary()
def test_diffable_one_binary_diff(self):
self.helper.extract()
self.change(self.helper.left.dest, 'asd')
self.helper.select('install.js')
self.helper.left.selected['binary'] = True
assert self.helper.is_binary()
def test_diffable_two_binary_diff(self):
self.helper.extract()
self.change(self.helper.left.dest, 'asd')
self.change(self.helper.right.dest, 'asd123')
self.clear_cache()
self.helper.select('install.js')
self.helper.left.selected['binary'] = True
self.helper.right.selected['binary'] = True
assert self.helper.is_binary()
def test_diffable_one_directory(self):
self.helper.extract()
self.helper.select('install.js')
self.helper.left.selected['directory'] = True
assert not self.helper.is_diffable()
assert self.helper.left.selected['msg'].startswith('This file')
def test_diffable_parent(self):
self.helper.extract()
self.change(self.helper.left.dest, 'asd',
filename='__MACOSX/._dictionaries')
self.clear_cache()
files = self.helper.get_files()
assert files['__MACOSX/._dictionaries']['diff']
assert files['__MACOSX']['diff']
def change(self, file, text, filename='install.js'):
path = os.path.join(file, filename)
with open(path, 'rb') as f:
data = f.read()
data += text.encode('utf-8')
with open(path, 'wb') as f2:
f2.write(data)
class TestSafeZipFile(TestCase, amo.tests.AMOPaths):
# TODO(andym): get full coverage for existing SafeZip methods, most
# is covered in the file viewer tests.
@patch.object(settings, 'FILE_UNZIP_SIZE_LIMIT', 5)
def test_unzip_limit(self):
with pytest.raises(forms.ValidationError):
SafeZip(self.xpi_path('langpack-localepicker'))
def test_unzip_fatal(self):
with pytest.raises(zipfile.BadZipFile):
SafeZip(self.xpi_path('search.xml'))
def test_read(self):
zip_file = SafeZip(self.xpi_path('langpack-localepicker'))
assert b'locale browser de' in zip_file.read('chrome.manifest')
def test_not_secure(self):
zip_file = SafeZip(self.xpi_path('extension'))
assert not zip_file.is_signed()
def test_is_secure(self):
zip_file = SafeZip(self.xpi_path('signed'))
assert zip_file.is_signed()
def test_is_broken(self):
zip_file = SafeZip(self.xpi_path('signed'))
zip_file.info_list[2].filename = 'META-INF/foo.sf'
assert not zip_file.is_signed()
| 35.349794 | 79 | 0.626659 |
f72c7105b40a75a34bc96103e54bc62ef812eb03 | 4,785 | py | Python | rapvis/rapvis_process.py | liuwell/rapvis | a69d06a31b1d7fe4510c1c90bfeee22b68a9b3b9 | [
"MIT"
] | 1 | 2020-10-25T10:23:45.000Z | 2020-10-25T10:23:45.000Z | rapvis/rapvis_process.py | liuwell/rapvis | a69d06a31b1d7fe4510c1c90bfeee22b68a9b3b9 | [
"MIT"
] | null | null | null | rapvis/rapvis_process.py | liuwell/rapvis | a69d06a31b1d7fe4510c1c90bfeee22b68a9b3b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import glob
import argparse
import numpy as np
import subprocess
import os
import time
import sys
#from rapvis_merge import merge_profiles, merge_gene_counts
#from rapvis_gene_dis import gene_dis
#from rapvis_quality import rRNAratio
from rapvis_general import current_time
import rapvis_rRNA
def process2(R1, R2, output, adapter, threads, libpath, mapper, minlen, trim5, counts, rRNA):
file_name = R1.split("/")[-1].split("_")[0]
outdir = os.path.join(output, file_name)
### make directory
if not os.path.exists(outdir):
try:
os.makedirs(outdir)
except Exception as e:
pass
prefix = os.path.join(outdir, file_name)
out_R1_p = prefix + "_R1.fq.gz"
out_R1_u = prefix + "_R1_unpaired.gz"
out_R2_p = prefix + "_R2.fq.gz"
out_R2_u = prefix + "_R2_unpaired.gz"
out_log = prefix + "_trimmomatic.log"
print("\n%s Processing: %s, %s" % (current_time(), R1,R2))
realpath = sys.path[0]
### trimmomatic
subprocess.call("trimmomatic PE -threads %d -phred33 %s %s %s %s %s %s ILLUMINACLIP:%s/../library/adapter/%s:1:30:10:5 SLIDINGWINDOW:4:20 MINLEN:%d HEADCROP:%d 2> %s" % (threads, R1, R2, out_R1_p, out_R1_u, out_R2_p, out_R2_u, realpath, adapter, minlen, trim5, out_log), shell=True)
### Mapping by hisat2
if mapper == 'hisat2':
SummaryFile = prefix + "_hisat_summary.txt"
MapOut = prefix + "_hisat_sort.bam"
subprocess.call("hisat2 -p %d -x %s/genome_tran -1 %s -2 %s -U %s,%s -t --dta --summary-file %s --new-summary|samtools sort -@ %d -m 10G -o %s" % (threads, libpath, out_R1_p, out_R2_p, out_R1_u, out_R2_u, SummaryFile, threads, MapOut), shell=True)
### Mapping by STAR
elif mapper == 'STAR':
STARprefix = prefix + "_STAR_"
subprocess.call("STAR --runThreadN %d --outSAMtype BAM SortedByCoordinate --genomeDir %s --readFilesIn %s %s --readFilesCommand zcat --outFileNamePrefix %s --quantMode GeneCounts --outFilterScoreMinOverLread 0.1 --outFilterMatchNminOverLread 0.1 --outFilterMatchNmin 0 --outFilterMismatchNmax 2" % (threads, libpath, out_R1_p, out_R2_p, STARprefix), shell=True)
MapOut = prefix + "_STAR_Aligned.sortedByCoord.out.bam" ## sorted bam file
### Assemble by stringtie
print("%s Assemble ..." % current_time())
stringtieGTF = prefix + '_stringtie.gtf'
stringtieGene = prefix + '_gene_abund.tab'
subprocess.call("stringtie %s -e -G %s/annotation.gtf -p %d -o %s -A %s" % (MapOut, libpath, threads, stringtieGTF, stringtieGene), shell=True)
### Gene counts
if counts:
countOut = prefix + '_gene_counts.txt'
subprocess.call("featureCounts -a %s/annotation.gtf -o %s %s -t exon -g gene_name -T %d -Q 30 -p" % (libpath, countOut, MapOut, threads), shell=True)
### rRNA
if rRNA:
rapvis_rRNA.rRNA(R1, R2, output, threads)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='A tool for RNAseq processing and visualization')
parser.add_argument('-R1', required=True, help='the input data R1')
parser.add_argument('-R2', required=True, help='the input data R2')
parser.add_argument('-o', '--output', default = 'processed_data', help = 'output directory (default: processed_data)')
parser.add_argument('-p', '--threads', default=5, type=int, help='number of threads (CPUs) to use (default: 5)')
#parser.add_argument('-s', '--species', default='Human', choices=['Human', 'Mouse', 'Rat', 'Rabbit', 'GoldenHamster', 'Zebrafish'], type=str, help='choose reference species for mapping and annotation (default: Human)')
parser.add_argument('-lib', '--libraryPath', type=str, help='choose reference species for mapping and annotation')
parser.add_argument('-m', '--mapper', default='hisat2', choices=['hisat2', 'STAR'], type=str, help='choose the mapping program (default: hisat2)')
parser.add_argument('-a', '--adapter', default='nextera', type=str, help='choose illumina adaptor (default: nextera), choices {nextera, universal, pAAAAA}')
parser.add_argument('--minlen', default=35, type=int, help='discard reads shorter than LEN (default: 35)')
parser.add_argument('--trim5', default=0, type=int, help='remove bases from the beginning of each read (default:0)')
parser.add_argument('--counts', action='store_true', help='Get gene counts')
parser.add_argument('--rRNA', action='store_true', help='whether mapping to rRNA(Human)')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.0.2')
args = parser.parse_args()
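# Hypothetical usage sketch (not part of the original script; the file names and
# the reference library directory are placeholders — the library is expected to
# hold the genome_tran hisat2 index and annotation.gtf used above):
#   python rapvis_process.py -R1 sample_R1.fq.gz -R2 sample_R2.fq.gz \
#       -o processed_data -p 8 -lib /path/to/reference_library -m hisat2 --counts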
#print("\n%s ..... Start RNAseq processing" % (current_time()))
#start_time = time.time()
process2(args.R1, args.R2, args.output, args.adapter, args.threads, args.libraryPath, args.mapper, args.minlen, args.trim5, args.counts, args.rRNA)
###
#end_time = time.time()
#run_time = round((end_time - start_time)/60, 5)
#print("\n%s ..... Finished all. Used time: %s m\n" % (current_time(), run_time))
| 47.376238 | 366 | 0.708255 |
f72c9587c2b7459c937e13b276ff7e0feb632297 | 3,314 | py | Python | detect_image.py | YunYang1994/CodeFun | 36fcdbfb4ed55fbb8f8dbc6f900842cc7bb9f068 | [
"MIT"
] | 150 | 2019-06-19T03:54:40.000Z | 2019-10-21T07:09:02.000Z | detect_image.py | YunYang1994/cv-notebooks | 36fcdbfb4ed55fbb8f8dbc6f900842cc7bb9f068 | [
"MIT"
] | 7 | 2019-11-26T07:27:42.000Z | 2020-04-02T03:35:29.000Z | detect_image.py | YunYang1994/cv-notebooks | 36fcdbfb4ed55fbb8f8dbc6f900842cc7bb9f068 | [
"MIT"
] | 25 | 2019-11-27T11:07:56.000Z | 2020-03-19T15:44:20.000Z | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2020 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : detect_image.py
# Author : YunYang1994
# Created date: 2020-03-19 14:05:53
# Description :
#
#================================================================
import os
import cv2
import time
import numpy as np
import tensorflow as tf
from PIL import Image, ImageFont, ImageDraw
from mtcnn import pnet, rnet, onet
from models import IResnet
from utils import detect_face, align_face, recognize_face
model = IResnet(tflite_model="IResnet.tflite")
font = ImageFont.truetype('weghts/HuaWenXinWei-1.ttf', 30)
image = cv2.imread("/Users/yangyun/多人照片/5.jpg")
image_h, image_w, _ = image.shape
org_image = image.copy()
image = cv2.cvtColor(image ,cv2.COLOR_BGR2RGB)
total_boxes, points = detect_face(image, 20, pnet, rnet, onet, [0.6, 0.7, 0.9], 0.709)
for idx, (bounding_box, keypoints) in enumerate(zip(total_boxes, points.T)):
bounding_boxes = {
'box': [int(bounding_box[0]), int(bounding_box[1]),
int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])],
'confidence': bounding_box[-1],
'keypoints': {
'left_eye': (int(keypoints[0]), int(keypoints[5])),
'right_eye': (int(keypoints[1]), int(keypoints[6])),
'nose': (int(keypoints[2]), int(keypoints[7])),
'mouth_left': (int(keypoints[3]), int(keypoints[8])),
'mouth_right': (int(keypoints[4]), int(keypoints[9])),
}
}
bounding_box = bounding_boxes['box']
keypoints = bounding_boxes['keypoints']
cv2.circle(org_image,(keypoints['left_eye']), 2, (255,0,0), 3)
cv2.circle(org_image,(keypoints['right_eye']), 2, (255,0,0), 3)
cv2.circle(org_image,(keypoints['nose']), 2, (255,0,0), 3)
cv2.circle(org_image,(keypoints['mouth_left']), 2, (255,0,0), 3)
cv2.circle(org_image,(keypoints['mouth_right']),2, (255,0,0), 3)
cv2.rectangle(org_image,
(bounding_box[0], bounding_box[1]),
(bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]),
(0,255,0), 2)
# align face and extract it out
align_image = align_face(image, keypoints)
marigin = 16
xmin = max(bounding_box[0] - marigin, 0)
ymin = max(bounding_box[1] - marigin, 0)
xmax = min(bounding_box[0] + bounding_box[2] + marigin, image_w)
ymax = min(bounding_box[1] + bounding_box[3] + marigin, image_h)
crop_image = align_image[ymin:ymax, xmin:xmax, :]
if crop_image is not None:
t1 = time.time()
embedding = model(crop_image)
person = recognize_face(embedding)
org_image_pil = Image.fromarray(org_image)
draw = ImageDraw.Draw(org_image_pil)
text_size = draw.textsize(person, font)
draw.text((bounding_box[0], bounding_box[1]-16), person, fill=(0, 0, 255), font=font)
org_image = np.array(org_image_pil)
t2 = time.time()
print("time: %.2fms" %((t2-t1)*1000))
org_image = cv2.cvtColor(org_image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(org_image)
image.show()
# image.save("test.png")
| 36.822222 | 96 | 0.601992 |
f72caa4b74837bd62d61442cc130cfd18f4a2cb9 | 602 | py | Python | src/command_modules/azure-cli-find/azure/cli/command_modules/find/_help.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-find/azure/cli/command_modules/find/_help.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | 3 | 2021-03-26T00:48:20.000Z | 2022-03-29T22:05:39.000Z | src/command_modules/azure-cli-find/azure/cli/command_modules/find/_help.py | v-Ajnava/azure-cli | febec631d79bfca151e84267b5b409594bad598e | [
"MIT"
] | 1 | 2017-12-28T04:51:44.000Z | 2017-12-28T04:51:44.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
helps['find'] = """
type: command
short-summary: Find Azure CLI commands.
examples:
- name: Search for commands containing 'vm' or 'secret'
text: >
az find -q vm secret
"""
| 37.625 | 94 | 0.465116 |
f72cbe35893af2f1b2c363e8fe4e587be57b909c | 6,321 | py | Python | InterventionsMIP/main.py | haoxiangyang89/COVID_Staged_Alert | 4c2cc5ef1d38c140875380a5f10a0fe1eaf8a47a | [
"MIT"
] | 1 | 2021-06-24T19:27:01.000Z | 2021-06-24T19:27:01.000Z | InterventionsMIP/main.py | haoxiangyang89/COVID_Staged_Alert | 4c2cc5ef1d38c140875380a5f10a0fe1eaf8a47a | [
"MIT"
] | null | null | null | InterventionsMIP/main.py | haoxiangyang89/COVID_Staged_Alert | 4c2cc5ef1d38c140875380a5f10a0fe1eaf8a47a | [
"MIT"
] | 3 | 2021-12-15T13:32:25.000Z | 2022-02-24T13:57:07.000Z | from InterventionsMIP import project_path, instances_path
import multiprocessing as mp
from threshold_policy import threshold_policy_search
from interventions import Intervension
from epi_params import EpiSetup, ParamDistribution
from utils import parse_arguments
from reporting.plotting import plot_stoch_simulations
from instances import load_instance
if __name__ == '__main__':
# Parse arguments
args = parse_arguments()
# Parse city and get corresponding instance
instance = load_instance(args.city, setup_file_name=args.f)
# TODO Read command line args for n_proc for better integration with crunch
n_proc = args.n_proc
# TODO: pull out n_replicas_train and n_replicas_test to a config file
n_replicas_train = args.train_reps
n_replicas_test = args.test_reps
# Create the pool (Note: pool needs to be created only once to run on a cluster)
mp_pool = mp.Pool(n_proc) if n_proc > 1 else None
for sc in [0]:
for co in [0.95]:
for base_line_train in [0.4]:
for base_line_test in [0.4]:
for const in ['test']: #[10 * i for i in range(0, 21)] + [215, 1000]:
policy_class = 'step'
instance_name = f'local_{instance.city}_SC{sc}_CO{co}_BLTrain{base_line_train}_BLTest_{base_line_test}_{policy_class}_{const}'
print('\n============================================')
print(instance_name)
#TODO: This list should be longe to include all possible transmission reduction values
# that might come in the instance file
interventions_train = [
Intervension(0, 0, 0, instance.epi, instance.N),
Intervension(1, 0, 0, instance.epi, instance.N),
Intervension(0, 0, base_line_train, instance.epi, instance.N),
Intervension(1, 0, base_line_train, instance.epi, instance.N),
Intervension(1, 0, 0.9, instance.epi, instance.N),
Intervension(0, co, base_line_train, instance.epi, instance.N),
Intervension(1, co, base_line_train, instance.epi, instance.N),
Intervension(1, co, 0.9, instance.epi, instance.N),
Intervension(1, 0, 0.95, instance.epi, instance.N),
Intervension(0, 0, 0.95, instance.epi, instance.N)
]
interventions_test = [
Intervension(0, 0, 0, instance.epi, instance.N),
Intervension(1, 0, 0, instance.epi, instance.N),
Intervension(0, 0, base_line_test, instance.epi, instance.N),
Intervension(1, 0, base_line_test, instance.epi, instance.N),
Intervension(1, 0, 0.9, instance.epi, instance.N),
Intervension(0, co, base_line_test, instance.epi, instance.N),
Intervension(1, co, base_line_test, instance.epi, instance.N),
Intervension(1, co, 0.9, instance.epi, instance.N),
Intervension(1, 0, 0.95, instance.epi, instance.N),
Intervension(0, 0, 0.95, instance.epi, instance.N)
]
sd_levels_train = {'H': 0.9, 'L': base_line_train}
sd_levels_test = {'H': 0.9, 'L': base_line_test}
best_policy_replicas, policy_params = threshold_policy_search(instance,
interventions_train,
interventions_test,
sd_levels_train,
sd_levels_test,
cocooning=co,
school_closure=sc,
mp_pool=mp_pool,
n_replicas_train=n_replicas_train,
n_replicas_test=n_replicas_test,
instance_name=instance_name,
policy={
'class': policy_class,
'vals': [120, 216, 9]
},
policy_class=policy_class)
n_replicas = len(best_policy_replicas)
plot_stoch_simulations(
instance_name,
best_policy_replicas,
['sim'] * n_replicas,
plot_left_axis=['IH'],
plot_right_axis=[],
T=instance.T, #437,
hosp_beds=instance.hosp_beds,
population=instance.N.sum(),
interventions=interventions_test,
calendar=instance.cal,
policy_params=policy_params,
plot_triggers=True,
plot_legend=True,
show=True,
align_axes=True,
n_replicas=5,
BL=base_line_test)
| 64.5 | 150 | 0.424933 |
f72cc73266bff2259d90cfeb4bc45a0cffcbecaf | 834 | py | Python | p22.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | [
"MIT"
] | null | null | null | p22.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | [
"MIT"
] | null | null | null | p22.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | [
"MIT"
] | null | null | null | def read_from_file(filename):
txt = open(filename)
string_of_names = txt.read()
return string_of_names
def string_of_names_to_array(string_of_names):
return string_of_names.replace('"','').split(",")
def get_alpha_value(string):
sum = 0
for char in string:
sum += (ord(char) - 64)
return sum
def main():
sum = 0
filename = "p022_names.txt"
string_of_names = read_from_file(filename)
array_of_names = string_of_names_to_array(string_of_names)
array_of_names.sort()
for i in range(0, len(array_of_names)):
sum += (i+1) * get_alpha_value(array_of_names[i])
return sum
def test_get_alpha_value():
assert get_alpha_value("COLIN") == 53
assert get_alpha_value("A") == 1
assert get_alpha_value("Z") == 26
def test():
test_get_alpha_value()
if __name__ == '__main__':
test()
print(main()) | 23.166667 | 60 | 0.708633 |
f72cea9211379eec544b1b493a257f6c5b6255c7 | 1,066 | py | Python | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 74 | 2020-03-08T15:29:00.000Z | 2022-03-05T14:57:33.000Z | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 19 | 2020-03-06T08:56:51.000Z | 2022-03-27T05:07:35.000Z | api/utils/data_utils.py | wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels | e6df7ffc9b0318fdce405e40993c79785b47c785 | [
"MIT"
] | 23 | 2020-03-20T08:19:55.000Z | 2022-03-16T17:40:09.000Z | from PIL import Image
import numbers
class RandomCrop(object):
def __init__(self, size, v):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
self.v = v
def __call__(self, img):
w, h = img.size
th, tw = self.size
x1 = int(( w - tw)*self.v)
y1 = int(( h - th)*self.v)
#print("print x, y:", x1, y1)
assert(img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_image = img
else:
out_image = img.crop((x1, y1, x1 + tw, y1 + th)) #same cropping method for all images in the same group
return out_image
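# Minimal usage sketch (illustrative only; the image size and v value are
# assumptions, not part of the original module). The caller draws v once and
# passes it in, so every frame in a group gets the identical crop window;
# RandomHorizontalFlip below follows the same pattern for the flip decision.
#
#   from PIL import Image
#   img = Image.new('RGB', (256, 256))
#   cropped = RandomCrop(224, v=0.5)(img)  # v=0.5 selects the central crop window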
class RandomHorizontalFlip(object):
"""Randomly horizontally flips the given PIL.Image with a probability of 0.5
"""
def __init__(self, v):
self.v = v
return
def __call__(self, img):
if self.v < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
#print ("horiontal flip: ",self.v)
return img | 31.352941 | 115 | 0.545028 |
f72cf7000c24d4d32e896071d41604079d19da89 | 3,666 | py | Python | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | 7 | 2019-03-27T17:25:41.000Z | 2022-03-31T03:55:29.000Z | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | 2 | 2019-05-09T16:09:45.000Z | 2021-01-04T03:55:09.000Z | mpsci/distributions/binomial.py | WarrenWeckesser/mpsci | 675f0f3b76700529558a3bae2a1b2ca09552233b | [
"BSD-2-Clause"
] | null | null | null | """
Binomial distribution
---------------------
"""
import mpmath
from ..fun import logbinomial
__all__ = ['pmf', 'logpmf', 'cdf', 'sf', 'mean', 'var']
def _validate_np(n, p):
if p < 0 or p > 1:
raise ValueError('p must be in the range [0, 1]')
if n < 0:
raise ValueError('n must be a nonnegative integer.')
return
def pmf(k, n, p):
"""
Probability mass function of the binomial distribution.
"""
_validate_np(n, p)
with mpmath.extradps(5):
p = mpmath.mpf(p)
return (mpmath.binomial(n, k) *
mpmath.power(p, k) *
mpmath.power(1 - p, n - k))
def logpmf(k, n, p):
"""
Natural log of the probability mass function of the binomial distribution.
"""
_validate_np(n, p)
with mpmath.extradps(5):
return (logbinomial(n, k)
+ k*mpmath.log(p)
+ mpmath.fsum([n, -k])*mpmath.log1p(-p))
def cdf(k, n, p, method='incbeta'):
"""
Cumulative distribution function of the binomial distribution.
`method` must be either "sumpmf" or "incbeta". When `method` is "sumpmf",
the CDF is computed with a simple sum of the PMF values. When `method`
is "incbeta", the incomplete beta function is used. This method is
generally faster than the "sumpmf" method, but for large values of k
or n, the incomplete beta function of mpmath might fail.
"""
_validate_np(n, p)
if method not in ['sumpmf', 'incbeta']:
        raise ValueError('method must be "sumpmf" or "incbeta"')
if method == 'incbeta':
with mpmath.extradps(5):
p = mpmath.mpf(p)
# XXX For large values of k and/or n, betainc fails. The failure
# occurs in one of the hypergeometric functions.
return mpmath.betainc(n - k, k + 1, x1=0, x2=1 - p,
regularized=True)
else:
# method is "sumpmf"
with mpmath.extradps(5):
c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
for t in range(k + 1)])
return c
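# Illustrative consistency check (not part of the original module; the k, n, p
# values below are arbitrary): for small n the two methods should agree closely.
#
#   import mpmath
#   mpmath.mp.dps = 30
#   a = cdf(3, 10, mpmath.mpf('0.25'), method='incbeta')
#   b = cdf(3, 10, mpmath.mpf('0.25'), method='sumpmf')
#   # abs(a - b) is expected to be at the level of the working precision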
def sf(k, n, p, method='incbeta'):
"""
Survival function of the binomial distribution.
`method` must be either "sumpmf" or "incbeta". When `method` is "sumpmf",
the survival function is computed with a simple sum of the PMF values.
When `method` is "incbeta", the incomplete beta function is used. This
method is generally faster than the "sumpmf" method, but for large values
of k or n, the incomplete beta function of mpmath might fail.
"""
_validate_np(n, p)
if method not in ['sumpmf', 'incbeta']:
        raise ValueError('method must be "sumpmf" or "incbeta"')
if method == 'incbeta':
with mpmath.extradps(5):
p = mpmath.mpf(p)
# XXX For large values of k and/or n, betainc fails. The failure
# occurs in one of the hypergeometric functions.
return mpmath.betainc(n - k, k + 1, x1=1-p, x2=1,
regularized=True)
else:
# method is "sumpmf"
with mpmath.extradps(5):
c = mpmath.fsum([mpmath.exp(logpmf(t, n, p))
for t in range(k + 1, n + 1)])
return c
def mean(n, p):
"""
Mean of the binomial distribution.
"""
_validate_np(n, p)
with mpmath.extradps(5):
n = mpmath.mpf(n)
p = mpmath.mpf(p)
return n*p
def var(n, p):
"""
Variance of the binomial distribution.
"""
_validate_np(n, p)
with mpmath.extradps(5):
n = mpmath.mpf(n)
p = mpmath.mpf(p)
return n * p * (1 - p)
| 30.297521 | 78 | 0.563011 |
f72cfecb9a75e28d76c6235057fe3ad2011e3f3f | 4,092 | py | Python | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | code/txburstML.py | astrophys/Python_Debugging_Examples | 510b4b6966166dddc14eda3f6813700386d2324f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import argparse
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from joblib import delayed,Parallel
import os
def whichKeep(est_params):
kon = np.array(est_params)[:,0]
koff = np.array(est_params)[:,1]
ksyn = np.array(est_params)[:,2]
which_kon = ~(kon < 2*1e-3)*~(kon > 1e3 - 1)
which_koff = ~(koff < 2*1e-3)*~(koff > 1e3 - 1)
which_burst = ksyn/koff > 1
which_ksyn = ksyn > 1
which = which_burst*which_koff*which_kon*which_ksyn
return which
def MaximumLikelihood(vals, export_asymp_ci = False, fix = 0, method = 'L-BFGS-B'):
from scipy.interpolate import interp1d
from scipy.optimize import minimize
from scipy import special
from scipy.stats import poisson,norm
from scipy.special import j_roots
from scipy.special import beta as beta_fun
import numpy as np
if len(vals) == 0:
return np.array([np.nan, np.nan, np.nan])
def dBP(at, alpha, bet, lam):
at.shape = (len(at), 1)
np.repeat(at, 50, axis = 1)
def fun(at, m):
if(max(m) < 1e6):
return(poisson.pmf(at,m))
else:
                return(norm.pdf(at,loc=m,scale=np.sqrt(m)))
x,w = j_roots(50,alpha = bet - 1, beta = alpha - 1)
gs = np.sum(w*fun(at, m = lam*(1+x)/2), axis=1)
prob = 1/beta_fun(alpha, bet)*2**(-alpha-bet+1)*gs
return(prob)
def LogLikelihood(x, vals):
kon = x[0]
koff = x[1]
ksyn = x[2]
return(-np.sum(np.log( dBP(vals,kon,koff,ksyn) + 1e-10) ) )
x0 = MomentInference(vals)
if np.isnan(x0).any() or any(x0 < 0):
x0 = np.array([10,10,10])
bnds = ((1e-3,1e3),(1e-3,1e3), (1, 1e4))
vals_ = np.copy(vals) # Otherwise the structure is violated.
try:
        ll = minimize(LogLikelihood, x0, args = (vals_), method=method, bounds=bnds)
except:
return np.array([np.nan,np.nan,np.nan])
#se = ll.hess_inv.todense().diagonal()
estim = ll.x
return estim
# moment-based inference
def MomentInference(vals, export_moments=False):
# code from Anton Larsson's R implementation
from scipy import stats # needs imports inside function when run in ipyparallel
import numpy as np
m1 = float(np.mean(vals))
m2 = float(sum(vals*(vals - 1))/len(vals))
m3 = float(sum(vals*(vals - 1)*(vals - 2))/len(vals))
# sanity check on input (e.g. need at least on expression level)
if sum(vals) == 0: return np.nan
if m1 == 0: return np.nan
if m2 == 0: return np.nan
r1=m1
r2=m2/m1
r3=m3/m2
if (r1*r2-2*r1*r3 + r2*r3) == 0: return np.nan
if ((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3)) == 0: return np.nan
if (r1 - 2*r2 + r3) == 0: return np.nan
lambda_est = (2*r1*(r3-r2))/(r1*r2-2*r1*r3 + r2*r3)
mu_est = (2*(r3-r2)*(r1-r3)*(r2-r1))/((r1*r2 - 2*r1*r3 + r2*r3)*(r1-2*r2+r3))
v_est = (2*r1*r3 - r1*r2 - r2*r3)/(r1 - 2*r2 + r3)
if export_moments:
return np.array([lambda_est, mu_est, v_est, r1, r2, r3])
return np.array([lambda_est, mu_est, v_est])
parser = argparse.ArgumentParser(description='Maximum likelihood inference of bursting kinetics from scRNA-seq data')
parser.add_argument('file', metavar='file', type=str, nargs=1,help='.csv file with allelic-resolution transcript counts' )
parser.add_argument('--njobs', default=[50], nargs=1, type=int, help='Number of jobs for the parallelization, default 50')
args = parser.parse_args()
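# Hypothetical command line (the file name is a placeholder): the input .csv
# appears to be read with genes as rows (index column first) and cells as
# columns, and one inference is run per gene row.
#   python txburstML.py allele_counts.csv --njobs 8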
filename = args.file[0]
njobs = args.njobs[0]
print('Reading file ' + filename)
rpkm = pd.read_csv(filename, index_col=0)
print('Inferring kinetics:')
params = Parallel(n_jobs=njobs, verbose = 3)(delayed(MaximumLikelihood)(np.around(rpkm[pd.notnull(rpkm)])) for i,rpkm in rpkm.iterrows())
keep = whichKeep(params)
print('Inferred kinetics of {} genes out of {} total'.format(np.sum(keep), len(keep)))
base = os.path.splitext(os.path.basename(filename))[0]
base = base + '_ML.pkl'
print('Saving result to ' + base)
pd.to_pickle(pd.DataFrame([ params, list(keep)], columns=rpkm.index).T, base)
| 35.582609 | 137 | 0.631232 |
f72d0862c33d21e1bd9da9c2c2fa5d10f09f06f4 | 4,140 | py | Python | pystratis/api/federation/tests/test_federation.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | pystratis/api/federation/tests/test_federation.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | pystratis/api/federation/tests/test_federation.py | madrazzl3/pystratis | 8b78552e753ae1d12f2afb39e9a322a270fbb7b3 | [
"MIT"
] | null | null | null | import pytest
from pytest_mock import MockerFixture
from pystratis.api.federation.responsemodels import *
from pystratis.api.federation import Federation
from pystratis.core.networks import StraxMain, CirrusMain
def test_all_strax_endpoints_implemented(strax_swagger_json):
paths = [key.lower() for key in strax_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_cirrus_endpoints_implemented(cirrus_swagger_json):
paths = [key.lower() for key in cirrus_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_interfluxstrax_endpoints_implemented(interfluxstrax_swagger_json):
paths = [key.lower() for key in interfluxstrax_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
def test_all_interfluxcirrus_endpoints_implemented(interfluxcirrus_swagger_json):
paths = [key.lower() for key in interfluxcirrus_swagger_json['paths']]
for endpoint in paths:
if Federation.route + '/' in endpoint:
assert endpoint in Federation.endpoints
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_reconstruct(mocker: MockerFixture, network):
data = "Reconstruction flag set, please restart the node."
mocker.patch.object(Federation, 'put', return_value=data)
federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = federation.reconstruct()
assert response == data
# noinspection PyUnresolvedReferences
federation.put.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_members_current(mocker: MockerFixture, network, generate_compressed_pubkey, get_datetime):
data = {
"pollStartBlockHeight": None,
"pollNumberOfVotesAcquired": None,
"pollFinishedBlockHeight": None,
"pollWillFinishInBlocks": None,
"pollExecutedBlockHeight": None,
"memberWillStartMiningAtBlockHeight": None,
"memberWillStartEarningRewardsEstimateHeight": None,
"pollType": None,
"rewardEstimatePerBlock": 0.05,
"pubkey": generate_compressed_pubkey,
"collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:32.9200000"
}
mocker.patch.object(Federation, 'get', return_value=data)
federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = federation.members_current()
assert response == FederationMemberDetailedModel(**data)
# noinspection PyUnresolvedReferences
federation.get.assert_called_once()
@pytest.mark.parametrize('network', [StraxMain(), CirrusMain()], ids=['StraxMain', 'CirrusMain'])
def test_members(mocker: MockerFixture, network, generate_compressed_pubkey, get_datetime):
data = [
{
"pubkey": generate_compressed_pubkey,
"collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:32.9200000"
},
{
"pubkey": generate_compressed_pubkey,
"collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:33.9200000"
},
{
"pubkey": generate_compressed_pubkey,
"collateralAmount": 50000,
"lastActiveTime": get_datetime(5),
"periodOfInactivity": "00:02:34.9200000"
}
]
mocker.patch.object(Federation, 'get', return_value=data)
federation = Federation(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = federation.members()
assert response == [FederationMemberModel(**x) for x in data]
# noinspection PyUnresolvedReferences
federation.get.assert_called_once()
| 38.333333 | 100 | 0.700966 |
f72d089f873a7c0b075b4dd30a15b63980100580 | 20,269 | py | Python | tools/ami-creator/scripts/win2019_cuda11_installer.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | 10 | 2019-08-19T17:12:52.000Z | 2021-11-07T21:25:32.000Z | tools/ami-creator/scripts/win2019_cuda11_installer.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | 16 | 2019-10-22T17:07:40.000Z | 2022-02-08T23:33:27.000Z | tools/ami-creator/scripts/win2019_cuda11_installer.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | 15 | 2019-08-25T18:44:54.000Z | 2021-11-07T21:25:25.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Dependency installer for Windows"""
__author__ = 'Pedro Larroy, Chance Bair, Joe Evans'
__version__ = '0.4'
import argparse
import errno
import logging
import os
import psutil
import shutil
import subprocess
import urllib
import stat
import tempfile
import zipfile
from time import sleep
from urllib.error import HTTPError
import logging
from subprocess import check_output, check_call, call
import re
import sys
import urllib.request
import contextlib
import glob
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
log = logging.getLogger(__name__)
DEPS = {
'openblas': 'https://windows-post-install.s3-us-west-2.amazonaws.com/OpenBLAS-windows-v0_2_19.zip',
'opencv': 'https://windows-post-install.s3-us-west-2.amazonaws.com/opencv-windows-4.1.2-vc14_vc15.zip',
'cudnn7': 'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-10.2-windows10-x64-v7.6.5.32.zip',
'cudnn8': 'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-11.0-windows-x64-v8.0.3.33.zip',
'perl': 'http://strawberryperl.com/download/5.30.1.1/strawberry-perl-5.30.1.1-64bit.msi',
'clang': 'https://github.com/llvm/llvm-project/releases/download/llvmorg-9.0.1/LLVM-9.0.1-win64.exe',
}
DEFAULT_SUBPROCESS_TIMEOUT = 3600
@contextlib.contextmanager
def remember_cwd():
'''
Restore current directory when exiting context
'''
curdir = os.getcwd()
try:
yield
finally:
os.chdir(curdir)
def retry(target_exception, tries=4, delay_s=1, backoff=2):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param target_exception: the exception to check. may be a tuple of
exceptions to check
:type target_exception: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay_s: initial delay between retries in seconds
:type delay_s: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
"""
import time
from functools import wraps
def decorated_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay_s
while mtries > 1:
try:
return f(*args, **kwargs)
except target_exception as e:
logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return decorated_retry
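# Minimal usage sketch for the decorator above (illustrative only; the fetch
# function and its URL argument are placeholders, not part of this installer):
#
#   @retry(HTTPError, tries=3, delay_s=1, backoff=2)
#   def fetch(url):
#       return urllib.request.urlopen(url).read()
#
# download() below is wrapped the same way with (ValueError, OSError, HTTPError).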
@retry((ValueError, OSError, HTTPError), tries=5, delay_s=2, backoff=5)
def download(url, dest=None, progress=False) -> str:
from urllib.request import urlopen
from urllib.parse import (urlparse, urlunparse)
import progressbar
import http.client
class ProgressCB():
def __init__(self):
self.pbar = None
def __call__(self, block_num, block_size, total_size):
if not self.pbar and total_size > 0:
self.pbar = progressbar.bar.ProgressBar(max_value=total_size)
downloaded = block_num * block_size
if self.pbar:
if downloaded < total_size:
self.pbar.update(downloaded)
else:
self.pbar.finish()
if dest and os.path.isdir(dest):
local_file = os.path.split(urlparse(url).path)[1]
local_path = os.path.normpath(os.path.join(dest, local_file))
else:
local_path = dest
with urlopen(url) as c:
content_length = c.getheader('content-length')
length = int(content_length) if content_length and isinstance(c, http.client.HTTPResponse) else None
if length and local_path and os.path.exists(local_path) and os.stat(local_path).st_size == length:
log.debug(f"download('{url}'): Already downloaded.")
return local_path
log.debug(f"download({url}, {local_path}): downloading {length} bytes")
if local_path:
with tempfile.NamedTemporaryFile(delete=False) as tmpfd:
urllib.request.urlretrieve(url, filename=tmpfd.name, reporthook=ProgressCB() if progress else None)
shutil.move(tmpfd.name, local_path)
else:
(local_path, _) = urllib.request.urlretrieve(url, reporthook=ProgressCB())
log.debug(f"download({url}, {local_path}'): done.")
return local_path
# Takes arguments and runs command on host. Shell is disabled by default.
# TODO: Move timeout to args
def run_command(*args, shell=False, timeout=DEFAULT_SUBPROCESS_TIMEOUT, **kwargs):
try:
logging.info("Issuing command: {}".format(args))
res = subprocess.check_output(*args, shell=shell, timeout=timeout).decode("utf-8").replace("\r\n", "\n")
logging.info("Output: {}".format(res))
except subprocess.CalledProcessError as e:
raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
return res
# Copies source directory recursively to destination.
def copy(src, dest):
try:
shutil.copytree(src, dest)
logging.info("Moved {} to {}".format(src, dest))
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, dest)
logging.info("Moved {} to {}".format(src, dest))
else:
raise RuntimeError("copy return with error: {}".format(e))
# Workaround for windows readonly attribute error
def on_rm_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
def reboot_system():
logging.info("Rebooting system now...")
run_command("shutdown -r -t 5")
exit(0)
def shutdown_system():
logging.info("Shutting down system now...")
# wait 20 sec so we can capture the install logs
run_command("shutdown -s -t 20")
exit(0)
def install_vs():
if os.path.exists("C:\\Program Files (x86)\\Microsoft Visual Studio\\2019"):
logging.info("MSVS already installed, skipping.")
return False
# Visual Studio 2019
# Components: https://docs.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-community?view=vs-2019#visual-studio-core-editor-included-with-visual-studio-community-2019
logging.info("Installing Visual Studio 2019...")
vs_file_path = download('https://windows-post-install.s3-us-west-2.amazonaws.com/vs_community__1246179388.1585201415.exe')
run_command("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(vs_file_path,
vs_file_path.split('\\')[-1]), shell=True)
vs_file_path = vs_file_path + '.exe'
logging.info("Installing VisualStudio 2019.....")
ret = call(vs_file_path +
' --add Microsoft.VisualStudio.Workload.ManagedDesktop'
' --add Microsoft.VisualStudio.Workload.NetCoreTools'
' --add Microsoft.VisualStudio.Workload.NetWeb'
' --add Microsoft.VisualStudio.Workload.Node'
' --add Microsoft.VisualStudio.Workload.Office'
' --add Microsoft.VisualStudio.Component.TypeScript.2.0'
' --add Microsoft.VisualStudio.Component.TestTools.WebLoadTest'
' --add Component.GitHub.VisualStudio'
' --add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core'
' --add Microsoft.VisualStudio.Component.Static.Analysis.Tools'
' --add Microsoft.VisualStudio.Component.VC.CMake.Project'
' --add Microsoft.VisualStudio.Component.VC.140'
' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.Desktop'
' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.UWP'
' --add Microsoft.VisualStudio.Component.Windows10SDK.18362.UWP.Native'
' --add Microsoft.VisualStudio.ComponentGroup.Windows10SDK.18362'
' --add Microsoft.VisualStudio.Component.Windows10SDK.16299'
' --wait'
' --passive'
' --norestart'
)
if ret == 3010 or ret == 0:
# 3010 is restart required
logging.info("VS install successful.")
else:
raise RuntimeError("VS failed to install, exit status {}".format(ret))
# Workaround for --wait sometimes ignoring the subprocesses doing component installs
def vs_still_installing():
return {'vs_installer.exe', 'vs_installershell.exe', 'vs_setup_bootstrapper.exe'} & set(map(lambda process: process.name(), psutil.process_iter()))
timer = 0
while vs_still_installing() and timer < DEFAULT_SUBPROCESS_TIMEOUT:
logging.warning("VS installers still running for %d s", timer)
if timer % 60 == 0:
logging.info("Waiting for Visual Studio to install for the last {} seconds".format(str(timer)))
sleep(1)
timer += 1
if vs_still_installing():
logging.warning("VS install still running after timeout (%d)", DEFAULT_SUBPROCESS_TIMEOUT)
else:
logging.info("Visual studio install complete.")
return True
def install_perl():
if os.path.exists("C:\\Strawberry\\perl\\bin\\perl.exe"):
logging.info("Perl already installed, skipping.")
return False
logging.info("Installing Perl")
with tempfile.TemporaryDirectory() as tmpdir:
perl_file_path = download(DEPS['perl'], tmpdir)
check_call(['msiexec ', '/n', '/passive', '/i', perl_file_path])
logging.info("Perl install complete")
return True
def install_clang():
if os.path.exists("C:\\Program Files\\LLVM"):
logging.info("Clang already installed, skipping.")
return False
logging.info("Installing Clang")
with tempfile.TemporaryDirectory() as tmpdir:
clang_file_path = download(DEPS['clang'], tmpdir)
run_command(clang_file_path + " /S /D=C:\\Program Files\\LLVM")
logging.info("Clang install complete")
return True
def install_openblas():
if os.path.exists("C:\\Program Files\\OpenBLAS-windows-v0_2_19"):
logging.info("OpenBLAS already installed, skipping.")
return False
logging.info("Installing OpenBLAS")
local_file = download(DEPS['openblas'])
with zipfile.ZipFile(local_file, 'r') as zip:
zip.extractall("C:\\Program Files")
run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name OpenBLAS_HOME -Value 'C:\\Program Files\\OpenBLAS-windows-v0_2_19'")
logging.info("Openblas Install complete")
return True
def install_mkl():
if os.path.exists("C:\\Program Files (x86)\\IntelSWTools"):
logging.info("Intel MKL already installed, skipping.")
return False
logging.info("Installing MKL 2019.3.203...")
file_path = download("http://registrationcenter-download.intel.com/akdlm/irc_nas/tec/15247/w_mkl_2019.3.203.exe")
run_command("{} --silent --remove-extracted-files yes --a install -output=C:\mkl-install-log.txt -eula=accept".format(file_path))
logging.info("MKL Install complete")
return True
def install_opencv():
if os.path.exists("C:\\Program Files\\opencv"):
logging.info("OpenCV already installed, skipping.")
return False
logging.info("Installing OpenCV")
with tempfile.TemporaryDirectory() as tmpdir:
local_file = download(DEPS['opencv'])
with zipfile.ZipFile(local_file, 'r') as zip:
zip.extractall(tmpdir)
copy(f'{tmpdir}\\opencv\\build', r'c:\Program Files\opencv')
run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name OpenCV_DIR -Value 'C:\\Program Files\\opencv'")
logging.info("OpenCV install complete")
return True
def install_cudnn7():
if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin\\cudnn64_7.dll"):
logging.info("cuDNN7 already installed, skipping.")
return False
# cuDNN
logging.info("Installing cuDNN7")
with tempfile.TemporaryDirectory() as tmpdir:
local_file = download(DEPS['cudnn7'])
with zipfile.ZipFile(local_file, 'r') as zip:
zip.extractall(tmpdir)
for f in glob.glob(tmpdir+"\\cuda\\bin\\*"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin")
for f in glob.glob(tmpdir+"\\cuda\\include\\*.h"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\include")
for f in glob.glob(tmpdir+"\\cuda\\lib\\x64\\*"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\lib\\x64")
logging.info("cuDNN7 install complete")
return True
def install_cudnn8():
if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin\\cudnn64_8.dll"):
logging.info("cuDNN7 already installed, skipping.")
return False
# cuDNN
logging.info("Installing cuDNN8")
with tempfile.TemporaryDirectory() as tmpdir:
local_file = download(DEPS['cudnn8'])
with zipfile.ZipFile(local_file, 'r') as zip:
zip.extractall(tmpdir)
for f in glob.glob(tmpdir+"\\cuda\\bin\\*"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin")
for f in glob.glob(tmpdir+"\\cuda\\include\\*.h"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\include")
for f in glob.glob(tmpdir+"\\cuda\\lib\\x64\\*"):
copy(f, "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\lib\\x64")
logging.info("cuDNN8 install complete")
return True
def instance_family():
return urllib.request.urlopen('http://instance-data/latest/meta-data/instance-type').read().decode().split('.')[0]
CUDA_COMPONENTS=[
'nvcc', 'cublas', 'cublas_dev', 'cudart', 'cufft', 'cufft_dev', 'curand', 'curand_dev', 'cusolver',
'cusolver_dev', 'cusparse', 'cusparse_dev', 'npp', 'npp_dev', 'nvrtc', 'nvrtc_dev', 'nvml_dev'
]
def install_cuda110():
if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0\\bin"):
logging.info("CUDA 11.0 already installed, skipping.")
return False
logging.info("Downloadinng CUDA 11.0...")
cuda_file_path = download(
'http://developer.download.nvidia.com/compute/cuda/11.0.3/network_installers/cuda_11.0.3_win10_network.exe')
try:
check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(cuda_file_path,
cuda_file_path.split('\\')[-1]), shell=True)
except subprocess.CalledProcessError as e:
logging.exception("Rename file failed")
cuda_file_path = cuda_file_path + '.exe'
logging.info("Installing CUDA 11.0...")
check_call(cuda_file_path + ' -s')
#check_call(cuda_file_path + ' -s ' + " ".join([p + "_11.0" for p in CUDA_COMPONENTS]))
logging.info("Done installing CUDA 11.0.")
return True
def install_cuda102():
if os.path.exists("C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2\\bin"):
logging.info("CUDA 10.2 already installed, skipping.")
return False
logging.info("Downloading CUDA 10.2...")
cuda_file_path = download(
'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/network_installers/cuda_10.2.89_win10_network.exe')
try:
check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(cuda_file_path,
cuda_file_path.split('\\')[-1]), shell=True)
except subprocess.CalledProcessError as e:
logging.exception("Rename file failed")
cuda_file_path = cuda_file_path + '.exe'
logging.info("Installing CUDA 10.2...")
check_call(cuda_file_path + ' -s')
#check_call(cuda_file_path + ' -s ' + " ".join([p + "_10.2" for p in CUDA_COMPONENTS]))
logging.info("Downloading CUDA 10.2 patch...")
patch_file_path = download(
'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/patches/1/cuda_10.2.1_win10.exe')
try:
check_call("PowerShell Rename-Item -Path {} -NewName \"{}.exe\"".format(patch_file_path,
patch_file_path.split('\\')[-1]), shell=True)
except subprocess.CalledProcessError as e:
logging.exception("Rename patch failed")
patch_file_path = patch_file_path + '.exe'
logging.info("Installing CUDA patch...")
check_call(patch_file_path + ' -s ')
logging.info("Done installing CUDA 10.2 and patches.")
return True
def schedule_aws_userdata():
logging.info("Scheduling AWS init so userdata will run on next boot...")
run_command("PowerShell C:\\ProgramData\\Amazon\\EC2-Windows\\Launch\\Scripts\\InitializeInstance.ps1 -Schedule")
def add_paths():
# TODO: Add python paths (python -> C:\\Python37\\python.exe, python2 -> C:\\Python27\\python.exe)
logging.info("Adding Windows Kits to PATH...")
current_path = run_command(
"PowerShell (Get-Itemproperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name Path).Path")
current_path = current_path.rstrip()
logging.debug("current_path: {}".format(current_path))
new_path = current_path + \
";C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0.16299.0\\x86;C:\\Program Files\\OpenBLAS-windows-v0_2_19\\bin;C:\\Program Files\\LLVM\\bin;C:\\Program Files\\opencv\\bin;C:\\Program Files\\opencv\\x64\\vc15\\bin"
logging.debug("new_path: {}".format(new_path))
run_command("PowerShell Set-ItemProperty -path 'hklm:\\system\\currentcontrolset\\control\\session manager\\environment' -Name Path -Value '" + new_path + "'")
def script_name() -> str:
""":returns: script name with leading paths removed"""
return os.path.split(sys.argv[0])[1]
def remove_install_task():
logging.info("Removing stage2 startup task...")
run_command("PowerShell Unregister-ScheduledTask -TaskName 'Stage2Install' -Confirm:$false")
def main():
logging.getLogger().setLevel(os.environ.get('LOGLEVEL', logging.DEBUG))
logging.basicConfig(filename="C:\\install.log", format='{}: %(asctime)sZ %(levelname)s %(message)s'.format(script_name()))
# install all necessary software and reboot after some components
# for CUDA, the last version you install will be the default, based on PATH variable
if install_cuda110():
reboot_system()
install_cudnn8()
#if install_cuda102():
# reboot_system()
#install_cudnn7()
if install_vs():
reboot_system()
install_openblas()
install_mkl()
install_opencv()
install_perl()
install_clang()
add_paths()
remove_install_task()
schedule_aws_userdata()
shutdown_system()
if __name__ == "__main__":
exit(main())
| 42.852008 | 227 | 0.660763 |
f72d316388cae4d3fa92f9185ec7e2b07efe7778 | 24,267 | py | Python | source/spktype21.py | whiskie14142/spktype21 | 7ed22365fe92cdb74c416d27634df96a45712953 | [
"MIT"
] | 1 | 2021-10-21T20:07:04.000Z | 2021-10-21T20:07:04.000Z | source/spktype21.py | whiskie14142/spktype21 | 7ed22365fe92cdb74c416d27634df96a45712953 | [
"MIT"
] | 1 | 2020-05-20T05:54:34.000Z | 2020-05-20T05:54:34.000Z | source/spktype21.py | whiskie14142/spktype21 | 7ed22365fe92cdb74c416d27634df96a45712953 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""A supporting module for jplephem to handle data type 21 (Version 0.1.0)
This module computes position and velocity of a celestial small body, from a
NASA SPICE SPK ephemeris kernel file of data type 21 (Extended Modified
Difference Arrays).
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/req/spk.html
You can get SPK files for many solar system small bodies from HORIZONS
system of NASA/JPL. See https://ssd.jpl.nasa.gov/?horizons
This module reads SPK files of data type 21, one of the types of binary SPK
file.
As of Oct. 2018, the HORIZONS system provides data type 21 files by default when
a binary SPK file is requested.  You can get a type 21 binary SPK file for a
celestial small body through the TELNET interface by answering 'Binary' for
'SPK file format'.  You can also download a type 21 binary SPK file from:
https://ssd.jpl.nasa.gov/x/spk.html
Modules required:
jplephem (version 2.6 or later)
numpy
Usage:
from spktype21 import SPKType21
kernel = SPKType21.open('path')
position, velocity = kernel.compute_type21(center, target, jd)
where:
path - path to the SPK file
center - SPKID of central body (0 for SSB, 10 for Sun, etc.)
target - SPKID of target body
jd - time for computation (Julian date)
Exceptions:
RuntimeError will be raised when:
invalid data_type of SPK file, or
SPK file contains too large table in EMDA record(s)
ValueError will be raised when:
invalid parameter(s) of compute_type21 function
Author: Shushi Uetsuki (whiskie14142)
This module has been developed based on jplephem and FORTRAN source
of the SPICE Toolkit of NASA/JPL/NAIF.
jplephem : https://pypi.org/project/jplephem/
SPICE Toolkit : http://naif.jpl.nasa.gov/naif/toolkit.html
"""
from numpy import array, zeros, reshape
from jplephem.daf import DAF
from jplephem.names import target_names
T0 = 2451545.0
S_PER_DAY = 86400.0
# Included from 'spk21.inc' on the FORTRAN source 'spke21.f'
MAXTRM = 25
def jd(seconds):
"""Convert a number of seconds since J2000 to a Julian Date.
"""
return T0 + seconds / S_PER_DAY
class SPKType21(object):
"""Class for SPK kernel to handle data type 21 (Extended Modified Difference Arrays)
"""
def __init__(self, daf):
self.daf = daf
self.segments = [Segment(self.daf, *t) for t in self.daf.summaries()]
        self.segments.sort(key=lambda s: s.start_second)
# initialize arrays for spke21
self.G = zeros(MAXTRM)
self.REFPOS = zeros(3)
self.REFVEL = zeros(3)
self.KQ = array([0, 0, 0])
self.FC = zeros(MAXTRM)
self.FC[0] = 1.0
self.WC = zeros(MAXTRM - 1)
self.W = zeros(MAXTRM + 2)
# initialize for compute_type21
self.mda_record_exist = False
self.current_segment_exist = False
@classmethod
def open(cls, path):
"""Open the file at `path` and return an SPK instance.
"""
return cls(DAF(open(path, 'rb')))
def close(self):
"""Close this SPK file."""
self.daf.file.close()
def __str__(self):
daf = self.daf
d = lambda b: b.decode('latin-1')
lines = (str(segment) for segment in self.segments)
return 'File type {0} and format {1} with {2} segments:\n{3}'.format(
d(daf.locidw), d(daf.locfmt), len(self.segments), '\n'.join(lines))
def comments(self):
return self.daf.comments()
def compute_type21(self, center, target, jd1, jd2=0.0):
"""Compute position and velocity of target from SPK data (data type 21).
Inputs:
center - SPKID of the coordinate center (0 for Solar System Barycenter,
10 for Sun, etc)
target - SPKID of the target
jd1, jd2 - Julian date of epoch for computation. (jd1 + jd2) will
be used for computation. If you want precise definition of
epoch, jd1 should be an integer or a half integer, and jd2 should
be a relatively small floating point number.
Returns:
Position (X, Y, Z) and velocity (XD, YD, ZD) of the target at
epoch. Position and velocity are provided as Numpy arrays
respectively.
"""
eval_sec = (jd1 - T0)
eval_sec = (eval_sec + jd2) * S_PER_DAY
        if self.mda_record_exist:
            same_body = (target == self.mda_target) and (center == self.mda_center)
            if same_body and eval_sec >= self.mda_lb and eval_sec < self.mda_ub:
                result = self.spke21(eval_sec, self.mda_record)
                return result[0:3], result[3:]
        self.mda_record, self.mda_lb, self.mda_ub = self.get_MDA_record(eval_sec, target, center)
        self.mda_target = target
        self.mda_center = center
        self.mda_record_exist = True
result = self.spke21(eval_sec, self.mda_record)
return result[0:3], result[3:]
def get_MDA_record(self, eval_sec, target, center):
"""Return a EMDA record for defined epoch.
Inputs:
eval_sec - epoch for computation, seconds from J2000
target - body ID of the target
center - body ID of coordinate center
Returns:
EMDA record - a Numpy array of DLSIZE floating point numbers
Exception:
ValueError will be raised when:
                eval_sec is outside of SPK data
target and center are not in SPK data
RuntimeError will be raised when:
invalid data type of SPK data
"""
        # check whether the last segment can be used
if self.current_segment_exist:
if eval_sec >= self.current_segment.start_second \
and eval_sec < self.current_segment.end_second \
and target == self.current_segment.target \
and center == self.current_segment.center:
return self.current_segment.get_MDA_record(eval_sec)
# select segments with matched 'target' and 'center'
matched = []
for segment in self.segments:
if segment.target == target and segment.center == center:
matched.append(segment)
if len(matched) == 0:
raise ValueError('Invalid Target and/or Center')
if eval_sec < matched[0].start_second or eval_sec >= matched[-1].end_second:
raise ValueError('Invalid Time to evaluate')
        # select a segment based on eval_sec
found = False
for segment in matched:
if eval_sec < segment.end_second:
found = True
self.current_segment = segment
break
if not found:
self.current_segment = matched[-1]
self.current_segment_exist = True
# get the MDA record from selected segment
if self.current_segment.data_type != 21:
raise RuntimeError('Invalid data. Data Type must be 21')
return self.current_segment.get_MDA_record(eval_sec)
# left this module only 2018/10/12
def spke21(self, ET, RECORD):
"""Compute position and velocity from a Modified Difference Array record
Inputs:
ET: Epoch time to evaluate position and velocity (seconds since J2000)
RECORD: A record of Extended Modified Difference Array
Returns: STATE
STATE: A numpy array which contains position and velocity
"""
# This method was translated from FORTRAN source code ‘spke21.f’ of SPICE
# Toolkit and modified by Shushi Uetsuki.
#
# SPICE Toolkit for FORTRAN : http://naif.jpl.nasa.gov/naif/toolkit_FORTRAN.html
# SPK Required Reading : http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/spk.html
#
# Unfortunately, I found some discrepancies between FORTRAN source code
# and actual data contained in SPK files. So, I tried to compose a
        # method that computes positions and velocities correctly by referencing
# code of spktype01.
# Following comments start with #C were copied from original FORTRAN code.
#C$ Abstract
#C
#C Evaluate a single SPK data record from a segment of type 21
#C (Extended Difference Lines).
#C
#C$ Disclaimer
#C
#C THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
#C CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
#C GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
#C PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
#C TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
#C WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
#C PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
#C SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
#C SOFTWARE AND RELATED MATERIALS, HOWEVER USED.
#C
#C IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
#C BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
#C LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
#C INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
#C REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
#C REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.
#C
#C RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
#C THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
#C CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
#C ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.
#C
#C$ Required_Reading
#C
#C SPK
#C TIME
#C
#C$ Keywords
#C
#C EPHEMERIS
#C
#C$ Declarations
STATE = zeros(6)
#C$ Brief_I/O
#C
#C Variable I/O Description
#C -------- --- --------------------------------------------------
#C ET I Evaluation epoch.
#C RECORD I Data record.
#C STATE O State (position and velocity).
#C MAXTRM P Maximum number of terms per difference table
#C component.
#C
#C$ Detailed_Input
#C
#C ET is an epoch at which a state vector is to be
#C computed. The epoch is represented as seconds past
#C J2000 TDB.
#C
#C RECORD is a data record which, when evaluated at epoch ET,
#C will give the state (position and velocity) of an
#C ephemeris object, relative to its center of motion,
#C in an inertial reference frame.
#C
#C The contents of RECORD are as follows:
#C
#C RECORD(1): The difference table size per
#C Cartesian component. Call this
#C size MAXDIM; then the difference
#C line (MDA) size DLSIZE is
#C
#C ( 4 * MAXDIM ) + 11
#C
#C RECORD(2)
#C ...
#C RECORD(1+DLSIZE): An extended difference line.
#C The contents are:
#C
#C Dimension Description
#C --------- ----------------------------------
#C 1 Reference epoch of difference line
#C MAXDIM Stepsize function vector
#C 1 Reference position vector, x
#C 1 Reference velocity vector, x
#C 1 Reference position vector, y
#C 1 Reference velocity vector, y
#C 1 Reference position vector, z
#C 1 Reference velocity vector, z
#C MAXDIM,3 Modified divided difference
#C arrays (MDAs)
#C 1 Maximum integration order plus 1
#C 3 Integration order array
#C
#C$ Detailed_Output
#C
#C STATE is the state resulting from evaluation of the input
#C record at ET. Units are km and km/sec.
#C
#C$ Parameters
#C
#C MAXTRM is the maximum number of terms allowed in
#C each component of the difference table
#C contained in the input argument RECORD.
#C See the INCLUDE file spk21.inc for the value
#C of MAXTRM.
#C
#C$ Exceptions
#C
#C 1) If the maximum table size of the input record exceeds
#C MAXTRM, the error SPICE(DIFFLINETOOLARGE) is signaled.
#C
#C$ Files
#C
#C None.
#C
#C$ Particulars
#C
#C The exact format and structure of type 21 (difference lines)
#C segments are described in the SPK Required Reading file.
#C
#C SPKE21 is a modified version of SPKE01. The routine has been
#C generalized to support variable size difference lines.
#C
#C$ Examples
#C
#C None.
#C
#C$ Restrictions
#C
#C Unknown.
#C
#C$ Literature_References
#C
#C NAIF Document 168.0, "S- and P- Kernel (SPK) Specification and
#C User's Guide"
#C
#C$ Author_and_Institution
#C
#C N.J. Bachman (JPL)
#C F.T. Krogh (JPL)
#C W.L. Taber (JPL)
#C I.M. Underwood (JPL)
#C
#C$ Version
#C
#C- SPICELIB Version 1.0.0, 03-FEB-2014 (NJB) (FTK) (WLT) (IMU)
#C
#C-&
#
#C$ Index_Entries
#C
#C evaluate type_21 spk segment
#C
#C-&
#C
#C The first element of the input record is the dimension
#C of the difference table MAXDIM.
#C
# The FORTRAN source code indicates that RECORD[0] contains MAXDIM, but actual
# data record does not contain it. MAXDIM is contained in each segment.
MAXDIM = self.current_segment.MAXDIM
if MAXDIM > MAXTRM:
mes = ('SPKE21 \nThe input record has a maximum table dimension ' +
'of {0}, while the maximum supported by this routine is {1}. ' +
'It is possible that this problem is due to your software ' +
                   'being out of date.').format(MAXDIM, MAXTRM)
raise RuntimeError(mes)
return STATE
#C
#C Unpack the contents of the MDA array.
#C
#C Name Dimension Description
#C ------ --------- -------------------------------
#C TL 1 Reference epoch of record
#C G MAXDIM Stepsize function vector
#C REFPOS 3 Reference position vector
#C REFVEL 3 Reference velocity vector
#C DT MAXDIM,NTE Modified divided difference arrays
#C KQMAX1 1 Maximum integration order plus 1
#C KQ NTE Integration order array
#C
#C For our purposes, NTE is always 3.
#C
# The FORTRAN source code indicates that RECORD[1] contains TL, but on the
# actual data RECORD[0] contains it, and all addresses for following data are
# shifted forward by one.
self.TL = RECORD[0]
self.G = RECORD[1:MAXDIM + 1]
#C
#C Collect the reference position and velocity.
#C
self.REFPOS[0] = RECORD[MAXDIM + 1]
self.REFVEL[0] = RECORD[MAXDIM + 2]
self.REFPOS[1] = RECORD[MAXDIM + 3]
self.REFVEL[1] = RECORD[MAXDIM + 4]
self.REFPOS[2] = RECORD[MAXDIM + 5]
self.REFVEL[2] = RECORD[MAXDIM + 6]
#C
#C Initializing the difference table is one aspect of this routine
#C that's a bit different from SPKE01. Here the first dimension of
#C the table in the input record can be smaller than MAXTRM. So, we
#C must transfer separately the portions of the table corresponding
#C to each component.
#C
self.DT = reshape(RECORD[MAXDIM + 7:MAXDIM * 4 + 7], (MAXDIM, 3),
order='F')
self.KQMAX1 = int(RECORD[4 * MAXDIM + 7])
self.KQ[0] = int(RECORD[4 * MAXDIM + 8])
self.KQ[1] = int(RECORD[4 * MAXDIM + 9])
self.KQ[2] = int(RECORD[4 * MAXDIM + 10])
#C
#C Next we set up for the computation of the various differences
#C
self.DELTA = ET - self.TL
self.TP = self.DELTA
self.MQ2 = self.KQMAX1 - 2
self.KS = self.KQMAX1 - 1
#C
#C This is clearly collecting some kind of coefficients.
#C The problem is that we have no idea what they are...
#C
#C The G coefficients are supposed to be some kind of step size
#C vector.
#C
#C TP starts out as the delta t between the request time and the
#C difference line's reference epoch. We then change it from DELTA
#C by the components of the stepsize vector G.
#C
for J in range(1, self.MQ2 + 1):
#C
#C Make sure we're not about to attempt division by zero.
#C
if self.G[J-1] == 0.0:
mes = ('SPKE21\nA value of zero was found at index {0} ' +
'of the step size vector.').format(J)
raise RuntimeError(mes)
return STATE
self.FC[J] = self.TP / self.G[J-1]
self.WC[J-1] = self.DELTA / self.G[J-1]
self.TP = self.DELTA + self.G[J-1]
#C
#C Collect KQMAX1 reciprocals.
#C
for J in range(1, self.KQMAX1 + 1):
self.W[J-1] = 1.0 / float(J)
#C
#C Compute the W(K) terms needed for the position interpolation
#C (Note, it is assumed throughout this routine that KS, which
#C starts out as KQMAX1-1 (the ``maximum integration'')
#C is at least 2.
#C
self.JX = 0
self.KS1 = self.KS - 1
while self.KS >= 2:
self.JX = self.JX + 1
for J in range(1, self.JX + 1):
self.W[J+self.KS-1] = self.FC[J] * self.W[J+self.KS1-1] - self.WC[J-1] * self.W[J+self.KS-1]
self.KS = self.KS1
self.KS1 = self.KS1 - 1
#C
#C Perform position interpolation: (Note that KS = 1 right now.
#C We don't know much more than that.)
#C
for I in range(1, 3 + 1):
self.KQQ = self.KQ[I-1]
self.SUM = 0.0
for J in range(self.KQQ, 0, -1):
self.SUM = self.SUM + self.DT[J-1, I-1] * self.W[J+self.KS-1]
STATE[I-1] = self.REFPOS[I-1] + self.DELTA * (self.REFVEL[I-1] + self.DELTA * self.SUM)
#C
#C Again we need to compute the W(K) coefficients that are
#C going to be used in the velocity interpolation.
#C (Note, at this point, KS = 1, KS1 = 0.)
#C
for J in range(1, self.JX + 1):
self.W[J+self.KS-1] = self.FC[J] * self.W[J+self.KS1-1] - self.WC[J-1] * self.W[J+self.KS-1]
self.KS = self.KS - 1
#C
#C Perform velocity interpolation:
#C
for I in range(1, 3 + 1):
self.KQQ = self.KQ[I-1]
self.SUM = 0.0
for J in range(self.KQQ, 0, -1):
self.SUM = self.SUM + self.DT[J-1, I-1] * self.W[J+self.KS-1]
STATE[I+3-1] = self.REFVEL[I-1] + self.DELTA * self.SUM
return STATE
class Segment(object):
"""A single segment of a SPK file.
There are several items of information about each segment that are
loaded from the underlying SPK file, and made available as object
attributes:
segment.source - official ephemeris name, like 'DE-0430LE-0430'
segment.start_second - initial epoch, as seconds from J2000
segment.end_second - final epoch, as seconds from J2000
segment.start_jd - start_second, converted to a Julian Date
segment.end_jd - end_second, converted to a Julian Date
segment.center - integer center identifier
segment.target - integer target identifier
segment.frame - integer frame identifier
segment.data_type - integer data type identifier
segment.start_i - index where segment starts
segment.end_i - index where segment ends
"""
def __init__(self, daf, source, descriptor):
self.daf = daf
self.source = source
(self.start_second, self.end_second, self.target, self.center,
self.frame, self.data_type, self.start_i, self.end_i) = descriptor
self.start_jd = jd(self.start_second)
self.end_jd = jd(self.end_second)
# 'SPK Required Reading' indicates that the penultimate element of the segment
        # is the difference line size (DLSIZE), but the actual data contains MAXDIM there.
self.MAXDIM = int(self.daf.map_array(self.end_i - 1, self.end_i - 1))
self.DLSIZE = 4 * self.MAXDIM + 11
def __str__(self):
return self.describe(verbose=False)
def describe(self, verbose=True):
"""Return a textual description of the segment.
"""
center = titlecase(target_names.get(self.center, 'Unknown center'))
target = titlecase(target_names.get(self.target, 'Unknown target'))
text = ('{0.start_jd:.2f}..{0.end_jd:.2f} {1} ({0.center})'
' -> {2} ({0.target})'
' data_type={0.data_type}'.format(self, center, target))
if verbose:
text += ('\n frame={0.frame} data_type={0.data_type} source={1}'
.format(self, self.source.decode('ascii')))
return text
def get_MDA_record(self, time_sec):
"""Return a Modified Difference Array(MDA) record for the time to
evaluate with its effective time boundaries (lower and upper).
Inputs:
time_sec - epoch for computation, seconds from J2000
Returns: mda_record, lower_boundary, upper_boundary
mda_record: A Modified Difference Array record
lower_boundary: lower boundary of the record, seconds since J2000
upper_boundary: upper boundary of the record, seconds since J2000
"""
# Number of records in this segment
entry_count = int(self.daf.map_array(self.end_i, self.end_i))
# Number of entries in epoch directory
epoch_dir_count = entry_count // 100
        # search target epoch in the epoch directory to narrow the search area
if epoch_dir_count >= 1:
epoch_dir = self.daf.map_array(self.end_i - epoch_dir_count - 1,
self.end_i - 2)
found = False
for i in range(1, epoch_dir_count + 1):
if epoch_dir[i-1] > time_sec:
found = True
break
if found:
serch_last_index = i * 100
serch_start_index = (i - 1) * 100 + 1
else:
serch_last_index = entry_count
serch_start_index = epoch_dir_count * 100 + 1
else:
serch_last_index = entry_count
serch_start_index = 1
# epoch_table contains epochs for all records in this segment
epoch_table = self.daf.map_array(self.start_i + (entry_count * self.DLSIZE),
self.start_i + (entry_count * self.DLSIZE) + entry_count - 1)
        # search target epoch in epoch_table
found = False
for i in range(serch_start_index, serch_last_index + 1):
if epoch_table[i-1] > time_sec:
found = True
break
if not found:
i = serch_last_index
record_index = i
upper_boundary = epoch_table[i-1]
if i != 1:
lower_boundary = epoch_table[i-2]
else:
lower_boundary = self.start_second
mda_record = self.daf.map_array(self.start_i + ((record_index - 1) * self.DLSIZE),
self.start_i + (record_index * self.DLSIZE) - 1)
# mda_record : one record of MDA
# lower_boundary : lower boundary of epoch in this MDA record
# upper_boundary : upper boundary of epoch in this MDA record
return mda_record, lower_boundary, upper_boundary
def titlecase(name):
"""Title-case target `name` if it looks safe to do so.
"""
return name if name.startswith(('1', 'C/', 'DSS-')) else name.title()
| 36.712557 | 108 | 0.588536 |
f72d35520a9df35cca1aca5514b7eef3d130635f | 1,121 | py | Python | tests/test_dipdup/models.py | spruceid/dipdup-py | adc904196cfd66563938feec0f0afcc5f3df03e3 | [
"MIT"
] | null | null | null | tests/test_dipdup/models.py | spruceid/dipdup-py | adc904196cfd66563938feec0f0afcc5f3df03e3 | [
"MIT"
] | null | null | null | tests/test_dipdup/models.py | spruceid/dipdup-py | adc904196cfd66563938feec0f0afcc5f3df03e3 | [
"MIT"
] | null | null | null | # generated by datamodel-codegen:
# filename: storage.json
from __future__ import annotations
from typing import Dict, List, Optional
from pydantic import BaseModel, Extra
class ResourceMap(BaseModel):
class Config:
extra = Extra.forbid
id: str
rate: str
class ResourceCollectorStorage(BaseModel):
class Config:
extra = Extra.forbid
administrator: str
current_user: Optional[str]
default_start_time: str
generation_rate: str
managers: List[str]
metadata: Dict[str, str]
nft_registry: str
paused: bool
resource_map: Dict[str, ResourceMap]
resource_registry: str
tezotop_collection: Dict[str, str]
# 'resource_map': {
# 'type': 'object',
# 'propertyNames': {'type': 'string', '$comment': 'string'},
# 'additionalProperties': {
# 'type': 'object',
# 'properties': {'id': {'type': 'string', '$comment': 'nat'}, 'rate': {'type': 'string', '$comment': 'nat'}},
# 'required': ['id', 'rate'],
# 'additionalProperties': False,
# '$comment': 'pair',
# },
# '$comment': 'map',
# },
| 23.354167 | 117 | 0.613738 |
f72d627417aaa695ea5bcada408ff82f6f850efa | 11,113 | py | Python | trajectory_generator.py | keshaviyengar/rl-baselines-zoo | 6e39f5c7c6c2d30873297308ed064551bffaa52d | [
"MIT"
] | null | null | null | trajectory_generator.py | keshaviyengar/rl-baselines-zoo | 6e39f5c7c6c2d30873297308ed064551bffaa52d | [
"MIT"
] | null | null | null | trajectory_generator.py | keshaviyengar/rl-baselines-zoo | 6e39f5c7c6c2d30873297308ed064551bffaa52d | [
"MIT"
] | null | null | null | import rospy
from geometry_msgs.msg import Pose, Point
from std_msgs.msg import Bool
import numpy as np
import os
# This script creates a square trajectory for a robot to follow.
# Will output errors as well.
class CircleTrajectory(object):
def __init__(self, x_offset, y_offset, z_height, radius, theta_step):
self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
self._current_pose = Pose()
# Create a timer to update the desired trajectory
self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)
self.traj_finish = False
# For now set initial current pose as 0
self._desired_pose = Pose()
self.x_offset = x_offset
self.y_offset = y_offset
self.radius = radius
self.thetas = np.arange(0, 2 * np.pi, np.deg2rad(theta_step))
self.thetas_counter = 0
self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])
self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])
self._desired_pose.position.z = z_height
self._desired_pose.orientation.x = 0
self._desired_pose.orientation.y = 0
self._desired_pose.orientation.z = 0
self._desired_pose.orientation.w = 1
self.speed = 1
def _trajectory_callback(self, event):
self.thetas_counter += 1
if self.thetas_counter == self.thetas.size - 1:
self.traj_finish = True
print("Trajectory is complete.")
self.trajectory_finish_pub.publish(True)
self.trajectory_timer.shutdown()
if not self.traj_finish:
self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])
self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])
# Publish new pose
self.trajectory_pub.publish(self._desired_pose)
class TriangleTrajectory(object):
def __init__(self, point_a, point_b, point_c, z_height):
self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
self._current_pose = Pose()
# Second timer for how long to move in axis before moving to next
# self.change_direction_timer = rospy.Timer(rospy.Duration(5.0), self._change_direction)
# Specify three points to reach to create the triangle
self.points = np.array([point_a, point_b, point_c])
self._turn_count = 0
self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]
self._done_trajectory = False
self._desired_pose = Pose()
self._desired_pose.position.x = point_a[0]
self._desired_pose.position.y = point_a[1]
self._desired_pose.position.z = z_height
self._desired_pose.orientation.x = 0
self._desired_pose.orientation.y = 0
self._desired_pose.orientation.z = 0
self._desired_pose.orientation.w = 1
# Publish initial point and sleep to initialize
for _ in range(10):
self.trajectory_pub.publish(self._desired_pose)
rospy.sleep(0.1)
self.prev_time = rospy.get_time()
self.traj_finish = False
# Create a timer to update the desired trajectory
self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)
# This callback changes the direction by 90 degrees, to make the square.
def _change_direction(self):
if self._turn_count == 0:
if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
self._turn_count += 1
                self.del_vector = [(self.points[2][0] - self.points[1][0]),
                                   (self.points[2][1] - self.points[1][1])]
if self._turn_count == 1:
if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
self._turn_count += 1
                self.del_vector = [(self.points[0][0] - self.points[2][0]),
                                   (self.points[0][1] - self.points[2][1])]
if self._turn_count == 2:
if np.linalg.norm(self.points[0] - np.array(
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
self._turn_count += 1
self.del_vector = [(self.points[0][0] - self.points[2][0]),
(self.points[0][1] - self.points[2][1])]
if self._turn_count == 3:
print("Trajectory is complete.")
self.traj_finish = True
self.trajectory_finish_pub.publish(True)
self.trajectory_timer.shutdown()
# self.change_direction_timer.shutdown()
def _trajectory_callback(self, event):
# Compute current difference in time from last callback
if not self.traj_finish:
current_time = rospy.get_time()
delta_t = current_time - self.prev_time
self.prev_time = current_time
self._change_direction()
self._desired_pose.position.x += self.del_vector[0] * delta_t
self._desired_pose.position.y += self.del_vector[1] * delta_t
self.trajectory_pub.publish(self._desired_pose)
class SquareTrajectory2(object):
def __init__(self, point_a, point_b, point_c, point_d, z_height):
self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
self._current_pose = Pose()
self.points = [point_a, point_b, point_c, point_d]
self._turn_count = 0
self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]
# For now set initial current pose as 0
self._desired_pose = Pose()
self._desired_pose.position.x = point_a[0]
self._desired_pose.position.y = point_a[1]
self._desired_pose.position.z = z_height
self._desired_pose.orientation.x = 0
self._desired_pose.orientation.y = 0
self._desired_pose.orientation.z = 0
self._desired_pose.orientation.w = 1
# Publish initial point and sleep to initialize
for _ in range(10):
self.trajectory_pub.publish(self._desired_pose)
rospy.sleep(0.1)
self.prev_time = rospy.get_time()
self.traj_finish = False
# Create a timer to update the desired trajectory
self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)
# This callback changes the direction by 90 degrees, to make the square.
def _change_direction(self):
if self._turn_count == 0:
if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
self._turn_count += 1
                self.del_vector = [(self.points[2][0] - self.points[1][0]),
                                   (self.points[2][1] - self.points[1][1])]
if self._turn_count == 1:
if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
self._turn_count += 1
                self.del_vector = [(self.points[3][0] - self.points[2][0]),
                                   (self.points[3][1] - self.points[2][1])]
if self._turn_count == 2:
if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
self._turn_count += 1
                self.del_vector = [(self.points[0][0] - self.points[3][0]),
                                   (self.points[0][1] - self.points[3][1])]
if self._turn_count == 3:
if np.linalg.norm(self.points[0] - np.array(
[self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
self._turn_count += 1
self.del_vector = [(self.points[0][0] - self.points[3][0]),
(self.points[0][1] - self.points[3][1])]
if self._turn_count == 4:
print("Trajectory is complete.")
self.traj_finish = True
self.trajectory_finish_pub.publish(True)
self.trajectory_timer.shutdown()
def _trajectory_callback(self, event):
# Compute current difference in time from last callback
if not self.traj_finish:
current_time = rospy.get_time()
delta_t = current_time - self.prev_time
self.prev_time = current_time
self._change_direction()
self._desired_pose.position.x += self.del_vector[0] * delta_t
self._desired_pose.position.y += self.del_vector[1] * delta_t
self.trajectory_pub.publish(self._desired_pose)
if __name__ == '__main__':
rospy.init_node("trajectory_generator")
experiments = [7]
for exp in experiments:
x_offset = 5
y_offset = 5
if exp in [1, 2, 3, 4, 5]:
z_height = 100
elif exp in [6, 7, 8, 9, 10]:
z_height = 100
else:
z_height = 125
radius = 2.0
theta_step = 0.5
print("Circle trajectory")
circle_trajectory = CircleTrajectory(x_offset, y_offset, z_height, radius, theta_step)
while not circle_trajectory.traj_finish:
            pass  # busy-wait until the trajectory timer reports completion
# point_a = [20, 20]
# point_b = [20, 30]
# point_c = [30, 20]
# point_a = [-5, 0]
# point_b = [-10, -5]
# point_c = [5, 0]
# if exp in [1, 2, 3, 4, 5]:
# z_height = 100
# elif exp in [6, 7, 8, 9, 10]:
# z_height = 125
# else:
# z_height = 125
# print("Triangle trajectory")
# triangle_trajectory = TriangleTrajectory(point_a, point_b, point_c, z_height)
# while not triangle_trajectory.traj_finish:
# pass
# point_a = [5, 0]
# point_b = [-5, 0]
# point_c = [-5, -5]
# point_d = [5, -5]
# if exp in [1, 2, 3, 4, 5]:
# z_height = 100
# elif exp in [6, 7, 8, 9, 10]:
# z_height = 125
# else:
# z_height = 125
# print("Square trajectory")
# square_trajectory = SquareTrajectory2(point_a, point_b, point_c, point_d, z_height)
# while not square_trajectory.traj_finish:
# pass
| 41.778195 | 114 | 0.602268 |
f72d78d5dc3108cc117be1ea0357004699e0b64f | 2,137 | py | Python | rlberry/utils/torch.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | rlberry/utils/torch.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | rlberry/utils/torch.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | import os
import re
import shutil
from subprocess import check_output, run, PIPE
import numpy as np
import torch
import logging
logger = logging.getLogger(__name__)
def get_gpu_memory_map():
result = check_output(
["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"]
)
return [int(x) for x in result.split()]
def least_used_device():
"""Get the GPU device with most available memory."""
if not torch.cuda.is_available():
raise RuntimeError("cuda unavailable")
if shutil.which("nvidia-smi") is None:
        raise RuntimeError(
            "nvidia-smi unavailable: "
            "cannot select the device with the most available memory."
        )
memory_map = get_gpu_memory_map()
device_id = np.argmin(memory_map)
logger.info(
f"Choosing GPU device: {device_id}, " f"memory used: {memory_map[device_id]}"
)
return torch.device("cuda:{}".format(device_id))
def choose_device(preferred_device, default_device="cpu"):
if preferred_device == "cuda:best":
try:
preferred_device = least_used_device()
except RuntimeError:
            logger.info(
                "Could not find the least used device (nvidia-smi might be "
                "missing); falling back to cuda:0 if available, else cpu"
            )
if torch.cuda.is_available():
return choose_device("cuda:0")
else:
return choose_device("cpu")
try:
torch.zeros((1,), device=preferred_device) # Test availability
except (RuntimeError, AssertionError) as e:
logger.info(
f"Preferred device {preferred_device} unavailable ({e})."
f"Switching to default {default_device}"
)
return default_device
return preferred_device
def get_memory(pid=None):
if not pid:
pid = os.getpid()
command = "nvidia-smi"
result = run(
command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True
).stdout
m = re.findall(
"\| *[0-9] *" + str(pid) + " *C *.*python.*? +([0-9]+).*\|",
result,
re.MULTILINE,
)
return [int(mem) for mem in m]
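# ---------------------------------------------------------------------------
# Small usage sketch (an illustration, not part of the rlberry API): pick the
# least-used GPU if one is available, otherwise fall back to the CPU, then
# print the memory figures reported by nvidia-smi when it is present.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    device = choose_device("cuda:best")
    print("selected device:", device)
    if str(device).startswith("cuda") and shutil.which("nvidia-smi") is not None:
        print("per-GPU memory used (MiB):", get_gpu_memory_map())
        print("memory used by this process (MiB):", get_memory())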
| 28.878378 | 101 | 0.617688 |
f72d7e72a009707d55813c1c29a5c3ce6628c6cf | 41 | py | Python | sciwing/engine/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 50 | 2019-09-13T10:32:29.000Z | 2022-02-14T16:52:53.000Z | sciwing/engine/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 31 | 2019-09-03T11:06:03.000Z | 2021-08-20T14:57:09.000Z | sciwing/engine/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 9 | 2019-09-16T03:25:15.000Z | 2021-05-11T10:28:25.000Z | from sciwing.engine.engine import Engine
| 20.5 | 40 | 0.853659 |
f72d8f50dd5091fb3db1affbd4c3e936c1aff93a | 2,332 | py | Python | sqlalchemy_geonames/files.py | dionysio/sqlalchemy-geonames | 0d2542cf53512b14415319f23ad53dc4994691a8 | [
"BSD-2-Clause-FreeBSD"
] | 17 | 2015-02-24T20:20:49.000Z | 2021-07-21T02:32:15.000Z | sqlalchemy_geonames/files.py | dionysio/sqlalchemy-geonames | 0d2542cf53512b14415319f23ad53dc4994691a8 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2016-11-13T17:00:26.000Z | 2020-05-28T13:12:07.000Z | sqlalchemy_geonames/files.py | dionysio/sqlalchemy-geonames | 0d2542cf53512b14415319f23ad53dc4994691a8 | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2015-03-28T12:23:50.000Z | 2020-05-28T08:41:50.000Z | BASE_DOWNLOAD_URL = 'http://download.geonames.org/export/dump/'
def full_url(filename):
return BASE_DOWNLOAD_URL + filename
filename_config = {
'admin1CodesASCII.txt': {
'url': full_url('admin1CodesASCII.txt'),
},
'admin2Codes.txt': {
'url': full_url('admin2Codes.txt'),
},
'allCountries.txt': {
'url': full_url('allCountries.zip'),
'unzip': True,
'is_primary': True,
},
'alternateNames.txt': {
'url': full_url('alternateNames.zip'),
'unzip': True,
},
'cities1000.txt': {
'url': full_url('cities1000.zip'),
'unzip': True,
'is_primary': True,
},
'cities15000.txt': {
'url': full_url('cities15000.zip'),
'unzip': True,
'is_primary': True,
},
'cities5000.txt': {
'url': full_url('cities5000.zip'),
'unzip': True,
'is_primary': True,
},
'countryInfo.txt': {
'url': full_url('countryInfo.txt'),
},
'featureCodes_bg.txt': {
'url': full_url('featureCodes_bg.txt'),
'language_code': 'bg',
},
'featureCodes_en.txt': {
'url': full_url('featureCodes_en.txt'),
'language_code': 'en',
},
'featureCodes_nb.txt': {
'url': full_url('featureCodes_nb.txt'),
'language_code': 'nb',
},
'featureCodes_nn.txt': {
'url': full_url('featureCodes_nn.txt'),
'language_code': 'nn',
},
'featureCodes_no.txt': {
'url': full_url('featureCodes_no.txt'),
'language_code': 'no',
},
'featureCodes_ru.txt': {
'url': full_url('featureCodes_ru.txt'),
'language_code': 'ru',
},
'featureCodes_sv.txt': {
'url': full_url('featureCodes_sv.txt'),
'language_code': 'sv',
},
'hierarchy.txt': {
'url': full_url('hierarchy.zip'),
'unzip': True,
},
'iso-languagecodes.txt': {
'url': full_url('iso-languagecodes.txt'),
},
'timeZones.txt': {
'url': full_url('timeZones.txt'),
},
'userTags.txt': {
'url': full_url('userTags.zip'),
'unzip': True,
},
}
# TODO: Support modification files
# alternateNamesDeletes-2013-12-16.txt
# alternateNamesModifications-2013-12-16.txt
# deletes-2013-12-16.txt
# modifications-2013-12-16.txt
| 25.626374 | 63 | 0.551458 |
f72d97d1ff332aa397f3106364df1be3656a74db | 7,814 | py | Python | make_prg/subcommands/update.py | leoisl/make_prg | 9204cb8a60d8fae0985b4eb464c5dd99c1338d45 | [
"MIT"
] | 1 | 2021-05-07T02:04:07.000Z | 2021-05-07T02:04:07.000Z | make_prg/subcommands/update.py | leoisl/make_prg | 9204cb8a60d8fae0985b4eb464c5dd99c1338d45 | [
"MIT"
] | 9 | 2021-03-22T12:28:06.000Z | 2021-12-17T06:46:51.000Z | make_prg/subcommands/update.py | leoisl/make_prg | 9204cb8a60d8fae0985b4eb464c5dd99c1338d45 | [
"MIT"
] | 2 | 2021-06-29T04:54:22.000Z | 2022-01-03T12:19:59.000Z | import multiprocessing
import os
import shutil
from pathlib import Path
from loguru import logger
from make_prg import io_utils
from make_prg.denovo_paths_reader import DenovoPathsDB
from make_prg.prg_builder import PrgBuilderCollection, PrgBuilder, LeafNotFoundException
from make_prg.utils import output_files_already_exist
def register_parser(subparsers):
subparser_update_prg = subparsers.add_parser(
"update",
usage="make_prg update",
help="Update PRGs given new sequences output by pandora.",
)
subparser_update_prg.add_argument(
"-u",
"--update_DS",
action="store",
type=str,
required=True,
help=(
"Filepath to the update data structures. Should point to a file *.update_DS."
),
)
subparser_update_prg.add_argument(
"-d",
"--denovo_paths",
action="store",
type=str,
required=True,
help=(
"Filepath containing denovo sequences output by pandora. Should point to a denovo_paths.txt file."
),
)
subparser_update_prg.add_argument(
"-o",
"--output_prefix",
action="store",
type=str,
required=True,
help="Output prefix: prefix for the output files",
)
subparser_update_prg.add_argument(
"-t",
"--threads",
action="store",
type=int,
default=1,
help="Number of threads",
)
subparser_update_prg.add_argument(
"--mafft",
help="Path to MAFFT executable. By default, it is assumed to be on PATH",
default="mafft",
)
subparser_update_prg.add_argument(
"--keep_temp", action="store_true", default=False, help="Keep temp files."
)
subparser_update_prg.set_defaults(func=run)
return subparser_update_prg
def get_stats_on_variants(stats_files):
nb_of_variants_successfully_applied = 0
nb_of_variants_that_failed_to_be_applied = 0
for stat_file in stats_files:
with open(stat_file) as stat_file_fh:
line_split = stat_file_fh.readline().strip().split()
nb_of_variants_successfully_applied_for_this_locus = int(line_split[1])
nb_of_variants_successfully_applied += (
nb_of_variants_successfully_applied_for_this_locus
)
nb_of_variants_that_failed_to_be_applied_for_this_locus = int(line_split[2])
nb_of_variants_that_failed_to_be_applied += (
nb_of_variants_that_failed_to_be_applied_for_this_locus
)
return nb_of_variants_successfully_applied, nb_of_variants_that_failed_to_be_applied
def update(
locus_name,
prg_builder_pickle_filepath,
variant_nodes_with_mutation,
temp_dir,
mafft: str,
):
prg_builder_for_locus = PrgBuilder.deserialize(prg_builder_pickle_filepath)
nb_of_variants_sucessfully_updated = 0
nb_of_variants_with_failed_update = 0
we_have_variants = len(variant_nodes_with_mutation) > 0
if we_have_variants:
logger.debug(f"Updating {locus_name} ...")
leaves_to_update = set()
for variant_node_with_mutation in variant_nodes_with_mutation:
try:
prg_builder_tree_node = prg_builder_for_locus.get_node_given_interval(
variant_node_with_mutation.key
)
prg_builder_tree_node.add_seq_to_batch_update(
variant_node_with_mutation.mutated_node_sequence
)
leaves_to_update.add(prg_builder_tree_node)
nb_of_variants_sucessfully_updated += 1
except LeafNotFoundException as exc:
logger.debug(f"Failed finding leaf: {exc}")
nb_of_variants_with_failed_update += 1
# update the changed leaves
for leaf in leaves_to_update:
leaf.batch_update(temp_dir, mafft=mafft)
logger.debug(
f"Updated {locus_name}: {len(variant_nodes_with_mutation)} denovo sequences added!"
)
else:
logger.debug(f"{locus_name} has no new variants, no update needed")
# regenerate PRG
locus_prefix = temp_dir / locus_name / locus_name
locus_prefix_parent = locus_prefix.parent
os.makedirs(locus_prefix_parent, exist_ok=True)
prg = prg_builder_for_locus.build_prg()
logger.info(f"Write PRG file to {locus_prefix}.prg.fa")
io_utils.write_prg(str(locus_prefix), prg)
with open(f"{locus_prefix}.stats", "w") as stats_filehandler:
print(
f"{locus_name} {nb_of_variants_sucessfully_updated} {nb_of_variants_with_failed_update}",
file=stats_filehandler,
)
# Note: we intentionally do not regenerate updateable data structure here because we don't want to update
# PRGs on top of already updated PRGs
# TODO: change this?
def run(options):
if output_files_already_exist(options.output_prefix):
raise RuntimeError("One or more output files already exists, aborting run...")
# NB: don't use logging, it causes deadlocks: https://pythonspeed.com/articles/python-multiprocessing/
logger.info("Reading update data structures...")
prg_builder_collection = PrgBuilderCollection.deserialize(options.update_DS)
prg_builder_collection.to_absolute_paths(Path(options.update_DS).parent)
logger.info(f"Reading {options.denovo_paths}...")
denovo_paths_db = DenovoPathsDB(options.denovo_paths)
output_dir = Path(options.output_prefix).parent
os.makedirs(output_dir, exist_ok=True)
temp_path = Path(options.output_prefix + "_tmp")
os.makedirs(temp_path, exist_ok=True)
# update all PRGs with denovo sequences
logger.debug(f"Using {options.threads} threads to update PRGs...")
multithreaded_input = []
for (
locus_name,
prg_builder_pickle_filepath,
) in (
prg_builder_collection.locus_name_to_pickle_files.items()
): # we do for all PRGs as those that don't have denovo variants will be generated also
variant_nodes_with_mutation = (
denovo_paths_db.locus_name_to_variant_nodes_with_mutation.get(
locus_name, []
)
)
multithreaded_input.append(
(
locus_name,
prg_builder_pickle_filepath,
variant_nodes_with_mutation,
temp_path,
options.mafft,
)
)
with multiprocessing.Pool(options.threads, maxtasksperchild=1) as pool:
pool.starmap(update, multithreaded_input, chunksize=1)
logger.success(f"All PRGs updated!")
# concatenate output PRGs
logger.info("Concatenating files from several threads into single, final file...")
prg_files = [
f"{temp_path}/{locus_name}/{locus_name}.prg.fa"
for locus_name in prg_builder_collection.locus_name_to_pickle_files.keys()
]
io_utils.concatenate_text_files(prg_files, options.output_prefix + ".prg.fa")
# sum up stats files and output stats
stats_files = [
f"{temp_path}/{locus_name}/{locus_name}.stats"
for locus_name in prg_builder_collection.locus_name_to_pickle_files.keys()
]
(
nb_of_variants_successfully_applied,
nb_of_variants_that_failed_to_be_applied,
) = get_stats_on_variants(stats_files)
logger.success(
f"Number of variants successfully applied: {nb_of_variants_successfully_applied}"
)
logger.warning(
f"Number of variants that failed to be applied: {nb_of_variants_that_failed_to_be_applied}"
)
# remove temp files if needed
if not options.keep_temp and temp_path.exists():
logger.debug("Removing temp files...")
shutil.rmtree(temp_path)
logger.success("All done!")
| 35.680365 | 110 | 0.677374 |
f72dc8316a82c5f73e59a6498145758dff2e6fa1 | 345 | py | Python | python/ABC/2020-07-25_m-solutions2020/c.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | python/ABC/2020-07-25_m-solutions2020/c.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | python/ABC/2020-07-25_m-solutions2020/c.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | import sys
input = sys.stdin.readline
def main():
N, K = map(int, input().split())
A = tuple(map(int, input().split()))
ans = [0] * (N - K)
for i in range(N - K):
if A[i] < A[K + i]:
ans[i] = "Yes"
else:
ans[i] = "No"
print("\n".join(ans))
if __name__ == "__main__":
main()
| 15.681818 | 40 | 0.449275 |
f72e0a3f831f9e9c61a2e9d77828ffb12d8428b1 | 20,450 | py | Python | tensorflow/contrib/training/python/training/training.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/contrib/training/python/training/training.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/contrib/training/python/training/training.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains various routines and helper functions for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
tf.contrib.losses.log_loss(predictions, labels)
total_loss = tf.contrib.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Run training.
tf.contrib.training.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to use the `train` function, one needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. tf.contrib.training.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=clip_gradient_norms_fn(3))
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
def transform_grads_fn(grads):
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
return tf.contrib.training.multiply_gradients(
grads, gradient_multipliers)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
transform_grads_fn=transform_grads_fn)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. tf.contrib.training.create_train_op
allows a user to pass in a list of update_ops to call along with the gradient
updates.
train_op = tf.contrib.training.create_train_op(
total_loss, optimizer, update_ops)
By default, tf.contrib.training.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, the
tf.contrib.layers.batch_norm function adds the moving mean and moving variance
updates to this collection. Consequently, users who want to use
tf.contrib.layers.batch_norm will not need to take any additional steps in order
to have the moving mean and moving variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force `create_train_op` to NOT use ANY update_ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use a set of update ops in addition to the default updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
One can use a tf.Scaffold and an initializing function to do so.
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = tf.contrib.framework.get_model_variables()
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
# Run training.
scaffold = tf.Scaffold(init_fn=init_fn)
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint':
tf.contrib.framework.get_unique_variable('var0'),
'name_var_1_in_checkpoint':
tf.contrib.framework.get_unique_variable('var1')
}
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
exclude=["conv"])
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
checkpoint_path, variables_to_restore)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values coming from an
arbitrary source (a text document, matlab file, etc). While this is technically
feasible using assign operations, this strategy results in the values of your
weights being stored in the graph. For large models, this becomes prohibitively
large. However, it's possible to perform this initial assignment without having
to store the values of the initial model in the graph itself by using
placeholders and a feed dictionary:
...
# Create the train_op
train_op = tf.contrib.training.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)
scaffold = tf.Scaffold(init_fn=init_fn)
# Run training.
tf.contrib.training.train(train_op, my_log_dir, scaffold=scaffold)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import monitored_session
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.training import training_util
# TODO(nsilberman): move add_gradients_summaries, clip_gradient_norms and
# multiply_gradients into contrib/summaries and contrib/optimizers.py
__all__ = [
'add_gradients_summaries',
'clip_gradient_norms',
'clip_gradient_norms_fn',
'create_train_op',
'multiply_gradients',
'train',
]
def add_gradients_summaries(grads_and_vars):
"""Add summaries to gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The list of created summaries.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(
summary.histogram(var.op.name + '_gradient', grad_values))
summaries.append(
summary.scalar(var.op.name + '_gradient_norm',
clip_ops.global_norm([grad_values])))
else:
logging.info('Var %s has no gradient', var.op.name)
return summaries
def clip_gradient_norms(gradients_to_variables, max_norm):
"""Clips the gradients by the given value.
Args:
gradients_to_variables: A list of gradient to variable pairs (tuples).
max_norm: the maximum norm value.
Returns:
A list of clipped gradient to variable pairs.
"""
clipped_grads_and_vars = []
for grad, var in gradients_to_variables:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
tmp = clip_ops.clip_by_norm(grad.values, max_norm)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad = clip_ops.clip_by_norm(grad, max_norm)
clipped_grads_and_vars.append((grad, var))
return clipped_grads_and_vars
def clip_gradient_norms_fn(max_norm):
"""Returns a `transform_grads_fn` function for gradient clipping."""
def clip_norms(gradients_to_variables):
return clip_gradient_norms(gradients_to_variables, max_norm)
return clip_norms
def multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
gradient_multipliers: A map from either `Variables` or `Variable` op names
to the coefficient by which the associated gradient should be scaled.
Returns:
The updated list of gradient to variable pairs.
Raises:
ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
is empty or None or if `gradient_multipliers` is not a dictionary.
"""
if not isinstance(grads_and_vars, list):
raise ValueError('`grads_and_vars` must be a list.')
if not gradient_multipliers:
raise ValueError('`gradient_multipliers` is empty.')
if not isinstance(gradient_multipliers, dict):
raise ValueError('`gradient_multipliers` must be a dict.')
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.op.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.op.name
if grad is None:
raise ValueError('Requested multiple of `None` gradient.')
if isinstance(grad, ops.IndexedSlices):
tmp = grad.values * constant_op.constant(
gradient_multipliers[key], dtype=grad.dtype)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad *= constant_op.constant(
gradient_multipliers[key], dtype=grad.dtype)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
optimizer,
global_step=_USE_GLOBAL_STEP,
update_ops=None,
variables_to_train=None,
transform_grads_fn=None,
summarize_gradients=False,
gate_gradients=tf_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
check_numerics=True):
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
global_step: A `Tensor` representing the global step variable. If left as
`_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
`tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,
a warning will be displayed.
variables_to_train: an optional list of variables to train. If None, it will
default to all tf.trainable_variables().
transform_grads_fn: A function which takes a single argument, a list of
gradient to variable pairs (tuples), performs any requested gradient
updates, such as gradient clipping or multipliers, and returns the updated
list.
summarize_gradients: Whether or not add summaries for each gradient.
gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: Whether or not to try colocating the gradients
with the ops that generated them.
check_numerics: Whether or not we apply check_numerics.
Returns:
A `Tensor` that when evaluated, computes the gradients and returns the total
loss value.
"""
if global_step is _USE_GLOBAL_STEP:
global_step = training_util.get_or_create_global_step()
# Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.
global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
if update_ops is None:
update_ops = global_update_ops
else:
update_ops = set(update_ops)
if not global_update_ops.issubset(update_ops):
logging.warning('update_ops in create_train_op does not contain all the '
' update_ops in GraphKeys.UPDATE_OPS')
# Make sure update_ops are computed before total_loss.
if update_ops:
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name='update_barrier')
total_loss = control_flow_ops.with_dependencies([barrier], total_loss)
if variables_to_train is None:
# Default to tf.trainable_variables()
variables_to_train = tf_variables.trainable_variables()
else:
# Make sure that variables_to_train are in tf.trainable_variables()
for v in variables_to_train:
assert v in tf_variables.trainable_variables()
assert variables_to_train
# Create the gradients. Note that apply_gradients adds the gradient
# computation to the current graph.
grads = optimizer.compute_gradients(
total_loss,
variables_to_train,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)
if transform_grads_fn:
grads = transform_grads_fn(grads)
# Summarize gradients.
if summarize_gradients:
with ops.name_scope('summarize_grads'):
add_gradients_summaries(grads)
# Create gradient updates.
grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
with ops.name_scope('train_op'):
# Make sure total_loss is valid.
if check_numerics:
total_loss = array_ops.check_numerics(total_loss,
'LossTensor is inf or nan')
# Ensure the train_tensor computes grad_updates.
train_op = control_flow_ops.with_dependencies([grad_updates], total_loss)
# Add the operation used for training to the 'train_op' collection
train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if train_op not in train_ops:
train_ops.append(train_op)
return train_op
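# Illustrative usage (added; `total_loss` and `optimizer` are assumed to be a
# scalar loss Tensor and a tf.train Optimizer created elsewhere):
#
#   train_op = create_train_op(
#       total_loss, optimizer,
#       transform_grads_fn=clip_gradient_norms_fn(max_norm=5.0),
#       summarize_gradients=True)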
def train(train_op,
logdir,
master='',
is_chief=True,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=600,
save_summaries_steps=100,
config=None,
max_wait_secs=7200):
"""Runs the training loop.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where the graph and checkpoints are saved.
master: The URL of the master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
    scaffold: A tf.train.Scaffold instance.
hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
training loop.
chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run
inside the training loop for the chief trainer only.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If `save_checkpoint_secs` is set to
`None`, then the default checkpoint saver isn't used.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If
`save_summaries_steps` is set to `None`, then the default summary saver
isn't used.
config: An instance of `tf.ConfigProto`.
max_wait_secs: Maximum time workers should wait for the session to
become available. This should be kept relatively short to help detect
incorrect code, but sometimes may need to be increased if the chief takes
a while to start up.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `logdir` is `None` and either `save_checkpoint_secs` or
      `save_summaries_steps` is `None`.
"""
if logdir is None and is_chief:
if save_summaries_steps:
raise ValueError(
'logdir cannot be None when save_summaries_steps is not None')
if save_checkpoint_secs:
raise ValueError(
'logdir cannot be None when save_checkpoint_secs is not None')
with monitored_session.MonitoredTrainingSession(
master=master,
is_chief=is_chief,
checkpoint_dir=logdir,
scaffold=scaffold,
hooks=hooks,
chief_only_hooks=chief_only_hooks,
save_checkpoint_secs=save_checkpoint_secs,
save_summaries_steps=save_summaries_steps,
config=config,
max_wait_secs=max_wait_secs) as session:
loss = None
while not session.should_stop():
loss = session.run(train_op)
return loss
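# Illustrative usage (added): the `train_op` returned by create_train_op can be
# passed straight to `train`, which runs a MonitoredTrainingSession until a
# hook requests a stop, e.g.:
#
#   final_loss = train(train_op, logdir='/tmp/model',
#                      hooks=[tf.train.StopAtStepHook(last_step=1000)])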
| 37.454212 | 80 | 0.712567 |
f72e1011955a719906527c88ace33c87f218c56f | 5,731 | py | Python | docs/conf.py | kponder/astrorapid | 91053af8049724a07d1f55baf4e1f60fc36b1101 | ["MIT"] | 12 | 2019-01-15T19:40:11.000Z | 2020-12-17T11:36:18.000Z | docs/conf.py | kponder/astrorapid | 91053af8049724a07d1f55baf4e1f60fc36b1101 | ["MIT"] | 9 | 2019-07-19T15:29:19.000Z | 2022-03-12T00:59:37.000Z | docs/conf.py | kponder/astrorapid | 91053af8049724a07d1f55baf4e1f60fc36b1101 | ["MIT"] | 11 | 2019-02-19T20:35:08.000Z | 2021-07-16T05:56:28.000Z |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('./..'))
sys.path.insert(0, os.path.abspath('./../..'))
sys.path.insert(0, 'astrorapid')
sys.path.insert(0, 'astrorapid/read_from_database')
sys.path.insert(0, './../astrorapid')
sys.path.insert(0, './../astrorapid/read_from_database')
# -- Project information -----------------------------------------------------
project = 'astrorapid'
copyright = '2019, Daniel Muthukrishna'
author = 'Daniel Muthukrishna'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.autosummary'
]
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'Python'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'astrorapiddoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'astrorapid.tex', 'astrorapid Documentation',
'Daniel Muthukrishna', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'astrorapid', 'astrorapid Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'astrorapid', 'astrorapid Documentation',
author, 'astrorapid', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 30.005236 | 79 | 0.652242 |
f72e5d9a801e6d998e27b90743043c49babb6526 | 575 | py | Python | jumpscale/packages/vdc_dashboard/services/provision_wallet_billing.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | ["Apache-2.0"] | 13 | 2020-09-02T09:05:08.000Z | 2022-03-12T02:43:24.000Z | jumpscale/packages/vdc_dashboard/services/provision_wallet_billing.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | ["Apache-2.0"] | 1,998 | 2020-06-15T11:46:10.000Z | 2022-03-24T22:12:41.000Z | jumpscale/packages/vdc_dashboard/services/provision_wallet_billing.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | ["Apache-2.0"] | 8 | 2020-09-29T06:50:35.000Z | 2021-06-14T03:30:52.000Z |
from jumpscale.loader import j
from jumpscale.packages.vdc.billing import auto_extend_billing
from jumpscale.tools.servicemanager.servicemanager import BackgroundService
class AutoExtendbillingService(BackgroundService):
def __init__(self, interval=60 * 60, *args, **kwargs):
"""Provisioning wallet service that will run every hour to extend the VDC pool
"""
super().__init__(interval, *args, **kwargs)
def job(self):
auto_extend_billing()
j.logger.info("Auto extend billing service")
service = AutoExtendbillingService()
| 31.944444 | 86 | 0.735652 |
f72e6942e7acd082f6cba1fa9c3d70a58320e52f | 611 | py | Python | python/math/0204_count_primes.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | ["MIT"] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/math/0204_count_primes.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | ["MIT"] | null | null | null | python/math/0204_count_primes.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | ["MIT"] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z |
import math
class Solution:
def countPrimes(self, n):
"""
:type n: int
:rtype: int
        Sieve of Eratosthenes.
        For example, to count the primes below 20: 0 and 1 are not prime;
        2 is the first prime, so cross out every multiple of 2 below 20.
        The next number left standing, 3, is the next prime; cross out all of its multiples.
        The next remaining number, 5, is the next prime; cross out its multiples, and so on.
"""
if n < 2:
return 0
s = [1] * n
s[0] = s[1] = 0
for i in range(2, int(math.sqrt(n)) + 1):
if s[i] == 1:
s[i * i:n:i] = [0] * int((n - i * i - 1) / i + 1)
return sum(s)
def test_count_primes():
s = Solution()
assert 4 == s.countPrimes(10)
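    # Added illustration: by the same sieve, the primes below 20 are
    # 2, 3, 5, 7, 11, 13, 17, 19, so s.countPrimes(20) would return 8.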
| 21.821429 | 65 | 0.466448 |
f72e8e165d3d5f54741251e1676396f0a12b4e4f | 5,929 | py | Python | lian/ssh_deploy.py | catroll/lian | 405fd0c8c4ce8557609bf595431284a07e7b443e | ["Apache-2.0"] | null | null | null | lian/ssh_deploy.py | catroll/lian | 405fd0c8c4ce8557609bf595431284a07e7b443e | ["Apache-2.0"] | null | null | null | lian/ssh_deploy.py | catroll/lian | 405fd0c8c4ce8557609bf595431284a07e7b443e | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import os
import re
import time
from collections import OrderedDict
LOG = logging.getLogger(__name__)
def md5sum_command(directory='.', find_type='f', match='', not_match=''):
return ' '.join([i for i in [
'find', directory,
('-type %s' % find_type) if find_type else '',
'-regextype posix-extended' if match or not_match else '',
('-regex %s' % match) if match else '',
('! -regex "%s"' % not_match) if not_match else '',
"""-print0 | xargs -0 md5sum | awk '{printf "%-50s %s\\n", $2, $1}' | sort"""
] if i])
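# Added example (illustrative only): with the default arguments the command
# assembled above is roughly
#   find . -type f -print0 | xargs -0 md5sum | awk '{printf "%-50s %s\n", $2, $1}' | sort
# i.e. md5sum every regular file and emit sorted "<path> <md5>" lines.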
def check_sum(chain, local_path, remote_path, *md5sum_args, **md5sum_kwargs):
    title = re.sub('[^a-zA-Z0-9]', '-', local_path) + '.' + time.strftime('%Y%m%d-%H%M%S')
cmd_md5sum = md5sum_command(*md5sum_args, **md5sum_kwargs)
# ---------- get md5sum ----------
# locally
command = 'cd ' + local_path + '; ' + cmd_md5sum
LOG.info('local command: %s', command)
content = os.popen(command).read()
with open('/tmp/%s.a.txt' % title, 'w') as _file:
_file.write(content)
local_sums = OrderedDict((_file, _sum) for _file, _sum in [line.split() for line in content.splitlines()])
# remotely
command = 'cd ' + remote_path + '; ' + cmd_md5sum
LOG.info('remote command: %s', command)
code, out, err = chain.execute('cd ' + remote_path + '; ' + cmd_md5sum, buff_size=1024000)
out = out.decode('utf-8')
with open('/tmp/%s.b.txt' % title, 'w') as _file:
_file.write(out)
remote_sums = OrderedDict((_file, _sum) for _file, _sum in [line.split() for line in out.splitlines()])
# ---------- compare result ----------
LOG.info('*' * 50)
LOG.info('')
is_synced = True
for _file in local_sums:
if _file not in remote_sums:
is_synced = False
LOG.info(u'🐈 [LOCAL] ' + _file)
continue
if local_sums[_file] != remote_sums[_file]:
is_synced = False
LOG.info(u'🐍 [DIFF] ' + _file)
continue
# LOG.info('[SAME] ' + _file + ' ignore it')
for _file in remote_sums:
if _file not in local_sums:
is_synced = False
LOG.info(u'🐦 [REMOTE] ' + _file)
if is_synced:
LOG.info(u'㊗️ ㊗️ ㊗️ Perfect!!! ㊗️ ㊗️ ㊗️'.center(44))
LOG.info('')
LOG.info('*' * 50)
def sftp_download(chain, files_will_transferred):
for remote_path, local_path in files_will_transferred:
try:
chain.use().download(remote_path, local_path)
except Exception as error:
LOG.warning(error)
def download_files(chain, local_path, remote_path, files=None):
# download specified files
if not files:
LOG.debug('Download, but no file specified, over!')
return
move_tasks = [(os.path.join(remote_path, path), os.path.join(local_path, path)) for path in files]
sftp_download(chain, move_tasks)
def sftp_upload(chain, files_will_transferred):
""" SFTP upload
Args:
chain: object of SSHChain
files_will_transferred: list[tuple]
"""
LOG.info(files_will_transferred)
for local_path, remote_path in files_will_transferred:
chain.use().upload(local_path, remote_path)
def upload_files(chain, local_path, remote_path, files=None, ignore_patterns=None):
"""Upload local files or directory, can ignore some files by pattern
Args:
chain:
local_path:
remote_path:
files:
ignore_patterns:
"""
files = files or []
ignore_patterns = ignore_patterns or []
re_ignore = re.compile('(%s)' % (')|('.join(ignore_patterns))) if ignore_patterns else ''
move_tasks = []
for path in files:
fullpath = os.path.join(local_path, path)
if not os.path.exists(fullpath):
LOG.error('The file need uploaded not found: %s', fullpath)
exit()
if os.path.isfile(fullpath):
move_tasks.append((fullpath, os.path.join(remote_path, path)))
continue
assert os.path.isdir(fullpath)
for root, dirs, _files in os.walk(fullpath):
for _file in _files:
_fullpath = os.path.join(root, _file)
if re_ignore and re_ignore.search(_fullpath):
continue
relpath = os.path.relpath(_fullpath, local_path)
move_tasks.append((_fullpath, os.path.join(remote_path, relpath)))
sftp_upload(chain, move_tasks)
def file_sync(chain, local_path, remote_path,
files_upload=None, ignore_patterns=None, # upload arguments
files_download=None): # download arguments
if files_download:
download_files(chain, local_path, remote_path, files_download)
if files_upload:
upload_files(chain, local_path, remote_path, files_upload, ignore_patterns)
ACTIONS = 'check', 'sync', 'all',
def main(chain, local_path, remote_path, action='check',
files_upload=None, ignore_patterns=None, files_download=None,
*md5sum_args, **md5sum_kwargs):
"""
Args:
chain: object of SSHChain
local_path: str, absolute path
remote_path: str, absolute path
action: str
files_upload: list of files to upload
ignore_patterns
files_download: list of files to download
md5sum_args:
md5sum_kwargs: like: directory='.', find_type='f', match='', not_match=''
"""
if action not in ACTIONS:
return
def _file_sync():
file_sync(chain, local_path, remote_path, files_upload, ignore_patterns, files_download)
def _check_sum():
check_sum(chain, local_path, remote_path, *md5sum_args, **md5sum_kwargs)
if action == 'sync':
_file_sync()
return
if action == 'check':
_check_sum()
return
_file_sync()
_check_sum()
| 31.041885 | 110 | 0.608366 |
f72e975ec68de10ec951352b654b92d2aa5f1c53 | 908 | py | Python | tests/xgboost_data/models.py | wmonteiro92/xmoai-examples | 0286d57e15cb60693f57cdff386cbb246787442b | ["MIT"] | 1 | 2021-03-22T11:31:00.000Z | 2021-03-22T11:31:00.000Z | tests/xgboost_data/models.py | wmonteiro92/xmoai-examples | 0286d57e15cb60693f57cdff386cbb246787442b | ["MIT"] | null | null | null | tests/xgboost_data/models.py | wmonteiro92/xmoai-examples | 0286d57e15cb60693f57cdff386cbb246787442b | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 17 21:16:08 2020
@author: wmonteiro92
"""
from xgboost import XGBRegressor, XGBClassifier
def train_ml_model(X, y, algorithm, random_state=0):
"""Train one dataset in Python.
:param X: the input values.
:type X: np.array
:param y: the target values.
:type y: np.array
:param algorithm: the machine learning model to use. Allowed values are
`XGBClassifier` and `XGBRegressor`.
:type algorithm: str
:param random_state: the seed. Default is 0.
:type random_state: Integer
:return: the trained machine learning model.
:rtype: Object
"""
if algorithm == 'XGBClassifier':
model = XGBClassifier(random_state=random_state)
elif algorithm == 'XGBRegressor':
model = XGBRegressor(random_state=random_state)
model.fit(X, y)
    return model
| 27.515152 | 76 | 0.643172 |
f72e9dda78207301c3e7659b1baf989a356f877b | 9,707 | py | Python | aesara/tensor/nnet/conv3d2d.py | anirudhacharya/Theano-PyMC | 55f54243cf88397b032ebc7121d1090ee91aea7d | ["BSD-3-Clause"] | null | null | null | aesara/tensor/nnet/conv3d2d.py | anirudhacharya/Theano-PyMC | 55f54243cf88397b032ebc7121d1090ee91aea7d | ["BSD-3-Clause"] | null | null | null | aesara/tensor/nnet/conv3d2d.py | anirudhacharya/Theano-PyMC | 55f54243cf88397b032ebc7121d1090ee91aea7d | ["BSD-3-Clause"] | null | null | null |
import aesara
from aesara import tensor as at
from aesara.gradient import DisconnectedType
from aesara.graph.basic import Apply
from aesara.graph.op import Op
from aesara.graph.opt import TopoOptimizer, copy_stack_trace, local_optimizer
def get_diagonal_subtensor_view(x, i0, i1):
"""
Helper function for DiagonalSubtensor and IncDiagonalSubtensor.
Notes
-----
It returns a partial view of x, not a partial copy.
"""
# We have to cast i0 and i0 to int because python
# do not support indexing with 0-dim, 'int*' ndarrays.
i0 = int(i0)
i1 = int(i1)
if x.shape[i0] < x.shape[i1]:
raise NotImplementedError("is this allowed?")
idx = [slice(None)] * x.ndim
idx[i0] = slice(x.shape[i1] - 1, None, None)
xview = x.__getitem__(tuple(idx))
strides = list(xview.strides)
if x.shape[i1] != 1:
strides[i1] -= strides[i0]
xview.strides = strides
return xview
class DiagonalSubtensor(Op):
"""
Return a form a nd diagonal subtensor.
Parameters
----------
x
n-d tensor
i0
Axis index in x
i1
Axis index in x
Notes
-----
Work on the GPU.
Extended summary
----------------
``x`` is some n-dimensional tensor, but this Op only deals with a
matrix-shaped slice, using axes i0 and i1. Without loss of
generality, suppose that ``i0`` picks out our ``row`` dimension,
and i1 the ``column`` dimension.
So the relevant part of ``x`` is some matrix ``u``. Suppose it has 7 rows
and 4 columns::
[ 0 0 0 0 ]
[ 0 0 0 0 ]
[ 0 0 0 0 ]
[ 0 0 0 0 ]
[ 0 0 0 0 ]
[ 0 0 0 0 ]
The view returned by this function is also a matrix. It's a thick,
diagonal ``stripe`` across u that discards the lower left triangle
and the upper right triangle:
[ x 0 0 0 ]
[ x x 0 0 ]
[ x x x 0 ]
[ 0 x x x ]
[ 0 0 x x ]
[ 0 0 0 x ]
In this case the return value would be this view of shape 3x4. The
returned view has the same number of dimensions as the input
``x``, and the only difference is that the shape along dimension
``i0`` has been reduced by ``shape[i1] - 1`` because of the
triangles that got chopped out.
The NotImplementedError is meant to catch the case where shape[i0]
is too small for the stripe to reach across the matrix, in which
case it's not clear what this function should do. Maybe always
raise an error. I'd look back to the call site in the Conv3D to
see what's necessary at that point.
"""
__props__ = ("inplace",)
def __str__(self):
if self.inplace:
return "%s{inplace}" % self.__class__.__name__
return f"{self.__class__.__name__}"
def __init__(self, inplace=False):
self.inplace = inplace
if inplace:
self.view_map = {0: [0]}
def make_node(self, x, i0, i1):
_i0 = at.as_tensor_variable(i0)
_i1 = at.as_tensor_variable(i1)
return Apply(self, [x, _i0, _i1], [x.type()])
def perform(self, node, inputs, output_storage):
xview = get_diagonal_subtensor_view(*inputs)
if self.inplace:
output_storage[0][0] = xview
else:
output_storage[0][0] = xview.copy()
def grad(self, inputs, g_outputs):
z = at.zeros_like(inputs[0])
gx = inc_diagonal_subtensor(z, inputs[1], inputs[2], g_outputs[0])
return [gx, DisconnectedType()(), DisconnectedType()()]
def connection_pattern(self, node):
rval = [[True], [False], [False]]
return rval
diagonal_subtensor = DiagonalSubtensor(False)
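# Added note (illustrative): for a 2-d array `u` with more rows than columns,
# get_diagonal_subtensor_view(u, 0, 1) returns a strided view whose element
# [r, c] aliases u[r + u.shape[1] - 1 - c, c], i.e. the thick diagonal
# "stripe" sketched in the DiagonalSubtensor docstring above.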
class IncDiagonalSubtensor(Op):
"""
The gradient of DiagonalSubtensor.
"""
__props__ = ("inplace",)
def __str__(self):
if self.inplace:
return "%s{inplace}" % self.__class__.__name__
return f"{self.__class__.__name__}"
def __init__(self, inplace=False):
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
def make_node(self, x, i0, i1, amt):
_i0 = at.as_tensor_variable(i0)
_i1 = at.as_tensor_variable(i1)
return Apply(self, [x, _i0, _i1, amt], [x.type()])
def perform(self, node, inputs, output_storage):
x, i0, i1, amt = inputs
if not self.inplace:
x = x.copy()
xview = get_diagonal_subtensor_view(x, i0, i1)
xview += amt
output_storage[0][0] = x
def grad(self, inputs, g_outputs):
x, i0, i1, amt = inputs
gy = g_outputs[0]
return [
gy,
DisconnectedType()(),
DisconnectedType()(),
diagonal_subtensor(gy, i0, i1),
]
def connection_pattern(self, node):
rval = [[True], [False], [False], [True]]
return rval
inc_diagonal_subtensor = IncDiagonalSubtensor(False)
def conv3d(
signals, filters, signals_shape=None, filters_shape=None, border_mode="valid"
):
"""
Convolve spatio-temporal filters with a movie.
It flips the filters.
Parameters
----------
signals
Timeseries of images whose pixels have color channels.
Shape: [Ns, Ts, C, Hs, Ws].
filters
Spatio-temporal filters.
Shape: [Nf, Tf, C, Hf, Wf].
signals_shape
None or a tuple/list with the shape of signals.
filters_shape
None or a tuple/list with the shape of filters.
border_mode
One of 'valid', 'full' or 'half'.
Notes
-----
Another way to define signals: (batch, time, in channel, row, column)
Another way to define filters: (out channel,time,in channel, row, column)
For the GPU, use nnet.conv3d.
See Also
--------
Someone made a script that shows how to swap the axes between
both 3d convolution implementations in Aesara. See the last
`attachment <https://groups.google.com/d/msg/aesara-users/1S9_bZgHxVw/0cQR9a4riFUJ>`_
"""
if isinstance(border_mode, str):
border_mode = (border_mode, border_mode, border_mode)
if signals_shape is None:
_signals_shape_5d = signals.shape
else:
_signals_shape_5d = signals_shape
if filters_shape is None:
_filters_shape_5d = filters.shape
else:
_filters_shape_5d = filters_shape
Ns, Ts, C, Hs, Ws = _signals_shape_5d
Nf, Tf, C, Hf, Wf = _filters_shape_5d
_signals_shape_4d = (Ns * Ts, C, Hs, Ws)
_filters_shape_4d = (Nf * Tf, C, Hf, Wf)
if border_mode[1] != border_mode[2]:
raise NotImplementedError("height and width bordermodes must match")
conv2d_signal_shape = _signals_shape_4d
conv2d_filter_shape = _filters_shape_4d
if signals_shape is None:
conv2d_signal_shape = None
if filters_shape is None:
conv2d_filter_shape = None
out_4d = aesara.tensor.nnet.conv2d(
signals.reshape(_signals_shape_4d),
filters.reshape(_filters_shape_4d),
input_shape=conv2d_signal_shape,
filter_shape=conv2d_filter_shape,
border_mode=border_mode[1],
) # ignoring border_mode[2]
# compute the intended output size
if border_mode[1] == "valid":
Hout = Hs - Hf + 1
Wout = Ws - Wf + 1
elif border_mode[1] == "full":
Hout = Hs + Hf - 1
Wout = Ws + Wf - 1
elif border_mode[1] == "half":
Hout = Hs - (Hf % 2) + 1
Wout = Ws - (Wf % 2) + 1
elif border_mode[1] == "same":
raise NotImplementedError()
else:
raise ValueError("invalid border mode", border_mode[1])
# reshape the temporary output to restore its original size
out_tmp = out_4d.reshape((Ns, Ts, Nf, Tf, Hout, Wout))
# now sum out along the Tf to get the output
# but we have to sum on a diagonal through the Tf and Ts submatrix.
if Tf == 1:
# for Tf==1, no sum along Tf, the Ts-axis of the output is unchanged!
out_5d = out_tmp.reshape((Ns, Ts, Nf, Hout, Wout))
else:
# for some types of convolution, pad out_tmp with zeros
if border_mode[0] == "valid":
Tpad = 0
elif border_mode[0] == "full":
Tpad = Tf - 1
elif border_mode[0] == "half":
Tpad = Tf // 2
elif border_mode[0] == "same":
raise NotImplementedError()
else:
raise ValueError("invalid border mode", border_mode[0])
if Tpad == 0:
out_5d = diagonal_subtensor(out_tmp, 1, 3).sum(axis=3)
else:
# pad out_tmp with zeros before summing over the diagonal
out_tmp_padded = at.zeros(
dtype=out_tmp.dtype, shape=(Ns, Ts + 2 * Tpad, Nf, Tf, Hout, Wout)
)
out_tmp_padded = aesara.tensor.subtensor.set_subtensor(
out_tmp_padded[:, Tpad : (Ts + Tpad), :, :, :, :], out_tmp
)
out_5d = diagonal_subtensor(out_tmp_padded, 1, 3).sum(axis=3)
return out_5d
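# Illustrative shapes (added): with border_mode='valid', signals of shape
# (Ns, Ts, C, Hs, Ws) convolved with filters of shape (Nf, Tf, C, Hf, Wf)
# give an output of shape (Ns, Ts - Tf + 1, Nf, Hs - Hf + 1, Ws - Wf + 1);
# e.g. signals (2, 10, 3, 32, 32) and filters (8, 3, 3, 5, 5) -> (2, 8, 8, 28, 28).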
@local_optimizer([DiagonalSubtensor, IncDiagonalSubtensor])
def local_inplace_DiagonalSubtensor(fgraph, node):
"""Also work for IncDiagonalSubtensor."""
if (
isinstance(node.op, (DiagonalSubtensor, IncDiagonalSubtensor))
and not node.op.inplace
):
new_op = node.op.__class__(inplace=True)
new_node = new_op(*node.inputs)
copy_stack_trace(node.outputs[0], new_node)
return [new_node]
return False
aesara.compile.optdb.register(
"local_inplace_DiagonalSubtensor",
TopoOptimizer(
local_inplace_DiagonalSubtensor, failure_callback=TopoOptimizer.warn_inplace
),
60,
"fast_run",
"inplace",
)
| 29.685015 | 89 | 0.609457 |
f72f19778b0b87aa95ac05b779fa868ad2422b2a | 256 | py | Python | pybat/__init__.py | mbercx/pybat | e0cf610fd06a97979f5ec70757406de1f9a788ef | ["MIT"] | 3 | 2019-04-08T13:10:15.000Z | 2021-07-04T07:23:49.000Z | pybat/__init__.py | mbercx/pybat | e0cf610fd06a97979f5ec70757406de1f9a788ef | ["MIT"] | 1 | 2019-02-28T12:51:57.000Z | 2019-02-28T12:51:57.000Z | pybat/__init__.py | mbercx/pybat | e0cf610fd06a97979f5ec70757406de1f9a788ef | ["MIT"] | 4 | 2018-07-30T12:58:35.000Z | 2020-03-05T20:09:46.000Z |
# These import commands make importing core classes easier, e.g. you can just import
# Cathode using:
#
# from pybat import Cathode
#
# Instead of:
#
# from pybat.core import Cathode
#
from pybat.core import Cathode, LiRichCathode, Dimer, DimerNEBAnalysis
| 23.272727 | 84 | 0.765625 |
f72f39ea13255cbce714cd52ecd6820c01e3e27b | 10,487 | py | Python | tests/test_phase_change.py | volpatto/chemicals | 721904ee17604f5e8685b0e5fff12e0bac567f73 | ["MIT"] | null | null | null | tests/test_phase_change.py | volpatto/chemicals | 721904ee17604f5e8685b0e5fff12e0bac567f73 | ["MIT"] | null | null | null | tests/test_phase_change.py | volpatto/chemicals | 721904ee17604f5e8685b0e5fff12e0bac567f73 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fluids.numerics import assert_close, assert_close1d
from chemicals.phase_change import *
from chemicals.phase_change import (Hvap_data_CRC, Hfus_data_CRC,
Hvap_data_Gharagheizi, Hsub_data_Gharagheizi,
Tb_data_Yaws, Tm_ON_data,
phase_change_data_Perrys2_150,
phase_change_data_Alibakhshi_Cs,
phase_change_data_VDI_PPDS_4)
from chemicals.miscdata import CRC_inorganic_data, CRC_organic_data
from chemicals.identifiers import check_CAS
def test_Watson():
Hvap = Watson(T=320, Hvap_ref=43908, T_ref=300.0, Tc=647.14)
assert_close(Hvap, 42928.990094915454, rtol=1e-12)
def test_Clapeyron():
Hvap = Clapeyron(294.0, 466.0, 5.55E6)
assert_close(Hvap, 26512.36357131963)
# Test at 1/2 bar, sZ=0.98
Hvap = Clapeyron(264.0, 466.0, 5.55E6, 0.98, 5E4)
assert_close(Hvap, 23370.947571814384)
def test_Pitzer():
Hvap = Pitzer(452, 645.6, 0.35017)
assert_close(Hvap, 36696.749078320056)
def test_SMK():
Hvap = SMK(553.15, 751.35, 0.302)
assert_close(Hvap, 39866.18999046232)
def test_MK():
# Problem in article for SMK function.
Hv1 = MK(553.15, 751.35, 0.302)
# data in [1]_., should give 26.43 KJ/mol
Hv2 = MK(298.15, 469.69, 0.2507)
assert_close(Hv1, 38728.00667307733, rtol=1e-12)
assert_close(Hv2, 25940.988533726406, rtol=1e-12)
def test_Velasco():
Hv1 = Velasco(553.15, 751.35, 0.302)
Hv2 = Velasco(333.2, 476.0, 0.5559)
assert_close(Hv1, 39524.251054691274, rtol=1e-12)
assert_close(Hv2, 33299.428636069264, rtol=1e-12)
def test_Riedel():
# same problem as in Perry's examples
Hv1 = Riedel(294.0, 466.0, 5.55E6)
# Pyridine, 0.0% err vs. exp: 35090 J/mol; from Poling [2]_.
Hv2 = Riedel(388.4, 620.0, 56.3E5)
assert_close(Hv1, 26828.59040728512, rtol=1e-12)
assert_close(Hv2, 35089.80179000598, rtol=1e-12)
def test_Chen():
Hv1 = Chen(294.0, 466.0, 5.55E6)
assert_close(Hv1, 26705.902558030946)
def test_Liu():
Hv1 = Liu(294.0, 466.0, 5.55E6)
assert_close(Hv1, 26378.575260517395)
def test_Vetere():
Hv1 = Vetere(294.0, 466.0, 5.55E6)
assert_close(Hv1, 26363.43895706672)
def test_Hvap_CRC_data():
HvapTb_tot = Hvap_data_CRC['HvapTb'].sum()
assert_close(HvapTb_tot, 30251890.0)
Hvap298_tot = Hvap_data_CRC['Hvap298'].sum()
assert_close(Hvap298_tot, 29343710.0)
Tb_tot = Hvap_data_CRC['Tb'].sum()
assert_close(Tb_tot, 407502.95600000001)
assert Hvap_data_CRC.index.is_unique
assert Hvap_data_CRC.shape == (926, 5)
assert all([check_CAS(i) for i in list(Hvap_data_CRC.index)])
def test_Hfus_CRC_data():
Hfus_total = Hfus_data_CRC['Hfus'].sum()
assert_close(Hfus_total, 29131241)
assert Hfus_data_CRC.index.is_unique
assert Hfus_data_CRC.shape == (1112, 3)
assert all([check_CAS(i) for i in list(Hfus_data_CRC.index)])
def test_Hfus():
assert_close(Hfus('462-06-6', method='CRC'), 11310.0, rtol=1e-12)
assert_close(Hfus('462-06-6'), 11310.0, rtol=1e-12)
assert_close(Hfus(CASRN='75-07-0'), 2310.0)
assert Hfus(CASRN='75000-07-0') is None
assert Hfus_methods('7732-18-5') == ['CRC']
def test_Gharagheizi_Hvap_data():
# 51 CAS number DO NOT validate
Hvap298_tot = Hvap_data_Gharagheizi['Hvap298'].sum()
assert_close(Hvap298_tot, 173584900)
assert Hvap_data_Gharagheizi.index.is_unique
assert Hvap_data_Gharagheizi.shape == (2730, 2)
def test_Gharagheizi_Hsub_data():
tots = [Hsub_data_Gharagheizi[i].sum() for i in ['Hsub', 'error']]
assert_close(tots[0], 130537650)
assert_close(tots[1], 1522960.0)
assert Hsub_data_Gharagheizi.index.is_unique
assert Hsub_data_Gharagheizi.shape == (1241, 3)
def test_Yaws_Tb_data():
tot = Tb_data_Yaws.sum()
assert_close(tot, 6631287.51)
assert Tb_data_Yaws.index.is_unique
assert Tb_data_Yaws.shape == (13461, 1)
@pytest.mark.slow
def test_Yaws_Tb_CAS_valid():
assert all([check_CAS(i) for i in Tb_data_Yaws.index])
def test_Tm_ON_data():
tot = Tm_ON_data.sum()
assert_close(tot, 4059989.425)
assert Tm_ON_data.shape == (11549, 1)
assert Tm_ON_data.index.is_unique
@pytest.mark.slow
def test_Tm_ON_data_CAS_valid():
assert all([check_CAS(i) for i in Tm_ON_data.index])
def test_Perrys2_150_data():
# rtol=2E-4 for Tmin; only helium-4 needs a higher tolerance
# Everything hits 0 at Tmax except Difluoromethane, methane, and water;
# those needed their Tmax adjusted to their real Tc.
# C1 is divided by 1000, to give units of J/mol instead of J/kmol
# Terephthalic acid removed, was a constant value only.
assert all([check_CAS(i) for i in phase_change_data_Perrys2_150.index])
tots_calc = [phase_change_data_Perrys2_150[i].abs().sum() for i in [u'Tc', u'C1', u'C2', u'C3', u'C4', u'Tmin', u'Tmax']]
tots = [189407.42499999999, 18617223.739999998, 174.34494000000001, 112.51209900000001, 63.894040000000004, 70810.849999999991, 189407.005]
assert_close1d(tots_calc, tots)
assert phase_change_data_Perrys2_150.index.is_unique
assert phase_change_data_Perrys2_150.shape == (344, 8)
def test_Alibakhshi_Cs_data():
# Oops, a bunch of these now-lonely coefficients have an invalid CAS...
# assert all([check_CAS(i) for i in phase_change_data_Alibakhshi_Cs.index])
tots_calc = [phase_change_data_Alibakhshi_Cs[i].abs().sum() for i in [u'C']]
tots = [28154.361500000003]
assert_close1d(tots_calc, tots)
assert phase_change_data_Alibakhshi_Cs.index.is_unique
assert phase_change_data_Alibakhshi_Cs.shape == (1890, 2)
def test_VDI_PPDS_4_data():
"""I believe there are no errors here."""
assert all([check_CAS(i) for i in phase_change_data_VDI_PPDS_4.index])
tots_calc = [phase_change_data_VDI_PPDS_4[i].abs().sum() for i in [u'A', u'B', u'C', u'D', u'E', u'Tc', u'MW']]
tots = [1974.2929800000002, 2653.9399000000003, 2022.530649, 943.25633100000005, 3124.9258610000002, 150142.28, 27786.919999999998]
assert_close1d(tots_calc, tots)
assert phase_change_data_VDI_PPDS_4.index.is_unique
assert phase_change_data_VDI_PPDS_4.shape == (272, 8)
@pytest.mark.slow
@pytest.mark.fuzz
def test_Tb_all_values():
s1 = CRC_inorganic_data.index[CRC_inorganic_data['Tb'].notnull()]
s2 = CRC_organic_data.index[CRC_organic_data['Tb'].notnull()]
s3 = Tb_data_Yaws.index
tots = []
tots_exp = [639213.2310000042, 2280667.079999829, 6631287.510000873]
# These should match the sums of the respective series
for s, method in zip([s1, s2, s3], ['CRC_INORG', 'CRC_ORG', 'YAWS']):
tots.append(sum([Tb(i, method=method) for i in s]))
assert_close1d(tots, tots_exp, rtol=1e-11)
s = set(); s.update(s1); s.update(s2); s.update(s3)
assert len(s) == 13868
def test_Tb():
# CRC_inorg, CRC org, Yaws
Tbs_calc = Tb('993-50-0'), Tb('626-94-8'), Tb('7631-99-4')
Tbs = [399.15, 412.15, 653.15]
assert_close1d(Tbs, Tbs_calc)
hits = [Tb_methods(i) for i in ['993-50-0', '626-94-8', '7631-99-4']]
assert hits == [['CRC_INORG'], ['CRC_ORG'], ['YAWS']]
with pytest.raises(Exception):
Tb('993-50-0', method='BADMETHOD')
assert None == Tb('9923443-50-0')
assert [] == Tb_methods('9923443-50-0')
w_methods = Tb_methods('7732-18-5')
assert w_methods == ['CRC_INORG', 'YAWS']
Tbs = [Tb('7732-18-5', method=i) for i in w_methods]
assert_close1d(Tbs, [373.124, 373.15])
@pytest.mark.slow
@pytest.mark.fuzz
def test_Tm_all_values():
s1 = CRC_inorganic_data.index[CRC_inorganic_data['Tm'].notnull()]
s2 = CRC_organic_data.index[CRC_organic_data['Tm'].notnull()]
s3 = Tm_ON_data.index
tots = []
tots_exp = [1543322.6125999668, 2571284.480399755, 4059989.4249993376]
# These should match the sums of the respective series
for s, method in zip([s1, s2, s3], ['CRC_INORG', 'CRC_ORG', 'OPEN_NTBKM']):
tots.append(sum([Tm(i, method=method) for i in s]))
assert_close1d(tots, tots_exp, rtol=1e-11)
s = set(); s.update(s1); s.update(s2); s.update(s3)
assert len(s) == 14723
def test_Tm():
# Open notebook, CRC organic, CRC inorg
Tms_calc = Tm('996-50-9'), Tm('999-78-0'), Tm('993-50-0')
Tms = [263.15, 191.15, 274.15]
assert_close1d(Tms, Tms_calc)
hits = [Tm_methods(i) for i in ['996-50-9', '999-78-0', '993-50-0']]
assert hits == [['OPEN_NTBKM'], ['CRC_ORG'], ['CRC_INORG']]
with pytest.raises(Exception):
Tm('993-50-0', method='BADMETHOD')
assert Tm('9923443-50-0') is None
assert [] == Tm_methods('9923443-50-0')
w_methods = Tm_methods('7732-18-5')
assert w_methods == ['OPEN_NTBKM', 'CRC_INORG']
Tms = [Tm('7732-18-5', method=i) for i in w_methods]
assert_close1d(Tms, [273.15, 273.15])
def test_Alibakhshi():
Hvap = Alibakhshi(T=320.0, Tc=647.14, C=-16.7171)
assert_close(Hvap, 41961.30490225752, rtol=1e-13)
def test_PPDS12():
Hvap = PPDS12(300.0, 591.75, 4.60584, 13.97224, -10.592315, 2.120205, 4.277128)
assert_close(Hvap, 37948.76862035927, rtol=1e-13)
| 34.840532 | 143 | 0.685229 |
f72f3a0ab9d913c2109ddb7b9fece41c54d596c4 | 11,902 | py | Python | typhon/retrieval/common.py | gerritholl/typhon | dbde147be12922ec730bd072dc4797c9da9a6d6b | ["MIT"] | null | null | null | typhon/retrieval/common.py | gerritholl/typhon | dbde147be12922ec730bd072dc4797c9da9a6d6b | ["MIT"] | null | null | null | typhon/retrieval/common.py | gerritholl/typhon | dbde147be12922ec730bd072dc4797c9da9a6d6b | ["MIT"] | null | null | null |
from ast import literal_eval
import copy
from importlib import import_module
import json
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from typhon.utils import to_array
__all__ = [
'RetrievalProduct',
]
class NotTrainedError(Exception):
"""Should be raised if someone runs a non-trained retrieval product
"""
def __init__(self, *args):
message = "You must train this retrieval product before running it!"
Exception.__init__(self, message, *args)
class RetrievalProduct:
"""Retrieval that can be trained with data and stored to json files
This is basically a wrapper around the scikit-learn estimator and trainer
classes and makes it possible to save the trained models as json file.
To save this object to a json file, the additional package json_tricks is
required.
"""
def __init__(self, verbose=False):
"""Initialize a Retriever object
Args:
verbose: The higher this value is the more debug messages are
printed. Default is False.
"""
# The trainer and/or model for this retriever:
self.estimator = None
self.verbose = verbose
self._inputs = []
self._outputs = []
@property
def inputs(self):
return self._inputs
@property
def outputs(self):
return self._outputs
@staticmethod
def _import_class(module_name, class_name):
"""Import a class dynamically to the namespace"""
mod = import_module(module_name)
klass = getattr(mod, class_name)
return klass
@staticmethod
def _encode_numpy(obj):
def _to_dict(item):
if isinstance(item, np.ndarray):
return {
"__ndarray__": item.tolist(),
"__dtype__": str(item.dtype),
"__shape__": item.shape,
}
else:
return np.asscalar(item)
def _is_numpy(item):
return type(item).__module__ == np.__name__
if isinstance(obj, dict):
obj = obj.copy()
iterator = obj.items()
elif isinstance(obj, list):
obj = obj.copy()
iterator = enumerate(obj)
else:
return obj
for key, value in iterator:
if _is_numpy(value):
obj[key] = _to_dict(value)
elif isinstance(value, (list, dict)):
obj[key] = RetrievalProduct._encode_numpy(value)
return obj
@staticmethod
def _decode_numpy(obj):
def _from_dict(item):
try:
return np.array(
item["__ndarray__"],
dtype=item["__dtype__"],
)
except TypeError:
return np.array(
item["__ndarray__"],
dtype=literal_eval(item["__dtype__"]),
)
def _is_numpy(item):
return isinstance(item, dict) and "__ndarray__" in item
if isinstance(obj, dict):
obj = obj.copy()
iterator = obj.items()
elif isinstance(obj, list):
obj = obj.copy()
iterator = enumerate(obj)
else:
return obj
for key, value in iterator:
if _is_numpy(value):
obj[key] = _from_dict(value)
elif isinstance(value, (list, tuple, dict)):
obj[key] = RetrievalProduct._decode_numpy(value)
return obj
@staticmethod
def _tree_to_dict(tree):
return {
"module": type(tree).__module__,
"class": type(tree).__name__,
"coefs": tree.__getstate__(),
}
@staticmethod
def _tree_from_dict(dictionary, coefs):
instance = RetrievalProduct._import_class(
dictionary["module"], dictionary["class"]
)
tree = instance(
to_array(coefs["n_features_"]),
to_array(coefs["n_classes_"]),
to_array(coefs["n_outputs_"])
)
tree.__setstate__(dictionary["coefs"])
return tree
@staticmethod
def _model_to_dict(model):
"""Convert a sklearn model object to a dictionary"""
dictionary = {
"module": type(model).__module__,
"class": type(model).__name__,
"params": model.get_params(deep=True),
"coefs": {
attr: copy.deepcopy(getattr(model, attr))
for attr in model.__dir__()
if not attr.startswith("__") and attr.endswith("_")
}
}
if "tree_" in dictionary["coefs"]:
# Not funny. sklearn.tree objects are not directly
# serializable to json. Hence, we must dump them by ourselves.
dictionary["coefs"]["tree_"] = RetrievalProduct._tree_to_dict(
dictionary["coefs"]["tree_"]
)
return RetrievalProduct._encode_numpy(dictionary)
@staticmethod
def _model_from_dict(dictionary):
"""Create a sklearn model object from a dictionary"""
dictionary = RetrievalProduct._decode_numpy(dictionary)
instance = RetrievalProduct._import_class(
dictionary["module"], dictionary["class"]
)
model = instance(**dictionary["params"])
for attr, value in dictionary["coefs"].items():
if attr == "tree_":
# We must treat a tree specially:
value = RetrievalProduct._tree_from_dict(
value, dictionary["coefs"]
)
try:
setattr(model, attr, value)
except AttributeError:
# Some attributes cannot be set such as feature_importances_
pass
return model
@staticmethod
def _pipeline_to_dict(pipeline):
"""Convert a pipeline object to a dictionary"""
if pipeline is None:
raise ValueError("No object trained!")
all_steps = {}
for name, model in pipeline.steps:
all_steps[name] = RetrievalProduct._model_to_dict(model)
return all_steps
@staticmethod
def _pipeline_from_dict(dictionary):
"""Create a pipeline object from a dictionary"""
all_steps = []
for name, step in dictionary.items():
model = RetrievalProduct._model_from_dict(step)
all_steps.append([name, model])
return Pipeline(all_steps)
def is_trained(self):
"""Return true if RetrievalProduct is trained"""
return self.estimator is not None
@classmethod
def from_dict(cls, parameter, *args, **kwargs):
"""Load a retrieval product from a dictionary
Args:
parameter: A dictionary with the training parameters. Simply the
output of :meth:`to_dict`.
*args: Positional arguments allowed for :meth:`__init__`.
**kwargs Keyword arguments allowed for :meth:`__init__`.
Returns:
A new :class:`RetrievalProduct` object.
"""
self = cls(*args, **kwargs)
estimator = parameter.get("estimator", None)
if estimator is None:
raise ValueError("Found no coefficients for estimator!")
is_pipeline = parameter["estimator_is_pipeline"]
if is_pipeline:
self.estimator = self._pipeline_from_dict(estimator)
else:
self.estimator = self._model_from_dict(estimator)
self._inputs = parameter["inputs"]
self._outputs = parameter["outputs"]
return self
def to_dict(self):
"""Dump this retrieval product to a dictionary"""
parameter = {}
if isinstance(self.estimator, Pipeline):
parameter["estimator"] = self._pipeline_to_dict(self.estimator)
parameter["estimator_is_pipeline"] = True
else:
parameter["estimator"] = self._model_to_dict(self.estimator)
parameter["estimator_is_pipeline"] = False
parameter["inputs"] = self.inputs
parameter["outputs"] = self.outputs
return parameter
@classmethod
def from_txt(cls, filename, *args, **kwargs):
"""Load a retrieval product from a txt file
Notes:
The output format is not standard json!
Training parameters are:
* weights of the estimator
* names of the input and target fields
Args:
filename: The name of file from where to load the training
parameters.
*args: Positional arguments allowed for :meth:`__init__`.
**kwargs Keyword arguments allowed for :meth:`__init__`.
Returns:
A new :class:`RetrievalProduct` object.
"""
with open(filename, 'r') as infile:
parameter = literal_eval(infile.read())
return cls.from_dict(parameter, *args, **kwargs)
def to_txt(self, filename):
"""Save this retrieval product to a txt file
Training parameters are:
* configuration of the used estimator
* names of the input, output, and target fields
Args:
filename: The name of the file where to store the training
parameters.
Returns:
None
"""
with open(filename, 'w') as outfile:
outfile.write(repr(self.to_dict()))
def retrieve(self, inputs):
"""Predict the target values for data coming from arrays
Args:
inputs: A pandas.DataFrame object. The keys must be the
same labels as used in :meth:`train`.
Returns:
A pandas.DataFrame object with the retrieved data.
Examples:
        .. code-block:: python
# TODO
"""
if self.estimator is None:
raise NotTrainedError()
# Skip empty datasets
if inputs.empty:
return None
# Retrieve the data from the neural network:
output_data = self.estimator.predict(inputs)
return pd.DataFrame(data=output_data, columns=self.outputs)
def score(self, inputs, targets):
"""
Args:
inputs: A pandas.DataFrame with input data.
targets: A pandas.DataFrame with target data.
Returns:
The metric score as a number
"""
if self.estimator is None:
raise NotTrainedError()
return self.estimator.score(inputs.squeeze(), targets.squeeze())
def train(self, estimator, inputs, targets):
"""Train this retriever with data from arrays
Args:
estimator: The object that will be trained. If it is a trainer
object such as a GridSearchCV, the best estimator will be
chosen after training. Can also be a Pipeline or a standard
Estimator from scikit-learn.
inputs: A pandas.DataFrame with input data.
targets: A pandas.DataFrame with target data.
Returns:
A float number indicating the training score.
"""
        # The input and target labels are saved so that we know what this
        # product retrieves and from what:
self._inputs = inputs.columns.tolist()
self._outputs = targets.columns.tolist()
# Start to train!
estimator.fit(inputs.squeeze(), targets.squeeze())
# Let's check whether the estimator was a trainer object such as
# GridSearchCV, etc. Then we save only the best estimator.
if hasattr(estimator, "best_estimator_"):
# Use the best estimator from now on:
self.estimator = estimator.best_estimator_
else:
self.estimator = estimator
return self.score(inputs, targets)
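# Illustrative usage (added; the estimator and DataFrame names below are
# assumptions, not part of the original module):
#
#   product = RetrievalProduct()
#   product.train(RandomForestRegressor(), inputs_df, targets_df)
#   product.to_txt('retrieval_product.txt')              # persist coefficients
#   restored = RetrievalProduct.from_txt('retrieval_product.txt')
#   retrieved = restored.retrieve(new_inputs_df)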
| 31.075718 | 77 | 0.582423 |
f72f3b30d80ddaeb33b04d28e7f95a0cabbea8cd | 2,075 | py | Python | integrations/tensorflow/e2e/broadcasting_test.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | ["Apache-2.0"] | 1 | 2021-03-15T13:53:30.000Z | 2021-03-15T13:53:30.000Z | integrations/tensorflow/e2e/broadcasting_test.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | ["Apache-2.0"] | null | null | null | integrations/tensorflow/e2e/broadcasting_test.py | BernhardRiemann/iree | 471349762b316f7d6b83eb5f9089255d78052758 | ["Apache-2.0"] | null | null | null |
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test broadcasting support."""
from absl import app
import numpy as np
from pyiree.tf.support import tf_test_utils
from pyiree.tf.support import tf_utils
import tensorflow.compat.v2 as tf
class BroadcastingModule(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([None], tf.float32),
tf.TensorSpec([None], tf.float32),
])
def add(self, lhs, rhs):
return lhs + rhs
class BroadcastingTest(tf_test_utils.TracedModuleTestCase):
def __init__(self, methodName="runTest"):
super(BroadcastingTest, self).__init__(methodName)
self._modules = tf_test_utils.compile_tf_module(BroadcastingModule)
def test_add_same_shape(self):
def add_same_shape(module):
lhs = tf_utils.uniform([4])
rhs = tf_utils.uniform([4])
module.add(lhs, rhs)
self.compare_backends(add_same_shape, self._modules)
def test_add_broadcast_lhs(self):
def add_broadcast_lhs(module):
lhs = tf_utils.uniform([1])
rhs = tf_utils.uniform([4])
module.add(lhs, rhs)
self.compare_backends(add_broadcast_lhs, self._modules)
def test_add_broadcast_rhs(self):
def add_broadcast_rhs(module):
lhs = tf_utils.uniform([4])
rhs = tf_utils.uniform([1])
module.add(lhs, rhs)
self.compare_backends(add_broadcast_rhs, self._modules)
def main(argv):
del argv # Unused
if hasattr(tf, 'enable_v2_behavior'):
tf.enable_v2_behavior()
tf.test.main()
if __name__ == '__main__':
app.run(main)
| 26.948052 | 74 | 0.723855 |
f72f41e99b9e572abeb953f1f643d46f46a98281 | 8,713 | py | Python | ros2_ws/src/utils/logger/logger/logger.py | FastSense/rosbot-ros2 | c2d274ce179534fec5b2786a6f96b6d638019ac4 | ["MIT"] | null | null | null | ros2_ws/src/utils/logger/logger/logger.py | FastSense/rosbot-ros2 | c2d274ce179534fec5b2786a6f96b6d638019ac4 | ["MIT"] | 2 | 2021-07-05T14:50:09.000Z | 2021-09-14T15:21:11.000Z | ros2_ws/src/utils/logger/logger/logger.py | FastSense/metalbot | 063c897a16129d9aa88c2c7c52bdf6547af894e4 | ["MIT"] | null | null | null |
import os
import pandas as pd
from matplotlib import pyplot as plt
import rclpy
import numpy as np
from rclpy.node import Node
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
from logger.utils import convert_ros2_time_to_float
from logger.create_graphs import build_general_graph_for_rosbot
from scipy.spatial.transform import Rotation
class Logger(Node):
"""
Class for logging the state of the rosbot
Node for logging the state of the robot,
kinematic model (optional) and neural network
model (optional), control and time stamps
:Attributes:
        :first_tick: (bool) true if it is the first callback
:init_time: (float) node start time (time of the first callback)
:curr_control: (list) current control [u_v, u_w]
:output_path: (str) Absolute path to the directory
where the logged data will be saved
        :control_topic: (str) name of the control topic (/cmd_vel)
:parent_frame: (str) name of the origin tf frame
:kinetic_model_frame: (str) name of the kinematic model tf frame
:nn_model_frame: (str) name of the NN model tf frame
:robot_state: (pandas.DataFrame) container for rosbot state
:kinetic_model_state: (pandas.DataFrame) container for
kinematic model state
:nn_model_state: (pandas.DataFrame) container for NN model state
:robot_control: (pandas.DataFrame) container for rosbot control
:time: (list) container for time stamps
:odom_sub: subscriber to /odom topic
:control_sub: subscriber to control topic
"""
def __init__(self):
"""
"""
super().__init__('logger')
self.init_parameters()
self.get_node_parametes()
self.init_subs()
self.init_containers()
self.first_tick = True
self.init_time = None
self.curr_control = list()
self.srv = self.create_service(Empty, 'shutdown_logger', self.shutdown_logger_callback)
rclpy.get_default_context().on_shutdown(self.on_shutdown)
def init_parameters(self):
"""
Declares node parameters
"""
self.declare_parameter('output_path', "")
self.declare_parameter('control_topic', '/cmd_vel')
self.declare_parameter('parent_frame', 'odom')
self.declare_parameter('kinetic_model_frame', 'model_link')
self.declare_parameter('nn_model_frame', 'nn_model_link')
# self.declare_parameter('tf_topic', '/tf')
def get_node_parametes(self):
"""
Gets node parameters
"""
self.output_path = self.get_parameter('output_path').get_parameter_value().string_value
self.control_topic = self.get_parameter('control_topic').get_parameter_value().string_value
self.parent_frame = self.get_parameter('parent_frame').get_parameter_value().string_value
self.kinetic_model_frame = self.get_parameter('kinetic_model_frame').get_parameter_value().string_value
self.nn_model_frame = self.get_parameter('nn_model_frame').get_parameter_value().string_value
# self.tf_topic = self.get_parameter('tf_topic').get_parameter_value().string_value
def init_containers(self):
"""
Declares containers for logged data
"""
self.robot_state = pd.DataFrame(
columns=[
'x', 'y', 'z', 'roll', 'pitch', 'yaw',
'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',
]
)
self.kinetic_model_state = pd.DataFrame(
columns=[
'x', 'y', 'z', 'roll', 'pitch', 'yaw',
'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',
]
)
self.nn_model_state = pd.DataFrame(
columns=[
'x', 'y', 'z', 'roll', 'pitch', 'yaw',
'v_x', 'v_y', 'v_z', 'w_x', 'w_y', 'w_z',
]
)
self.robot_control = pd.DataFrame(
columns=[
'v_x', 'w_z'
]
)
self.time = list()
def init_subs(self):
"""
Declares node subscribers
"""
self.odom_sub = self.create_subscription(
Odometry,
'/odom',
self.odom_callback,
1
)
self.control_sub = self.create_subscription(
Twist,
self.control_topic,
self.control_callback,
1
)
# prevent unused variable warning
self.control_sub
self.odom_sub
def odom_callback(self, odom_msg):
"""
Callback on odom message
Robot position, current time and control are logged
Args:
:odom_msg: (nav_msgs.msg.Odometry): odom msg
"""
if (len(self.curr_control) == 0):
return
curr_time = convert_ros2_time_to_float(
self.get_clock().now().seconds_nanoseconds()
)
# update time container
self.time.append(curr_time - self.init_time)
# update control container
self.robot_control.loc[len(self.robot_control)] = self.curr_control
# update robot_state container
rosbot_pose = odom_msg.pose.pose
rosbot_velocities = odom_msg.twist.twist
x, y, z = rosbot_pose.position.x, rosbot_pose.position.y, rosbot_pose.position.z
rpy = Rotation.from_quat([
np.float(rosbot_pose.orientation.x),
np.float(rosbot_pose.orientation.y),
np.float(rosbot_pose.orientation.z),
np.float(rosbot_pose.orientation.w)]
).as_euler('xyz')
rpy = list(rpy)
v_x = rosbot_velocities.linear.x # Linear velocity
v_y = rosbot_velocities.linear.y
v_z = rosbot_velocities.linear.z
w_x = rosbot_velocities.angular.x
w_y = rosbot_velocities.angular.y
w_z = rosbot_velocities.angular.z # YAW velocity
last_row = len(self.robot_state)
self.robot_state.loc[last_row] = [x,y,z] + rpy + [v_x, v_y, v_z, w_x, w_y, w_z]
def control_callback(self, control):
"""
Updates the current control
Args:
:control: (geometry_msgs.msg.Twist) control msg
"""
if self.first_tick:
self.first_tick = False
self.init_time = convert_ros2_time_to_float(
self.get_clock().now().seconds_nanoseconds()
)
self.curr_control = [control.linear.x, control.angular.z]
def save_collected_data_to_csv(self):
"""
Saves logged data in csv format
"""
# if not os.path.exists(self.output_path):
# os.makedirs(self.output_path)
self.robot_state.to_csv(
path_or_buf=os.path.join(self.output_path, "rosbot_state.csv"),
sep=' ',
index=False
)
self.kinetic_model_state.to_csv(
path_or_buf=os.path.join(self.output_path, "kinematic_model_state.csv"),
sep=' ',
index=False
)
self.nn_model_state.to_csv(
path_or_buf=os.path.join(self.output_path, "nn_model_state.csv"),
sep=' ',
index=False
)
self.robot_control.to_csv(
path_or_buf= os.path.join(self.output_path,"control.csv"),
sep=' ',
index=False
)
pd.DataFrame(data=self.time, columns=['t']).to_csv(
path_or_buf= os.path.join(self.output_path, "time.csv"),
sep=' ',
index=False
)
    def shutdown_logger_callback(self, request, response):
        """
        Callback for the shutdown_logger service,
        turns off the logger node
        """
        rclpy.try_shutdown()
        return response
def on_shutdown(self):
"""
        A function that is executed when the node shuts down.
Plots a graph of all collected data, saves it in csv format.
"""
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
data_plots = build_general_graph_for_rosbot(
robot_state_df=self.robot_state,
control_df=self.robot_control,
time_list=self.time,
save_to_png=True,
path=self.output_path
)
self.save_collected_data_to_csv()
self.get_logger().warn("Output path = {}".format(self.output_path))
def main():
"""
    Creates the logger node and spins it until it is shut down.
"""
rclpy.init()
logger = Logger()
try:
rclpy.spin(logger)
except:
pass
logger.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
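# Example invocation (illustrative only -- the package/executable names are assumptions,
# adjust them to wherever this node is actually installed):
#   ros2 run <your_package> logger --ros-args -p output_path:=/tmp/rosbot_logs -p control_topic:=/cmd_vel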
| 32.033088 | 111 | 0.596236 |
f72f4aa4e427e9d3ee2d0c986960f64ae8d9cdb5 | 1,029 | py | Python | pyfatfs/__init__.py | abrasive/pyfatfs | a35586bfa2b1d3f8d4638142407db68f9318b86d | [
"MIT"
] | null | null | null | pyfatfs/__init__.py | abrasive/pyfatfs | a35586bfa2b1d3f8d4638142407db68f9318b86d | [
"MIT"
] | null | null | null | pyfatfs/__init__.py | abrasive/pyfatfs | a35586bfa2b1d3f8d4638142407db68f9318b86d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Python FAT filesystem module with :doc:`PyFilesystem2 <pyfilesystem2:index>` \
compatibility.
pyfatfs allows interaction with FAT12/16/32 filesystems, either via
:doc:`PyFilesystem2 <pyfilesystem2:index>` for file-level abstraction
or direct interaction with the filesystem for low-level access.
"""
__name__ = 'pyfatfs'
__author__ = 'Nathan-J. Hirschauer'
__author_email__ = 'nathanhi@deepserve.info'
__license__ = 'MIT License'
from pyfatfs._exceptions import PyFATException
#: Specifies default ("OEM") encoding
FAT_OEM_ENCODING = 'ibm437'
#: Specifies the long file name encoding, which is always UTF-16 (LE)
FAT_LFN_ENCODING = 'utf-16-le'
def _init_check(func):
def _wrapper(*args, **kwargs):
initialized = args[0].initialized
if initialized is True:
return func(*args, **kwargs)
else:
raise PyFATException("Class has not yet been fully initialized, "
"please instantiate first.")
return _wrapper
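# Illustrative sketch of how _init_check is meant to be used (the class and attribute
# below are assumptions for the example, not part of this module):
#
#     class SomeFatClass:
#         def __init__(self):
#             self.initialized = True
#
#         @_init_check
#         def read_cluster(self, cluster_id):
#             ...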
| 27.810811 | 78 | 0.696793 |
f72f5ff661ebbd7baa4fdc30f0fe55a852bc6baf | 1,660 | py | Python | pycondor/geodesic_test_proj.py | kant/pycondor | ee87854504e8f4023feda860d8a9ddbecc7a70da | [
"BSD-3-Clause"
] | 6 | 2015-04-17T08:25:30.000Z | 2020-04-11T23:58:16.000Z | pycondor/geodesic_test_proj.py | kant/pycondor | ee87854504e8f4023feda860d8a9ddbecc7a70da | [
"BSD-3-Clause"
] | 3 | 2015-12-22T07:40:02.000Z | 2019-01-21T15:07:00.000Z | pycondor/geodesic_test_proj.py | kant/pycondor | ee87854504e8f4023feda860d8a9ddbecc7a70da | [
"BSD-3-Clause"
] | 6 | 2015-11-13T18:55:22.000Z | 2020-03-12T19:32:56.000Z | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import click
import os
#import json
import pandas as pd
import numpy as np
import pyproj
import matplotlib.pyplot as plt
from geodesic import plot_geodesic
@click.command()
@click.argument('xls_filename')
@click.option('--outdir', default='', help="Output directory - default is 'script_directory\out'")
def main(xls_filename, outdir):
basepath = os.path.dirname(__file__)
#basepath = os.path.dirname(os.path.abspath(__file__))
if outdir=='':
outdir = os.path.join(basepath, 'out')
#xls_filename = os.path.join(outdir, "Provence-Oisans2.xlsx")
filename_base, filename_ext = os.path.splitext(os.path.basename(xls_filename))
d_df = {}
d_df = pd.read_excel(xls_filename, sheetname=None)
max_x, max_y = d_df['ref']['PosX'].max(), d_df['ref']['PosY'].max()
print "max_x=%s, max_y=%s" % (max_x, max_y)
p = pyproj.Proj(
proj='utm',
zone=32,
ellps='WGS84'
)
d_df['measures']['PosX2'] = np.nan
d_df['measures']['PosY2'] = np.nan
for i, point in d_df['measures'].iterrows():
xy2 = p(point['Lat'], point['Lon'])
d_df['measures']['PosX2'][i] = xy2[0]
d_df['measures']['PosY2'][i] = xy2[1]
#print(xy2)
d_df['measures']['Eps'] = np.sqrt(
(d_df['measures']['PosX2'] - d_df['measures']['PosX'])**2 + \
(d_df['measures']['PosY2'] - d_df['measures']['PosY'])**2
)
print(d_df)
print(d_df['measures']['Eps'].mean())
plot_geodesic(outdir, filename_base, d_df['measures'])
plt.show()
#if show:
# plt.show()
if __name__ == "__main__":
main()
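# Example CLI usage (illustrative; assumes the workbook has 'ref' and 'measures'
# sheets with PosX/PosY and Lat/Lon columns):
#   python geodesic_test_proj.py Provence-Oisans2.xlsx --outdir out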
| 26.349206 | 98 | 0.605422 |
f72f76abe1221bf6bd92370f84f0906ef075e999 | 3,133 | py | Python | src/dataset_builder.py | elangovana/large-scale-ptm-ppi | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | 1 | 2022-02-25T22:06:39.000Z | 2022-02-25T22:06:39.000Z | src/dataset_builder.py | elangovana/ppi-aimed | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | null | null | null | src/dataset_builder.py | elangovana/ppi-aimed | cc835df915d12dd20c35f9cea5e40365200a6d3d | [
"MIT"
] | null | null | null | import logging
import os
from torch.utils.data import DataLoader
from locator import Locator
class DatasetBuilder:
def __init__(self, val_data, dataset_factory_name, tokenisor_factory_name, train_data=None, num_workers=None,
batch_size=8, addition_args_dict=None):
self._addition_args_dict = addition_args_dict
self.train_data = train_data
self.val_data = val_data
self.batch_size = batch_size
self._dataset_factory = Locator().get(dataset_factory_name)
self._tokenisor_factory = Locator().get(tokenisor_factory_name)
self.num_workers = num_workers or os.cpu_count() - 1
if self.num_workers <= 0:
self.num_workers = 0
self._tokenisor = None
self._train_dataloader = None
self._train_dataset = None
self._val_dataset = None
self._val_dataloader = None
self._scorers = None
self._label_mapper = None
@property
def _logger(self):
return logging.getLogger(__name__)
def get_tokenisor(self):
self._logger.info("Retrieving Tokeniser")
if self._tokenisor is None:
self._tokenisor = self._tokenisor_factory.get_tokenisor(**self._addition_args_dict)
return self._tokenisor
def get_train_dataset(self):
if self._train_dataset is None:
self._train_dataset = self._dataset_factory.get_dataset(self.train_data,
preprocessors=self.get_tokenisor(),
**self._addition_args_dict)
return self._train_dataset
def get_val_dataset(self):
if self._val_dataset is None:
self._val_dataset = self._dataset_factory.get_dataset(self.val_data, preprocessors=self.get_tokenisor(),
**self._addition_args_dict)
return self._val_dataset
def get_label_mapper(self):
if self._label_mapper is None:
self._label_mapper = self._dataset_factory.get_label_mapper()
return self._label_mapper
def num_classes(self):
return self.get_label_mapper().num_classes
def positive_label_index(self):
return self._label_mapper.positive_label_index
def get_scorers(self):
if self._scorers is None:
self._scorers = self._dataset_factory.get_scorers()
return self._scorers
def get_train_dataloader(self):
if self._train_dataloader is None:
self._train_dataloader = DataLoader(dataset=self.get_train_dataset(), num_workers=self.num_workers,
batch_size=self.batch_size, shuffle=True)
return self._train_dataloader
def get_val_dataloader(self):
if self._val_dataloader is None:
self._val_dataloader = DataLoader(dataset=self.get_val_dataset(), num_workers=self.num_workers,
batch_size=self.batch_size, shuffle=False)
return self._val_dataloader
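# Minimal usage sketch (illustrative only -- the factory names and file paths are
# assumptions; any factories resolvable through Locator() would work):
#
# builder = DatasetBuilder(val_data="data/val.json",
#                          dataset_factory_name="datasets.ppi_dataset_factory.PpiDatasetFactory",
#                          tokenisor_factory_name="datasets.bert_tokenisor_factory.BertTokenisorFactory",
#                          train_data="data/train.json",
#                          batch_size=8,
#                          addition_args_dict={})
# train_dl = builder.get_train_dataloader()
# val_dl = builder.get_val_dataloader()
# print(builder.num_classes())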
| 34.811111 | 116 | 0.635812 |
f72f8d11340df1f1d9eb56840c1b60800b76a5a8 | 827 | py | Python | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null | Calculator.py | EdvinAlvarado/Code-Wars | a3a06a44cda004052b5c0930f3693678c5c92e21 | [
"BSD-2-Clause"
] | null | null | null |
class Calculator(object):
def evaluate(self, string):
print(string)
cmd = [int(s) if s.isdigit() else s for s in string.split(" ")]
cmd = [float(s) if isinstance(s, str) and s.find('.') != -1 else s for s in cmd]
print(cmd)
        for _ in range(sum([1 if s == '*' or s == '/' else 0 for s in cmd])):
for i, p in enumerate(cmd):
if p == '*':
cmd[i - 1] = cmd[i - 1] * cmd[i + 1]
del cmd[i:i+2]
break
elif p == '/':
cmd[i - 1] = cmd[i - 1] / cmd[i + 1]
del cmd[i:i+2]
break
        for _ in range(sum([1 if s == '+' or s == '-' else 0 for s in cmd])):
for i, p in enumerate(cmd):
if p == '+':
cmd[i - 1] = cmd[i - 1] + cmd[i + 1]
del cmd[i:i+2]
break
elif p == '-':
cmd[i - 1] = cmd[i - 1] - cmd[i + 1]
del cmd[i:i+2]
break
return cmd[0] | 22.972222 | 82 | 0.481258 |
f72f8e94a0df815f7d517e2b81ffc86c5c545f07 | 2,893 | py | Python | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/contrib/autograph/utils/multiple_dispatch_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multiple_dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.autograph.utils import multiple_dispatch
from tensorflow.python.client.session import Session
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.platform import test
class MultipleDispatchTest(test.TestCase):
def test_dynamic_is_python(self):
a = np.eye(3)
also_a = a
not_actually_a = np.eye(3)
should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
self.assertTrue(should_be_true1)
self.assertTrue(should_be_true2)
self.assertFalse(should_be_false1)
self.assertFalse(should_be_false2)
def test_dynamic_is_tf(self):
with Session().as_default():
a = constant([2.0])
also_a = a
not_actually_a = constant([2.0])
should_be_true1 = multiple_dispatch.dynamic_is(a, also_a)
should_be_false1 = multiple_dispatch.dynamic_is_not(a, also_a)
should_be_true2 = multiple_dispatch.dynamic_is_not(a, not_actually_a)
should_be_false2 = multiple_dispatch.dynamic_is(a, not_actually_a)
self.assertTrue(should_be_true1)
self.assertTrue(should_be_true2)
self.assertFalse(should_be_false1)
self.assertFalse(should_be_false2)
def test_run_cond_python(self):
true_fn = lambda: (2,)
false_fn = lambda: (3,)
self.assertEqual(multiple_dispatch.run_cond(True, true_fn, false_fn), 2)
self.assertEqual(multiple_dispatch.run_cond(False, true_fn, false_fn), 3)
def test_run_cond_tf(self):
true_fn = lambda: (constant(2),)
false_fn = lambda: (constant(3),)
with Session() as sess:
out = multiple_dispatch.run_cond(constant(True), true_fn, false_fn)
self.assertEqual(sess.run(out), 2)
out = multiple_dispatch.run_cond(constant(False), true_fn, false_fn)
self.assertEqual(sess.run(out), 3)
if __name__ == '__main__':
test.main()
| 38.065789 | 80 | 0.733495 |
f72fa31aa61c2010032ba331da9c46f0c28c8f64 | 586 | py | Python | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 13 | 2021-07-24T20:49:35.000Z | 2021-08-21T18:15:16.000Z | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 2 | 2021-08-17T17:11:09.000Z | 2021-09-01T19:05:17.000Z | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 1 | 2021-07-29T16:36:35.000Z | 2021-07-29T16:36:35.000Z | from dataclasses import dataclass
from typing import Optional, List
@dataclass
class RecipeConstraints:
meal: Optional[str] = None
ingredients: Optional[List[str]] = None
@dataclass
class Printable:
title: str = ""
ingredients: str = ""
preparation: str = ""
error_message: Optional[str] = None
warning_message: Optional[str] = None
info_message: Optional[str] = None
@dataclass
class FetchingError(Exception):
message: str = "An error ocurred"
@dataclass
class PrintInterrupt(Exception):
printable: Printable
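# Illustrative example of how these containers fit together (the meal and
# ingredient values are made up for the example):
#
# constraints = RecipeConstraints(meal="dinner", ingredients=["rice", "beans"])
# result = Printable(title="Rice & beans", ingredients="rice, beans", preparation="Boil and mix.")
# raise PrintInterrupt(result)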
| 20.206897 | 44 | 0.677474 |
f72fac17ae338185f2e1470376f1a6802595e44a | 1,514 | py | Python | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | 5 | 2020-05-19T07:32:39.000Z | 2022-03-14T09:09:48.000Z | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | null | null | null | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | 3 | 2020-04-02T08:30:17.000Z | 2020-05-03T12:12:05.000Z | # The MIT License (MIT)
#
# Copyright (c) 2015-present, Xiaoyou Chen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
from vnpy.trader.app import BaseApp
from .engine import WebEngine, APP_NAME
class WebTraderApp(BaseApp):
""""""
app_name = APP_NAME
app_module = __module__
app_path = Path(__file__).parent
display_name = "Web服务"
engine_class = WebEngine
widget_name = "WebManager"
icon_name = "web.ico"
| 37.85 | 81 | 0.738441 |
f72fb1455f5a6d221b2e71016d342136e0b43efe | 127 | py | Python | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | 3 | 2017-02-27T02:13:52.000Z | 2017-03-05T03:54:25.000Z | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | null | null | null | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | null | null | null | # instance/config.py
SECRET_KEY = 'p9Bv<3Eid9%$i01'
SQLALCHEMY_DATABASE_URI = 'mysql://esss_admin:esss2017@localhost/esss_db'
| 25.4 | 73 | 0.787402 |
f72fbfc6053fee1b605915399588d9a35599ebe1 | 13,242 | py | Python | care/utils/tests/test_base.py | agzuniverse/care | 952babf5b394921fcdb4fd4b1405cb571261f322 | [
"MIT"
] | null | null | null | care/utils/tests/test_base.py | agzuniverse/care | 952babf5b394921fcdb4fd4b1405cb571261f322 | [
"MIT"
] | null | null | null | care/utils/tests/test_base.py | agzuniverse/care | 952babf5b394921fcdb4fd4b1405cb571261f322 | [
"MIT"
] | null | null | null | import abc
import datetime
from collections import OrderedDict
from typing import Any, Dict
import dateparser
from django.contrib.gis.geos import Point
from pytz import unicode
from rest_framework import status
from rest_framework.test import APITestCase
from care.facility.models import (
CATEGORY_CHOICES,
DISEASE_CHOICES_MAP,
SYMPTOM_CHOICES,
Disease,
DiseaseStatusEnum,
Facility,
LocalBody,
PatientConsultation,
PatientRegistration,
User,
)
from care.users.models import District, State
from config.tests.helper import EverythingEquals, mock_equal
class TestBase(APITestCase):
"""
Base class for tests, handles most of the test setup and tools for setting up data
"""
maxDiff = None
@classmethod
def create_user(cls, district: District, username: str = "user", **kwargs):
data = {
"email": f"{username}@somedomain.com",
"phone_number": "5554446667",
"age": 30,
"gender": 2,
"verified": True,
"username": username,
"password": "bar",
"district": district,
"user_type": User.TYPE_VALUE_MAP["Staff"],
}
data.update(kwargs)
return User.objects.create_user(**data)
@classmethod
def create_super_user(cls, district: District, username: str = "superuser"):
user = cls.create_user(district=district, username=username, user_type=User.TYPE_VALUE_MAP["DistrictAdmin"],)
user.is_superuser = True
user.save()
return user
@classmethod
def create_district(cls, state: State):
return District.objects.create(state=state, name=f"District{datetime.datetime.now().timestamp()}")
@classmethod
def create_state(cls):
return State.objects.create(name=f"State{datetime.datetime.now().timestamp()}")
@classmethod
def create_facility(cls, district: District, user: User = None, **kwargs):
user = user or cls.user
data = {
"name": "Foo",
"district": district,
"facility_type": 1,
"address": "8/88, 1st Cross, 1st Main, Boo Layout",
"location": Point(24.452545, 49.878248),
"oxygen_capacity": 10,
"phone_number": "9998887776",
"created_by": user,
}
data.update(kwargs)
f = Facility(**data)
f.save()
return f
@classmethod
def create_patient(cls, **kwargs):
patient_data = cls.get_patient_data().copy()
patient_data.update(kwargs)
medical_history = patient_data.pop("medical_history", [])
district_id = patient_data.pop("district", None)
state_id = patient_data.pop("state", None)
patient_data.update(
{
"district_id": district_id,
"state_id": state_id,
"disease_status": getattr(DiseaseStatusEnum, patient_data["disease_status"]).value,
}
)
patient = PatientRegistration.objects.create(**patient_data)
diseases = [
Disease.objects.create(patient=patient, disease=DISEASE_CHOICES_MAP[mh["disease"]], details=mh["details"])
for mh in medical_history
]
patient.medical_history.set(diseases)
return patient
@classmethod
def get_user_data(cls, district: District = None, user_type: str = None):
"""
Returns the data to be used for API testing
Returns:
dict
Params:
district: District
user_type: str(A valid mapping for the integer types mentioned inside the models)
"""
district = district or cls.district
user_type = user_type or User.TYPE_VALUE_MAP["Staff"]
return {
"user_type": user_type,
"district": district,
"state": district.state,
"phone_number": "8887776665",
"gender": 2,
"age": 30,
"email": "foo@foobar.com",
"username": "user",
"password": "bar",
}
@classmethod
def get_facility_data(cls, district):
"""
Returns the data to be used for API testing
Returns:
dict
Params:
district: int
An id for the instance of District object created
user_type: str
A valid mapping for the integer types mentioned inside the models
"""
return {
"name": "Foo",
"district": (district or cls.district).id,
"facility_type": 1,
"address": f"Address {datetime.datetime.now().timestamp}",
"location": {"latitude": 49.878248, "longitude": 24.452545},
"oxygen_capacity": 10,
"phone_number": "9998887776",
"capacity": [],
}
@classmethod
def get_patient_data(cls, district=None, state=None):
return {
"name": "Foo",
"age": 32,
"date_of_birth": datetime.date(1992, 4, 1),
"gender": 2,
"is_medical_worker": True,
"blood_group": "O+",
"ongoing_medication": "",
"date_of_return": datetime.datetime(2020, 4, 1, 15, 30, 00),
"disease_status": "SUSPECTED",
"phone_number": "+918888888888",
"address": "Global citizen",
"contact_with_confirmed_carrier": True,
"contact_with_suspected_carrier": True,
"estimated_contact_date": None,
"past_travel": False,
"countries_travelled": "",
"present_health": "Fine",
"has_SARI": False,
"is_active": True,
"state": (state or cls.state).id,
"district": (district or cls.district).id,
"local_body": None,
"number_of_aged_dependents": 2,
"number_of_chronic_diseased_dependents": 1,
"medical_history": [{"disease": "Diabetes", "details": "150 count"}],
"date_of_receipt_of_information": datetime.datetime(2020, 4, 1, 15, 30, 00),
}
@classmethod
def setUpClass(cls) -> None:
super(TestBase, cls).setUpClass()
cls.state = cls.create_state()
cls.district = cls.create_district(cls.state)
cls.user_type = User.TYPE_VALUE_MAP["Staff"]
cls.user = cls.create_user(cls.district)
cls.super_user = cls.create_super_user(district=cls.district)
cls.facility = cls.create_facility(cls.district)
cls.patient = cls.create_patient()
cls.user_data = cls.get_user_data(cls.district, cls.user_type)
cls.facility_data = cls.get_facility_data(cls.district)
cls.patient_data = cls.get_patient_data(cls.district)
def setUp(self) -> None:
self.client.force_login(self.user)
@abc.abstractmethod
def get_base_url(self):
"""
Should return the base url of the testing viewset
WITHOUT trailing slash
eg: return "api/v1/facility"
:return: str
"""
raise NotImplementedError()
def get_url(self, entry_id=None, action=None, *args, **kwargs):
url = self.get_base_url(*args, **kwargs)
if entry_id is not None:
url = f"{url}/{entry_id}"
if action is not None:
url = f"{url}/{action}"
return f"{url}/"
@classmethod
def clone_object(cls, obj, save=True):
new_obj = obj._meta.model.objects.get(pk=obj.id)
new_obj.pk = None
new_obj.id = None
if save:
new_obj.save()
return new_obj
@abc.abstractmethod
def get_list_representation(self, obj) -> dict:
"""
Returns the dict representation of the obj in list API
:param obj: Object to be represented
:return: dict
"""
raise NotImplementedError()
@abc.abstractmethod
def get_detail_representation(self, obj=None) -> dict:
"""
Returns the dict representation of the obj in detail/retrieve API
:param obj: Object to be represented
:param data: data
:return: dict
"""
raise NotImplementedError()
def get_local_body_district_state_representation(self, obj):
"""
Returns the local body, district and state representation for the obj.
        The obj is expected to have `local_body`, `district` and `state` in its attributes
Eg: Facility, Patient, User
:param obj: Any object which has `local_body`, `district` and `state` in attrs
:return:
"""
response = {}
response.update(self.get_local_body_representation(getattr(obj, "local_body", None)))
response.update(self.get_district_representation(getattr(obj, "district", None)))
response.update(self.get_state_representation(getattr(obj, "state", None)))
return response
def get_local_body_representation(self, local_body: LocalBody):
if local_body is None:
return {"local_body": None, "local_body_object": None}
else:
return {
"local_body": local_body.id,
"local_body_object": {
"id": local_body.id,
"name": local_body.name,
"district": local_body.district.id,
},
}
def get_district_representation(self, district: District):
if district is None:
return {"district": None, "district_object": None}
return {
"district": district.id,
"district_object": {"id": district.id, "name": district.name, "state": district.state.id,},
}
def get_state_representation(self, state: State):
if state is None:
return {"state": None, "state_object": None}
return {"state": state.id, "state_object": {"id": state.id, "name": state.name}}
def assertDictEqual(self, first: Dict[Any, Any], second: Dict[Any, Any], msg: Any = ...) -> None:
first_dict = self._convert_to_matchable_types(first.copy())
second_dict = self._convert_to_matchable_types(second.copy())
return super(TestBase, self).assertDictEqual(first_dict, second_dict, msg)
def _convert_to_matchable_types(self, d):
def dict_to_matching_type(d: dict):
return {k: to_matching_type(k, v) for k, v in d.items()}
def to_matching_type(name: str, value):
if isinstance(value, (OrderedDict, dict)):
return dict_to_matching_type(dict(value))
elif isinstance(value, list):
return [to_matching_type("", v) for v in value]
elif "date" in name and not isinstance(value, (type(None), EverythingEquals)):
return_value = value
if isinstance(value, (str, unicode,)):
return_value = dateparser.parse(value)
return (
return_value.astimezone(tz=datetime.timezone.utc)
if isinstance(return_value, datetime.datetime)
else return_value
)
return value
return dict_to_matching_type(d)
def execute_list(self, user=None):
user = user or self.user
self.client.force_authenticate(user)
response = self.client.get(self.get_url(), format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
return response
def get_facility_representation(self, facility):
if facility is None:
return facility
else:
return {
"id": facility.id,
"name": facility.name,
"facility_type": {"id": facility.facility_type, "name": facility.get_facility_type_display()},
**self.get_local_body_district_state_representation(facility),
}
@classmethod
def get_consultation_data(cls):
return {
"patient": cls.patient,
"facility": cls.facility,
"symptoms": [SYMPTOM_CHOICES[0][0], SYMPTOM_CHOICES[1][0]],
"other_symptoms": "No other symptoms",
"symptoms_onset_date": datetime.datetime(2020, 4, 7, 15, 30),
"category": CATEGORY_CHOICES[0][0],
"examination_details": "examination_details",
"existing_medication": "existing_medication",
"prescribed_medication": "prescribed_medication",
"suggestion": PatientConsultation.SUGGESTION_CHOICES[0][0],
"referred_to": None,
"admitted": False,
"admitted_to": None,
"admission_date": None,
"discharge_date": None,
"created_date": mock_equal,
"modified_date": mock_equal,
}
@classmethod
def create_consultation(cls, patient=None, facility=None, **kwargs) -> PatientConsultation:
data = cls.get_consultation_data()
kwargs.update({"patient": patient or cls.patient, "facility": facility or cls.facility})
data.update(kwargs)
return PatientConsultation.objects.create(**data)
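# Illustrative sketch of a concrete test case built on TestBase (the URL and field
# names below are assumptions, not part of this module):
#
# class TestFacilityApi(TestBase):
#     def get_base_url(self):
#         return "api/v1/facility"
#
#     def get_list_representation(self, obj) -> dict:
#         return {"id": obj.id, "name": obj.name, **self.get_local_body_district_state_representation(obj)}
#
#     def get_detail_representation(self, obj=None) -> dict:
#         return self.get_list_representation(obj)
#
#     def test_list(self):
#         response = self.execute_list()
#         self.assertIn("results", response.json())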
| 35.596774 | 118 | 0.589337 |
f72fcde236c44645e86f524db09b26ce3bfb931b | 3,649 | py | Python | server/core.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | null | null | null | server/core.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | null | null | null | server/core.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | 1 | 2020-11-30T06:11:02.000Z | 2020-11-30T06:11:02.000Z | import os
from pathlib import Path
import numpy as np
from PIL import Image
import requests
from google.cloud import storage
import base64
from io import BytesIO
import uuid
__all__ = ['do', 'recaptcha_check']
def predict_and2jpg(model, cap):
''' cap: "white hair yellow eyes", returns: jpeg file buffer remember to close it or use with '''
img, _ = model.predict(cap)
img = Image.fromarray(np.uint8(img.numpy()))
buf = BytesIO()
img.save(buf, format='JPEG')
buf.seek(0)
return buf
# import matplotlib.pyplot as plt
# from deep_t2i.model_anime_heads import ExportedModel
# from deep_t2i.inference_anime_heads import predict
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# with predict_and2jpg(model, "white hair yellow eyes") as buf:
# img = Image.open(buf)
# plt.imshow(img)
# plt.show()
gs_bucket_id = os.getenv('gs_bucket_id')
def upload_to_gs(client, img_file):
"upload img_file to google storage name it fname and return url"
bucket = client.bucket(gs_bucket_id)
fname = f'{uuid.uuid4().hex[:8]}.jpg'
blob = bucket.blob(fname)
blob.upload_from_file(img_file, content_type="image/jpeg")
return f'https://storage.googleapis.com/{gs_bucket_id}/{fname}'
# from deep_t2i.model_anime_heads import ExportedModel
# from deep_t2i.inference_anime_heads import predict
# gs_client = storage.Client()
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# with predict_and2jpg(model, "white hair yellow eyes") as buf:
# url = upload_to_gs(gs_client, buf)
# print(url)
imgur_client_id = os.getenv('imgur_client_id')
def upload_to_imgur(img_file):
"upload img_file to imgur and return url"
img = img_file.read()
img = base64.standard_b64encode(img)
url = "https://api.imgur.com/3/image"
data = {'image': img, 'type': 'base64'}
headers = { 'Authorization': f'Client-ID {imgur_client_id}' }
res = requests.post(url, headers=headers, data=data).json()
if res['success']: return res["data"]["link"]
else:
raise Exception("Failed to upload to imgur")
# from deep_t2i.model_anime_heads import ExportedModel
# from deep_t2i.inference_anime_heads import predict
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# with predict_and2jpg(model, "white hair yellow eyes") as buf:
# url = upload_to_imgur(buf)
# print(url)
def save_to_tmp(img_file):
" save img_file to ./tmp_jpg/ "
img = Image.open(img_file)
fname = f'{uuid.uuid4().hex[:8]}.jpg'
path = f'./temp_jpg/{fname}'
img.save(path)
return path
# from deep_t2i.model_anime_heads import ExportedModel
# from deep_t2i.inference_anime_heads import predict
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# with predict_and2jpg(model, "white hair yellow eyes") as buf:
# url = save_to_tmp(buf)
# print(url)
img_server = os.getenv("img_server")
gs_client = storage.Client() if img_server=="gs" else None
def do(model, cap):
"generate image from model, upload image to img_server and return link"
with predict_and2jpg(model, cap) as buf:
if img_server=="gs":
url = upload_to_gs(gs_client, buf)
elif img_server=="imgur":
url = upload_to_imgur(buf)
else:
url = save_to_tmp(buf)
return url
# Recaptcha check
recaptcha_secret = os.getenv('recaptcha_secret')
def recaptcha_check(token):
if token is None: return False
url = "https://www.google.com/recaptcha/api/siteverify"
data = {
'secret': recaptcha_secret,
'response': token,
}
r = requests.post(url=url, data=data)
return r.json()['success']
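# Example wiring (illustrative, in the same spirit as the commented examples above;
# the checkpoint path and token value are assumptions):
# from deep_t2i.model_anime_heads import ExportedModel
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# if recaptcha_check(token):
#     print(do(model, "white hair yellow eyes"))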
| 34.424528 | 101 | 0.698822 |
f7300106c5722946f16c6d7a68325a64b58c05ce | 787 | py | Python | tests/test_convention.py | henhans/TBmodels | 7424acaea8d91850d80bb48898af875430f25fa0 | [
"Apache-2.0"
] | 1 | 2021-01-18T13:55:40.000Z | 2021-01-18T13:55:40.000Z | tests/test_convention.py | henhans/TBmodels | 7424acaea8d91850d80bb48898af875430f25fa0 | [
"Apache-2.0"
] | null | null | null | tests/test_convention.py | henhans/TBmodels | 7424acaea8d91850d80bb48898af875430f25fa0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
import numpy as np
import pythtb as pt
import tbmodels as tb
def test_compare_pythtb():
pt_model = pt.tb_model(1, 1, lat=[[1]], orb=[[0], [0.2]])
tb_model = tb.Model(dim=1, pos=[[0], [0.2]], uc=[[1]])
pt_model.set_hop(3j, 0, 1, [1])
tb_model.add_hop(3j, 0, 1, [1])
assert np.isclose(pt_model._gen_ham([0]), tb_model.hamilton([0])).all()
assert np.isclose(pt_model._gen_ham([0]), tb_model.hamilton([0], convention=1)).all()
assert np.isclose(pt_model._gen_ham([1]), tb_model.hamilton([1], convention=1)).all()
assert np.isclose(pt_model._gen_ham([0.2]), tb_model.hamilton(0.2, convention=1)).all()
| 32.791667 | 91 | 0.655654 |
f7300271abe53d1c530313d92118bc1bdad057e3 | 2,612 | py | Python | ooni/report/cli.py | irl/ooni-probe | c21861c28ca6bd667715872d099006fab87222fd | [
"BSD-2-Clause"
] | null | null | null | ooni/report/cli.py | irl/ooni-probe | c21861c28ca6bd667715872d099006fab87222fd | [
"BSD-2-Clause"
] | null | null | null | ooni/report/cli.py | irl/ooni-probe | c21861c28ca6bd667715872d099006fab87222fd | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import os
import sys
from ooni import canonical_bouncer
from ooni.report import __version__
from ooni.report import tool
from ooni.settings import config
from twisted.python import usage
class Options(usage.Options):
synopsis = """%s [options] upload | status
""" % (os.path.basename(sys.argv[0]),)
optFlags = [
["default-collector", "d", "Upload the reports to the default "
"collector that is looked up with the "
"canonical bouncer."]
]
optParameters = [
["configfile", "f", None,
"Specify the configuration file to use."],
["collector", "c", None,
"Specify the collector to upload the result to."],
["bouncer", "b", None,
"Specify the bouncer to query for a collector."]
]
def opt_version(self):
print("oonireport version: %s" % __version__)
sys.exit(0)
def parseArgs(self, *args):
if len(args) == 0:
raise usage.UsageError(
"Must specify at least one command"
)
return
self['command'] = args[0]
if self['command'] not in ("upload", "status"):
raise usage.UsageError(
"Must specify either command upload or status"
)
if self['command'] == "upload":
try:
self['report_file'] = args[1]
except IndexError:
self['report_file'] = None
def tor_check():
if not config.tor.socks_port:
print("Currently oonireport requires that you start Tor yourself "
"and set the socks_port inside of ooniprobe.conf")
sys.exit(1)
def run():
options = Options()
try:
options.parseOptions()
except Exception as exc:
print("Error: %s" % exc)
print(options)
sys.exit(2)
config.global_options = dict(options)
config.set_paths()
config.read_config_file()
if options['default-collector']:
options['bouncer'] = canonical_bouncer
if options['command'] == "upload" and options['report_file']:
tor_check()
return tool.upload(options['report_file'],
options['collector'],
options['bouncer'])
elif options['command'] == "upload":
tor_check()
return tool.upload_all(options['collector'],
options['bouncer'])
elif options['command'] == "status":
return tool.status()
else:
print(options)
| 28.703297 | 74 | 0.561256 |
f7300bb85137e40f00493456c34280037c0a2f36 | 273 | py | Python | src/oscar/apps/offer/config.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/offer/config.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/offer/config.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OfferConfig(AppConfig):
label = 'offer'
name = 'oscar.apps.offer'
verbose_name = _('Offer')
def ready(self):
from . import signals # noqa
| 22.75 | 56 | 0.663004 |
f7301503bf0efbff166667ede074659aa5f11e70 | 391 | py | Python | Twitter/wsgi.py | sonus21/MiniTwitter | b62c0c540c1726fc7e6197b33514d7af15f1b58e | [
"BSD-2-Clause"
] | null | null | null | Twitter/wsgi.py | sonus21/MiniTwitter | b62c0c540c1726fc7e6197b33514d7af15f1b58e | [
"BSD-2-Clause"
] | null | null | null | Twitter/wsgi.py | sonus21/MiniTwitter | b62c0c540c1726fc7e6197b33514d7af15f1b58e | [
"BSD-2-Clause"
] | null | null | null | """
WSGI config for Twitter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Twitter.settings")
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
f7301f02b847c99bc61b46703e6c682dfcb86ed7 | 2,745 | py | Python | bidobe/astunit.py | pbrus/binary-doppler-beaming | fb7b8e58d36da41759d643a58270a76f61bd5c90 | [
"MIT"
] | 1 | 2018-06-19T18:35:55.000Z | 2018-06-19T18:35:55.000Z | bidobe/astunit.py | pbrus/binary-doppler-beaming | fb7b8e58d36da41759d643a58270a76f61bd5c90 | [
"MIT"
] | null | null | null | bidobe/astunit.py | pbrus/binary-doppler-beaming | fb7b8e58d36da41759d643a58270a76f61bd5c90 | [
"MIT"
] | null | null | null | """
Store physical constants and calculate astronomical units
from and to the International System of Units.
"""
class UnitsConverter:
"""
UnitsConverter converts different astronomical units
from and to the International System of Units (SI).
"""
# All constants in SI units.
G = 6.67408e-11
LIGHT_SPEED = 2.99792458e8
PLANCK_CONSTANT = 6.62606979e-34
BOLTZMANN_CONSTANT = 1.38064852e-23
STEFAN_BOLTZMANN_CONSTANT = 5.670367e-8
SUN_MASS = 1.9884e30
SUN_RADIUS = 6.957e8
AU = 1.49597e11
PARSEC = 3.086e16
DAY = 86400
MINUTE = 60
def convert_sun_mass_to_kg(self, mass):
"""Convert mass in the solar mass to kilograms."""
return mass*self.SUN_MASS
def convert_kg_to_sun_mass(self, mass):
"""Convert mass in kilograms to the solar mass."""
return mass/self.SUN_MASS
def convert_days_to_sec(self, days):
"""Convert time in days to seconds."""
return days*self.DAY
def convert_sec_to_days(self, seconds):
"""Convert time in seconds to days."""
return seconds/self.DAY
def convert_min_to_sec(self, minutes):
"""Convert time in minutes to seconds."""
return self.MINUTE*minutes
def convert_sec_to_min(self, seconds):
"""Convert time in seconds to minutes."""
return seconds/self.MINUTE
def convert_hours_to_sec(self, minutes):
"""Convert time in hours to seconds."""
return (self.MINUTE**2)*minutes
def convert_sec_to_hours(self, seconds):
"""Convert time in seconds to hours."""
return seconds/(self.MINUTE**2)
def convert_au_to_m(self, au):
"""Convert length in the Astronomical Units to meters."""
return au*self.AU
def convert_m_to_au(self, meters):
"""Convert length in meters to the Astronomical Units."""
return meters/self.AU
def convert_kmps_to_mps(self, speed):
"""Convert speed in kilometers per second to meters per second."""
return 1000.0*speed
def convert_mps_to_kmps(self, speed):
"""Convert speed in meters per second to kilometers per second."""
return speed/1000.0
def convert_m_to_sun_radius(self, meters):
"""Convert length in meters to the solar radius."""
return meters/self.SUN_RADIUS
def convert_sun_radius_to_m(self, radii):
"""Convert length in the solar radius to meters."""
return self.SUN_RADIUS*radii
def convert_m_to_parsec(self, meters):
"""Convert length in meters to parsec."""
return meters/self.PARSEC
def convert_parsec_to_m(self, parsecs):
"""Convert length in parsec to meters."""
return parsecs*self.PARSEC
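# Illustrative usage (values are examples only):
# conv = UnitsConverter()
# print(conv.convert_sun_mass_to_kg(1.0))   # ~1.9884e30 kg
# print(conv.convert_parsec_to_m(1.0))      # ~3.086e16 m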
| 30.5 | 74 | 0.660109 |
f7302747c69d4cf11b6c0eb5fdafe30a97f317af | 4,356 | py | Python | KoreanLipNet/training/overlapped_speakers/train.py | khujay15/koreanLipNet | 9db7524c7f3a577841cff88c7cd195e941c06fd6 | [
"MIT"
] | 1 | 2020-05-19T01:47:24.000Z | 2020-05-19T01:47:24.000Z | KoreanLipNet/training/overlapped_speakers/train.py | youda9/koreanLipNet | 46e1304477b2bd275206559e21815d204a5d1a72 | [
"MIT"
] | null | null | null | KoreanLipNet/training/overlapped_speakers/train.py | youda9/koreanLipNet | 46e1304477b2bd275206559e21815d204a5d1a72 | [
"MIT"
] | null | null | null | from keras.optimizers import Adam
from keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint
from lipnet.lipreading.generators import BasicGenerator
from lipnet.lipreading.callbacks import Statistics, Visualize
from lipnet.lipreading.curriculums import Curriculum
from lipnet.core.decoders import Decoder
from lipnet.lipreading.helpers import labels_to_text
from lipnet.utils.spell import Spell
from lipnet.model2 import LipNet
import numpy as np
import datetime
import os
import sys
np.random.seed(55) # seed value setting
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__)) # train.py path
PREDICT_GREEDY = False # ??
PREDICT_BEAM_WIDTH = 200 # ??
PREDICT_DICTIONARY = os.path.join(CURRENT_PATH,'..','..','common','dictionaries','grid.txt') # predict dic
def curriculum_rules(epoch):
return { 'sentence_length': -1, 'flip_probability': 0.5, 'jitter_probability': 0.05 }
def train(run_name, speaker, start_epoch, stop_epoch, img_c, img_w, img_h, frames_n, absolute_max_string_len, minibatch_size):
DATASET_DIR = os.path.join(CURRENT_PATH, speaker, 'datasets') #datasets dir path
OUTPUT_DIR = os.path.join(CURRENT_PATH, speaker, 'results') #results dir path
LOG_DIR = os.path.join(CURRENT_PATH, speaker, 'logs') #logs dir path
curriculum = Curriculum(curriculum_rules)
print "Generator start -- "
lip_gen = BasicGenerator(dataset_path=DATASET_DIR,
minibatch_size=minibatch_size,
img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
absolute_max_string_len=absolute_max_string_len,
curriculum=curriculum, start_epoch=start_epoch).build()
print "Generator finish --"
lipnet = LipNet(img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
absolute_max_string_len=absolute_max_string_len, output_size=lip_gen.get_output_size())
lipnet.summary()
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
lipnet.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)
# load weight if necessary
if start_epoch > 0:
weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
lipnet.model.load_weights(weight_file)
print "spell start--"
spell = Spell(path=PREDICT_DICTIONARY)
print "spell finish--"
print "decoder start--"
decoder = Decoder(greedy=PREDICT_GREEDY, beam_width=PREDICT_BEAM_WIDTH,
postprocessors=[labels_to_text, spell.sentence])
print "decoder finish--"
# define callbacks
statistics = Statistics(lipnet, lip_gen.next_val(), decoder, 256, output_dir=os.path.join(OUTPUT_DIR, run_name))
visualize = Visualize(os.path.join(OUTPUT_DIR, run_name), lipnet, lip_gen.next_val(), decoder, num_display_sentences=minibatch_size)
#tensorboard = TensorBoard(log_dir=os.path.join(LOG_DIR, run_name))
csv_logger = CSVLogger(os.path.join(LOG_DIR, "{}-{}.csv".format('training',run_name)), separator=',', append=True)
checkpoint = ModelCheckpoint(os.path.join(OUTPUT_DIR, run_name, "weights{epoch:02d}.h5"), monitor='val_loss', save_weights_only=True, mode='auto', period=1)
lipnet.model.fit_generator(generator=lip_gen.next_train(),
steps_per_epoch=lip_gen.default_training_steps, epochs=stop_epoch,
validation_data=lip_gen.next_val(), validation_steps=lip_gen.default_validation_steps,
callbacks=[checkpoint, statistics, visualize, lip_gen, csv_logger],
initial_epoch=start_epoch,
verbose=1,
max_q_size=5,
workers=2,
pickle_safe=True)
if __name__ == '__main__':
run_name = datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S') # now time ex)2019:05:15:16:14:20
speaker = sys.argv[1] # speaker : s{1}
train(run_name, speaker, 0, 5000, 3, 100, 50, 75, 32, 2) #5000 epoch color 100x50 75frames 32len string minibatch_size 50
# train(run_name, speaker, 0, 5000, 3, 100, 50, 75, 32, 50) #5000 epoch color 100x50 75frames 32len string minibatch_size 50
| 49.5 | 161 | 0.688246 |
f730342ab5ef9de61150bbe987c938e724c98ab0 | 793 | py | Python | taxcli/helper/invoice_files.py | Nukesor/taxcli | f313acd2f1c9a551361a535f8428a17c53e6b468 | [
"MIT"
] | null | null | null | taxcli/helper/invoice_files.py | Nukesor/taxcli | f313acd2f1c9a551361a535f8428a17c53e6b468 | [
"MIT"
] | null | null | null | taxcli/helper/invoice_files.py | Nukesor/taxcli | f313acd2f1c9a551361a535f8428a17c53e6b468 | [
"MIT"
] | null | null | null | import os
def get_invoice_files(invoices, year=False):
for invoice in invoices:
if invoice.invoice_file:
# Get folder for this invoice and create it if it doesn't exist
if not invoice.afa:
folder = invoice.invoice_type.name
else:
folder = 'afa'
if not os.path.exists(folder):
os.mkdir(folder)
invoice_name = '{}-{}-{}.{}'.format(
invoice.contact_alias,
invoice.invoice_number,
invoice.date.isoformat(),
invoice.invoice_file_type,
)
path = os.path.join(folder, invoice_name)
with open(path, "wb") as invoice_file:
invoice_file.write(invoice.invoice_file)
| 33.041667 | 75 | 0.538462 |
f7307ab549cd6a961231f29a99a04cc4dd37c4f8 | 2,822 | py | Python | StreamPy/TestExamplesListToStreams/test_example_list_multi_in_multi_out_stateful.py | AnomalyInc/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | 2 | 2017-04-27T11:04:27.000Z | 2019-02-07T21:03:32.000Z | StreamPy/TestExamplesListToStreams/test_example_list_multi_in_multi_out_stateful.py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | StreamPy/TestExamplesListToStreams/test_example_list_multi_in_multi_out_stateful.py | StreamPy/StreamPy | 94abca276b2857de48259f4f42ef95efbdf5f6d1 | [
"Apache-2.0"
] | null | null | null | """This module contains examples of stream_func where f_type
is 'element' and stream_func has a list of multiple input streams,
a single output stream, and the operation is stateless. These
examples must have a LIST of input streams and not a single
input stream.
The functions on static Python data structures are of the form:
list -> element
"""
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from functools import partial
from Stream import Stream, _no_value
from Operators import stream_func
import numpy as np
from stream_test import *
def inrange_and_outlier_streams(x_and_y_streams, a, b, delta):
def inrange_and_outlier(x_and_y_lists, state):
num_inrange, num_outliers = state
z_list = zip(*x_and_y_lists)
inrange_list, outliers_list = [], []
for v in z_list:
if abs(a*v[0] + b -v[1]) > delta:
# outlier
num_outliers += 1
percentage_outliers = num_outliers/float(num_outliers+num_inrange)
outliers_list.append((v, percentage_outliers))
else:
# in range
num_inrange += 1
percentage_outliers = num_outliers/float(num_outliers+num_inrange)
inrange_list.append((v, percentage_outliers))
state = num_inrange, num_outliers
return ([inrange_list, outliers_list], state)
return stream_func(
inputs=x_and_y_streams, f_type='list',
f=inrange_and_outlier, num_outputs=2,
state=(0,0))
def test():
x = Stream('input_0')
y = Stream('input_1')
inrange_stream, outlier_stream = inrange_and_outlier_streams(
x_and_y_streams=[x,y], a=1, b=0, delta=3)
inrange_stream.set_name('inrange')
outlier_stream.set_name('outlier')
check(inrange_stream, [((3, 4), 0.0), ((8, 8), 1.0 / 3.0), ((12, 12), 0.4)])
check(outlier_stream, [((5, 9), 0.5), ((10, 15), 0.5), ((21, 11), 0.5)])
print
# Add values to the tail of stream x.
x.extend([3, 5, 8, 10])
y.extend([4, 9, 8, 15])
# Print recent values of the streams
print 'recent values of input streams'
x.print_recent()
y.print_recent()
print 'recent values of output streams'
inrange_stream.print_recent()
outlier_stream.print_recent()
print
# Add more values to the tail of stream x.
x.extend([12, 21, 13])
y.extend([12, 11])
# Print recent values of the streams
print 'recent values of input streams'
x.print_recent()
y.print_recent()
print 'recent values of output streams'
inrange_stream.print_recent()
outlier_stream.print_recent()
check_empty()
if __name__ == '__main__':
test()
| 28.795918 | 82 | 0.642807 |
f7309bc8a28f4f8cdb7fb8535f464ad2bbe04bfe | 392 | py | Python | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-04-27T14:47:30.000Z | 2020-04-27T14:47:30.000Z | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-06-09T15:23:04.000Z | 2020-06-09T15:23:04.000Z | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-01-13T17:10:13.000Z | 2020-01-13T17:10:13.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 CERN.
# Copyright (C) 2018-2019 RERO.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
def test_version():
"""Test version import."""
from invenio_circulation import __version__
assert __version__
| 23.058824 | 77 | 0.701531 |
f730c75227bfe93ee337638f1e0109a02d1051eb | 5,810 | py | Python | examples/python/01-list-properties.py | lkucalaba/tiscamera | e1fa7b21bb4dd777ae8039dfa072cfa2daa88244 | [
"Apache-2.0"
] | 241 | 2015-02-20T09:10:41.000Z | 2022-03-18T08:53:26.000Z | examples/python/01-list-properties.py | lkucalaba/tiscamera | e1fa7b21bb4dd777ae8039dfa072cfa2daa88244 | [
"Apache-2.0"
] | 435 | 2015-01-19T10:18:01.000Z | 2022-03-28T08:03:08.000Z | examples/python/01-list-properties.py | lkucalaba/tiscamera | e1fa7b21bb4dd777ae8039dfa072cfa2daa88244 | [
"Apache-2.0"
] | 141 | 2015-01-03T17:54:08.000Z | 2022-02-09T09:55:15.000Z | #!/usr/bin/env python3
# Copyright 2017 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example will show you how to list available properties
#
import sys
import gi
gi.require_version("Tcam", "0.1")
gi.require_version("Gst", "1.0")
from gi.repository import Tcam, Gst
def list_properties(camera):
property_names = camera.get_tcam_property_names()
for name in property_names:
(ret, value,
min_value, max_value,
default_value, step_size,
value_type, flags,
category, group) = camera.get_tcam_property(name)
if not ret:
print("could not receive value {}".format(name))
continue
if value_type == "integer" or value_type == "double":
print("{}({}) value: {} default: {} min: {} max: {} grouping: {} - {}".format(name,
value_type,
value, default_value,
min_value, max_value,
category, group))
elif value_type == "string":
print("{}(string) value: {} default: {} grouping: {} - {}".format(name,
value,
default_value,
category,
group))
elif value_type == "button":
print("{}(button) grouping is {} - {}".format(name,
category,
group))
elif value_type == "boolean":
print("{}(boolean) value: {} default: {} grouping: {} - {}".format(name,
value,
default_value,
category,
group))
elif value_type == "enum":
enum_entries = camera.get_tcam_menu_entries(name)
print("{}(enum) value: {} default: {} grouping {} - {}".format(name,
value,
default_value,
category,
group))
print("Entries: ")
for entry in enum_entries:
print("\t {}".format(entry))
else:
print("This should not happen.")
def block_until_playing(pipeline):
while True:
# wait 0.1 seconds for something to happen
change_return, state, pending = pipeline.get_state(100000000)
if change_return == Gst.StateChangeReturn.SUCCESS:
return True
elif change_return == Gst.StateChangeReturn.FAILURE:
print("Failed to change state {} {} {}".format(change_return,
state,
pending))
return False
def main():
Gst.init(sys.argv) # init gstreamer
# this line sets the gstreamer default logging level
# it can be removed in normal applications
# gstreamer logging can contain verry useful information
# when debugging your application
# see https://gstreamer.freedesktop.org/documentation/tutorials/basic/debugging-tools.html
# for further details
Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
pipeline = Gst.parse_launch("tcambin name=source ! fakesink")
if not pipeline:
print("Unable to create pipeline")
return 1
# set this to a specific camera serial if you
# do not want to use the default camera
serial = None
# get the tcambin to retrieve a property list through it
source = pipeline.get_by_name("source")
# serial is defined, thus make the source open that device
if serial is not None:
source.set_property("serial", serial)
print("Properties before state PLAYING:")
list_properties(source)
# in the READY state the camera will always be initialized
# in the PLAYING sta1te additional properties may appear from gstreamer elements
pipeline.set_state(Gst.State.PLAYING)
# helper function to ensure we have the right state
# alternatively wait for the first image
if not block_until_playing(pipeline):
print("Unable to start pipeline")
print("Properties during state PLAYING:")
list_properties(source)
pipeline.set_state(Gst.State.NULL)
return 0
if __name__ == "__main__":
sys.exit(main())
| 39.52381 | 111 | 0.492083 |
f730e637b6cdd9ca1951b596c2df73147bceb07c | 13,442 | py | Python | mlir/test/python/integration/dialects/linalg/opsrun.py | mgehre-xlx/sycl | 2086745509ef4bc298d7bbec402a123dae68f25e | [
"Apache-2.0"
] | 61 | 2019-04-12T18:49:57.000Z | 2022-03-19T22:23:16.000Z | mlir/test/python/integration/dialects/linalg/opsrun.py | mgehre-xlx/sycl | 2086745509ef4bc298d7bbec402a123dae68f25e | [
"Apache-2.0"
] | 127 | 2019-04-09T00:55:50.000Z | 2022-03-21T15:35:41.000Z | mlir/test/python/integration/dialects/linalg/opsrun.py | mgehre-xlx/sycl | 2086745509ef4bc298d7bbec402a123dae68f25e | [
"Apache-2.0"
] | 10 | 2019-04-02T18:25:40.000Z | 2022-02-15T07:11:37.000Z | # RUN: %PYTHON %s 2>&1 | FileCheck %s
import ctypes
import sys
from mlir.ir import *
from mlir.dialects import builtin
from mlir.dialects import linalg
from mlir.dialects import std
from mlir.passmanager import *
from mlir.execution_engine import *
# Log everything to stderr and flush so that we have a unified stream to match
# errors/info emitted by MLIR to stderr.
def log(*args):
print(*args, file=sys.stderr)
sys.stderr.flush()
matmul_boiler = """
func @main() -> f32 attributes {llvm.emit_c_interface} {
%v0 = constant 0.0 : f32
%v1 = constant 1.0 : f32
%v2 = constant 2.0 : f32
%A = memref.alloc() : memref<4x16xf32>
%B = memref.alloc() : memref<16x8xf32>
%C = memref.alloc() : memref<4x8xf32>
linalg.fill(%v1, %A) : f32, memref<4x16xf32>
linalg.fill(%v2, %B) : f32, memref<16x8xf32>
linalg.fill(%v0, %C) : f32, memref<4x8xf32>
call @matmul_on_buffers(%A, %B, %C) :
(memref<4x16xf32>, memref<16x8xf32>, memref<4x8xf32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %C[%c0, %c0] : memref<4x8xf32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : f32
}
"""
fill_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%O = memref.alloc() : memref<4x16xi32>
%min = constant -1000.0 : f64
%max = constant 1000.0 : f64
%seed = constant 42 : i32
call @fill_on_buffers(%min, %max, %seed, %O) :
(f64, f64, i32, memref<4x16xi32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %O[%c0, %c0] : memref<4x16xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
conv_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%v0 = constant 0 : i32
%v1 = constant 1.0 : f64
%v2 = constant 2.0 : f64
%input = memref.alloc() : memref<1x4x16x1xf64>
%filter = memref.alloc() : memref<2x2x1xf64>
%output = memref.alloc() : memref<1x2x4x1xi32>
linalg.fill(%v1, %input) : f64, memref<1x4x16x1xf64>
linalg.fill(%v2, %filter) : f64, memref<2x2x1xf64>
linalg.fill(%v0, %output) : i32, memref<1x2x4x1xi32>
call @conv_on_buffers(%input, %filter, %output) :
(memref<1x4x16x1xf64>, memref<2x2x1xf64>, memref<1x2x4x1xi32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %output[%c0, %c0, %c0, %c0] : memref<1x2x4x1xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
pooling_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%v0 = constant 0 : i32
%v42 = constant 42.0 : f64
%v77 = constant 77.0 : f64
%v-13 = constant -13.0 : f64
%v1 = constant 1.0 : f64
%input = memref.alloc() : memref<1x4x16x1xf64>
%shape = memref.alloc() : memref<2x2xf64>
%output = memref.alloc() : memref<1x2x4x1xi32>
linalg.fill(%v1, %input) : f64, memref<1x4x16x1xf64>
linalg.fill(%v1, %shape) : f64, memref<2x2xf64>
linalg.fill(%v0, %output) : i32, memref<1x2x4x1xi32>
%c0 = constant 0 : index
%c1 = constant 1 : index
%c2 = constant 2 : index
memref.store %v42, %input[%c0, %c0, %c0, %c0] : memref<1x4x16x1xf64>
memref.store %v77, %input[%c0, %c0, %c1, %c0] : memref<1x4x16x1xf64>
memref.store %v-13, %input[%c0, %c0, %c2, %c0] : memref<1x4x16x1xf64>
call @pooling_on_buffers(%input, %shape, %output) :
(memref<1x4x16x1xf64>, memref<2x2xf64>, memref<1x2x4x1xi32>) -> ()
%0 = memref.load %output[%c0, %c0, %c0, %c0] : memref<1x2x4x1xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
def transform(module, boilerplate):
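  # Concatenate the module's (single) function with the test boilerplate, then
  # lower Linalg to loops and convert everything down to the LLVM dialect so the
  # ExecutionEngine below can JIT-compile and invoke @main.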
import mlir.conversions
import mlir.dialects.linalg.passes
import mlir.transforms
# TODO: Allow cloning functions from one module to another.
# Atm we have to resort to string concatenation.
mod = Module.parse(
str(module.operation.regions[0].blocks[0].operations[0].operation) +
boilerplate)
pm = PassManager.parse(
"builtin.func(convert-linalg-to-loops, lower-affine, " +
"convert-scf-to-std), convert-vector-to-llvm," +
"convert-memref-to-llvm,convert-std-to-llvm")
pm.run(mod)
return mod
def test_matmul_builtin():
with Context() as ctx, Location.unknown():
module = Module.create()
f32 = F32Type.get()
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(
MemRefType.get((4, 16), f32), MemRefType.get((16, 8), f32),
MemRefType.get((4, 8), f32))
def matmul_on_buffers(lhs, rhs, out):
linalg.matmul(lhs, rhs, outs=[out])
execution_engine = ExecutionEngine(transform(module, matmul_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result f32.
# Arguments must be passed as pointers.
c_float_p = ctypes.c_float * 1
res = c_float_p(-1.)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# CHECK: RESULT: 32.0
test_matmul_builtin()
def test_matmul_generic():
with Context() as ctx, Location.unknown():
module = Module.create()
f32 = F32Type.get()
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(
MemRefType.get((4, 16), f32), MemRefType.get((16, 8), f32),
MemRefType.get((4, 8), f32))
def matmul_on_buffers(lhs, rhs, out):
linalg.matmul(lhs, rhs, outs=[out], emit_generic=True)
execution_engine = ExecutionEngine(transform(module, matmul_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result f32.
# Arguments must be passed as pointers.
c_float_p = ctypes.c_float * 1
res = c_float_p(-1.)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# CHECK: RESULT: 32.0
test_matmul_generic()
def test_fill_builtin():
with Context() as ctx, Location.unknown():
module = Module.create()
f64 = F64Type.get()
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(f64, f64, i32, MemRefType.get((4, 16), i32))
def fill_on_buffers(min, max, seed, out):
linalg.fill_rng_2d(min, max, seed, outs=[out])
execution_engine = ExecutionEngine(transform(module, fill_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result i32.
# Arguments must be passed as pointers.
c_int_p = ctypes.c_int * 1
res = c_int_p(-1)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# CHECK: RESULT: -480
test_fill_builtin()
def test_fill_generic():
with Context() as ctx, Location.unknown():
module = Module.create()
f64 = F64Type.get()
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(f64, f64, i32, MemRefType.get((4, 16), i32))
def fill_on_buffers(min, max, seed, out):
linalg.fill_rng_2d(min, max, seed, outs=[out], emit_generic=True)
execution_engine = ExecutionEngine(transform(module, fill_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result i32.
# Arguments must be passed as pointers.
c_int_p = ctypes.c_int * 1
res = c_int_p(-1)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# CHECK: RESULT: -480
test_fill_generic()
def test_conv_builtin():
with Context() as ctx, Location.unknown():
module = Module.create()
f64 = F64Type.get()
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(
MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2, 1), f64),
MemRefType.get((1, 2, 4, 1), i32))
def conv_on_buffers(input, filter, output):
linalg.depthwise_conv_2d_input_nhwc_filter_hwc_poly(
input, filter, outs=[output], strides=[2, 4], dilations=[1, 2])
execution_engine = ExecutionEngine(transform(module, conv_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result i32.
# Arguments must be passed as pointers.
c_int_p = ctypes.c_int * 1
res = c_int_p(-1)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# CHECK: RESULT: 8
test_conv_builtin()
def test_conv_generic():
with Context() as ctx, Location.unknown():
module = Module.create()
f64 = F64Type.get()
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(
MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2, 1), f64),
MemRefType.get((1, 2, 4, 1), i32))
def conv_on_buffers(input, filter, output):
linalg.depthwise_conv_2d_input_nhwc_filter_hwc_poly(
input,
filter,
outs=[output],
strides=[2, 4],
dilations=[1, 2],
emit_generic=True)
execution_engine = ExecutionEngine(transform(module, conv_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result i32.
# Arguments must be passed as pointers.
c_int_p = ctypes.c_int * 1
res = c_int_p(-1)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# CHECK: RESULT: 8
test_conv_generic()
def test_max_pooling_builtin():
with Context() as ctx, Location.unknown():
module = Module.create()
f64 = F64Type.get()
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(
MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64),
MemRefType.get((1, 2, 4, 1), i32))
def pooling_on_buffers(input, shape, output):
linalg.pooling_nhwc_max(
input, shape, outs=[output], strides=[2, 4], dilations=[1, 2])
execution_engine = ExecutionEngine(transform(module, pooling_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result i32.
# Arguments must be passed as pointers.
c_int_p = ctypes.c_int * 1
res = c_int_p(-1)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# 77 is not selected due to the dilation 2 in the second dimension.
# CHECK: RESULT: 42
test_max_pooling_builtin()
def test_max_pooling_generic():
with Context() as ctx, Location.unknown():
module = Module.create()
f64 = F64Type.get()
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(
MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64),
MemRefType.get((1, 2, 4, 1), i32))
def pooling_on_buffers(input, shape, output):
linalg.pooling_nhwc_max(
input,
shape,
outs=[output],
strides=[2, 4],
dilations=[1, 2],
emit_generic=True)
execution_engine = ExecutionEngine(transform(module, pooling_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result i32.
# Arguments must be passed as pointers.
c_int_p = ctypes.c_int * 1
res = c_int_p(-1)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# 77 is not selected due to the dilation 2 in the second dimension.
# CHECK: RESULT: 42
test_max_pooling_generic()
def test_min_pooling_builtin():
with Context() as ctx, Location.unknown():
module = Module.create()
f64 = F64Type.get()
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(
MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64),
MemRefType.get((1, 2, 4, 1), i32))
def pooling_on_buffers(input, shape, output):
linalg.pooling_nhwc_min(
input, shape, outs=[output], strides=[2, 4], dilations=[1, 2])
execution_engine = ExecutionEngine(transform(module, pooling_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result i32.
# Arguments must be passed as pointers.
c_int_p = ctypes.c_int * 1
res = c_int_p(-1)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# CHECK: RESULT: -13
test_min_pooling_builtin()
def test_min_pooling_generic():
with Context() as ctx, Location.unknown():
module = Module.create()
f64 = F64Type.get()
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(
MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64),
MemRefType.get((1, 2, 4, 1), i32))
def pooling_on_buffers(input, shape, output):
linalg.pooling_nhwc_min(
input,
shape,
outs=[output],
strides=[2, 4],
dilations=[1, 2],
emit_generic=True)
execution_engine = ExecutionEngine(transform(module, pooling_boiler))
# TODO: FFI-based solution to allow testing and printing with python code.
# Prepare arguments: one result i32.
# Arguments must be passed as pointers.
c_int_p = ctypes.c_int * 1
res = c_int_p(-1)
execution_engine.invoke("main", res)
log("RESULT: ", res[0])
# CHECK: RESULT: -13
test_min_pooling_generic()
| 30.411765 | 79 | 0.651912 |
f730f61c0558a354a0cd7c399108c3d7b08479b3 | 4,692 | py | Python | Tools/SeeDot/seedot/predictor.py | krantikiran/EdgeML | e5c7bd7c56884ca61f6d54cedb0074553cfdc896 | [
"MIT"
] | 1 | 2020-03-26T17:19:54.000Z | 2020-03-26T17:19:54.000Z | Tools/SeeDot/seedot/predictor.py | krantikiran/EdgeML | e5c7bd7c56884ca61f6d54cedb0074553cfdc896 | [
"MIT"
] | 2 | 2020-03-26T02:59:12.000Z | 2020-04-23T19:09:00.000Z | Tools/SeeDot/seedot/predictor.py | krantikiran/EdgeML | e5c7bd7c56884ca61f6d54cedb0074553cfdc896 | [
"MIT"
] | 3 | 2020-03-25T18:45:39.000Z | 2020-12-17T19:09:54.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import os
import subprocess
import seedot.config as config
import seedot.util as Util
# Program to build and run the predictor project using msbuild
# The accuracy and other statistics are written to the output file specified
class Predictor:
def __init__(self, algo, version, datasetType, outputDir, scaleForX):
self.algo, self.version, self.datasetType = algo, version, datasetType
self.outputDir = outputDir
os.makedirs(self.outputDir, exist_ok=True)
self.scaleForX = scaleForX
self.genHeaderFile()
def genHeaderFile(self):
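        # Emit a small C header (datatypes.h) that fixes the integer word length,
        # the input scaling factor and the debug flag for the generated predictor.
        # Example (illustrative): config.wordLength == 16 produces "#define INT16"
        # and "typedef int16_t MYINT;".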
with open("datatypes.h", 'w') as file:
file.write("#pragma once\n\n")
if config.wordLength == 8:
file.write("#define INT8\n")
file.write("typedef int8_t MYINT;\n\n")
elif config.wordLength == 16:
file.write("#define INT16\n")
file.write("typedef int16_t MYINT;\n\n")
elif config.wordLength == 32:
file.write("#define INT32\n")
file.write("typedef int32_t MYINT;\n\n")
file.write("typedef int16_t MYITE;\n")
file.write("typedef uint16_t MYUINT;\n\n")
file.write("const int scaleForX = %d;\n\n" % (self.scaleForX))
if Util.debugMode():
file.write("const bool debugMode = true;\n")
else:
file.write("const bool debugMode = false;\n")
def buildForWindows(self):
'''
Builds using the Predictor.vcxproj project file and creates the executable
The target platform is currently set to x64
'''
print("Build...", end='')
projFile = "Predictor.vcxproj"
args = [config.msbuildPath, projFile, r"/t:Build",
r"/p:Configuration=Release", r"/p:Platform=x64"]
logFile = os.path.join(self.outputDir, "msbuild.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return False
else:
print("success")
return True
def buildForLinux(self):
print("Build...", end='')
args = ["make"]
logFile = os.path.join(self.outputDir, "build.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return False
else:
print("success")
return True
def build(self):
if Util.windows():
return self.buildForWindows()
else:
return self.buildForLinux()
def executeForWindows(self):
'''
Invokes the executable with arguments
'''
print("Execution...", end='')
exeFile = os.path.join("x64", "Release", "Predictor.exe")
args = [exeFile, self.version, self.datasetType]
logFile = os.path.join(self.outputDir, "exec.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return None
else:
print("success")
acc = self.readStatsFile()
return acc
def executeForLinux(self):
print("Execution...", end='')
exeFile = os.path.join("./Predictor")
args = [exeFile, self.version, self.datasetType]
logFile = os.path.join(self.outputDir, "exec.txt")
with open(logFile, 'w') as file:
process = subprocess.call(args, stdout=file, stderr=subprocess.STDOUT)
if process == 1:
print("FAILED!!\n")
return None
else:
print("success")
acc = self.readStatsFile()
return acc
def execute(self):
if Util.windows():
return self.executeForWindows()
else:
return self.executeForLinux()
# Read statistics of execution (currently only accuracy)
def readStatsFile(self):
statsFile = os.path.join(
"output", self.version, "stats-" + self.datasetType + ".txt")
with open(statsFile, 'r') as file:
content = file.readlines()
stats = [x.strip() for x in content]
return float(stats[0])
def run(self):
res = self.build()
if res == False:
return None
acc = self.execute()
return acc
| 29.509434 | 82 | 0.563725 |
f730f6f1c65f95e6079be44f28f2c5b909f64b71 | 1,999 | py | Python | flagging_site/data/_store/refresh.py | danivalades/flagging | b1a097594e9b7d9e101edef9bb6abf98a333383c | [
"MIT"
] | null | null | null | flagging_site/data/_store/refresh.py | danivalades/flagging | b1a097594e9b7d9e101edef9bb6abf98a333383c | [
"MIT"
] | null | null | null | flagging_site/data/_store/refresh.py | danivalades/flagging | b1a097594e9b7d9e101edef9bb6abf98a333383c | [
"MIT"
] | null | null | null | """The data store contains offline versions of the data so that you can run a
demo version of the website without the vault keys, or simply develop parts of
the website that don't require actively updated data without having to worry.
This data is used for the actual website when the `USE_MOCK_DATA` config
variable is True. It is useful for dev, but it should never be used in
production.
This file is a CLI to refresh the data store. You can run it with:
`python flagging_site/data/_store/refresh.py`
"""
import os
import sys
from typing import Optional
import click
DATA_STORE_PATH = os.path.dirname(__file__)
@click.command()
@click.option('--vault_password',
prompt=True,
default=lambda: os.environ.get('VAULT_PASSWORD', None))
def refresh_data_store(vault_password: Optional[str] = None) -> None:
"""When run, this function runs all the functions that compose the data
store. The app itself should not be running this function; in fact, this
function will raise an error if the app is turned on. This should only be
run from the command line or a Python console.
"""
os.environ['USE_MOCK_DATA'] = 'false'
if vault_password:
os.environ['VAULT_PASSWORD'] = vault_password
from flask import current_app
if current_app:
raise Exception('The app should not be running when the data store is '
'being refreshed.')
from flagging_site.data.hobolink import get_live_hobolink_data
from flagging_site.data.hobolink import HOBOLINK_STATIC_FILE_NAME
get_live_hobolink_data('code_for_boston_export_21d')\
.to_pickle(os.path.join(DATA_STORE_PATH, HOBOLINK_STATIC_FILE_NAME))
from flagging_site.data.usgs import get_live_usgs_data
from flagging_site.data.usgs import USGS_STATIC_FILE_NAME
get_live_usgs_data()\
.to_pickle(os.path.join(DATA_STORE_PATH, USGS_STATIC_FILE_NAME))
if __name__ == '__main__':
sys.path.append('.')
refresh_data_store()
| 37.018519 | 79 | 0.737869 |
f73108a0572bbd0f825ddaada87e96d987ccfd6b | 8,856 | py | Python | auxiliary/model.py | hellolele/PoseFromShape | b7cd6fc7eab5be7710e34557504c192d36c35000 | [
"MIT"
] | 2 | 2019-07-12T14:10:37.000Z | 2019-07-12T14:10:39.000Z | auxiliary/model.py | tim885/PoseFromShape | 7daf9e4889af065861d2719cd2bca2de8a45d185 | [
"MIT"
] | null | null | null | auxiliary/model.py | tim885/PoseFromShape | 7daf9e4889af065861d2719cd2bca2de8a45d185 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import resnet
# ============================================================================ #
# Baseline network #
# ============================================================================ #
class BaselineEstimator(nn.Module):
"""Pose estimator using image feature with shape feature
Arguments:
img_feature_dim: output feature dimension for image
pretrained_resnet: use the ResNet pretrained on ImageNet if True
Return:
Three angle bin classification probability with a delta value regression for each bin
"""
def __init__(self, img_feature_dim=1024, separate_branch=False,
azi_classes=24, ele_classes=12, inp_classes=24, pretrained_resnet=False):
super(BaselineEstimator, self).__init__()
# RGB image encoder
self.img_encoder = resnet.resnet18(pretrained=pretrained_resnet, num_classes=img_feature_dim)
self.compress = nn.Sequential(nn.Linear(img_feature_dim, 800), nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.separate_branch = separate_branch
# separate branch for classification and regression
if separate_branch:
self.compress_delta = nn.Sequential(nn.Linear(img_feature_dim, 800), nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.fc_cls_azi = nn.Linear(200, azi_classes)
self.fc_cls_ele = nn.Linear(200, ele_classes)
self.fc_cls_inp = nn.Linear(200, inp_classes)
self.fc_reg_azi = nn.Linear(200, azi_classes)
self.fc_reg_ele = nn.Linear(200, ele_classes)
self.fc_reg_inp = nn.Linear(200, inp_classes)
def forward(self, im):
# pass the image through image encoder
img_feature = self.img_encoder(im)
# concatenate the features obtained from two encoders into one feature
x = self.compress(img_feature)
cls_azi = self.fc_cls_azi(x)
cls_ele = self.fc_cls_ele(x)
cls_inp = self.fc_cls_inp(x)
        # use a separate compression branch for the delta regression head if
        # separate_branch is set, otherwise reuse the shared features
        x_delta = self.compress_delta(img_feature) if self.separate_branch else x
reg_azi = self.fc_reg_azi(x_delta)
reg_ele = self.fc_reg_ele(x_delta)
reg_inp = self.fc_reg_inp(x_delta)
return [cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp]
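# Illustrative sketch (not part of the original repository): one possible way to
# turn the bin-classification logits and per-bin delta regressions returned above
# into a continuous angle. The bin width and the assumption that each delta is a
# fraction of a bin are illustrative choices, not taken from the original code.
def decode_angle_sketch(cls_logits, deltas, bin_size):
    bins = torch.argmax(cls_logits, dim=1)                         # most likely bin per sample
    delta = torch.gather(deltas, 1, bins.unsqueeze(1)).squeeze(1)  # delta predicted for that bin
    return (bins.float() + delta) * bin_size                       # continuous angle (degrees)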
# ============================================================================ #
# Proposed network #
# ============================================================================ #
class ShapeEncoderMV(nn.Module):
"""Shape Encoder using rendering images under multiple views
Arguments:
feature_dim: output feature dimension for each rendering image
channels: 3 for normal rendering image, 4 for normal map with depth map, and 3*12 channels for concatenating
pretrained_resnet: use the ResNet pretrained on ImageNet if True
Return:
A tensor of size NxC, where N is the batch size and C is the feature_dim
"""
def __init__(self, feature_dim=256, channels=3, pretrained_resnet=False):
super(ShapeEncoderMV, self).__init__()
self.render_encoder = resnet.resnet18(input_channel=channels, num_classes=feature_dim, pretrained=pretrained_resnet)
def forward(self, renders):
# reshape render images from dimension N*K*C*H*W to (N*K)*C*H*W
N, K, C, H, W = renders.size()
renders = renders.view(N*K, C, H, W)
# pass the encoder and reshape render features from dimension (N*K)*D1 to N*(K*D1)
render_feature = self.render_encoder(renders)
render_feature = render_feature.view(N, -1)
return render_feature
class ShapeEncoderPC(nn.Module):
"""Shape Encoder using point cloud TO BE MODIFIED
"""
def __init__(self, feature_dim=256, channels=3, pretrained_resnet=False):
super(ShapeEncoderPC, self).__init__()
self.pc_encoder = resnet.resnet18(input_channel=channels, num_classes=feature_dim, pretrained=pretrained_resnet)
def forward(self, shapes):
shape_feature = self.pc_encoder(shapes)
return shape_feature
class PoseEstimator(nn.Module):
"""Pose estimator using image feature with shape feature
Arguments:
img_feature_dim: output feature dimension for image
shape_feature_dim: output feature dimension for shape
shape: shape representation in PointCloud or MultiView
channels: channel number for multi-view encoder
pretrained_resnet: use the ResNet pretrained on ImageNet if True
Return:
Three angle bin classification probability with a delta value regression for each bin
"""
def __init__(self, render_number=12, img_feature_dim=1024, shape_feature_dim=256, channels=3, separate_branch=False,
azi_classes=24, ele_classes=12, inp_classes=24, pretrained_resnet=False, shape='PointCloud'):
super(PoseEstimator, self).__init__()
# 3D shape encoder
if shape == 'PointCloud':
self.shape_encoder = ShapeEncoderPC()
else:
self.shape_encoder = ShapeEncoderMV(feature_dim=shape_feature_dim, channels=channels, pretrained_resnet=pretrained_resnet)
shape_feature_dim = shape_feature_dim * render_number if shape != 'PointCloud' else shape_feature_dim
# RGB image encoder
self.img_encoder = resnet.resnet18(pretrained=pretrained_resnet, num_classes=img_feature_dim)
self.compress = nn.Sequential(nn.Linear(shape_feature_dim + img_feature_dim, 800),
nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.separate_branch = separate_branch
# separate branch for classification and regression
if separate_branch:
self.compress_delta = nn.Sequential(nn.Linear(shape_feature_dim + img_feature_dim, 800),
nn.BatchNorm1d(800), nn.ReLU(inplace=True),
nn.Linear(800, 400), nn.BatchNorm1d(400), nn.ReLU(inplace=True),
nn.Linear(400, 200), nn.BatchNorm1d(200), nn.ReLU(inplace=True))
self.fc_cls_azi = nn.Linear(200, azi_classes)
self.fc_cls_ele = nn.Linear(200, ele_classes)
self.fc_cls_inp = nn.Linear(200, inp_classes)
self.fc_reg_azi = nn.Linear(200, azi_classes)
self.fc_reg_ele = nn.Linear(200, ele_classes)
self.fc_reg_inp = nn.Linear(200, inp_classes)
def forward(self, im, shape):
# pass the image through image encoder
img_feature = self.img_encoder(im)
# pass the shape through shape encoder
shape_feature = self.shape_encoder(shape)
# concatenate the features obtained from two encoders into one feature
global_feature = torch.cat((shape_feature, img_feature), 1)
x = self.compress(global_feature)
cls_azi = self.fc_cls_azi(x)
cls_ele = self.fc_cls_ele(x)
cls_inp = self.fc_cls_inp(x)
        # use a separate compression branch for the delta regression head if
        # separate_branch is set, otherwise reuse the shared features
        x_delta = self.compress_delta(global_feature) if self.separate_branch else x
reg_azi = self.fc_reg_azi(x_delta)
reg_ele = self.fc_reg_ele(x_delta)
reg_inp = self.fc_reg_inp(x_delta)
return [cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp]
if __name__ == '__main__':
print('test model')
sim_im = Variable(torch.rand(4, 3, 224, 224))
sim_renders = Variable(torch.rand(4, 12, 3, 224, 224))
sim_im = sim_im.cuda()
sim_renders = sim_renders.cuda()
#model = PoseEstimator(shape='MultiView', separate_branch=False)
model = BaselineEstimator(separate_branch=False, pretrained_resnet=False)
model.cuda()
#cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp = model(sim_im, sim_renders)
cls_azi, cls_ele, cls_inp, reg_azi, reg_ele, reg_inp = model(sim_im)
print(cls_azi.size(), cls_ele.size(), cls_inp.size(), reg_azi.size(), reg_ele.size(), reg_inp.size())
| 47.106383 | 134 | 0.630872 |
f7310fabca6b6ef898997efb6b048ead96681b15 | 4,598 | py | Python | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 29 | 2018-09-19T01:16:27.000Z | 2022-03-29T14:35:36.000Z | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 14 | 2019-04-12T18:37:36.000Z | 2022-02-10T00:27:55.000Z | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 14 | 2019-03-05T23:44:11.000Z | 2022-03-18T07:29:31.000Z | from copy import deepcopy
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pdb
def binarize_labels(true_labels, pred_labels):
srcids = list(pred_labels.keys())
tot_labels = [list(labels) for labels in
list(pred_labels.values()) + list(true_labels.values())]
mlb = MultiLabelBinarizer().fit(tot_labels)
pred_mat = mlb.transform(pred_labels.values())
true_mat = mlb.transform(true_labels.values())
return true_mat, pred_mat
def get_micro_f1(true_labels, pred_labels):
true_mat, pred_mat = binarize_labels(true_labels, pred_labels)
return get_micro_f1_mat(true_mat, pred_mat)
def get_macro_f1(true_labels, pred_labels):
true_mat, pred_mat = binarize_labels(true_labels, pred_labels)
return get_macro_f1_mat(true_mat, pred_mat)
def get_macro_f1_mat(true_mat, pred_mat):
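    # Macro F1 over a binarized label matrix: average the per-class F1 scores,
    # skipping classes that never occur in the ground truth.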
assert true_mat.shape == pred_mat.shape
f1s = []
for i in range(0, true_mat.shape[1]):
if 1 not in true_mat[:,i]:
continue
f1 = f1_score(true_mat[:,i], pred_mat[:,i])
f1s.append(f1)
return np.mean(f1s)
def get_multiclass_micro_f1(true_labels, pred_labels):
le = LabelEncoder()
#pred_mat, true_mat = binarize_labels(true_labels, pred_labels)
#f1_custom = get_micro_f1_mat(true_mat, pred_mat)
srcids = list(true_labels.keys())
true_label_list = [true_labels[srcid] for srcid in srcids]
pred_label_list = [pred_labels[srcid] for srcid in srcids]
le = LabelEncoder()
le.fit(true_label_list + pred_label_list)
true_encoded = le.transform(true_label_list)
pred_encoded = le.transform(pred_label_list)
f1_micro = f1_score(true_encoded, pred_encoded, average='micro')
#f1_weighted = f1_score(true_encoded, pred_encoded, average='weighted')
#pdb.set_trace()
return f1_micro
def get_multiclass_macro_f1(true_labels, pred_labels):
le = LabelEncoder()
#pred_mat, true_mat = binarize_labels(true_labels, pred_labels)
#f1_custom = get_micro_f1_mat(true_mat, pred_mat)
srcids = list(true_labels.keys())
true_label_list = [true_labels[srcid] for srcid in srcids]
pred_label_list = [pred_labels[srcid] for srcid in srcids]
le = LabelEncoder()
le.fit(true_label_list + pred_label_list)
true_encoded = le.transform(true_label_list)
pred_encoded = le.transform(pred_label_list)
    f1_macro = f1_score(true_encoded, pred_encoded, average='macro')
    #f1_weighted = f1_score(true_encoded, pred_encoded, average='weighted')
    #pdb.set_trace()
    return f1_macro
def get_micro_f1_mat(true_mat, pred_mat):
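    # Micro F1 over a binarized label matrix: pool TP/FP/FN counts across all
    # classes before computing precision and recall.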
TP = np.sum(np.bitwise_and(true_mat==1, pred_mat==1))
TN = np.sum(np.bitwise_and(true_mat==0, pred_mat==0))
FN = np.sum(np.bitwise_and(true_mat==1, pred_mat==0))
FP = np.sum(np.bitwise_and(true_mat==0, pred_mat==1))
micro_prec = TP / (TP + FP)
micro_rec = TP / (TP + FN)
return 2 * micro_prec * micro_rec / (micro_prec + micro_rec)
def get_point_accuracy(true_tagsets, pred_tagsets):
target_srcids = pred_tagsets.keys()
return sum([true_tagsets[srcid].lower() == pred_tagsets[srcid].lower()
for srcid in target_srcids]) / len(target_srcids)
def get_accuracy(true_tagsets_sets, pred_tagsets_sets):
acc = 0
for srcid, pred_tagsets in pred_tagsets_sets.items():
pred = set(pred_tagsets)
true = set(true_tagsets_sets[srcid])
jaccard = len(pred.intersection(true)) / len(pred.union(true))
acc += jaccard
return acc / len(pred_tagsets_sets)
def exclude_common_tagsets(tagsets):
return [tagset for tagset in tagsets
if tagset.split('-')[0] != 'networkadapter' and
tagset.split('-')[0] != 'building'
]
def get_accuracy_conservative(true_tagsets_sets, pred_tagsets_sets):
acc = 0
for srcid, pred_tagsets in pred_tagsets_sets.items():
pred = set(exclude_common_tagsets(pred_tagsets))
true = set(exclude_common_tagsets(true_tagsets_sets[srcid]))
if len(true) == 0:
jaccard = 1
else:
jaccard = len(pred.intersection(true)) / len(pred.union(true))
acc += jaccard
return acc / len(pred_tagsets_sets)
def get_set_accuracy(true_label_sets, pred_tagset_sets):
# Accuracy per sample = #intersection / #union
# Accuracy over set = average of the accuracy per sample
# Input params dictionary based on the srcids
for srcid, pred_tagset_set in pred_tagset_sets.items():
pass #TODO
| 38.966102 | 75 | 0.707916 |
f731159d8d119b22890a43bc26246da1964a17db | 3,103 | py | Python | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniAmpeTracerSyncModel(object):
def __init__(self):
self._device_id = None
self._product_id = None
self._spm_a = None
self._spm_b = None
self._spm_c = None
self._spm_d = None
@property
def device_id(self):
return self._device_id
@device_id.setter
def device_id(self, value):
self._device_id = value
@property
def product_id(self):
return self._product_id
@product_id.setter
def product_id(self, value):
self._product_id = value
@property
def spm_a(self):
return self._spm_a
@spm_a.setter
def spm_a(self, value):
self._spm_a = value
@property
def spm_b(self):
return self._spm_b
@spm_b.setter
def spm_b(self, value):
self._spm_b = value
@property
def spm_c(self):
return self._spm_c
@spm_c.setter
def spm_c(self, value):
self._spm_c = value
@property
def spm_d(self):
return self._spm_d
@spm_d.setter
def spm_d(self, value):
self._spm_d = value
def to_alipay_dict(self):
params = dict()
if self.device_id:
if hasattr(self.device_id, 'to_alipay_dict'):
params['device_id'] = self.device_id.to_alipay_dict()
else:
params['device_id'] = self.device_id
if self.product_id:
if hasattr(self.product_id, 'to_alipay_dict'):
params['product_id'] = self.product_id.to_alipay_dict()
else:
params['product_id'] = self.product_id
if self.spm_a:
if hasattr(self.spm_a, 'to_alipay_dict'):
params['spm_a'] = self.spm_a.to_alipay_dict()
else:
params['spm_a'] = self.spm_a
if self.spm_b:
if hasattr(self.spm_b, 'to_alipay_dict'):
params['spm_b'] = self.spm_b.to_alipay_dict()
else:
params['spm_b'] = self.spm_b
if self.spm_c:
if hasattr(self.spm_c, 'to_alipay_dict'):
params['spm_c'] = self.spm_c.to_alipay_dict()
else:
params['spm_c'] = self.spm_c
if self.spm_d:
if hasattr(self.spm_d, 'to_alipay_dict'):
params['spm_d'] = self.spm_d.to_alipay_dict()
else:
params['spm_d'] = self.spm_d
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniAmpeTracerSyncModel()
if 'device_id' in d:
o.device_id = d['device_id']
if 'product_id' in d:
o.product_id = d['product_id']
if 'spm_a' in d:
o.spm_a = d['spm_a']
if 'spm_b' in d:
o.spm_b = d['spm_b']
if 'spm_c' in d:
o.spm_c = d['spm_c']
if 'spm_d' in d:
o.spm_d = d['spm_d']
return o
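# Illustrative usage (not part of the generated SDK code): round-trip a dict
# through the model.
#   model = AlipayOpenMiniAmpeTracerSyncModel.from_alipay_dict({'device_id': 'd1'})
#   assert model.to_alipay_dict()['device_id'] == 'd1'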
| 26.75 | 71 | 0.548179 |
f73133716abf93447f9a681574d785a552cadd2f | 877 | py | Python | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | 6 | 2015-08-06T00:54:48.000Z | 2022-02-03T13:55:33.000Z | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | null | null | null | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | 1 | 2015-05-19T11:45:34.000Z | 2015-05-19T11:45:34.000Z | from setuptools import setup, find_packages
setup(
name='jogger',
version='0.1.1',
description='Navigate log files.',
long_description=(
open('README.md').read()
),
url='http://github.com/jomido/jogger/',
license='MIT',
author='Jonathan Dobson',
author_email='jon.m.dobson@gmail.com',
packages=[
'jogger'
],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Text Processing',
'Topic :: System :: Logging'
],
) | 29.233333 | 54 | 0.586089 |
f7316151b54b3cb3affdbe30c1d0eafb1b8f3f72 | 23,981 | py | Python | scripts/casava_data_delivery.py | ssjunnebo/scilifelab | 79960f7042118f900bd1eaabe4902ee76abd8020 | [
"MIT"
] | 1 | 2016-03-21T14:04:09.000Z | 2016-03-21T14:04:09.000Z | scripts/casava_data_delivery.py | ssjunnebo/scilifelab | 79960f7042118f900bd1eaabe4902ee76abd8020 | [
"MIT"
] | 35 | 2015-01-22T08:25:02.000Z | 2020-02-17T12:09:12.000Z | scripts/casava_data_delivery.py | ssjunnebo/scilifelab | 79960f7042118f900bd1eaabe4902ee76abd8020 | [
"MIT"
] | 6 | 2015-01-16T15:32:08.000Z | 2020-01-30T14:34:40.000Z | # A script to help doing the deliveries.
# Now using the Casava directory structure
# The user is asked to provide a project ID, a run name, and an UPPMAX project
import sys
import os
import glob
import re
import grp
from datetime import datetime
import argparse
import stat
from subprocess import check_call, CalledProcessError
from scilifelab.utils.misc import filtered_walk, query_yes_no, touch_file
from scilifelab.utils.timestamp import utc_time
def fixProjName(pname):
newname = pname[0].upper()
postperiod = False
for i in range(1, len(pname)):
if pname[i] == ".":
newname += pname[i]
postperiod = True
elif postperiod:
newname += pname[i].upper()
postperiod = False
else:
newname += pname[i]
postperiod = False
return newname
def is_fastq(fname):
fastq_ext = [".fastq.gz",
".fastq",
"_fastq.txt.gz",
"_fastq.txt",
".fastq..gz",
"_fastq.txt..gz"
]
for ext in fastq_ext:
if fname.endswith(ext):
return True
return False
def create_final_name(fname, date, fc_id, sample_name):
"""Create the final name of the delivered file
"""
# Split the file name according to CASAVA convention
m = re.match(r'(\S+?)_(?:[ACGTN\-]+|NoIndex|Undetermined)_L0*(\d+)_R(\d)_\d+\.fastq(.*)', fname)
if m is not None:
lane = m.group(2)
read = m.group(3)
ext = m.group(4)
else:
# Split the file name according to bcbb convention
m = re.match(r'(\d+)_(\d+)_([^_]+)_(\d+)_(?:nophix_)?(\d+)_fastq.txt(.*)', fname)
if m is None:
raise ValueError("Could not parse file name {:s} correctly!".format(fname))
lane = m.group(1)
read = m.group(5)
ext = m.group(6)
dest_file_name = "{:s}.fastq{:s}".format("_".join([lane,
date,
fc_id,
sample_name,
read]),
ext.replace('..','.'))
return dest_file_name
def get_file_copy_list(proj_base_dir, dest_proj_path, fcid, deliver_all_fcs, deliver_nophix, skip_list):
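    # Walk the project directory for fastq files and collect
    # [source_path, destination_run_directory, destination_file_name] entries,
    # honouring the flowcell filter, the nophix option and the sample skip list.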
to_copy = []
for fqfile in filtered_walk(proj_base_dir,
is_fastq,
include_dirs=[fcid] if not deliver_all_fcs else None,
exclude_dirs=skip_list):
# Get the run_name and sample_name from the path
sample_name, run_name, _ = os.path.relpath(fqfile,proj_base_dir).split(os.sep,2)
date, fc_id = run_name.split('_')
# Skip if we deliver from nophix and the parent dir is not nophix (or vice versa)
pdir = os.path.basename(os.path.dirname(fqfile))
if deliver_nophix and pdir != "nophix":
continue
if not deliver_nophix and pdir != run_name:
continue
# Skip if a compressed version of the current file exists
if os.path.exists("{:s}.gz".format(fqfile)):
print("WARNING: Both compressed and non-compressed versions of {:s} exists! " \
"Is compression/decompression in progress? Will deliver compressed version " \
"but you should make sure that the delivered files are complete!".format(fqfile))
continue
print("DEBUG: source_delivery_path = {:s}".format(os.path.dirname(fqfile)))
fname = os.path.basename(fqfile)
print(fname)
dest_run_path = os.path.join(dest_proj_path, sample_name, run_name)
dest_file_name = create_final_name(fname,date,fc_id,sample_name)
to_copy.append([fqfile,
dest_run_path,
dest_file_name])
return to_copy
def rsync_files(to_copy, logfile, group, dry):
# Iterate over the files to copy and create directories and copy files as necessary
successful = 0
uid = os.getuid()
gid = os.getgid()
if group is not None and len(group) > 0:
gid = grp.getgrnam(group).gr_gid
for src_file, dst_dir, dst_name in to_copy:
dst_file = os.path.join(dst_dir, dst_name)
print "Will copy (rsync) ", src_file, "to ", dst_file
if not dry:
# Create the destination directory if necessary
logfile.write("[{:s}] - Creating run-level delivery directory: {:s} " \
"(or leaving it in place if already present)\n".format(utc_time(),
dst_dir))
if os.path.exists(dst_dir):
print("Directory {:s} already exists!".format(dst_dir))
else:
try:
# Create directory hierarchy with ug+rwX permissions
os.makedirs(dst_dir, 0770)
except:
print("Could not create run-level delivery directory!")
clean_exit(1,logfile,dry)
# Rsync the file across
command_to_execute = ['rsync',
'-ac',
src_file,
dst_file]
logfile.write("[{:s}] - Executing command: {:s}\n".format(utc_time(), " ".join(command_to_execute)))
logfile.flush()
try:
check_call(command_to_execute)
except CalledProcessError, e:
logfile.write("[{:s}] - rsync exited with exit code {:d}\n".format(utc_time(), e.returncode))
raise e
logfile.write("[{:s}] - rsync exited with exit code 0\n".format(utc_time()))
successful += 1
print("{:d} of {:d} files copied successfully".format(successful,len(to_copy)))
# Modify the permissions to ug+rw
os.chmod(dst_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
def main():
parser = argparse.ArgumentParser(description="A script to help doing the deliveries, now using the Casava directory structure. " \
"The user is asked to provide a project ID, a run name, and an UPPMAX project")
parser.add_argument('-c', '--casava-path', action="store", dest="caspath", default='/proj/a2010002/nobackup/illumina/',
help="Specify a path to a Casava directory manually")
parser.add_argument('-l', '--log-path', action="store", dest="logpath", default='/proj/a2010002/private/delivery_logs',
help="Specify a path to a log file")
parser.add_argument('-i', '--interactive', action="store_true", dest="interactive", default=False,
help="Interactively select samples to be delivered")
parser.add_argument('-d', '--dry-run', action="store_true", dest="dry", default=False,
help="Dry run: nothing will be done")
parser.add_argument('-a', '--deliver-all-fcs', action="store_true", dest="deliver_all_fcs", default=False,
help="rsync samples from all flow cells. Default is to only deliver from specified flowcell")
parser.add_argument('-p', '--nophix', action="store_true", dest="deliver_nophix", default=False,
help="Deliver fastq files from nophix subdirectory. Default is to deliver from run directory")
parser.add_argument('-g', '--group', action="store", dest="group", default="uppmax",
help="Group membership to set on copied files")
parser.add_argument('project_name', action='store', help="Project name to deliver, e.g. J.Doe_10_01")
parser.add_argument('flowcell_id', action='store', help="Flowcell id to deliver, e.g. 120824_BD1915ACXX")
parser.add_argument('uppmax_id', action='store', help="UPPMAX project id to deliver to, e.g. b2012001")
args = parser.parse_args()
print("""\n****** Deprication ******\nPlease note that this script is deprecated and the functionality has been replaced by 'pm deliver raw-data'\n""")
if not args.project_name in os.listdir(args.caspath):
print("Could not find project. Check directory listing:")
for f in os.listdir(args.caspath):
print(f)
clean_exit(0,None,args.dry)
fcid = args.flowcell_id
fcid_comp = fcid.split('_')
if len(fcid_comp) > 2:
fcid = fcid_comp[0] + '_' + fcid_comp[-1]
print("FCID format too long, trying {:s}".format(fcid))
dt = datetime.now()
time_str = "_".join([str(dt.year),
str(dt.month),
str(dt.day),
str(dt.hour),
str(dt.minute),
str(dt.second)])
logfilename = os.path.join(os.path.normpath(args.logpath),"{:s}.log".format(time_str))
if not args.dry:
logfile = open(logfilename, "w")
else:
logfile = sys.stdout
logfile.write("[{:s}] - Project to move files for:\n{:s}\n".format(utc_time(), args.project_name))
logfile.flush()
proj_base_dir = os.path.join(args.caspath, args.project_name)
skip_list = []
if args.interactive:
for sample_dir in os.listdir(proj_base_dir):
if not os.path.isdir(os.path.join(proj_base_dir,sample_dir)):
continue
if not query_yes_no("Deliver sample {:s}?".format(sample_dir), default="no"):
skip_list.append(sample_dir)
created_proj_dir_name = fixProjName(args.project_name)
del_path_top = '/proj/' + args.uppmax_id + "/INBOX/" + created_proj_dir_name
to_copy = get_file_copy_list(proj_base_dir,
del_path_top,
fcid,
args.deliver_all_fcs,
args.deliver_nophix,
skip_list)
# Prompt user if any of the files are non-compressed
for fqfile, _, _ in to_copy:
if os.path.splitext(fqfile)[1] == ".gz":
continue
print("WARNING: The file {:s}, which you are about to deliver, does not seem to be compressed. " \
"It is recommended that you compress files prior to delivery.".format(fqfile))
if query_yes_no("Do you wish to continue delivering " \
"uncompressed fastq files?", default="yes"):
break
clean_exit(1,logfile,args.dry)
rsync_files(to_copy,
logfile,
args.group,
args.dry)
# Touch the flag for the Uppmax cronjob to fix the INBOX permissions
touch_file(os.path.join("/sw","uppmax","var","inboxfix","schedule",args.uppmax_id))
clean_exit(0,logfile,args.dry)
def clean_exit(exitcode, logfile, dry=False):
"""Close the logfile and exit with the given exit code
"""
if not dry and logfile is not None:
logfile.close()
sys.exit(exitcode)
if __name__ == "__main__":
main()
########## Tests ###########
import unittest
import shutil
import tempfile
import random
import uuid
class TestDataDelivery(unittest.TestCase):
def test_fixProjName(self):
"""Fix project name
"""
test_pnames = [("j.doe_11_01","J.Doe_11_01"),
("j.Doe_11_01","J.Doe_11_01"),
("J.doe_11_01","J.Doe_11_01"),
("J.Doe_11_01","J.Doe_11_01"),
("doe_11_01","Doe_11_01"),
("j.d.doe_11_01","J.D.Doe_11_01"),]
for test_pname, exp_pname in test_pnames:
obs_pname = fixProjName(test_pname)
self.assertEqual(obs_pname,
exp_pname,
"Did not get the expected fix ({:s}) for project name {:s} (got {:s})".format(exp_pname,test_pname,obs_pname))
def test_is_fastq(self):
"""Determine if a file name corresponds to a fastq file
"""
test_fnames = [("foo.fastq",True),
("foo.fastq.gz",True),
("foo_fastq.txt",True),
("foo_fastq.txt.gz",True),
("foo.fastq.bar",False),
("foo.txt",False),]
for test_fname, exp_result in test_fnames:
obs_result = is_fastq(test_fname)
self.assertEqual(obs_result,
exp_result,
"Did not get expected result ({:s}) for file name {:s}".format(str(exp_result),test_fname))
def _create_test_files(self, root):
to_copy = []
for n in xrange(10):
fd, sfile = tempfile.mkstemp(suffix=".tmp", prefix="rsync_test_", dir=root)
os.close(fd)
# Generate destination file hierarchies
ddir = root
for l in xrange(random.randint(1,5)):
ddir = os.path.join(ddir,str(uuid.uuid4()))
to_copy.append([sfile,ddir,"{:s}.tmp".format(str(uuid.uuid4()))])
return to_copy
def test_rsync_files(self):
"""Test the rsync functionality
"""
root = tempfile.mkdtemp(prefix="rsync_test_")
# Create some files to move
to_copy = self._create_test_files(root)
# Run rsync
with open(os.devnull, 'w') as f:
old_stdout = sys.stdout
sys.stdout = f
rsync_files(to_copy,sys.stdout,None,False)
sys.stdout = old_stdout
# Verify the copy process
for src, ddir, dname in to_copy:
self.assertTrue(os.path.exists(src),
"The rsync process have removed source file")
self.assertTrue(os.path.exists(ddir) and os.path.isdir(ddir),
"The expected destination directory was not created")
dfile = os.path.join(ddir,dname)
self.assertTrue(os.path.exists(dfile) and os.path.isfile(dfile),
"The expected destination file was not created")
exp_stat = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
obs_stat = stat.S_IMODE(os.stat(dfile).st_mode)
self.assertEqual(obs_stat,
exp_stat,
"The mode of the created file is not as expected")
shutil.rmtree(root)
def test_rsync_set_group(self):
"""Test setting the group membership on rsync'd files
"""
root = tempfile.mkdtemp(prefix="rsync_test_set_group_")
avail_groups = os.getgroups()
exp_group = grp.getgrgid(avail_groups[random.randint(1,len(avail_groups))-1])[0]
# Create some files to move
to_copy = self._create_test_files(root)
# Run rsync
with open(os.devnull, 'w') as f:
old_stdout = sys.stdout
sys.stdout = f
rsync_files(to_copy,sys.stdout,exp_group,False)
sys.stdout = old_stdout
# Verify the copy process set the correct group on created directories
for ddir in set([d[1] for d in to_copy]):
gid = os.stat(ddir).st_gid
obs_group = grp.getgrgid(gid)[0]
self.assertEqual(obs_group,
exp_group,
"Failed to set group '{}' on directory. Group is {}".format(exp_group,
obs_group))
# Verify the copy process set the correct group
for src, ddir, dname in to_copy:
dfile = os.path.join(ddir,dname)
gid = os.stat(dfile).st_gid
obs_group = grp.getgrgid(gid)[0]
self.assertEqual(obs_group,
exp_group,
"Failed to set group '{}' on file. Group is {}".format(exp_group,
obs_group))
def test_create_final_name(self):
"""Create the destination file name
"""
date = "111111"
fcid = "A11A22BCXX"
sample_name = "P101_150B_index5"
test_names = [("1_{}_{}_1_nophix_1_fastq.txt.gz".format(date,fcid),
"1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
("1_{}_{}_1_nophix_1_fastq.txt".format(date,fcid),
"1_{}_{}_{}_1.fastq".format(date,fcid,sample_name)),
("1_{}_{}_1_1_fastq.txt.gz".format(date,fcid),
"1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
("{}_CGATGT_L001_R1_001.fastq.gz".format(sample_name),
"1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
("{}_NoIndex_L001_R2_001.fastq.gz".format(sample_name),
"1_{}_{}_{}_2.fastq.gz".format(date,fcid,sample_name)),
("{}_CGATGT_L001_R1_001.fastq..gz".format(sample_name),
"1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),
("{}_CGATGT_L001_R1_001.fastq".format(sample_name),
"1_{}_{}_{}_1.fastq".format(date,fcid,sample_name))]
for test_fname, exp_result in test_names:
obs_result = create_final_name(test_fname,date,fcid,sample_name)
self.assertEqual(obs_result,
exp_result,
"Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
# Try without the _index part of file name
sample_name_noindex = "P101_150"
test_names = [("1_{}_{}_1_nophix_1_fastq.txt.gz".format(date,fcid),
"1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name_noindex)),
("{}_CGATGT_L001_R1_001.fastq.gz".format(sample_name_noindex),
"1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name_noindex)),
("{}_NoIndex_L001_R2_001.fastq.gz".format(sample_name_noindex),
"1_{}_{}_{}_2.fastq.gz".format(date,fcid,sample_name_noindex))]
for test_fname, exp_result in test_names:
obs_result = create_final_name(test_fname,date,fcid,sample_name_noindex)
self.assertEqual(obs_result,
exp_result,
"Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
# Try some illegal file names and assert that they raise exceptions
test_names = ["1_{}_{}_1_nophix_1_fastq.gz".format(date,fcid),
"a_{}_{}_1_nophix_1_fastq.txt".format(date,fcid),
"{}_CGATRGT_L1_R1_001.fastq.gz".format(sample_name)]
for test_name in test_names:
with self.assertRaises(ValueError):
create_final_name(test_name,date,fcid,sample_name)
# Try a file with undetermined reads
sample_name = "lane1"
test_names = [("{}_Undetermined_L001_R1_001.fastq.gz".format(sample_name),
"1_{}_{}_{}_1.fastq.gz".format(date,fcid,sample_name)),]
for test_fname, exp_result in test_names:
obs_result = create_final_name(test_fname,date,fcid,sample_name)
self.assertEqual(obs_result,
exp_result,
"Did not get expected final name ({:s}) for file name {:s}".format(exp_result,test_fname))
def test_get_file_copy_list(self):
"""Get list of files to copy and the destinations
"""
so = sys.stdout
dn = open(os.devnull,"w")
# Create a file hierarchy to search for files
root = tempfile.mkdtemp(prefix="test_casava_data_delivery_")
date = "111111"
fcs = ["{}_{}".format(date,fcid) for fcid in ["FCA","FCB"]]
# Create some sample files
exp_files = []
samples = []
for n in xrange(2):
sample = tempfile.mkdtemp(dir=root)
samples.append(os.path.basename(sample))
for fcid in fcs:
fcdir = os.path.join(sample,fcid)
nophixdir = os.path.join(fcdir,"nophix")
for d in [fcdir,nophixdir]:
os.makedirs(d)
test_names = ["{:d}_{:s}_1_1_fastq.txt.gz".format(random.randint(1,8),
fcid),
"{}_CGATGT_L001_R1_001.fastq.gz".format(samples[-1]),
"{}_CGATGT_L001_R1_001.fastq..gz".format(samples[-1]),]
for test_name in test_names:
test_file = os.path.join(d,test_name)
open(test_file,"w").close()
exp_files.append([samples[-1],
fcid,
os.path.basename(d) == "nophix",
test_file,
os.path.join(samples[-1],fcid),
create_final_name(os.path.basename(test_name),date,fcid.split("_")[-1],samples[-1])])
# Get the list of files to copy under various conditions
for deliver_all_fcs in [False, True]:
for fcid in fcs:
for deliver_nophix in [False, True]:
for skip_sample_list in [[],[samples[0]],[samples[1]],samples]:
sys.stdout = dn
obs_to_copy = sorted(get_file_copy_list(root,"",fcid,deliver_all_fcs,deliver_nophix,skip_sample_list))
sys.stdout = so
exp_to_copy = sorted([ef[3:6] for ef in exp_files if (deliver_all_fcs or ef[1] == fcid) and \
deliver_nophix == ef[2] and \
ef[0] not in skip_sample_list])
#import pdb; pdb.set_trace()
self.assertListEqual(obs_to_copy,
exp_to_copy,
"The files to copy result did not match the expected for " \
"{:s}".format(", ".join(["{:s}: {:s}".format(k,v) for k, v in \
dict(zip(["deliver_all_fcs",
"fcid",
"deliver_nophix",
"skip_samples"],
[str(deliver_all_fcs),
fcid,
str(deliver_nophix),
" ".join(skip_sample_list)])).items()])))
| 46.206166 | 155 | 0.517285 |
f731716eb335aa711c9fca62072e00fad94f8a35 | 4,633 | py | Python | sis-api/swagger_server/models/address.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | sis-api/swagger_server/models/address.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | sis-api/swagger_server/models/address.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Address(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id: int=None, number_or_name: str=None, street: str=None, town: str=None, county: str=None, postcode: str=None): # noqa: E501
"""Address - a model defined in Swagger
:param id: The id of this Address. # noqa: E501
:type id: int
:param number_or_name: The number_or_name of this Address. # noqa: E501
:type number_or_name: str
:param street: The street of this Address. # noqa: E501
:type street: str
:param town: The town of this Address. # noqa: E501
:type town: str
:param county: The county of this Address. # noqa: E501
:type county: str
:param postcode: The postcode of this Address. # noqa: E501
:type postcode: str
"""
self.swagger_types = {
'id': int,
'number_or_name': str,
'street': str,
'town': str,
'county': str,
'postcode': str
}
self.attribute_map = {
'id': 'id',
'number_or_name': 'number_or_name',
'street': 'street',
'town': 'town',
'county': 'county',
'postcode': 'postcode'
}
self._id = id
self._number_or_name = number_or_name
self._street = street
self._town = town
self._county = county
self._postcode = postcode
@classmethod
def from_dict(cls, dikt) -> 'Address':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Address of this Address. # noqa: E501
:rtype: Address
"""
return util.deserialize_model(dikt, cls)
@property
def id(self) -> int:
"""Gets the id of this Address.
:return: The id of this Address.
:rtype: int
"""
return self._id
@id.setter
def id(self, id: int):
"""Sets the id of this Address.
:param id: The id of this Address.
:type id: int
"""
self._id = id
@property
def number_or_name(self) -> str:
"""Gets the number_or_name of this Address.
:return: The number_or_name of this Address.
:rtype: str
"""
return self._number_or_name
@number_or_name.setter
def number_or_name(self, number_or_name: str):
"""Sets the number_or_name of this Address.
:param number_or_name: The number_or_name of this Address.
:type number_or_name: str
"""
self._number_or_name = number_or_name
@property
def street(self) -> str:
"""Gets the street of this Address.
:return: The street of this Address.
:rtype: str
"""
return self._street
@street.setter
def street(self, street: str):
"""Sets the street of this Address.
:param street: The street of this Address.
:type street: str
"""
self._street = street
@property
def town(self) -> str:
"""Gets the town of this Address.
:return: The town of this Address.
:rtype: str
"""
return self._town
@town.setter
def town(self, town: str):
"""Sets the town of this Address.
:param town: The town of this Address.
:type town: str
"""
self._town = town
@property
def county(self) -> str:
"""Gets the county of this Address.
:return: The county of this Address.
:rtype: str
"""
return self._county
@county.setter
def county(self, county: str):
"""Sets the county of this Address.
:param county: The county of this Address.
:type county: str
"""
self._county = county
@property
def postcode(self) -> str:
"""Gets the postcode of this Address.
:return: The postcode of this Address.
:rtype: str
"""
return self._postcode
@postcode.setter
def postcode(self, postcode: str):
"""Sets the postcode of this Address.
:param postcode: The postcode of this Address.
:type postcode: str
"""
self._postcode = postcode
| 24.005181 | 149 | 0.564429 |
f73180ffb7b8ac7ff84ef16f39b5d76cd4a45949 | 255 | py | Python | tests/urls.py | srijwalzartek/django-slick-reporting | aed9262a3dd83aa28e141301a4b3bf7041be7748 | [
"BSD-3-Clause"
] | null | null | null | tests/urls.py | srijwalzartek/django-slick-reporting | aed9262a3dd83aa28e141301a4b3bf7041be7748 | [
"BSD-3-Clause"
] | null | null | null | tests/urls.py | srijwalzartek/django-slick-reporting | aed9262a3dd83aa28e141301a4b3bf7041be7748 | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('report1/', views.MonthlyProductSales.as_view(), name='report1'),
path('product_crosstab_client/', views.ProductClientSalesMatrix.as_view(), name='product_crosstab_client'),
]
| 31.875 | 111 | 0.756863 |
f73184b337ca7e743751238f2a113e71bb8c75e0 | 7,364 | py | Python | tests/rundb/test_sqldb.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | null | null | null | tests/rundb/test_sqldb.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | null | null | null | tests/rundb/test_sqldb.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | 1 | 2021-05-05T14:19:46.000Z | 2021-05-05T14:19:46.000Z | # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQLDB specific tests, common tests should be in test_dbs.py"""
from collections import defaultdict
from contextlib import contextmanager
from datetime import datetime, timedelta
from unittest.mock import Mock
import pytest
from sqlalchemy.orm import Session
from mlrun.api.db.sqldb.db import SQLDB
from mlrun.api.db.sqldb.models import _tagged
from tests.conftest import new_run
@contextmanager
def patch(obj, **kw):
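    # Temporarily override attributes on `obj` with the given keyword values,
    # restoring the original attributes when the context exits.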
old = {}
for k, v in kw.items():
old[k] = getattr(obj, k)
setattr(obj, k, v)
try:
yield obj
finally:
for k, v in old.items():
setattr(obj, k, v)
def test_list_artifact_tags(db: SQLDB, db_session: Session):
db.store_artifact(db_session, "k1", {}, "1", tag="t1", project="p1")
db.store_artifact(db_session, "k1", {}, "2", tag="t2", project="p1")
db.store_artifact(db_session, "k1", {}, "2", tag="t2", project="p2")
tags = db.list_artifact_tags(db_session, "p1")
assert {"t1", "t2"} == set(tags), "bad tags"
def test_list_artifact_date(db: SQLDB, db_session: Session):
t1 = datetime(2020, 2, 16)
t2 = t1 - timedelta(days=7)
t3 = t2 - timedelta(days=7)
prj = "p7"
db.store_artifact(db_session, "k1", {"updated": t1}, "u1", project=prj)
db.store_artifact(db_session, "k2", {"updated": t2}, "u2", project=prj)
db.store_artifact(db_session, "k3", {"updated": t3}, "u3", project=prj)
arts = db.list_artifacts(db_session, project=prj, since=t3, tag="*")
assert 3 == len(arts), "since t3"
arts = db.list_artifacts(db_session, project=prj, since=t2, tag="*")
assert 2 == len(arts), "since t2"
arts = db.list_artifacts(
db_session, project=prj, since=t1 + timedelta(days=1), tag="*"
)
assert not arts, "since t1+"
arts = db.list_artifacts(db_session, project=prj, until=t2, tag="*")
assert 2 == len(arts), "until t2"
arts = db.list_artifacts(db_session, project=prj, since=t2, until=t2, tag="*")
assert 1 == len(arts), "since/until t2"
def test_list_projects(db: SQLDB, db_session: Session):
for i in range(10):
run = new_run("s1", {"l1": "v1", "l2": "v2"}, x=1)
db.store_run(db_session, run, "u7", project=f"prj{i % 3}", iter=i)
assert {"prj0", "prj1", "prj2"} == {p.name for p in db.list_projects(db_session)}
def test_run_iter0(db: SQLDB, db_session: Session):
uid, prj = "uid39", "lemon"
run = new_run("s1", {"l1": "v1", "l2": "v2"}, x=1)
for i in range(7):
db.store_run(db_session, run, uid, prj, i)
db._get_run(db_session, uid, prj, 0) # See issue 140
def test_artifacts_latest(db: SQLDB, db_session: Session):
k1, u1, art1 = "k1", "u1", {"a": 1}
prj = "p38"
db.store_artifact(db_session, k1, art1, u1, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert art1["a"] == arts[0]["a"], "bad artifact"
u2, art2 = "u2", {"a": 17}
db.store_artifact(db_session, k1, art2, u2, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert 2 == len(arts), "count"
assert art2["a"] == arts[1]["a"], "bad artifact"
k2, u3, art3 = "k2", "u3", {"a": 99}
db.store_artifact(db_session, k2, art3, u3, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert 3 == len(arts), "number"
assert {1, 17, 99} == set(art["a"] for art in arts), "latest"
@pytest.mark.parametrize("cls", _tagged)
def test_tags(db: SQLDB, db_session: Session, cls):
p1, n1 = "prj1", "name1"
obj1, obj2, obj3 = cls(), cls(), cls()
db_session.add(obj1)
db_session.add(obj2)
db_session.add(obj3)
db_session.commit()
db.tag_objects(db_session, [obj1, obj2], p1, n1)
objs = db.find_tagged(db_session, p1, n1)
assert {obj1, obj2} == set(objs), "find tags"
db.del_tag(db_session, p1, n1)
objs = db.find_tagged(db_session, p1, n1)
assert [] == objs, "find tags after del"
def _tag_objs(db: SQLDB, db_session: Session, count, project, tags):
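    """Create ``count`` tagged-model objects and tag them in ``project``, cycling through ``tags`` round-robin."""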
by_tag = defaultdict(list)
for i in range(count):
cls = _tagged[i % len(_tagged)]
obj = cls()
by_tag[tags[i % len(tags)]].append(obj)
db_session.add(obj)
db_session.commit()
for tag, objs in by_tag.items():
db.tag_objects(db_session, objs, project, tag)
def test_list_tags(db: SQLDB, db_session: Session):
p1, tags1 = "prj1", ["a", "b", "c"]
_tag_objs(db, db_session, 17, p1, tags1)
p2, tags2 = "prj2", ["b", "c", "d", "e"]
_tag_objs(db, db_session, 11, p2, tags2)
tags = db.list_tags(db_session, p1)
assert set(tags) == set(tags1), "tags"
def test_projects(db: SQLDB, db_session: Session):
prj1 = {
"name": "p1",
"description": "banana",
# 'users': ['u1', 'u2'],
"spec": {"company": "ACME"},
"state": "active",
"created": datetime.now(),
}
pid1 = db.add_project(db_session, prj1)
p1 = db.get_project(db_session, project_id=pid1)
assert p1, f"project {pid1} not found"
out = {
"name": p1.name,
"description": p1.description,
# 'users': sorted(u.name for u in p1.users),
"spec": p1.spec,
"state": p1.state,
"created": p1.created,
}
assert prj1 == out, "bad project"
data = {"description": "lemon"}
db.update_project(db_session, p1.name, data)
p1 = db.get_project(db_session, project_id=pid1)
assert data["description"] == p1.description, "bad update"
prj2 = {"name": "p2"}
db.add_project(db_session, prj2)
prjs = {p.name for p in db.list_projects(db_session)}
assert {prj1["name"], prj2["name"]} == prjs, "list"
def test_cache_projects(db: SQLDB, db_session: Session):
assert 0 == len(db._projects), "empty cache"
name = "prj348"
db.add_project(db_session, {"name": name})
assert {name} == db._projects, "project"
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name)
mock.assert_not_called()
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name + "-new")
mock.assert_called_once()
project_2_name = "project-2"
db.add_project(db_session, {"name": project_2_name})
db._projects = set()
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name)
mock.assert_not_called()
# def test_function_latest(db: SQLDB, db_session: Session):
# fn1, t1 = {'x': 1}, 'u83'
# fn2, t2 = {'x': 2}, 'u23'
# prj, name = 'p388', 'n3023'
# db.store_function(db_session, fn1, name, prj, t1)
# db.store_function(db_session, fn2, name, prj, t2)
#
# fn = db.get_function(db_session, name, prj, 'latest')
# assert fn2 == fn, 'latest'
| 33.022422 | 85 | 0.629957 |
f731a7aa0c3b474639722781f113b0d34999a1c2 | 1,011 | py | Python | nova/api/openstack/compute/legacy_v2/contrib/used_limits_for_admin.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 7 | 2015-09-22T11:27:16.000Z | 2015-11-02T12:33:46.000Z | nova/api/openstack/compute/legacy_v2/contrib/used_limits_for_admin.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 9 | 2015-05-20T11:20:17.000Z | 2017-07-27T08:21:33.000Z | nova/api/openstack/compute/legacy_v2/contrib/used_limits_for_admin.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 13 | 2015-05-05T09:34:04.000Z | 2017-11-08T02:03:46.000Z | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class Used_limits_for_admin(extensions.ExtensionDescriptor):
"""Provide data to admin on limited resources used by other tenants."""
name = "UsedLimitsForAdmin"
alias = "os-used-limits-for-admin"
namespace = ("http://docs.openstack.org/compute/ext/used_limits_for_admin"
"/api/v1.1")
updated = "2013-05-02T00:00:00Z"
| 38.884615 | 78 | 0.721068 |
f731d330a352677e2aec29c1fc65fdf8568da45c | 1,747 | py | Python | neuralmonkey/processors/alignment.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | 5 | 2017-04-24T21:10:03.000Z | 2019-05-22T13:19:35.000Z | neuralmonkey/processors/alignment.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | null | null | null | neuralmonkey/processors/alignment.py | Simon-Will/neuralmonkey | b686a9d302cb10eda5fca991e1d7ee6b9e84b75a | [
"BSD-3-Clause"
] | 5 | 2017-04-25T01:36:44.000Z | 2019-12-13T15:04:03.000Z | import re
from typing import List
import numpy as np
# pylint: disable=too-few-public-methods
ID_SEP = re.compile(r"[-:]")
class WordAlignmentPreprocessor(object):
"""A preprocessor for word alignments in a text format.
One of the following formats is expected:
s1-t1 s2-t2 ...
    s1:t1/w1 s2:t2/w2 ...
where each `s` and `t` is the index of a word in the source and target
sentence, respectively, and `w` is the corresponding weight. If the weight
    is not given, it is assumed to be 1. The separators `-` and `:` are
interchangeable.
The output of the preprocessor is an alignment matrix of the fixed shape
(target_len, source_len) for each sentence.
"""
def __init__(self, source_len, target_len, dtype=np.float32,
normalize=True, zero_based=True):
self._source_len = source_len
self._target_len = target_len
self._dtype = dtype
self._normalize = normalize
self._zero_based = zero_based
def __call__(self, sentence: List[str]):
result = np.zeros((self._target_len, self._source_len), self._dtype)
for ali in sentence:
ids, _, str_weight = ali.partition("/")
i, j = [int(id_str) for id_str in ID_SEP.split(ids)]
weight = float(str_weight) if str_weight else 1.
if not self._zero_based:
i -= 1
j -= 1
if i < self._source_len and j < self._target_len:
result[j][i] = weight
if self._normalize:
with np.errstate(divide="ignore", invalid="ignore"):
result /= result.sum(axis=1, keepdims=True)
result[np.isnan(result)] = 0
return result
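# Illustrative usage sketch (hypothetical sizes and alignments; not part of the original
# module): each token maps a source index to a target index, optionally with a weight,
# and the result is a (target_len, source_len) matrix normalized per target row.
def _word_alignment_example():
    preprocessor = WordAlignmentPreprocessor(source_len=4, target_len=3)
    matrix = preprocessor("0-0 1-1 2:2/0.5".split())
    # row 2 holds 1.0 in column 2 after normalization because it is the only entry there
    return matrix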
| 30.12069 | 78 | 0.613051 |
f731d3eb4aae5e78db2bfaf2ea2d5f1e9832f7d4 | 9,108 | py | Python | repos/system_upgrade/el7toel8/actors/systemfacts/libraries/systemfacts.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/systemfacts/libraries/systemfacts.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/systemfacts/libraries/systemfacts.py | adka1408/leapp-repository | be5a9603b57f86c65d395ba6a02b860cacae0fb6 | [
"Apache-2.0"
] | null | null | null | import errno
import functools
import grp
import json
import logging
import os
import pwd
import re
from six.moves import configparser
import six
from leapp.libraries.stdlib import CalledProcessError, api, run
from leapp.models import SysctlVariablesFacts, SysctlVariable, ActiveKernelModulesFacts, ActiveKernelModule, \
KernelModuleParameter, UsersFacts, User, GroupsFacts, Group, RepositoriesFacts, RepositoryFile, RepositoryData, \
SELinuxFacts, fields, FirewallStatus, FirewallsFacts
def aslist(f):
''' Decorator used to convert generator to list '''
@functools.wraps(f)
def inner(*args, **kwargs):
return list(f(*args, **kwargs))
return inner
def anyendswith(value, ends):
''' Check if `value` ends with one of the possible `ends` '''
for end in ends:
if value.endswith(end):
return True
return False
def anyhasprefix(value, prefixes):
    ''' Check if `value` starts with one of the possible `prefixes` '''
for p in prefixes:
if value.startswith(p):
return True
return False
@aslist
def _get_system_users():
for p in pwd.getpwall():
yield User(
name=p.pw_name,
uid=p.pw_uid,
gid=p.pw_gid,
home=p.pw_dir
)
def get_system_users_status():
''' Get a list of users from `/etc/passwd` '''
return UsersFacts(users=_get_system_users())
@aslist
def _get_system_groups():
for g in grp.getgrall():
yield Group(
name=g.gr_name,
gid=g.gr_gid,
members=g.gr_mem
)
def get_system_groups_status():
''' Get a list of groups from `/etc/groups` '''
return GroupsFacts(groups=_get_system_groups())
@aslist
def _get_active_kernel_modules(logger):
lines = run(['lsmod'], split=True)['stdout']
    for line in lines[1:]:
        name = line.split(' ')[0]
# Read parameters of the given module as exposed by the
# `/sys` VFS, if there are no parameters exposed we just
# take the name of the module
base_path = '/sys/module/{module}'.format(module=name)
parameters_path = os.path.join(base_path, 'parameters')
if not os.path.exists(parameters_path):
yield ActiveKernelModule(filename=name, parameters=[])
continue
# Use `modinfo` to probe for signature information
parameter_dict = {}
try:
signature = run(['modinfo', '-F', 'signature', name], split=False)['stdout']
except CalledProcessError:
signature = None
signature_string = None
if signature:
            # Remove whitespace from the signature string
signature_string = re.sub(r"\s+", "", signature, flags=re.UNICODE)
# Since we're using the `/sys` VFS we need to use `os.listdir()` to get
# all the property names and then just read from all the listed paths
parameters = sorted(os.listdir(parameters_path))
for param in parameters:
try:
with open(os.path.join(parameters_path, param), mode='r') as fp:
parameter_dict[param] = fp.read().strip()
except IOError as exc:
                # Some parameters are write-only; in that case we just log the name of the parameter
# and the module and continue
if exc.errno in (errno.EACCES, errno.EPERM):
msg = 'Unable to read parameter "{param}" of kernel module "{name}"'
logger.warning(msg.format(param=param, name=name))
else:
raise exc
# Project the dictionary as a list of key values
items = [
KernelModuleParameter(name=k, value=v)
for (k, v) in six.iteritems(parameter_dict)
]
yield ActiveKernelModule(
filename=name,
parameters=items,
signature=signature_string
)
def get_active_kernel_modules_status(logger):
''' Get a list of active kernel modules '''
return ActiveKernelModulesFacts(kernel_modules=_get_active_kernel_modules(logger))
@aslist
def _get_sysctls():
unstable = ('fs.dentry-state', 'fs.file-nr', 'fs.inode-nr',
'fs.inode-state', 'kernel.random.uuid', 'kernel.random.entropy_avail',
'kernel.ns_last_pid', 'net.netfilter.nf_conntrack_count',
'net.netfilter.nf_conntrack_events', 'kernel.sched_domain.',
'dev.cdrom.info', 'kernel.pty.nr')
variables = []
for sc in run(['sysctl', '-a'], split=True)['stdout']:
name = sc.split(' ', 1)[0]
# if the sysctl name has an unstable prefix, we skip
if anyhasprefix(name, unstable):
continue
variables.append(sc)
# sort our variables so they can be diffed directly when needed
for var in sorted(variables):
name, value = tuple(map(type(var).strip, var.split('=')))
yield SysctlVariable(
name=name,
value=value
)
def get_sysctls_status():
r''' Get a list of stable `sysctls` variables
Note that some variables are inherently unstable and we need to blacklist
them:
diff -u <(sysctl -a 2>/dev/null | sort) <(sysctl -a 2>/dev/null | sort)\
| grep -E '^\+[a-z]'\
| cut -d' ' -f1\
| cut -d+ -f2
'''
return SysctlVariablesFacts(sysctl_variables=_get_sysctls())
@aslist
def _get_repositories():
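    """Parse every ``*.repo`` file under ``/etc/yum.repos.d/`` into a RepositoryFile model."""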
def asbool(x):
return x == '1'
@aslist
def _parse(r):
with open(r, mode='r') as fp:
cp = configparser.ConfigParser()
cp.readfp(fp)
for section in cp.sections():
prepared = {'repoid': section, 'additional_fields': {}}
data = dict(cp.items(section))
for key in data.keys():
if key in RepositoryData.fields:
if isinstance(RepositoryData.fields[key], fields.Boolean):
data[key] = asbool(data[key])
prepared[key] = data[key]
else:
prepared['additional_fields'][key] = data[key]
prepared['additional_fields'] = json.dumps(prepared['additional_fields'])
yield RepositoryData(**prepared)
repos = run(
['find', '/etc/yum.repos.d/', '-type', 'f', '-name', '*.repo'],
split=True
)['stdout']
for repo in repos:
yield RepositoryFile(file=repo, data=_parse(repo))
def get_repositories_status():
''' Get a basic information about YUM repositories installed in the system '''
return RepositoriesFacts(repositories=_get_repositories())
def get_selinux_status():
''' Get SELinux status information '''
# will be None if something went wrong or contain SELinuxFacts otherwise
res = None
try:
import selinux
except ImportError:
api.report_error("SELinux Import Error", details="libselinux-python package must be installed.")
return res
outdata = dict({'enabled': selinux.is_selinux_enabled() == 1})
outdata['mls_enabled'] = selinux.is_selinux_mls_enabled() == 1
try:
outdata['runtime_mode'] = "enforcing" if selinux.security_getenforce() == 1 else "permissive"
        # FIXME: check selinux_getenforcemode[0] (that should be the return value of the underlying function)
enforce_mode = selinux.selinux_getenforcemode()[1]
if enforce_mode >= 0:
outdata['static_mode'] = "enforcing" if enforce_mode == 1 else "permissive"
else:
outdata['static_mode'] = "disabled"
outdata['policy'] = selinux.selinux_getpolicytype()[1]
except OSError:
# This happens when SELinux is disabled
# [Errno 2] No such file or directory
outdata['runtime_mode'] = 'permissive'
outdata['static_mode'] = 'disabled'
outdata['policy'] = 'targeted'
res = SELinuxFacts(**outdata)
return res
def get_firewalls_status():
''' Get firewalld status information '''
logger = logging.getLogger('get_firewalld_status')
def _get_firewall_status(service_name):
try:
ret_list = run(['systemctl', 'is-active', service_name], split=True)['stdout']
active = ret_list[0] == 'active'
except CalledProcessError:
active = False
logger.debug('The %s service is likely not active', service_name)
try:
ret_list = run(['systemctl', 'is-enabled', service_name], split=True)['stdout']
enabled = ret_list[0] == 'enabled'
except CalledProcessError:
enabled = False
logger.debug('The %s service is likely not enabled nor running', service_name)
return FirewallStatus(
active=active,
enabled=enabled,
)
return FirewallsFacts(
firewalld=_get_firewall_status('firewalld'),
iptables=_get_firewall_status('iptables'),
ip6tables=_get_firewall_status('ip6tables'),
)
| 33.485294 | 117 | 0.60639 |
f731f4063b2323508a137e94df93708a25ff3792 | 11,583 | py | Python | evap/contributor/views.py | JannisBerndt/EvaP | a3ca8bcf091e811421084c4db14ae9666cf2a27f | [
"MIT"
] | 26 | 2015-01-18T18:01:57.000Z | 2018-10-12T14:37:15.000Z | evap/contributor/views.py | JannisBerndt/EvaP | a3ca8bcf091e811421084c4db14ae9666cf2a27f | [
"MIT"
] | 737 | 2015-01-02T17:43:25.000Z | 2018-12-10T20:45:10.000Z | evap/contributor/views.py | JannisBerndt/EvaP | a3ca8bcf091e811421084c4db14ae9666cf2a27f | [
"MIT"
] | 83 | 2015-01-14T12:39:41.000Z | 2018-10-29T16:36:43.000Z | from django.contrib import messages
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.db import IntegrityError, transaction
from django.db.models import Exists, Max, OuterRef, Q
from django.forms.models import inlineformset_factory
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import gettext as _
from django.views.decorators.http import require_POST
from evap.contributor.forms import DelegateSelectionForm, EditorContributionForm, EvaluationForm
from evap.evaluation.auth import editor_or_delegate_required, responsible_or_contributor_or_delegate_required
from evap.evaluation.models import (
Contribution,
Course,
CourseType,
Degree,
EmailTemplate,
Evaluation,
Semester,
UserProfile,
)
from evap.evaluation.tools import (
FileResponse,
get_object_from_dict_pk_entry_or_logged_40x,
get_parameter_from_url_or_session,
sort_formset,
)
from evap.results.exporters import ResultsExporter
from evap.results.tools import annotate_distributions_and_grades, get_evaluations_with_course_result_attributes
from evap.staff.forms import ContributionFormset
from evap.student.views import get_valid_form_groups_or_render_vote_page
@responsible_or_contributor_or_delegate_required
def index(request):
user = request.user
show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)
represented_proxy_users = user.represented_users.filter(is_proxy_user=True)
contributor_visible_states = [
Evaluation.State.PREPARED,
Evaluation.State.EDITOR_APPROVED,
Evaluation.State.APPROVED,
Evaluation.State.IN_EVALUATION,
Evaluation.State.EVALUATED,
Evaluation.State.REVIEWED,
Evaluation.State.PUBLISHED,
]
own_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states)
& (
Q(responsibles=user)
| Q(evaluations__contributions__contributor=user)
| Q(evaluations__contributions__contributor__in=represented_proxy_users)
| Q(responsibles__in=represented_proxy_users)
)
)
own_evaluations = (
Evaluation.objects.filter(course__in=own_courses)
.annotate(contributes_to=Exists(Evaluation.objects.filter(id=OuterRef("id"), contributions__contributor=user)))
.prefetch_related("course", "course__evaluations", "course__degrees", "course__type", "course__semester")
)
own_evaluations = [evaluation for evaluation in own_evaluations if evaluation.can_be_seen_by(user)]
displayed_evaluations = own_evaluations
if show_delegated:
represented_users = user.represented_users.exclude(is_proxy_user=True)
delegated_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states)
& (
Q(responsibles__in=represented_users)
| Q(
evaluations__contributions__role=Contribution.Role.EDITOR,
evaluations__contributions__contributor__in=represented_users,
)
)
)
delegated_evaluations = Evaluation.objects.filter(course__in=delegated_courses).prefetch_related(
"course", "course__evaluations", "course__degrees", "course__type", "course__semester"
)
delegated_evaluations = [evaluation for evaluation in delegated_evaluations if evaluation.can_be_seen_by(user)]
for evaluation in delegated_evaluations:
evaluation.delegated_evaluation = True
displayed_evaluations += set(delegated_evaluations) - set(displayed_evaluations)
displayed_evaluations.sort(
key=lambda evaluation: (evaluation.course.name, evaluation.name)
) # evaluations must be sorted for regrouping them in the template
annotate_distributions_and_grades(e for e in displayed_evaluations if e.state == Evaluation.State.PUBLISHED)
displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)
semesters = Semester.objects.all()
semester_list = [
dict(
semester_name=semester.name,
id=semester.id,
is_active=semester.is_active,
evaluations=[
evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id
],
)
for semester in semesters
]
template_data = dict(
semester_list=semester_list,
show_delegated=show_delegated,
delegate_selection_form=DelegateSelectionForm(),
)
return render(request, "contributor_index.html", template_data)
@editor_or_delegate_required
def evaluation_view(request, evaluation_id):
user = request.user
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
# check rights
if (
not evaluation.is_user_editor_or_delegate(user)
or not Evaluation.State.PREPARED <= evaluation.state <= Evaluation.State.REVIEWED
):
raise PermissionDenied
InlineContributionFormset = inlineformset_factory(
Evaluation, Contribution, formset=ContributionFormset, form=EditorContributionForm, extra=0
)
form = EvaluationForm(request.POST or None, instance=evaluation)
formset = InlineContributionFormset(request.POST or None, instance=evaluation)
# make everything read-only
for cform in formset.forms + [form]:
for field in cform.fields.values():
field.disabled = True
template_data = dict(
form=form,
formset=formset,
evaluation=evaluation,
editable=False,
questionnaires_with_answers_per_contributor={},
)
return render(request, "contributor_evaluation_form.html", template_data)
def render_preview(request, formset, evaluation_form, evaluation):
# open transaction to not let any other requests see anything of what we're doing here
try:
with transaction.atomic():
evaluation = evaluation_form.save()
formset.save()
request.POST = None # this prevents errors rendered in the vote form
preview_response = get_valid_form_groups_or_render_vote_page(
request, evaluation, preview=True, for_rendering_in_modal=True
)[1].content.decode()
raise IntegrityError # rollback transaction to discard the database writes
except IntegrityError:
pass
return preview_response
@editor_or_delegate_required
def evaluation_edit(request, evaluation_id):
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
# check rights
if not (evaluation.is_user_editor_or_delegate(request.user) and evaluation.state == Evaluation.State.PREPARED):
raise PermissionDenied
post_operation = request.POST.get("operation") if request.POST else None
preview = post_operation == "preview"
InlineContributionFormset = inlineformset_factory(
Evaluation, Contribution, formset=ContributionFormset, form=EditorContributionForm, extra=1
)
evaluation_form = EvaluationForm(request.POST or None, instance=evaluation)
formset = InlineContributionFormset(
request.POST or None, instance=evaluation, form_kwargs={"evaluation": evaluation}
)
forms_are_valid = evaluation_form.is_valid() and formset.is_valid()
if forms_are_valid and not preview:
if post_operation not in ("save", "approve"):
raise SuspiciousOperation("Invalid POST operation")
form_has_changed = evaluation_form.has_changed() or formset.has_changed()
evaluation_form.save()
formset.save()
if post_operation == "approve":
evaluation.editor_approve()
evaluation.save()
if form_has_changed:
messages.success(request, _("Successfully updated and approved evaluation."))
else:
messages.success(request, _("Successfully approved evaluation."))
else:
messages.success(request, _("Successfully updated evaluation."))
return redirect("contributor:index")
preview_html = None
if preview and forms_are_valid:
preview_html = render_preview(request, formset, evaluation_form, evaluation)
if not forms_are_valid and (evaluation_form.errors or formset.errors):
if preview:
messages.error(request, _("The preview could not be rendered. Please resolve the errors shown below."))
else:
messages.error(request, _("The form was not saved. Please resolve the errors shown below."))
sort_formset(request, formset)
template_data = dict(
form=evaluation_form,
formset=formset,
evaluation=evaluation,
editable=True,
preview_html=preview_html,
questionnaires_with_answers_per_contributor={},
)
return render(request, "contributor_evaluation_form.html", template_data)
@responsible_or_contributor_or_delegate_required
def evaluation_preview(request, evaluation_id):
user = request.user
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
# check rights
if not (
evaluation.is_user_responsible_or_contributor_or_delegate(user)
and Evaluation.State.PREPARED <= evaluation.state <= Evaluation.State.REVIEWED
):
raise PermissionDenied
return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1]
@require_POST
@editor_or_delegate_required
def evaluation_direct_delegation(request, evaluation_id):
evaluation = get_object_or_404(Evaluation, id=evaluation_id)
delegate_user = get_object_from_dict_pk_entry_or_logged_40x(UserProfile, request.POST, "delegate_to")
contribution, created = Contribution.objects.update_or_create(
evaluation=evaluation,
contributor=delegate_user,
defaults={"role": Contribution.Role.EDITOR},
)
if created:
contribution.order = evaluation.contributions.all().aggregate(Max("order"))["order__max"] + 1
contribution.save()
template = EmailTemplate.objects.get(name=EmailTemplate.DIRECT_DELEGATION)
subject_params = {"evaluation": evaluation, "user": request.user, "delegate_user": delegate_user}
body_params = subject_params
# we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have
# an email address. In this special case, we don't want that warning. Instead, we want a mail to the admins.
template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user])
messages.add_message(
request,
messages.SUCCESS,
_('{} was added as a contributor for evaluation "{}" and was sent an email with further information.').format(
str(delegate_user), str(evaluation)
),
)
return redirect("contributor:index")
def export_contributor_results(contributor):
filename = f"Evaluation_{contributor.full_name}.xls"
response = FileResponse(filename, content_type="application/vnd.ms-excel")
ResultsExporter().export(
response,
Semester.objects.all(),
[(Degree.objects.all(), CourseType.objects.all())],
include_not_enough_voters=True,
include_unpublished=False,
contributor=contributor,
)
return response
@responsible_or_contributor_or_delegate_required
def export(request):
return export_contributor_results(request.user)
| 39 | 131 | 0.723388 |
f731f62c057be653547c98aaf27b9d20dd30444f | 8,308 | py | Python | alipay/aop/api/domain/HealthServiceFamilyDoctorDrugDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/HealthServiceFamilyDoctorDrugDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/HealthServiceFamilyDoctorDrugDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class HealthServiceFamilyDoctorDrugDTO(object):
def __init__(self):
self._catalogue_listed = None
self._dosage_forms = None
self._drug_classification = None
self._general_name = None
self._inventory = None
self._item_id = None
self._item_name = None
self._manufacturer_name = None
self._max_purchase_quantity = None
self._min_purchase_quantity = None
self._price = None
self._specifications = None
self._support_emergency_delivery = None
self._usage_dosage = None
@property
def catalogue_listed(self):
return self._catalogue_listed
@catalogue_listed.setter
def catalogue_listed(self, value):
self._catalogue_listed = value
@property
def dosage_forms(self):
return self._dosage_forms
@dosage_forms.setter
def dosage_forms(self, value):
self._dosage_forms = value
@property
def drug_classification(self):
return self._drug_classification
@drug_classification.setter
def drug_classification(self, value):
self._drug_classification = value
@property
def general_name(self):
return self._general_name
@general_name.setter
def general_name(self, value):
self._general_name = value
@property
def inventory(self):
return self._inventory
@inventory.setter
def inventory(self, value):
self._inventory = value
@property
def item_id(self):
return self._item_id
@item_id.setter
def item_id(self, value):
self._item_id = value
@property
def item_name(self):
return self._item_name
@item_name.setter
def item_name(self, value):
self._item_name = value
@property
def manufacturer_name(self):
return self._manufacturer_name
@manufacturer_name.setter
def manufacturer_name(self, value):
self._manufacturer_name = value
@property
def max_purchase_quantity(self):
return self._max_purchase_quantity
@max_purchase_quantity.setter
def max_purchase_quantity(self, value):
self._max_purchase_quantity = value
@property
def min_purchase_quantity(self):
return self._min_purchase_quantity
@min_purchase_quantity.setter
def min_purchase_quantity(self, value):
self._min_purchase_quantity = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
@property
def specifications(self):
return self._specifications
@specifications.setter
def specifications(self, value):
self._specifications = value
@property
def support_emergency_delivery(self):
return self._support_emergency_delivery
@support_emergency_delivery.setter
def support_emergency_delivery(self, value):
self._support_emergency_delivery = value
@property
def usage_dosage(self):
return self._usage_dosage
@usage_dosage.setter
def usage_dosage(self, value):
self._usage_dosage = value
def to_alipay_dict(self):
params = dict()
if self.catalogue_listed:
if hasattr(self.catalogue_listed, 'to_alipay_dict'):
params['catalogue_listed'] = self.catalogue_listed.to_alipay_dict()
else:
params['catalogue_listed'] = self.catalogue_listed
if self.dosage_forms:
if hasattr(self.dosage_forms, 'to_alipay_dict'):
params['dosage_forms'] = self.dosage_forms.to_alipay_dict()
else:
params['dosage_forms'] = self.dosage_forms
if self.drug_classification:
if hasattr(self.drug_classification, 'to_alipay_dict'):
params['drug_classification'] = self.drug_classification.to_alipay_dict()
else:
params['drug_classification'] = self.drug_classification
if self.general_name:
if hasattr(self.general_name, 'to_alipay_dict'):
params['general_name'] = self.general_name.to_alipay_dict()
else:
params['general_name'] = self.general_name
if self.inventory:
if hasattr(self.inventory, 'to_alipay_dict'):
params['inventory'] = self.inventory.to_alipay_dict()
else:
params['inventory'] = self.inventory
if self.item_id:
if hasattr(self.item_id, 'to_alipay_dict'):
params['item_id'] = self.item_id.to_alipay_dict()
else:
params['item_id'] = self.item_id
if self.item_name:
if hasattr(self.item_name, 'to_alipay_dict'):
params['item_name'] = self.item_name.to_alipay_dict()
else:
params['item_name'] = self.item_name
if self.manufacturer_name:
if hasattr(self.manufacturer_name, 'to_alipay_dict'):
params['manufacturer_name'] = self.manufacturer_name.to_alipay_dict()
else:
params['manufacturer_name'] = self.manufacturer_name
if self.max_purchase_quantity:
if hasattr(self.max_purchase_quantity, 'to_alipay_dict'):
params['max_purchase_quantity'] = self.max_purchase_quantity.to_alipay_dict()
else:
params['max_purchase_quantity'] = self.max_purchase_quantity
if self.min_purchase_quantity:
if hasattr(self.min_purchase_quantity, 'to_alipay_dict'):
params['min_purchase_quantity'] = self.min_purchase_quantity.to_alipay_dict()
else:
params['min_purchase_quantity'] = self.min_purchase_quantity
if self.price:
if hasattr(self.price, 'to_alipay_dict'):
params['price'] = self.price.to_alipay_dict()
else:
params['price'] = self.price
if self.specifications:
if hasattr(self.specifications, 'to_alipay_dict'):
params['specifications'] = self.specifications.to_alipay_dict()
else:
params['specifications'] = self.specifications
if self.support_emergency_delivery:
if hasattr(self.support_emergency_delivery, 'to_alipay_dict'):
params['support_emergency_delivery'] = self.support_emergency_delivery.to_alipay_dict()
else:
params['support_emergency_delivery'] = self.support_emergency_delivery
if self.usage_dosage:
if hasattr(self.usage_dosage, 'to_alipay_dict'):
params['usage_dosage'] = self.usage_dosage.to_alipay_dict()
else:
params['usage_dosage'] = self.usage_dosage
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = HealthServiceFamilyDoctorDrugDTO()
if 'catalogue_listed' in d:
o.catalogue_listed = d['catalogue_listed']
if 'dosage_forms' in d:
o.dosage_forms = d['dosage_forms']
if 'drug_classification' in d:
o.drug_classification = d['drug_classification']
if 'general_name' in d:
o.general_name = d['general_name']
if 'inventory' in d:
o.inventory = d['inventory']
if 'item_id' in d:
o.item_id = d['item_id']
if 'item_name' in d:
o.item_name = d['item_name']
if 'manufacturer_name' in d:
o.manufacturer_name = d['manufacturer_name']
if 'max_purchase_quantity' in d:
o.max_purchase_quantity = d['max_purchase_quantity']
if 'min_purchase_quantity' in d:
o.min_purchase_quantity = d['min_purchase_quantity']
if 'price' in d:
o.price = d['price']
if 'specifications' in d:
o.specifications = d['specifications']
if 'support_emergency_delivery' in d:
o.support_emergency_delivery = d['support_emergency_delivery']
if 'usage_dosage' in d:
o.usage_dosage = d['usage_dosage']
return o
| 35.20339 | 103 | 0.630958 |
f731f79225b7db086a4396618b46b21004ef1931 | 7,642 | py | Python | armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py | albeanth/armi | 3755ffd2fcd1f7b6c557ef3e3f36126706a84c70 | [
"Apache-2.0"
] | null | null | null | armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py | albeanth/armi | 3755ffd2fcd1f7b6c557ef3e3f36126706a84c70 | [
"Apache-2.0"
] | null | null | null | armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py | albeanth/armi | 3755ffd2fcd1f7b6c557ef3e3f36126706a84c70 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An abstract class for interfaces between ARMI and programs that simulate transmutation and decay.
"""
import collections
from armi import interfaces
from armi.nucDirectory import nuclideBases
from armi.nuclearDataIO import xsLibraries
from armi.physics.neutronics.isotopicDepletion.crossSectionTable import (
CrossSectionTable,
)
from armi.reactor import composites
from armi.reactor.flags import Flags
def isDepletable(obj: composites.ArmiObject):
"""
Return True if obj or any child is flagged as DEPLETABLE.
The DEPLETABLE flag is automatically set to true if any composition contains
nuclides that are in the active nuclides list, unless flags are specifically
set and DEPLETABLE is left out.
This is often interpreted by depletion plugins as indicating which parts of the
problem to apply depletion to. Analysts may want to turn on and off depletion
in certain problems.
For example, sometimes they want the control rods to deplete
to figure out how often to replace them. But in conceptual design, they may want to just
leave them as they are as an approximation.
.. warning:: The ``DEPLETABLE`` flag is automatically added to compositions that have
active nuclides. If you explicitly define any flags at all, you must also
manually include ``DEPLETABLE`` or else the objects will silently not deplete.
Notes
-----
The auto-flagging of ``DEPLETABLE`` happens in the construction of blueprints
rather than in a plugin hook because the reactor is not available at the time
the plugin hook runs.
See Also
--------
armi.reactor.blueprints.componentBlueprint.insertDepletableNuclideKeys
"""
return obj.hasFlags(Flags.DEPLETABLE) or obj.containsAtLeastOneChildWithFlags(
Flags.DEPLETABLE
)
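# Illustrative filter (hypothetical reactor handle ``r``; not part of the original module):
# a depletion plugin would typically select the objects it burns with this helper, e.g.
#     depletableBlocks = [b for b in r.core.getBlocks() if isDepletable(b)]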
class AbstractIsotopicDepleter:
r"""
Interact with a depletion code
    This interface and its subclasses deplete under a flux defined outside this
    interface.
The depletion in this analysis only depends on the flux, material vectors,
    nuclear data and continuous source and loss objects.
The depleters derived from this abstract class use all the fission products
armi can handle -- i.e. do not form lumped fission products.
    _depleteByName contains the ARMI objects to deplete, keyed by name.
"""
name = None
function = "depletion"
def __init__(self, r=None, cs=None, o=None):
self.r = r
self.cs = cs
self.o = o
# ARMI objects to deplete keyed by name
# order is important for consistency in iterating through objects
# cinder interface input format is very dependent on object order
self._depleteByName = collections.OrderedDict()
self.efpdToBurn = None
self.allNuclidesInProblem = r.blueprints.allNuclidesInProblem if r else []
def addToDeplete(self, armiObj):
"""Add the oject to the group of objects to be depleted."""
self._depleteByName[armiObj.getName()] = armiObj
def setToDeplete(self, armiObjects):
"""Change the group of objects to deplete to the specified group."""
listOfTuples = [(obj.getName(), obj) for obj in armiObjects]
self._depleteByName = collections.OrderedDict(listOfTuples)
def getToDeplete(self):
"""Return objects to be depleted."""
return list(self._depleteByName.values())
def run(self):
r"""
Submit depletion case with external solver to the cluster.
In addition to running the physics kernel, this method calls the waitForJob method
        to wait for its job to finish
comm = MPI.COMM_SELF.Spawn(sys.executable,args=['cpi.py'],maxprocs=5)
"""
raise NotImplementedError
def makeXsecTable(
compositeName,
xsType,
mgFlux,
isotxs,
headerFormat="$ xsecs for {}",
tableFormat="\n{mcnpId} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}",
):
"""
Make a cross section table for depletion physics input decks.
Parameters
----------
    compositeName: str
        name of the composite (e.g. a block or batch) the table is written for
    xsType: str
        cross section type suffix used to select nuclides from the ISOTXS library
    mgFlux: list of float
        multigroup flux for the composite; its sum is used as the total flux
    isotxs: isotxs object
    headerFormat: string (optional)
        this is the format in which the elements of the header will be returned
        -- i.e. if you use a .format() call with the case name you'll return a
        formatted list of string elements
    tableFormat: string (optional)
        this is the format in which the elements of the table will be returned
        -- i.e. if you use a .format() call with mcnpId, nG, nF, n2n, n3n, nA,
        and nP you'll get the format you want
    Returns
    -------
output: list
a list of string elements that together make a xsec card
See Also
--------
crossSectionTable.makeCrossSectionTable
Makes a table for arbitrary ArmiObjects
"""
xsTable = CrossSectionTable()
if not xsType or not sum(mgFlux) > 0:
return []
xsTable.setName(compositeName)
totalFlux = sum(mgFlux)
for nucLabel, nuc in isotxs.items():
if xsType != xsLibraries.getSuffixFromNuclideLabel(nucLabel):
continue
nucName = nuc.name
nb = nuclideBases.byName[nucName]
if isinstance(
nb, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase)
):
continue
microMultiGroupXS = isotxs[nucLabel].micros
if not isinstance(nb, nuclideBases.NaturalNuclideBase):
xsTable.addMultiGroupXS(nucName, microMultiGroupXS, mgFlux, totalFlux)
return xsTable.getXsecTable(headerFormat=headerFormat, tableFormat=tableFormat)
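def _makeXsecTableSketch(isotxs):
    # Illustrative call (hypothetical composite name, xsType suffix and 2-group flux;
    # not part of the original module): returns the formatted cross section card lines.
    return makeXsecTable("block-0001", "A", [1.0e14, 5.0e13], isotxs)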
class AbstractIsotopicDepletionReader(interfaces.OutputReader):
r"""
Read number density output produced by the isotopic depletion
"""
def read(self):
r"""
        read an isotopic depletion output file and apply the results to the ARMI objects in the ToDepletion attribute
"""
raise NotImplementedError
class Csrc:
"""
Writes a continuous source term card in a depletion interface.
Notes
-----
The chemical vector is a dictionary of chemicals and their removal rate
constant -- this works like a decay constant.
The isotopic vector is used to make a source material in continuous source definitions.
This is also the base class for continuous loss cards.
"""
def __init__(self):
self._chemicalVector = {}
self._isotopicVector = {}
self.defaultVector = {"0": 0}
def setChemicalVector(self, chemicalVector):
self._chemicalVector = chemicalVector
def getChemicalVector(self):
return self._chemicalVector
def write(self):
"""
return a list of lines to write for a csrc card
"""
raise NotImplementedError
| 33.964444 | 110 | 0.691965 |
f73218bda9737f9a09039fb3e086c4956b2a87d2 | 1,903 | py | Python | get_url.py | tracysmith/RGAPepPipe | f334c2a58f41d0b38c0d5884a430e24a21788304 | [
"MIT"
] | 3 | 2017-08-06T18:01:43.000Z | 2018-06-20T04:54:49.000Z | get_url.py | tracysmith/RGAPepPipe | f334c2a58f41d0b38c0d5884a430e24a21788304 | [
"MIT"
] | 28 | 2015-01-05T18:00:48.000Z | 2016-09-06T18:30:29.000Z | otherScripts/get_url.py | pepperell-lab/RGAPepPipe | 0122dca9aca75756ad412599c7922bf08edc7f6d | [
"MIT"
] | 2 | 2017-07-27T14:07:51.000Z | 2018-07-25T15:00:05.000Z | #!/usr/bin/python
import sys, argparse, os
from subprocess import call
from multiprocessing.dummy import Pool as ThreadPool
###################################################################
# This is a Python script to download fastq files from ENA
#You can use this directly with the enaFileParser output
###################################################################
class FullPaths(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest,
os.path.abspath(os.path.expanduser(values)))
def is_file(filename):
"""Checks if a file exists"""
if not os.path.isfile(filename):
msg = "{0} is not a file".format(filename)
raise argparse.ArgumentTypeError(msg)
else:
return filename
def get_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description='Download fastq files from ENA')
parser.add_argument("urlFile", help="ERPXXXXXX_download.txt generated from enaFileParser.py", action=FullPaths,
type=is_file)
parser.add_argument("-t", "--threads",
help="Number of threads to use (default: 1)",
type=int, default=1)
return parser.parse_args()
def make_urlList(urlFile):
urls = []
with open(urlFile, 'r') as infile:
for line in infile:
line=line.strip()
urls.append(line)
return urls
def download_url(url):
call('wget {url}'.format(url=url), shell=True)
ftp = url.split("/")
index = len(ftp)-1
filename = ftp[index]
call('gunzip {filename}'.format(filename=filename), shell=True)
args = get_args()
urls = make_urlList(args.urlFile)
#Make the Pool of workers
pool = ThreadPool(args.threads)
#Open the urls in their own threads and return the results
pool.map(download_url, urls)
pool.close()
pool.join()
| 31.716667 | 115 | 0.636889 |
f732394175903cc275173763bb703893ecd75976 | 369 | py | Python | tests/test_initial_data.py | luiscberrocal/django-acp-calendar | 7251d7cbb1ba16983bbc3ba9af6178eb31408bee | [
"BSD-3-Clause"
] | 1 | 2016-10-05T05:17:35.000Z | 2016-10-05T05:17:35.000Z | tests/test_initial_data.py | luiscberrocal/django-acp-calendar | 7251d7cbb1ba16983bbc3ba9af6178eb31408bee | [
"BSD-3-Clause"
] | 17 | 2016-09-30T13:43:20.000Z | 2021-06-10T20:44:40.000Z | tests/test_initial_data.py | luiscberrocal/django-acp-calendar | 7251d7cbb1ba16983bbc3ba9af6178eb31408bee | [
"BSD-3-Clause"
] | 6 | 2016-04-11T14:41:44.000Z | 2017-10-20T21:16:39.000Z | from django.test import TestCase
from acp_calendar.initial_data import get_holidays_list
class TestInitialData(TestCase):
def test_get_holidays_list(self):
holidays = get_holidays_list()
self.assertEqual(144, len(holidays))
self.assertEqual('2006-01-01', holidays[0]['date'])
self.assertEqual('2018-12-25', holidays[-1]['date'])
| 28.384615 | 60 | 0.710027 |
f7325e38e6b3e37a43fb029d0d2a6c0bc984703c | 1,956 | py | Python | experiments/Scripts for creating plots/sac_performance_over_generations.py | arlo-lib/ARLO | 159669884044686e36e07bd1cc0948884ed7cc8d | [
"MIT"
] | null | null | null | experiments/Scripts for creating plots/sac_performance_over_generations.py | arlo-lib/ARLO | 159669884044686e36e07bd1cc0948884ed7cc8d | [
"MIT"
] | null | null | null | experiments/Scripts for creating plots/sac_performance_over_generations.py | arlo-lib/ARLO | 159669884044686e36e07bd1cc0948884ed7cc8d | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
x=np.arange(50)
y=np.array([-59.00138158129509,
-43.966695525591895,
-52.5277642686108,
-32.1793153104166,
-37.81484603001339,
-24.97787027415733,
-20.170115700140766,
-19.194577812051865,
-24.267556747544734,
-18.56846706310683,
-24.168507205879642,
-21.613453728913854,
-19.833679338413056,
-16.78310378266553,
-15.692655896866523,
-15.496178593312704,
-15.23787215267857,
-14.754095951096263,
-12.79724037524585,
-11.496812508420765,
-11.593305322673082,
-12.144980726639616,
-11.889169042516812,
-10.983010599192548,
-10.751331950717917,
-10.887445777009278,
-10.94197566653676,
-10.983575687515879,
-10.315668585661115,
-10.200188159394665,
-10.2623815297516,
-9.98878690162022,
-9.664489111145294,
-9.798550374351311,
-9.66769644336881,
-9.114549499466483,
-9.259332831572362,
-9.175694376996443,
-9.415038345909062,
-9.50191440403006,
-9.36517394141991,
-9.244892043097575,
-9.220243263930586,
-9.160062939634974,
-9.293750423507198,
-9.189954421974406,
-9.125946744761388,
-9.182482014624696,
-9.135265034880312,
-9.35027383852138])
plt.plot()
plt.plot(x,y) | 33.152542 | 37 | 0.463701 |
f7327283137a8089ad6e5e8c1b25b9b0b020b6c1 | 3,502 | py | Python | salt/states/mdadm.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | 1 | 2018-02-03T17:30:56.000Z | 2018-02-03T17:30:56.000Z | salt/states/mdadm.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | null | null | null | salt/states/mdadm.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Managing software RAID with mdadm
==================================
A state module for creating or destroying software RAID devices.
.. code-block:: yaml
/dev/md0:
raid.present:
- opts: level=1 chunk=256 raid-devices=2 /dev/xvdd /dev/xvde
'''
# Import python libs
import logging
# Import salt libs
import salt.utils
# Set up logger
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'raid'
def __virtual__():
'''
mdadm provides raid functions for Linux
'''
if __grains__['kernel'] != 'Linux':
return False
if not salt.utils.which('mdadm'):
return False
return __virtualname__
def present(name, opts=None):
'''
Verify that the raid is present
name
The name of raid device to be created
opts
The mdadm options to use to create the raid. See
:mod:`mdadm <salt.modules.mdadm>` for more information.
Opts can be expressed as a single string of options.
.. code-block:: yaml
/dev/md0:
raid.present:
- opts: level=1 chunk=256 raid-devices=2 /dev/xvdd /dev/xvde
Or as a list of options.
.. code-block:: yaml
/dev/md0:
raid.present:
- opts:
- level=1
- chunk=256
- raid-devices=2
- /dev/xvdd
- /dev/xvde
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
args = [name]
if isinstance(opts, str):
opts = opts.split()
args.extend(opts)
# Device exists
raids = __salt__['raid.list']()
if raids.get(name):
ret['comment'] = 'Raid {0} already present'.format(name)
return ret
# If running with test use the test_mode with create
if __opts__['test']:
args.extend(['test_mode=True'])
res = __salt__['raid.create'](*args)
ret['comment'] = 'Raid will be created with: {0}'.format(res)
ret['result'] = None
return ret
# Attempt to create the array
__salt__['raid.create'](*args)
raids = __salt__['raid.list']()
changes = raids.get(name)
if changes:
ret['comment'] = 'Raid {0} created.'.format(name)
ret['changes'] = changes
# Saving config
__salt__['raid.save_config']()
else:
ret['comment'] = 'Raid {0} failed to be created.'.format(name)
ret['result'] = False
return ret
def absent(name):
'''
Verify that the raid is absent
name
The name of raid device to be destroyed
.. code-block:: yaml
/dev/md0:
raid:
- absent
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
# Raid does not exist
if name not in __salt__['raid.list']():
ret['comment'] = 'Raid {0} already absent'.format(name)
return ret
elif __opts__['test']:
ret['comment'] = 'Raid {0} is set to be destroyed'.format(name)
ret['result'] = None
return ret
else:
# Attempt to destroy raid
ret['result'] = __salt__['raid.destroy'](name)
if ret['result']:
ret['comment'] = 'Raid {0} has been destroyed'.format(name)
else:
ret['comment'] = 'Raid {0} failed to be destroyed'.format(name)
return ret
| 23.823129 | 76 | 0.540548 |
f732974670da14d5adf61de60ba71cb11ddc2b88 | 1,814 | py | Python | pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py | JinYAnGHe/openvino_training_extensions | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | [
"Apache-2.0"
] | null | null | null | pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py | JinYAnGHe/openvino_training_extensions | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | [
"Apache-2.0"
] | null | null | null | pytorch_toolkit/face_recognition/model/blocks/mobilenet_v2_blocks.py | JinYAnGHe/openvino_training_extensions | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch.nn as nn
from model.blocks.shared_blocks import SELayer
class InvertedResidual(nn.Module):
"""Implementation of the modified Inverted residual block"""
def __init__(self, in_channels, out_channels, stride, expand_ratio, outp_size=None):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
self.use_res_connect = self.stride == 1 and in_channels == out_channels
self.inv_block = nn.Sequential(
nn.Conv2d(in_channels, in_channels * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(in_channels * expand_ratio),
nn.PReLU(),
nn.Conv2d(in_channels * expand_ratio, in_channels * expand_ratio, 3, stride, 1,
groups=in_channels * expand_ratio, bias=False),
nn.BatchNorm2d(in_channels * expand_ratio),
nn.PReLU(),
nn.Conv2d(in_channels * expand_ratio, out_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(out_channels),
# SELayer(out_channels, 8, nn.PReLU, outp_size)
)
def forward(self, x):
if self.use_res_connect:
return x + self.inv_block(x)
return self.inv_block(x)
| 37.791667 | 91 | 0.677508 |
f732a1266d5c853c67006f483f36e4ebce514789 | 3,923 | py | Python | tests/test_nidmm.py | jonathanmendez/nitsm-python | c7bbe2e53d56cf987d2369336d32b8baf6ae806a | [
"MIT"
] | 4 | 2021-08-21T06:21:45.000Z | 2021-12-27T05:27:43.000Z | tests/test_nidmm.py | jonathanmendez/nitsm-python | c7bbe2e53d56cf987d2369336d32b8baf6ae806a | [
"MIT"
] | 51 | 2021-07-28T14:48:04.000Z | 2022-03-25T02:35:40.000Z | tests/test_nidmm.py | jonathanmendez/nitsm-python | c7bbe2e53d56cf987d2369336d32b8baf6ae806a | [
"MIT"
] | 2 | 2021-06-23T19:53:17.000Z | 2022-03-27T20:10:27.000Z | import nidmm
import pytest
from nitsm.codemoduleapi import SemiconductorModuleContext
from nitsm.pinquerycontexts import PinQueryContext
@pytest.fixture
def simulated_nidmm_sessions(standalone_tsm_context):
instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
sessions = [
nidmm.Session(instrument_name, options={"Simulate": True})
for instrument_name in instrument_names
]
for instrument_name, session in zip(instrument_names, sessions):
standalone_tsm_context.set_nidmm_session(instrument_name, session)
yield sessions
for session in sessions:
session.close()
@pytest.mark.pin_map("nidmm.pinmap")
class TestNIDMM:
pin_map_instruments = ["DMM1", "DMM2", "DMM3"]
pin_map_dut_pins = ["DUTPin1"]
pin_map_system_pins = ["SystemPin1"]
def test_get_all_nidmm_instrument_names(
self, standalone_tsm_context: SemiconductorModuleContext
):
instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
assert isinstance(instrument_names, tuple)
assert len(instrument_names) == len(self.pin_map_instruments)
for instrument_name in instrument_names:
assert isinstance(instrument_name, str)
assert instrument_name in self.pin_map_instruments
def test_set_nidmm_session(self, standalone_tsm_context: SemiconductorModuleContext):
instrument_names = standalone_tsm_context.get_all_nidmm_instrument_names()
for instrument_name in instrument_names:
with nidmm.Session(instrument_name, options={"Simulate": True}) as session:
standalone_tsm_context.set_nidmm_session(instrument_name, session)
assert SemiconductorModuleContext._sessions[id(session)] is session
def test_get_all_nidmm_sessions(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
):
queried_sessions = standalone_tsm_context.get_all_nidmm_sessions()
assert isinstance(queried_sessions, tuple)
assert len(queried_sessions) == len(simulated_nidmm_sessions)
for queried_session in queried_sessions:
assert isinstance(queried_session, nidmm.Session)
assert queried_session in simulated_nidmm_sessions
def test_pin_to_nidmm_session(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
):
pin_query_context, queried_session = standalone_tsm_context.pin_to_nidmm_session(
"SystemPin1"
)
assert isinstance(pin_query_context, PinQueryContext)
assert isinstance(queried_session, nidmm.Session)
assert queried_session in simulated_nidmm_sessions
def test_pins_to_nidmm_sessions_single_pin(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
):
pin_query_context, queried_sessions = standalone_tsm_context.pins_to_nidmm_sessions(
"PinGroup1"
)
assert isinstance(pin_query_context, PinQueryContext)
assert isinstance(queried_sessions, tuple)
for queried_session in queried_sessions:
assert isinstance(queried_session, nidmm.Session)
assert queried_session in simulated_nidmm_sessions
def test_pins_to_nidmm_sessions_multiple_pins(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_nidmm_sessions
):
all_pins = self.pin_map_dut_pins + self.pin_map_system_pins
pin_query_context, queried_sessions = standalone_tsm_context.pins_to_nidmm_sessions(
all_pins
)
assert isinstance(pin_query_context, PinQueryContext)
assert isinstance(queried_sessions, tuple)
for queried_session in queried_sessions:
assert isinstance(queried_session, nidmm.Session)
assert queried_session in simulated_nidmm_sessions
| 44.579545 | 92 | 0.748917 |
f7331f8ce198dccf836db66c2e0a80f3f5329d05 | 1,053 | py | Python | leetcode/48_Rotate_Image.py | PhillipLeeHub/algorithm-and-data-structure | c0c27fee1b4fd634084da0b41395a26307d76e69 | [
"MIT"
] | 1 | 2020-05-01T21:29:17.000Z | 2020-05-01T21:29:17.000Z | leetcode/48_Rotate_Image.py | PhillipLeeHub/algorithm-and-data-structure | c0c27fee1b4fd634084da0b41395a26307d76e69 | [
"MIT"
] | null | null | null | leetcode/48_Rotate_Image.py | PhillipLeeHub/algorithm-and-data-structure | c0c27fee1b4fd634084da0b41395a26307d76e69 | [
"MIT"
] | 1 | 2020-06-12T23:32:14.000Z | 2020-06-12T23:32:14.000Z | '''
48. Rotate Image Medium
You are given an n x n 2D matrix representing an image, rotate the image by 90 degrees (clockwise).
You have to rotate the image in-place, which means you have to modify the input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
'''
from typing import List
class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
self.transpose(matrix)
self.reflex(matrix)
def transpose(self, matrix):
# Since matrix size nxn
m_len = len(matrix)
for r in range(m_len):
for c in range(r, m_len):
matrix[c][r], matrix[r][c] = matrix[r][c], matrix[c][r]
    # Reflect the matrix about its middle vertical axis (mirror each row)
def reflex(self, matrix):
for r in range(len(matrix)):
for c in range(len(matrix)//2):
matrix[r][c], matrix[r][len(matrix)-1-c] = matrix[r][len(matrix)-1-c], matrix[r][c]
| 35.1 | 154 | 0.578348 |
f7332037c48c1a294de655fb80ab1613b7eb5f5e | 4,220 | py | Python | program_synthesis/algolisp/dataset/evaluation.py | kavigupta/program_synthesis | 0b04b1d3b63954ba3d404a8d96c4da18667a1b02 | [
"Apache-2.0"
] | 123 | 2018-06-09T00:49:39.000Z | 2022-03-09T05:41:20.000Z | program_synthesis/algolisp/dataset/evaluation.py | kavigupta/program_synthesis | 0b04b1d3b63954ba3d404a8d96c4da18667a1b02 | [
"Apache-2.0"
] | 9 | 2018-06-12T01:01:17.000Z | 2022-03-18T09:06:39.000Z | program_synthesis/algolisp/dataset/evaluation.py | kavigupta/program_synthesis | 0b04b1d3b63954ba3d404a8d96c4da18667a1b02 | [
"Apache-2.0"
] | 24 | 2018-06-09T00:42:46.000Z | 2021-09-29T08:23:32.000Z | import numpy as np
from program_synthesis.algolisp.tools import bleu
from program_synthesis.algolisp.dataset import executor
def is_same_code(example, res):
correct = False
if hasattr(res, 'code_sequence'):
if res.code_sequence is not None:
correct = res.code_sequence == example.code_sequence
elif res.code_tree is not None:
correct = res.code_tree == example.code_tree
else:
correct = res == example.code_sequence
return correct
def compute_bleu(example, res):
try:
if hasattr(res, 'code_sequence'):
if res.code_sequence is not None:
score = bleu.compute_bleu([example.code_sequence], [res.code_sequence])
else:
score = bleu.compute_bleu([example.code_sequence], [res])
return np.asscalar(score)
except ZeroDivisionError:
return 0.0
def get_stats_from_code(args):
res, example, executor_ = args
if len(example.tests) == 0:
return None
if executor_ is not None:
stats = executor.evaluate_code(
res.code_tree if res.code_tree else res.code_sequence, example.schema.args, example.tests,
executor_)
stats['exact-code-match'] = is_same_code(example, res)
stats['correct-program'] = int(stats['tests-executed'] == stats['tests-passed'])
else:
stats = {'tests-executed': 0, 'tests-passed': 0, 'result-none': 0, 'syntax-error': 0,
'runtime-exception': 0, 'exceptions': []}
stats['correct-program'] = stats['exact-code-match'] = is_same_code(example, res)
stats['bleu'] = compute_bleu(example, res)
stats['example'] = example.to_dict()
stats['res'] = res.to_dict() if hasattr(res, 'to_dict') else res
return stats
def run_inference(dataset, model, executor_):
"""Runs inference of given model on eval set, and executes resulting code.
Args:
dataset: Dataset, iterable of CodeExample to evaluate on.
model: Model that runs the inference.
executor: executor class from executor.py.
"""
for batch in dataset:
results = model.inference(batch)
for stats in model.worker_pool.imap(get_stats_from_code, zip(results, batch, [executor_]*len(batch))):
if stats is not None:
yield stats
return
def compute_metrics(all_stats):
tests_num = 0
programs_num = 0
bleu_acc = 0.0
correct_program_acc = 0
    # Almost correct programs are those that were executed on more than one test and passed at least 50% of their tests.
almost_correct_program_acc = 0
exact_code_match_acc = 0
syntax_error_acc = 0
runtime_exception_acc = 0
other_exception_acc = 0
for stats in all_stats:
tests_num += stats['tests-executed']
programs_num += 1
bleu_acc += stats['bleu']
correct_program_acc += stats['correct-program']
if (stats['correct-program'] != 0 or
stats['tests-executed'] > 1 and stats['tests-passed']/stats['tests-executed'] >= 0.5):
almost_correct_program_acc += 1
exact_code_match_acc += stats['exact-code-match']
syntax_error_acc += stats['syntax-error']
runtime_exception_acc += stats['runtime-exception']
other_exception_acc += len(stats['exceptions'])
return {'bleu': (bleu_acc/programs_num) if programs_num else 0.0,
'accuracy': (correct_program_acc/programs_num) if programs_num else 0.0,
'50p_accuracy': (almost_correct_program_acc/programs_num) if programs_num else 0.0,
'exact_match_accuracy': (exact_code_match_acc/programs_num) if programs_num else 0.0,
'syntax_error_freq': (syntax_error_acc/tests_num) if tests_num else 0.0,
'runtime_exception_freq': (runtime_exception_acc/tests_num) if tests_num else 0.0,
'other_exception_freq': (other_exception_acc/tests_num) if tests_num else 0.0,
'programs_num': programs_num,
'tests_num': tests_num,
'correct_program_num': correct_program_acc,
'almost_correct_program_num': almost_correct_program_acc,
'exact_code_match_num': exact_code_match_acc,
}
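# Illustrative sketch using made-up per-example stats (the dict shape mirrors what
# get_stats_from_code produces); real runs would pass the stats yielded by run_inference.
if __name__ == "__main__":
    example_stats = [
        {'tests-executed': 3, 'tests-passed': 3, 'bleu': 1.0, 'correct-program': 1,
         'exact-code-match': 1, 'syntax-error': 0, 'runtime-exception': 0, 'exceptions': []},
        {'tests-executed': 3, 'tests-passed': 1, 'bleu': 0.4, 'correct-program': 0,
         'exact-code-match': 0, 'syntax-error': 0, 'runtime-exception': 1, 'exceptions': []},
    ]
    # Expect accuracy 0.5 and 50p_accuracy 0.5: the second program passes only 1 of 3 tests.
    print(compute_metrics(example_stats))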
| 40.576923 | 111 | 0.657346 |
f73331d5f5cbfc3043d4165144e1118dd13cb4da | 601 | py | Python | app/main/forms.py | theposter/food-server | d6a1a9e1300d35ff4642463f0a73074b1440c648 | [
"MIT"
] | null | null | null | app/main/forms.py | theposter/food-server | d6a1a9e1300d35ff4642463f0a73074b1440c648 | [
"MIT"
] | null | null | null | app/main/forms.py | theposter/food-server | d6a1a9e1300d35ff4642463f0a73074b1440c648 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, Length, ValidationError
class SearchForm(FlaskForm):
search_query = StringField('What cuisine are you in the mood for today?', validators=[DataRequired(),
Length(3, 20, "Must be longer than 3 characters and under 20")],
render_kw=({'placeholder': "Enter a cuisine (e.g. Nepali, Thai, etc)"}))
submit = SubmitField('Search')
class SurpriseForm(FlaskForm):
surprise_me_button = SubmitField("Surprise Me") | 50.083333 | 106 | 0.685524 |
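# Minimal illustrative check (assumes Flask is installed; the query value is arbitrary).
# CSRF is disabled here purely so the form can be validated inside a bare test request.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    app.config['WTF_CSRF_ENABLED'] = False
    with app.test_request_context('/', method='POST', data={'search_query': 'Nepali'}):
        form = SearchForm()
        print(form.validate())  # True: 'Nepali' satisfies DataRequired and Length(3, 20)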
f733650de6530fb57a4ba3608b453116c353d1ef | 13,365 | py | Python | BlockServer/synoptic/synoptic_manager.py | ISISComputingGroup/EPICS-inst_servers | 056fed778ebd1190421e06b9ac9c8a0bdae0d317 | [
"BSD-3-Clause"
] | 1 | 2020-08-20T23:38:53.000Z | 2020-08-20T23:38:53.000Z | BlockServer/synoptic/synoptic_manager.py | ISISComputingGroup/EPICS-inst_servers | 056fed778ebd1190421e06b9ac9c8a0bdae0d317 | [
"BSD-3-Clause"
] | 88 | 2015-09-03T11:50:41.000Z | 2021-02-18T19:13:04.000Z | BlockServer/synoptic/synoptic_manager.py | ISISComputingGroup/EPICS-inst_servers | 056fed778ebd1190421e06b9ac9c8a0bdae0d317 | [
"BSD-3-Clause"
] | 1 | 2020-08-20T23:38:05.000Z | 2020-08-20T23:38:05.000Z | # This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
import os
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from block_server import BlockServer
from BlockServer.core.active_config_holder import ActiveConfigHolder
from BlockServer.core.config_list_manager import InvalidDeleteException
from BlockServer.core.file_path_manager import FILEPATH_MANAGER
from BlockServer.core.on_the_fly_pv_interface import OnTheFlyPvInterface
from BlockServer.fileIO.schema_checker import ConfigurationSchemaChecker
from lxml import etree
from server_common.common_exceptions import MaxAttemptsExceededException
from server_common.utilities import print_and_log, compress_and_hex, create_pv_name, \
convert_to_json, convert_from_json
from BlockServer.synoptic.synoptic_file_io import SynopticFileIO
# Synoptics PVs are of the form IN:DEMO:SYNOPTICS:XXXXX (no BLOCKSERVER in the name)
# This is to allow longer synoptic names without exceeded the maximum allowed length for PVs
SYNOPTIC_PRE = "SYNOPTICS:"
SYNOPTIC_GET = ":GET"
SYNOPTIC_SET = ":SET"
SYNOPTIC_NAMES = "NAMES"
SYNOPTIC_GET_DEFAULT = "GET_DEFAULT"
SYNOPTIC_BLANK = "__BLANK__"
SYNOPTIC_SET_DETAILS = "SET_DETAILS"
SYNOPTIC_DELETE = "DELETE"
SYNOPTIC_SCHEMA = "SCHEMA"
SYNOPTIC_SCHEMA_FILE = "synoptic.xsd"
class SynopticManager(OnTheFlyPvInterface):
"""Class for managing the PVs associated with synoptics"""
def __init__(self, block_server: 'BlockServer', schema_folder: str, active_configholder: ActiveConfigHolder, file_io: SynopticFileIO = SynopticFileIO()):
"""Constructor.
Args:
block_server: A reference to the BlockServer instance
schema_folder: The filepath for the synoptic schema
active_configholder: A reference to the active configuration
file_io: Responsible for file IO
"""
super(SynopticManager, self).__init__()
self.pvs_to_write.extend([SYNOPTIC_PRE + SYNOPTIC_DELETE, SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS])
self._directory = FILEPATH_MANAGER.synoptic_dir
self._schema_folder = schema_folder
self._synoptic_pvs = dict()
self._bs = block_server
self._activech = active_configholder
self._file_io = file_io
self._default_syn_xml = b""
self._create_standard_pvs()
self._load_initial()
def handle_pv_write(self, pv: str, data: str):
try:
if pv == SYNOPTIC_PRE + SYNOPTIC_DELETE:
self.delete(convert_from_json(data))
self.update_monitors()
elif pv == SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS:
self.save_synoptic_xml(bytes(data, encoding="utf-8"))
self.update_monitors()
except IOError as err:
print_and_log(f"Error accessing synoptic file: {err}", "MAJOR")
except Exception as err:
print_and_log(f"Error writing to PV {pv}: {err}", "MAJOR")
def handle_pv_read(self, pv):
# Nothing to do as it is all handled by monitors
pass
def update_monitors(self):
with self._bs.monitor_lock:
print_and_log("Updating synoptic monitors")
self._bs.setParam(SYNOPTIC_PRE + SYNOPTIC_GET_DEFAULT, compress_and_hex(str(self.get_default_synoptic_xml(), encoding="utf-8")))
names = convert_to_json(self.get_synoptic_list())
self._bs.setParam(SYNOPTIC_PRE + SYNOPTIC_NAMES, compress_and_hex(names))
self._bs.updatePVs()
print_and_log("Finished updating synoptic monitors")
def on_config_change(self, full_init=False):
# If the config has a default synoptic then set the PV to that
default = self._activech.get_config_meta().synoptic
self.set_default_synoptic(default)
self.update_monitors()
def _create_standard_pvs(self):
self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_NAMES, 16000)
self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_GET_DEFAULT, 16000)
self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_BLANK + SYNOPTIC_GET, 16000)
self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS, 16000)
self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_DELETE, 16000)
self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_SCHEMA, 16000)
# Set values for PVs that don't change
self.update_pv_value(SYNOPTIC_PRE + SYNOPTIC_BLANK + SYNOPTIC_GET,
compress_and_hex(self.get_blank_synoptic()))
self.update_pv_value(SYNOPTIC_PRE + SYNOPTIC_SCHEMA, compress_and_hex(self.get_synoptic_schema()))
def _load_initial(self):
"""Create the PVs for all the synoptics found in the synoptics directory."""
for f in self._file_io.get_list_synoptic_files(self._directory):
# Load the data, checking the schema
try:
data = self._file_io.read_synoptic_file(self._directory, f)
ConfigurationSchemaChecker.check_xml_matches_schema(
os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), data, "Synoptic")
# Get the synoptic name
self._create_pv(data)
except MaxAttemptsExceededException:
print_and_log(f"Could not open synoptic file {f}. Please check the file is "
f"not in use by another process.", "MAJOR")
except Exception as err:
print_and_log(f"Error creating synoptic PV: {err}", "MAJOR")
def _create_pv(self, data: bytes):
"""Creates a single PV based on a name and data. Adds this PV to the dictionary returned on get_synoptic_list
Args:
data (bytes): Starting data for the pv, the pv name is derived from the name tag of this
"""
name = self._get_synoptic_name_from_xml(data)
if name not in self._synoptic_pvs:
# Extra check, if a non-case sensitive match exist remove it
for key in self._synoptic_pvs.keys():
if name.lower() == key.lower():
self._synoptic_pvs.pop(key)
pv = create_pv_name(name, list(self._synoptic_pvs.values()), "SYNOPTIC")
self._synoptic_pvs[name] = pv
# Create the PV
self._bs.add_string_pv_to_db(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, 16000)
# Update the value
self.update_pv_value(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, compress_and_hex(str(data, encoding="utf-8")))
def update_pv_value(self, name, data):
""" Updates value of a PV holding synoptic information with new data
Args:
name (string): The name of the edited synoptic
data (bytes): The new synoptic data
"""
self._bs.setParam(name, data)
self._bs.updatePVs()
def get_synoptic_list(self):
"""Gets the names and associated pvs of the synoptic files in the synoptics directory.
Returns:
list : Alphabetical list of synoptics files on the server, along with their associated pvs
"""
syn_list = []
default_is_none_synoptic = True
for k, v in self._synoptic_pvs.items():
is_default = False
if bytes(f"<name>{k}</name>", encoding="utf-8") in self._default_syn_xml:
default_is_none_synoptic = False
is_default = True
syn_list.append({"name": k, "pv": v, "is_default": is_default})
ans = sorted(syn_list, key=lambda x: x['name'].lower())
# Insert the "blank" synoptic
ans.insert(0, {"pv": "__BLANK__", "name": "-- NONE --", "is_default": default_is_none_synoptic})
return ans
def set_default_synoptic(self, name):
"""Sets the default synoptic.
Args:
name (string): the name of the synoptic to load
"""
fullname = name + ".xml"
f = self._file_io.get_list_synoptic_files(self._directory)
if fullname in f:
# Load the data
try:
data = self._file_io.read_synoptic_file(self._directory, fullname)
self._default_syn_xml = data
except MaxAttemptsExceededException:
print_and_log(f"Could not open synoptic file {fullname}. Please check the file is not "
f"in use by another process.", "MAJOR")
self._default_syn_xml = b""
else:
# No synoptic
self._default_syn_xml = b""
def get_default_synoptic_xml(self) -> bytes:
"""Gets the XML for the default synoptic.
Returns:
bytes : The XML for the synoptic
"""
return self._default_syn_xml
def _get_synoptic_name_from_xml(self, xml_data: bytes):
name = None
root = etree.fromstring(xml_data)
for child in root:
if child.tag.split('}', 1)[1] == "name":
name = child.text
if name is None:
raise Exception("Synoptic contains no name tag")
return name
def save_synoptic_xml(self, xml_data: bytes):
"""Saves the xml under the filename taken from the xml name tag.
Args:
xml_data (bytes): The XML to be saved
"""
try:
# Check against schema
ConfigurationSchemaChecker.check_xml_matches_schema(os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE),
xml_data, "Synoptic")
# Update PVs
self._create_pv(xml_data)
except Exception as err:
print_and_log(err)
raise
name = self._get_synoptic_name_from_xml(xml_data)
save_path = FILEPATH_MANAGER.get_synoptic_path(name)
try:
self._file_io.write_synoptic_file(name, save_path, xml_data)
except MaxAttemptsExceededException:
raise IOError(f"Could not save to synoptic file at {save_path}. Please check the file is "
f"not in use by another process.")
print_and_log("Synoptic saved: " + name)
def delete(self, delete_list: List[str]):
"""Takes a list of synoptics and removes them from the file system and any relevant PVs.
Args:
delete_list (list): The synoptics to delete
"""
print_and_log("Deleting: " + ', '.join(list(delete_list)), "INFO")
delete_list = set(delete_list)
if not delete_list.issubset(self._synoptic_pvs.keys()):
raise InvalidDeleteException("Delete list contains unknown configurations")
for synoptic in delete_list:
self._delete_synoptic(synoptic)
def _delete_synoptic(self, synoptic: str):
fullname = synoptic + ".xml"
try:
self._file_io.delete_synoptic(self._directory, fullname)
except MaxAttemptsExceededException:
print_and_log(f"Could not delete synoptic file {fullname}. Please check the file is "
f"not in use by another process.", "MINOR")
return
self._bs.delete_pv_from_db(SYNOPTIC_PRE + self._synoptic_pvs[synoptic] + SYNOPTIC_GET)
del self._synoptic_pvs[synoptic]
def update(self, xml_data: str):
"""Updates the synoptic list when modifications are made via the filesystem.
Args:
xml_data (str): The xml data to update the PV with
"""
# Convert to bytes
bytes_xml_data = bytes(xml_data, encoding="utf-8")
name = self._get_synoptic_name_from_xml(bytes_xml_data)
names = self._synoptic_pvs.keys()
if name in names:
self.update_pv_value(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, compress_and_hex(xml_data))
else:
self._create_pv(bytes_xml_data)
self.update_monitors()
def get_synoptic_schema(self):
"""Gets the XSD data for the synoptic.
Returns:
string : The XML for the synoptic schema
"""
schema = ""
with open(os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), 'r') as schemafile:
schema = schemafile.read()
return schema
def get_blank_synoptic(self):
"""Gets a blank synoptic.
Returns:
string : The XML for the blank synoptic
"""
return """<?xml version="1.0" ?><instrument xmlns="http://www.isis.stfc.ac.uk//instrument">
<name>-- NONE --</name><components/></instrument>"""
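# Illustrative sketch of the name extraction performed by _get_synoptic_name_from_xml:
# synoptic XML is namespaced, so element tags arrive as "{namespace}name" and are split on '}'.
# The XML below is an example document, not a real instrument synoptic.
if __name__ == "__main__":
    example_xml = (b'<instrument xmlns="http://www.isis.stfc.ac.uk//instrument">'
                   b'<name>Example synoptic</name><components/></instrument>')
    root = etree.fromstring(example_xml)
    print([child.text for child in root if child.tag.split('}', 1)[1] == "name"])
    # ['Example synoptic']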
| 43.676471 | 157 | 0.655518 |
f73374d5193383680e6567226fe55556e50468a7 | 7,809 | py | Python | parsl/utils.py | aquanauts/parsl | 978bb483a4a41b3cef083aa242b2a78614a02dd0 | [
"Apache-2.0"
] | null | null | null | parsl/utils.py | aquanauts/parsl | 978bb483a4a41b3cef083aa242b2a78614a02dd0 | [
"Apache-2.0"
] | null | null | null | parsl/utils.py | aquanauts/parsl | 978bb483a4a41b3cef083aa242b2a78614a02dd0 | [
"Apache-2.0"
] | null | null | null | import inspect
import logging
import os
import shlex
import subprocess
import time
import typeguard
from contextlib import contextmanager
from typing import List
import parsl
from parsl.version import VERSION
logger = logging.getLogger(__name__)
@typeguard.typechecked
def get_version() -> str:
version = parsl.__version__
work_tree = os.path.dirname(os.path.dirname(__file__))
git_dir = os.path.join(work_tree, '.git')
if os.path.exists(git_dir):
env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
try:
cmd = shlex.split('git rev-parse --short HEAD')
head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
status = 'dirty' if diff else 'clean'
version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
except Exception:
pass
return version
@typeguard.typechecked
def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:
"""Finds the checkpoints from all last runs.
Note that checkpoints are incremental, and this helper will not find
previous checkpoints from earlier than the most recent run. It probably
should be made to do so.
Kwargs:
- rundir(str) : Path to the runinfo directory
Returns:
- a list suitable for the checkpointFiles parameter of the DataFlowKernel
constructor
"""
    if not os.path.isdir(rundir):
return []
dirs = sorted(os.listdir(rundir))
checkpoints = []
for runid in dirs:
checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
if os.path.isdir(checkpoint):
checkpoints.append(checkpoint)
return checkpoints
@typeguard.typechecked
def get_last_checkpoint(rundir: str = "runinfo") -> List[str]:
"""Finds the checkpoint from the last run, if one exists.
Note that checkpoints are incremental, and this helper will not find
previous checkpoints from earlier than the most recent run. It probably
should be made to do so.
Kwargs:
- rundir(str) : Path to the runinfo directory
Returns:
- a list suitable for the checkpointFiles parameter of the DataFlowKernel
constructor, with 0 or 1 elements
"""
if not os.path.isdir(rundir):
return []
dirs = sorted(os.listdir(rundir))
if len(dirs) == 0:
return []
last_runid = dirs[-1]
last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))
    if not os.path.isdir(last_checkpoint):
return []
return [last_checkpoint]
def get_std_fname_mode(fdname, stdfspec):
import parsl.app.errors as pe
if stdfspec is None:
return None, None
elif isinstance(stdfspec, str):
fname = stdfspec
mode = 'a+'
elif isinstance(stdfspec, tuple):
if len(stdfspec) != 2:
raise pe.BadStdStreamFile("std descriptor %s has incorrect tuple length %s" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))
fname, mode = stdfspec
if not isinstance(fname, str) or not isinstance(mode, str):
raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
else:
raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
return fname, mode
@contextmanager
def wait_for_file(path, seconds=10):
for i in range(0, int(seconds * 100)):
time.sleep(seconds / 100.)
if os.path.exists(path):
break
yield
@contextmanager
def time_limited_open(path, mode, seconds=1):
with wait_for_file(path, seconds):
logger.debug("wait_for_file yielded")
f = open(path, mode)
yield f
f.close()
def wtime_to_minutes(time_string):
''' wtime_to_minutes
Convert standard wallclock time string to minutes.
Args:
        - time_string (str): wallclock time in HH:MM:SS format
Returns:
(int) minutes
'''
hours, mins, seconds = time_string.split(':')
total_mins = int(hours) * 60 + int(mins)
if total_mins < 1:
logger.warning("Time string '{}' parsed to {} minutes, less than 1".format(time_string, total_mins))
return total_mins
class RepresentationMixin(object):
"""A mixin class for adding a __repr__ method.
The __repr__ method will return a string equivalent to the code used to instantiate
the child class, with any defaults included explicitly. The __max_width__ class variable
controls the maximum width of the representation string. If this width is exceeded,
the representation string will be split up, with one argument or keyword argument per line.
Any arguments or keyword arguments in the constructor must be defined as attributes, or
an AttributeError will be raised.
Examples
--------
>>> from parsl.utils import RepresentationMixin
>>> class Foo(RepresentationMixin):
def __init__(self, first, second, third='three', fourth='fourth'):
self.first = first
self.second = second
self.third = third
self.fourth = fourth
>>> bar = Foo(1, 'two', fourth='baz')
>>> bar
Foo(1, 'two', third='three', fourth='baz')
"""
__max_width__ = 80
def __repr__(self):
init = self.__init__
# This test looks for a single layer of wrapping performed by
# functools.update_wrapper, commonly used in decorators. This will
# allow RepresentationMixin to see through a single such decorator
# applied to the __init__ method of a class, and find the underlying
# arguments. It will not see through multiple layers of such
# decorators, or cope with other decorators which do not use
# functools.update_wrapper.
if hasattr(init, '__wrapped__'):
init = init.__wrapped__
argspec = inspect.getfullargspec(init)
if len(argspec.args) > 1 and argspec.defaults is not None:
defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
else:
defaults = {}
for arg in argspec.args[1:]:
if not hasattr(self, arg):
template = 'class {} uses {} in the constructor, but does not define it as an attribute'
raise AttributeError(template.format(self.__class__.__name__, arg))
if len(defaults) != 0:
args = [getattr(self, a) for a in argspec.args[1:-len(defaults)]]
else:
args = [getattr(self, a) for a in argspec.args[1:]]
kwargs = {key: getattr(self, key) for key in defaults}
def assemble_multiline(args, kwargs):
def indent(text):
lines = text.splitlines()
if len(lines) <= 1:
return text
return "\n".join(" " + l for l in lines).strip()
args = ["\n {},".format(indent(repr(a))) for a in args]
kwargs = ["\n {}={}".format(k, indent(repr(v)))
for k, v in sorted(kwargs.items())]
info = "".join(args) + ", ".join(kwargs)
return self.__class__.__name__ + "({}\n)".format(info)
def assemble_line(args, kwargs):
kwargs = ['{}={}'.format(k, repr(v)) for k, v in sorted(kwargs.items())]
info = ", ".join([repr(a) for a in args] + kwargs)
return self.__class__.__name__ + "({})".format(info)
if len(assemble_line(args, kwargs)) <= self.__class__.__max_width__:
return assemble_line(args, kwargs)
else:
return assemble_multiline(args, kwargs)
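# Illustrative sketch (Demo is a hypothetical class, not part of parsl): RepresentationMixin
# rebuilds the constructor call with defaults made explicit, and wtime_to_minutes converts
# an HH:MM:SS wallclock string to whole minutes (seconds are ignored).
if __name__ == "__main__":
    class Demo(RepresentationMixin):
        def __init__(self, first, second='two'):
            self.first = first
            self.second = second
    print(repr(Demo(1)))                 # Demo(1, second='two')
    print(wtime_to_minutes('01:30:00'))  # 90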
| 32.810924 | 145 | 0.631579 |
f7338f6dd3f181e895e19eec66ca21d59cbbdafa | 14,786 | py | Python | Source/JavaScriptCore/inspector/scripts/codegen/cpp_generator.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 6 | 2021-07-05T16:09:39.000Z | 2022-03-06T22:44:42.000Z | Source/JavaScriptCore/inspector/scripts/codegen/cpp_generator.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 7 | 2022-03-15T13:25:39.000Z | 2022-03-15T13:25:44.000Z | Source/JavaScriptCore/inspector/scripts/codegen/cpp_generator.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2014-2018 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os.path
import re
try:
from .generator import ucfirst, Generator
from .models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
except ValueError:
from generator import ucfirst, Generator
from models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
log = logging.getLogger('global')
_PRIMITIVE_TO_CPP_NAME_MAP = {
'boolean': 'bool',
'integer': 'int',
'number': 'double',
'string': 'String',
'object': 'JSON::Object',
'array': 'JSON::Array',
'any': 'JSON::Value'
}
class CppGenerator(Generator):
def __init__(self, *args, **kwargs):
Generator.__init__(self, *args, **kwargs)
def protocol_name(self):
return self.model().framework.setting('cpp_protocol_group', '')
def helpers_namespace(self):
return '%sHelpers' % self.protocol_name()
# Miscellaneous text manipulation routines.
@staticmethod
def cpp_getter_method_for_type(_type):
if isinstance(_type, ObjectType):
return 'getObject'
if isinstance(_type, ArrayType):
return 'getArray'
if isinstance(_type, PrimitiveType):
if _type.raw_name() == 'integer':
return 'getInteger'
elif _type.raw_name() == 'number':
return 'getDouble'
elif _type.raw_name() == 'any':
return 'getValue'
else:
return 'get' + ucfirst(_type.raw_name())
if isinstance(_type, AliasedType):
return CppGenerator.cpp_getter_method_for_type(_type.aliased_type)
if isinstance(_type, EnumType):
return CppGenerator.cpp_getter_method_for_type(_type.primitive_type)
@staticmethod
def cpp_setter_method_for_type(_type):
if isinstance(_type, ObjectType):
return 'setObject'
if isinstance(_type, ArrayType):
return 'setArray'
if isinstance(_type, PrimitiveType):
if _type.raw_name() == 'integer':
return 'setInteger'
elif _type.raw_name() == 'number':
return 'setDouble'
elif _type.raw_name() == 'any':
return 'setValue'
else:
return 'set' + ucfirst(_type.raw_name())
if isinstance(_type, AliasedType):
return CppGenerator.cpp_setter_method_for_type(_type.aliased_type)
if isinstance(_type, EnumType):
return CppGenerator.cpp_setter_method_for_type(_type.primitive_type)
# Generate type representations for various situations.
@staticmethod
def cpp_protocol_type_for_type(_type):
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through to enum or primitive.
if isinstance(_type, ObjectType) and len(_type.members) == 0:
return 'JSON::Object'
if isinstance(_type, ArrayType):
if _type.raw_name() is None: # Otherwise, fall through and use typedef'd name.
return 'JSON::ArrayOf<%s>' % CppGenerator.cpp_protocol_type_for_type(_type.element_type)
if isinstance(_type, (ObjectType, EnumType, ArrayType)):
return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if isinstance(_type, PrimitiveType):
return CppGenerator.cpp_name_for_primitive_type(_type)
@staticmethod
def cpp_protocol_type_for_type_member(type_member, object_declaration):
if isinstance(type_member.type, EnumType) and type_member.type.is_anonymous:
return '::'.join([CppGenerator.cpp_protocol_type_for_type(object_declaration.type), ucfirst(type_member.member_name)])
else:
return CppGenerator.cpp_protocol_type_for_type(type_member.type)
@staticmethod
def cpp_type_for_unchecked_formal_in_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through to enum or primitive.
if isinstance(_type, EnumType):
_type = _type.primitive_type # Fall through to primitive.
# This handles the 'any' type and objects with defined properties.
if isinstance(_type, ObjectType) or _type.qualified_name() == 'object':
cpp_name = 'JSON::Object'
if parameter.is_optional:
return 'const %s*' % cpp_name
else:
return 'const %s&' % cpp_name
if isinstance(_type, ArrayType):
cpp_name = 'JSON::Array'
if parameter.is_optional:
return 'const %s*' % cpp_name
else:
return 'const %s&' % cpp_name
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return 'const %s*' % cpp_name
elif _type.raw_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
return "unknown_unchecked_formal_in_parameter_type"
@staticmethod
def cpp_type_for_checked_formal_event_parameter(parameter):
return CppGenerator.cpp_type_for_type_with_name(parameter.type, parameter.parameter_name, parameter.is_optional)
@staticmethod
def cpp_type_for_type_member(member):
return CppGenerator.cpp_type_for_type_with_name(member.type, member.member_name, False)
@staticmethod
def cpp_type_for_type_with_name(_type, type_name, is_optional):
if isinstance(_type, (ArrayType, ObjectType)):
return 'RefPtr<%s>' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, AliasedType):
builder_type = CppGenerator.cpp_protocol_type_for_type(_type)
if is_optional:
return 'const %s*' % builder_type
elif _type.aliased_type.qualified_name() in ['integer', 'number']:
return CppGenerator.cpp_name_for_primitive_type(_type.aliased_type)
elif _type.aliased_type.qualified_name() in ['string']:
return 'const %s&' % builder_type
else:
return builder_type
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if _type.qualified_name() in ['object']:
return 'RefPtr<JSON::Object>'
elif _type.qualified_name() in ['any']:
return 'RefPtr<JSON::Value>'
elif is_optional:
return 'const %s*' % cpp_name
elif _type.qualified_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
enum_type_name = ucfirst(type_name)
else:
enum_type_name = 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if is_optional:
return '%s*' % enum_type_name
else:
return '%s' % enum_type_name
@staticmethod
def cpp_type_for_formal_out_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through.
if isinstance(_type, (ObjectType, ArrayType)):
return 'RefPtr<%s>&' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Optional<%s>&" % cpp_name
else:
return '%s*' % cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
return '%sBackendDispatcherHandler::%s*' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
else:
return 'Inspector::Protocol::%s::%s*' % (_type.type_domain().domain_name, _type.raw_name())
raise ValueError("unknown formal out parameter type.")
# FIXME: this is only slightly different from out parameters; they could be unified.
@staticmethod
def cpp_type_for_formal_async_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through.
if isinstance(_type, (ObjectType, ArrayType)):
return 'RefPtr<%s>&&' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Optional<%s>&" % cpp_name
elif _type.qualified_name() in ['integer', 'number']:
return CppGenerator.cpp_name_for_primitive_type(_type)
elif _type.qualified_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
cpp_name = '%sBackendDispatcherHandler::%s' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
else:
cpp_name = 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if parameter.is_optional:
return "Optional<%s>" % cpp_name
else:
return cpp_name
raise ValueError("Unknown formal async parameter type.")
# In-parameters don't use builder types, because they could be passed
# "open types" that are manually constructed out of InspectorObjects.
# FIXME: Only parameters that are actually open types should need non-builder parameter types.
@staticmethod
def cpp_type_for_stack_in_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through.
if isinstance(_type, EnumType):
_type = _type.primitive_type # Fall through.
if isinstance(_type, ObjectType):
return "RefPtr<JSON::Object>"
if isinstance(_type, ArrayType):
return "RefPtr<JSON::Array>"
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if _type.qualified_name() in ['any', 'object']:
return "RefPtr<%s>" % CppGenerator.cpp_name_for_primitive_type(_type)
elif parameter.is_optional and _type.qualified_name() not in ['boolean', 'string', 'integer', 'number']:
return "Optional<%s>" % cpp_name
else:
return cpp_name
@staticmethod
def cpp_type_for_stack_out_parameter(parameter):
_type = parameter.type
if isinstance(_type, (ArrayType, ObjectType)):
return 'RefPtr<%s>' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, AliasedType):
builder_type = CppGenerator.cpp_protocol_type_for_type(_type)
if parameter.is_optional:
return "Optional<%s>" % builder_type
return '%s' % builder_type
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Optional<%s>" % cpp_name
else:
return cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
return '%sBackendDispatcherHandler::%s' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
else:
return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
@staticmethod
def cpp_assertion_method_for_type_member(type_member, object_declaration):
def assertion_method_for_type(_type):
return 'BindingTraits<%s>::assertValueHasExpectedType' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(type_member.type, AliasedType):
return assertion_method_for_type(type_member.type.aliased_type)
if isinstance(type_member.type, EnumType) and type_member.type.is_anonymous:
return 'BindingTraits<%s>::assertValueHasExpectedType' % CppGenerator.cpp_protocol_type_for_type_member(type_member, object_declaration)
return assertion_method_for_type(type_member.type)
@staticmethod
def cpp_name_for_primitive_type(_type):
return _PRIMITIVE_TO_CPP_NAME_MAP.get(_type.raw_name())
# Decide whether certain helpers are necessary in a situation.
@staticmethod
def should_use_wrapper_for_return_type(_type):
return not isinstance(_type, (ArrayType, ObjectType))
@staticmethod
def should_use_references_for_type(_type):
return isinstance(_type, (ArrayType, ObjectType)) or (isinstance(_type, (PrimitiveType)) and _type.qualified_name() in ["any", "object"])
@staticmethod
def should_pass_by_copy_for_return_type(_type):
return isinstance(_type, (ArrayType, ObjectType)) or (isinstance(_type, (PrimitiveType)) and _type.qualified_name() == "object")
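# Quick illustrative check (hypothetical driver, not part of the generator's API): the
# protocol's primitive type names resolve to C++/WTF type names via _PRIMITIVE_TO_CPP_NAME_MAP.
if __name__ == "__main__":
    for raw_name in ('boolean', 'integer', 'number', 'string', 'any'):
        print('%s -> %s' % (raw_name, _PRIMITIVE_TO_CPP_NAME_MAP[raw_name]))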
| 44.269461 | 148 | 0.653524 |