hexsha (stringlengths 40 40) | size (int64 3 1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3 972) | max_stars_repo_name (stringlengths 6 130) | max_stars_repo_head_hexsha (stringlengths 40 78) | max_stars_repo_licenses (sequencelengths 1 10) | max_stars_count (int64 1 191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24 24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24 24 ⌀) | max_issues_repo_path (stringlengths 3 972) | max_issues_repo_name (stringlengths 6 130) | max_issues_repo_head_hexsha (stringlengths 40 78) | max_issues_repo_licenses (sequencelengths 1 10) | max_issues_count (int64 1 116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24 24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24 24 ⌀) | max_forks_repo_path (stringlengths 3 972) | max_forks_repo_name (stringlengths 6 130) | max_forks_repo_head_hexsha (stringlengths 40 78) | max_forks_repo_licenses (sequencelengths 1 10) | max_forks_count (int64 1 105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24 24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24 24 ⌀) | content (stringlengths 3 1.03M) | avg_line_length (float64 1.13 941k) | max_line_length (int64 2 941k) | alphanum_fraction (float64 0 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09ae8831a6d867988d83ddc73254812deec0f07b | 4,972 | py | Python | EQUATIONS/FOR_RESOLUTION_STUDY/TurbulentKineticEnergyResolutionStudy.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [ "BSD-2-Clause" ] | 4 | 2019-04-22T11:43:47.000Z | 2020-09-16T00:28:15.000Z | EQUATIONS/FOR_RESOLUTION_STUDY/TurbulentKineticEnergyResolutionStudy.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [ "BSD-2-Clause" ] | 34 | 2019-07-01T09:11:00.000Z | 2022-03-30T13:35:43.000Z | EQUATIONS/FOR_RESOLUTION_STUDY/TurbulentKineticEnergyResolutionStudy.py | mmicromegas/ransX | 2faaa786e00cfd14dce0e18f0793cd0252428d2a | [ "BSD-2-Clause" ] | 1 | 2020-09-16T00:28:17.000Z | 2020-09-16T00:28:17.000Z |
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class TurbulentKineticEnergyResolutionStudy(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, intc, data_prefix):
super(TurbulentKineticEnergyResolutionStudy, self).__init__(ig)
# load data to list of structured arrays
eht = []
for ffile in filename:
eht.append(self.customLoad(ffile))
# declare data lists
xzn0, nx, ny, nz = [], [], [], []
dd, ddux, dduy, dduz, dduxux, dduyuy, dduzuz, uxffuxff, uyffuyff, uzffuzff, tke = \
[], [], [], [], [], [], [], [], [], [], []
for i in range(len(filename)):
# load grid
xzn0.append(np.asarray(eht[i].item().get('xzn0')))
nx.append(np.asarray(eht[i].item().get('nx')))
ny.append(np.asarray(eht[i].item().get('ny')))
nz.append(np.asarray(eht[i].item().get('nz')))
# pick specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd.append(np.asarray(eht[i].item().get('dd')[intc]))
ddux.append(np.asarray(eht[i].item().get('ddux')[intc]))
dduy.append(np.asarray(eht[i].item().get('dduy')[intc]))
dduz.append(np.asarray(eht[i].item().get('dduz')[intc]))
dduxux.append(np.asarray(eht[i].item().get('dduxux')[intc]))
dduyuy.append(np.asarray(eht[i].item().get('dduyuy')[intc]))
dduzuz.append(np.asarray(eht[i].item().get('dduzuz')[intc]))
uxffuxff.append((dduxux[i] / dd[i] - ddux[i] * ddux[i] / (dd[i] * dd[i])))
uyffuyff.append((dduyuy[i] / dd[i] - dduy[i] * dduy[i] / (dd[i] * dd[i])))
uzffuzff.append((dduzuz[i] / dd[i] - dduz[i] * dduz[i] / (dd[i] * dd[i])))
tke.append(0.5 * (uxffuxff[i] + uyffuyff[i] + uzffuzff[i]))
# share data globally
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.nx = nx
self.ny = ny
self.nz = nz
self.tke = tke
self.ig = ig
def plot_tke(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot turbulent kinetic energy in the model"""
if (LAXIS != 2):
print("ERROR(TurbulentKineticEnergyResolutionStudy.py): Only LAXIS=2 is supported.")
sys.exit()
# load x GRID
grd = self.xzn0
# load DATA to plot
plt1 = self.tke
nx = self.nx
ny = self.ny
nz = self.nz
# find maximum resolution data
grd_maxres = self.maxresdata(grd)
plt1_maxres = self.maxresdata(plt1)
plt_interp = []
for i in range(len(grd)):
plt_interp.append(np.interp(grd_maxres, grd[i], plt1[i]))
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
plt10_tmp = plt1[0]
plt11_tmp = plt1[0]
# track the dataset whose maximum is largest so the axis limits cover all curves
plt1_foraxislimit = plt1[0]
plt1max = np.max(plt1[0])
for plt1i in plt1:
if (np.max(plt1i) > plt1max):
plt1max = np.max(plt1i)
plt1_foraxislimit = plt1i
# set plot boundaries
to_plot = [plt1_foraxislimit]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('Turbulent Kinetic Energy')
for i in range(len(grd)):
plt.plot(grd[i], plt1[i], label=str(self.nx[i]) + ' x ' + str(self.ny[i]) + ' x ' + str(self.nz[i]))
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r"x (cm)"
setylabel = r"$\widetilde{k}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r"r (cm)"
setylabel = r"$\widetilde{k}$"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
plt.savefig('RESULTS/' + self.data_prefix + 'mean_turbkineticenergy.png')
plt.savefig('RESULTS/' + self.data_prefix + 'mean_turbkineticenergy.eps')
# find data with maximum resolution
def maxresdata(self, data):
# return the dataset with the largest number of grid points (highest resolution)
tmp = 0
for idata in data:
if idata.shape[0] > tmp:
data_maxres = idata
tmp = idata.shape[0]
return data_maxres
| 33.594595 | 112 | 0.566573 |
dfba752653a4101a0470cdd8409c123458a69e2e | 50,194 | py | Python | botocore/utils.py | countergram/botocore | 67d27822fa0f80020e78802158f4c7adc4569853 | [ "Apache-2.0" ] | null | null | null | botocore/utils.py | countergram/botocore | 67d27822fa0f80020e78802158f4c7adc4569853 | [ "Apache-2.0" ] | null | null | null | botocore/utils.py | countergram/botocore | 67d27822fa0f80020e78802158f4c7adc4569853 | [ "Apache-2.0" ] | null | null | null |
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import time
import logging
import datetime
import hashlib
import binascii
import functools
import weakref
import random
import os
import socket
import cgi
import dateutil.parser
from dateutil.tz import tzlocal, tzutc
import botocore
import botocore.awsrequest
import botocore.httpsession
from botocore.compat import json, quote, zip_longest, urlsplit, urlunsplit
from botocore.compat import OrderedDict, six, urlparse
from botocore.vendored.six.moves.urllib.request import getproxies, proxy_bypass
from botocore.exceptions import (
InvalidExpressionError, ConfigNotFound, InvalidDNSNameError, ClientError,
MetadataRetrievalError, EndpointConnectionError, ReadTimeoutError,
ConnectionClosedError, ConnectTimeoutError,
)
logger = logging.getLogger(__name__)
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
METADATA_BASE_URL = 'http://169.254.169.254/'
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]')
RETRYABLE_HTTP_ERRORS = (
ReadTimeoutError, EndpointConnectionError, ConnectionClosedError,
ConnectTimeoutError,
)
S3_ACCELERATE_WHITELIST = ['dualstack']
# In switching events from using service name / endpoint prefix to service
# id, we have to preserve compatibility. This maps the instances where either
# is different than the transformed service id.
EVENT_ALIASES = {
"a4b": "alexa-for-business",
"alexaforbusiness": "alexa-for-business",
"api.mediatailor": "mediatailor",
"api.pricing": "pricing",
"api.sagemaker": "sagemaker",
"apigateway": "api-gateway",
"application-autoscaling": "application-auto-scaling",
"appstream2": "appstream",
"autoscaling": "auto-scaling",
"autoscaling-plans": "auto-scaling-plans",
"ce": "cost-explorer",
"cloudhsmv2": "cloudhsm-v2",
"cloudsearchdomain": "cloudsearch-domain",
"cognito-idp": "cognito-identity-provider",
"config": "config-service",
"cur": "cost-and-usage-report-service",
"data.iot": "iot-data-plane",
"data.jobs.iot": "iot-jobs-data-plane",
"data.mediastore": "mediastore-data",
"datapipeline": "data-pipeline",
"devicefarm": "device-farm",
"devices.iot1click": "iot-1click-devices-service",
"directconnect": "direct-connect",
"discovery": "application-discovery-service",
"dms": "database-migration-service",
"ds": "directory-service",
"dynamodbstreams": "dynamodb-streams",
"elasticbeanstalk": "elastic-beanstalk",
"elasticfilesystem": "efs",
"elasticloadbalancing": "elastic-load-balancing",
"elasticmapreduce": "emr",
"elastictranscoder": "elastic-transcoder",
"elb": "elastic-load-balancing",
"elbv2": "elastic-load-balancing-v2",
"email": "ses",
"entitlement.marketplace": "marketplace-entitlement-service",
"es": "elasticsearch-service",
"events": "eventbridge",
"cloudwatch-events": "eventbridge",
"iot-data": "iot-data-plane",
"iot-jobs-data": "iot-jobs-data-plane",
"iot1click-devices": "iot-1click-devices-service",
"iot1click-projects": "iot-1click-projects",
"kinesisanalytics": "kinesis-analytics",
"kinesisvideo": "kinesis-video",
"lex-models": "lex-model-building-service",
"lex-runtime": "lex-runtime-service",
"logs": "cloudwatch-logs",
"machinelearning": "machine-learning",
"marketplace-entitlement": "marketplace-entitlement-service",
"marketplacecommerceanalytics": "marketplace-commerce-analytics",
"metering.marketplace": "marketplace-metering",
"meteringmarketplace": "marketplace-metering",
"mgh": "migration-hub",
"models.lex": "lex-model-building-service",
"monitoring": "cloudwatch",
"mturk-requester": "mturk",
"opsworks-cm": "opsworkscm",
"projects.iot1click": "iot-1click-projects",
"resourcegroupstaggingapi": "resource-groups-tagging-api",
"route53": "route-53",
"route53domains": "route-53-domains",
"runtime.lex": "lex-runtime-service",
"runtime.sagemaker": "sagemaker-runtime",
"sdb": "simpledb",
"secretsmanager": "secrets-manager",
"serverlessrepo": "serverlessapplicationrepository",
"servicecatalog": "service-catalog",
"states": "sfn",
"stepfunctions": "sfn",
"storagegateway": "storage-gateway",
"streams.dynamodb": "dynamodb-streams",
"tagging": "resource-groups-tagging-api"
}
def ensure_boolean(val):
"""Ensures a boolean value if a string or boolean is provided
For strings, the value for True/False is case insensitive
"""
if isinstance(val, bool):
return val
else:
return val.lower() == 'true'
def is_json_value_header(shape):
"""Determines if the provided shape is the special header type jsonvalue.
:type shape: botocore.shape
:param shape: Shape to be inspected for the jsonvalue trait.
:return: True if this type is a jsonvalue, False otherwise
:rtype: Bool
"""
return (hasattr(shape, 'serialization') and
shape.serialization.get('jsonvalue', False) and
shape.serialization.get('location') == 'header' and
shape.type_name == 'string')
def get_service_module_name(service_model):
"""Returns the module name for a service
This is the value used in both the documentation and client class name
"""
name = service_model.metadata.get(
'serviceAbbreviation',
service_model.metadata.get(
'serviceFullName', service_model.service_name))
name = name.replace('Amazon', '')
name = name.replace('AWS', '')
name = re.sub(r'\W+', '', name)
return name
def normalize_url_path(path):
if not path:
return '/'
return remove_dot_segments(path)
def remove_dot_segments(url):
# RFC 3986, section 5.2.4 "Remove Dot Segments"
# Also, AWS services require consecutive slashes to be removed,
# so that's done here as well
if not url:
return ''
input_url = url.split('/')
output_list = []
for x in input_url:
if x and x != '.':
if x == '..':
if output_list:
output_list.pop()
else:
output_list.append(x)
if url[0] == '/':
first = '/'
else:
first = ''
if url[-1] == '/' and output_list:
last = '/'
else:
last = ''
return first + '/'.join(output_list) + last
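# Editor's illustration (not part of the original botocore module): a minimal,
# hedged sketch of how remove_dot_segments behaves; the sample paths below are
# assumed values chosen for demonstration.
def _example_remove_dot_segments():
    # '..' pops the previous segment, '.' and empty segments are dropped,
    # and leading/trailing slashes are preserved.
    assert remove_dot_segments('/a/b/../c/./d') == '/a/c/d'
    assert remove_dot_segments('/a//b/') == '/a/b/'
    assert remove_dot_segments('') == ''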
def validate_jmespath_for_set(expression):
# Validates a limited jmespath expression to determine if we can set a
# value based on it. Only works with dotted paths.
if not expression or expression == '.':
raise InvalidExpressionError(expression=expression)
for invalid in ['[', ']', '*']:
if invalid in expression:
raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
# This takes a (limited) jmespath-like expression & can set a value based
# on it.
# Limitations:
# * Only handles dotted lookups
# * No offsets/wildcards/slices/etc.
if is_first:
validate_jmespath_for_set(expression)
bits = expression.split('.', 1)
current_key, remainder = bits[0], bits[1] if len(bits) > 1 else ''
if not current_key:
raise InvalidExpressionError(expression=expression)
if remainder:
if current_key not in source:
# We've got something in the expression that's not present in the
# source (new key). If there's any more bits, we'll set the key
# with an empty dictionary.
source[current_key] = {}
return set_value_from_jmespath(
source[current_key],
remainder,
value,
is_first=False
)
# If we're down to a single key, set it.
source[current_key] = value
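# Editor's illustration (assumed sample data, not part of the original module):
# set_value_from_jmespath only supports dotted paths and creates intermediate
# dictionaries as needed.
def _example_set_value_from_jmespath():
    source = {}
    set_value_from_jmespath(source, 'foo.bar.baz', 42)
    assert source == {'foo': {'bar': {'baz': 42}}}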
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
class IMDSFetcher(object):
_RETRIES_EXCEEDED_ERROR_CLS = _RetriesExceededError
def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
num_attempts=1, base_url=METADATA_BASE_URL,
env=None, user_agent=None):
self._timeout = timeout
self._num_attempts = num_attempts
self._base_url = base_url
if env is None:
env = os.environ.copy()
self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower()
self._disabled = self._disabled == 'true'
self._user_agent = user_agent
self._session = botocore.httpsession.URLLib3Session(
timeout=self._timeout,
proxies=get_environ_proxies(self._base_url),
)
def _get_request(self, url_path, retry_func):
"""Make a get request to the Instance Metadata Service.
:type url_path: str
:param url_path: The path component of the URL to make a get request.
This arg is appended to the base_url that was provided in the
initializer.
:type retry_func: callable
:param retry_func: A function that takes the response as an argument
and determines if it needs to retry. By default empty and non
200 OK responses are retried.
"""
if self._disabled:
logger.debug("Access to EC2 metadata has been disabled.")
raise self._RETRIES_EXCEEDED_ERROR_CLS()
if retry_func is None:
retry_func = self._default_retry
url = self._base_url + url_path
headers = {}
if self._user_agent is not None:
headers['User-Agent'] = self._user_agent
for i in range(self._num_attempts):
try:
request = botocore.awsrequest.AWSRequest(
method='GET', url=url, headers=headers)
response = self._session.send(request.prepare())
if not retry_func(response):
return response
except RETRYABLE_HTTP_ERRORS as e:
logger.debug(
"Caught retryable HTTP exception while making metadata "
"service request to %s: %s", url, e, exc_info=True)
raise self._RETRIES_EXCEEDED_ERROR_CLS()
def _default_retry(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response)
)
def _is_non_ok_response(self, response):
if response.status_code != 200:
self._log_imds_response(response, 'non-200', log_body=True)
return True
return False
def _is_empty(self, response):
if not response.content:
self._log_imds_response(response, 'no body', log_body=True)
return True
return False
def _log_imds_response(self, response, reason_to_log, log_body=False):
statement = (
"Metadata service returned %s response "
"with status code of %s for url: %s"
)
logger_args = [
reason_to_log, response.status_code, response.url
]
if log_body:
statement += ", content body: %s"
logger_args.append(response.content)
logger.debug(statement, *logger_args)
class InstanceMetadataFetcher(IMDSFetcher):
_URL_PATH = 'latest/meta-data/iam/security-credentials/'
_REQUIRED_CREDENTIAL_FIELDS = [
'AccessKeyId', 'SecretAccessKey', 'Token', 'Expiration'
]
def retrieve_iam_role_credentials(self):
try:
role_name = self._get_iam_role()
credentials = self._get_credentials(role_name)
if self._contains_all_credential_fields(credentials):
return {
'role_name': role_name,
'access_key': credentials['AccessKeyId'],
'secret_key': credentials['SecretAccessKey'],
'token': credentials['Token'],
'expiry_time': credentials['Expiration'],
}
else:
# IMDS can return a 200 response that has a JSON formatted
# error message (i.e. if ec2 is not trusted entity for the
# attached role). We do not necessarily want to retry for
# these and we also do not necessarily want to raise a key
# error. So at least log the problematic response and return
# an empty dictionary to signal that it was not able to
# retrieve credentials. These error will contain both a
# Code and Message key.
if 'Code' in credentials and 'Message' in credentials:
logger.debug('Error response received when retrieving'
'credentials: %s.', credentials)
return {}
except self._RETRIES_EXCEEDED_ERROR_CLS:
logger.debug("Max number of attempts exceeded (%s) when "
"attempting to retrieve data from metadata service.",
self._num_attempts)
return {}
def _get_iam_role(self):
return self._get_request(
url_path=self._URL_PATH,
retry_func=self._needs_retry_for_role_name
).text
def _get_credentials(self, role_name):
r = self._get_request(
url_path=self._URL_PATH + role_name,
retry_func=self._needs_retry_for_credentials
)
return json.loads(r.text)
def _is_invalid_json(self, response):
try:
json.loads(response.text)
return False
except ValueError:
self._log_imds_response(response, 'invalid json')
return True
def _needs_retry_for_role_name(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response)
)
def _needs_retry_for_credentials(self, response):
return (
self._is_non_ok_response(response) or
self._is_empty(response) or
self._is_invalid_json(response)
)
def _contains_all_credential_fields(self, credentials):
for field in self._REQUIRED_CREDENTIAL_FIELDS:
if field not in credentials:
logger.debug(
'Retrieved credentials is missing required field: %s',
field)
return False
return True
def merge_dicts(dict1, dict2, append_lists=False):
"""Given two dict, merge the second dict into the first.
The dicts can have arbitrary nesting.
:param append_lists: If true, instead of clobbering a list with the new
value, append all of the new values onto the original list.
"""
for key in dict2:
if isinstance(dict2[key], dict):
if key in dict1 and key in dict2:
merge_dicts(dict1[key], dict2[key])
else:
dict1[key] = dict2[key]
# If the value is a list and the ``append_lists`` flag is set,
# append the new values onto the original list
elif isinstance(dict2[key], list) and append_lists:
# The value in dict1 must be a list in order to append new
# values onto it.
if key in dict1 and isinstance(dict1[key], list):
dict1[key].extend(dict2[key])
else:
dict1[key] = dict2[key]
else:
# At scalar types, we iterate and merge the
# current dict that we're on.
dict1[key] = dict2[key]
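# Editor's illustration (assumed sample data, not part of the original module):
# nested dictionaries are merged recursively, and with append_lists=True list
# values are extended instead of being replaced.
def _example_merge_dicts():
    first = {'a': {'x': 1}, 'tags': ['one']}
    second = {'a': {'y': 2}, 'tags': ['two']}
    merge_dicts(first, second, append_lists=True)
    assert first == {'a': {'x': 1, 'y': 2}, 'tags': ['one', 'two']}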
def lowercase_dict(original):
"""Copies the given dictionary ensuring all keys are lowercase strings. """
copy = {}
for key in original:
copy[key.lower()] = original[key]
return copy
def parse_key_val_file(filename, _open=open):
try:
with _open(filename) as f:
contents = f.read()
return parse_key_val_file_contents(contents)
except OSError:
raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
# This was originally extracted from the EC2 credential provider, which was
# fairly lenient in its parsing. We only try to parse key/val pairs if
# there's a '=' in the line.
final = {}
for line in contents.splitlines():
if '=' not in line:
continue
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
final[key] = val
return final
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
"""Urlencode a dict or list into a string.
This is similar to urllib.urlencode except that:
* It uses quote, and not quote_plus
* It has a default list of safe chars that don't need
to be encoded, which matches what AWS services expect.
If any value in the input ``mapping`` is a list type,
then each list element will be serialized. This is the equivalent
to ``urlencode``'s ``doseq=True`` argument.
This function should be preferred over the stdlib
``urlencode()`` function.
:param mapping: Either a dict to urlencode or a list of
``(key, value)`` pairs.
"""
encoded_pairs = []
if hasattr(mapping, 'items'):
pairs = mapping.items()
else:
pairs = mapping
for key, value in pairs:
if isinstance(value, list):
for element in value:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(element)))
else:
encoded_pairs.append('%s=%s' % (percent_encode(key),
percent_encode(value)))
return '&'.join(encoded_pairs)
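# Editor's illustration (assumed sample data, not part of the original module):
# list values expand into repeated key=value pairs, and characters outside
# SAFE_CHARS are percent encoded with quote(), not quote_plus().
def _example_percent_encode_sequence():
    encoded = percent_encode_sequence(
        [('Action', 'Describe Things'), ('Tag', ['a/b', 'c'])])
    assert encoded == 'Action=Describe%20Things&Tag=a%2Fb&Tag=c'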
def percent_encode(input_str, safe=SAFE_CHARS):
"""Urlencodes a string.
Whereas percent_encode_sequence handles taking a dict/sequence and
producing a percent encoded string, this function deals only with
taking a string (not a dict/sequence) and percent encoding it.
If given the binary type, will simply URL encode it. If given the
text type, will produce the binary type by UTF-8 encoding the
text. If given something else, will convert it to the text type
first.
"""
# If its not a binary or text string, make it a text string.
if not isinstance(input_str, (six.binary_type, six.text_type)):
input_str = six.text_type(input_str)
# If it's not bytes, make it bytes by UTF-8 encoding it.
if not isinstance(input_str, six.binary_type):
input_str = input_str.encode('utf-8')
return quote(input_str, safe=safe)
def parse_timestamp(value):
"""Parse a timestamp into a datetime object.
Supported formats:
* iso8601
* rfc822
* epoch (value is an integer)
This will return a ``datetime.datetime`` object.
"""
if isinstance(value, (int, float)):
# Possibly an epoch time.
return datetime.datetime.fromtimestamp(value, tzlocal())
else:
try:
return datetime.datetime.fromtimestamp(float(value), tzlocal())
except (TypeError, ValueError):
pass
try:
# In certain cases, a timestamp marked with GMT can be parsed into a
# different time zone, so here we provide a context which will
# enforce that GMT == UTC.
return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
except (TypeError, ValueError) as e:
raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_to_aware_datetime(value):
"""Converted the passed in value to a datetime object with tzinfo.
This function can be used to normalize all timestamp inputs. This
function accepts a number of different types of inputs, but
will always return a datetime.datetime object with time zone
information.
The input param ``value`` can be one of several types:
* A datetime object (both naive and aware)
* An integer representing the epoch time (can also be a string
of the integer, i.e '0', instead of 0). The epoch time is
considered to be UTC.
* An iso8601 formatted timestamp. This does not need to be
a complete timestamp, it can contain just the date portion
without the time component.
The returned value will be a datetime object that will have tzinfo.
If no timezone info was provided in the input value, then UTC is
assumed, not local time.
"""
# This is a general purpose method that handles several cases of
# converting the provided value to a string timestamp suitable to be
# serialized to an http request. It can handle:
# 1) A datetime.datetime object.
if isinstance(value, datetime.datetime):
datetime_obj = value
else:
# 2) A string object that's formatted as a timestamp.
# We document this as being an iso8601 timestamp, although
# parse_timestamp is a bit more flexible.
datetime_obj = parse_timestamp(value)
if datetime_obj.tzinfo is None:
# I think a case would be made that if no time zone is provided,
# we should use the local time. However, to restore backwards
# compat, the previous behavior was to assume UTC, which is
# what we're going to do here.
datetime_obj = datetime_obj.replace(tzinfo=tzutc())
else:
datetime_obj = datetime_obj.astimezone(tzutc())
return datetime_obj
def datetime2timestamp(dt, default_timezone=None):
"""Calculate the timestamp based on the given datetime instance.
:type dt: datetime
:param dt: A datetime object to be converted into timestamp
:type default_timezone: tzinfo
:param default_timezone: If it is provided as None, we treat it as tzutc().
But it is only used when dt is a naive datetime.
:returns: The timestamp
"""
epoch = datetime.datetime(1970, 1, 1)
if dt.tzinfo is None:
if default_timezone is None:
default_timezone = tzutc()
dt = dt.replace(tzinfo=default_timezone)
d = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
if hasattr(d, "total_seconds"):
return d.total_seconds() # Works in Python 2.7+
return (d.microseconds + (d.seconds + d.days * 24 * 3600) * 10**6) / 10**6
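# Editor's illustration (assumed sample data, not part of the original module):
# naive datetimes are interpreted as UTC unless default_timezone says otherwise,
# so the Unix epoch maps to a timestamp of 0.0.
def _example_datetime2timestamp():
    assert datetime2timestamp(datetime.datetime(1970, 1, 1)) == 0.0
    assert datetime2timestamp(datetime.datetime(1970, 1, 2)) == 86400.0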
def calculate_sha256(body, as_hex=False):
"""Calculate a sha256 checksum.
This method will calculate the sha256 checksum of a file like
object. Note that this method will iterate through the entire
file contents. The caller is responsible for ensuring the proper
starting position of the file and ``seek()``'ing the file back
to its starting location if other consumers need to read from
the file like object.
:param body: Any file like object. The file must be opened
in binary mode such that a ``.read()`` call returns bytes.
:param as_hex: If True, then the hex digest is returned.
If False, then the digest (as binary bytes) is returned.
:returns: The sha256 checksum
"""
checksum = hashlib.sha256()
for chunk in iter(lambda: body.read(1024 * 1024), b''):
checksum.update(chunk)
if as_hex:
return checksum.hexdigest()
else:
return checksum.digest()
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
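# Editor's illustration (assumed sample data, not part of the original module):
# the body must be a binary file-like object; for content smaller than the 1 MB
# chunk size the tree hash reduces to the plain SHA-256 hex digest.
def _example_calculate_tree_hash():
    from io import BytesIO
    body = BytesIO(b'hello world')
    assert calculate_tree_hash(body) == hashlib.sha256(b'hello world').hexdigest()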
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
"""A read only property that caches the initially computed value.
This descriptor will only call the provided ``fget`` function once.
Subsequent access to this property will return the cached value.
"""
def __init__(self, fget):
self._fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
else:
computed_value = self._fget(obj)
obj.__dict__[self._fget.__name__] = computed_value
return computed_value
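# Editor's illustration (assumed class, not part of the original module): the
# decorated method runs once per instance; the computed value is then stored in
# the instance __dict__ and returned directly on later attribute access.
class _ExampleCachedProperty(object):
    @CachedProperty
    def expensive_value(self):
        return sum(range(1000))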
class ArgumentGenerator(object):
"""Generate sample input based on a shape model.
This class contains a ``generate_skeleton`` method that will take
an input/output shape (created from ``botocore.model``) and generate
a sample dictionary corresponding to the input/output shape.
The specific values used are place holder values. For strings either an
empty string or the member name can be used, for numbers 0 or 0.0 is used.
The intended usage of this class is to generate the *shape* of the input
structure.
This can be useful for operations that have complex input shapes.
This allows a user to just fill in the necessary data instead of
worrying about the specific structure of the input arguments.
Example usage::
s = botocore.session.get_session()
ddb = s.get_service_model('dynamodb')
arg_gen = ArgumentGenerator()
sample_input = arg_gen.generate_skeleton(
ddb.operation_model('CreateTable').input_shape)
print("Sample input for dynamodb.CreateTable: %s" % sample_input)
"""
def __init__(self, use_member_names=False):
self._use_member_names = use_member_names
def generate_skeleton(self, shape):
"""Generate a sample input.
:type shape: ``botocore.model.Shape``
:param shape: The input shape.
:return: The generated skeleton input corresponding to the
provided input shape.
"""
stack = []
return self._generate_skeleton(shape, stack)
def _generate_skeleton(self, shape, stack, name=''):
stack.append(shape.name)
try:
if shape.type_name == 'structure':
return self._generate_type_structure(shape, stack)
elif shape.type_name == 'list':
return self._generate_type_list(shape, stack)
elif shape.type_name == 'map':
return self._generate_type_map(shape, stack)
elif shape.type_name == 'string':
if self._use_member_names:
return name
if shape.enum:
return random.choice(shape.enum)
return ''
elif shape.type_name in ['integer', 'long']:
return 0
elif shape.type_name == 'float':
return 0.0
elif shape.type_name == 'boolean':
return True
elif shape.type_name == 'timestamp':
return datetime.datetime(1970, 1, 1, 0, 0, 0)
finally:
stack.pop()
def _generate_type_structure(self, shape, stack):
if stack.count(shape.name) > 1:
return {}
skeleton = OrderedDict()
for member_name, member_shape in shape.members.items():
skeleton[member_name] = self._generate_skeleton(
member_shape, stack, name=member_name)
return skeleton
def _generate_type_list(self, shape, stack):
# For list elements we've arbitrarily decided to
# return two elements for the skeleton list.
name = ''
if self._use_member_names:
name = shape.member.name
return [
self._generate_skeleton(shape.member, stack, name),
]
def _generate_type_map(self, shape, stack):
key_shape = shape.key
value_shape = shape.value
assert key_shape.type_name == 'string'
return OrderedDict([
('KeyName', self._generate_skeleton(value_shape, stack)),
])
def is_valid_endpoint_url(endpoint_url):
"""Verify the endpoint_url is valid.
:type endpoint_url: string
:param endpoint_url: An endpoint_url. Must have at least a scheme
and a hostname.
:return: True if the endpoint url is valid. False otherwise.
"""
parts = urlsplit(endpoint_url)
hostname = parts.hostname
if hostname is None:
return False
if len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1]
allowed = re.compile(
r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
re.IGNORECASE)
return allowed.match(hostname)
def check_dns_name(bucket_name):
"""
Check to see if the ``bucket_name`` complies with the
restricted DNS naming conventions necessary to allow
access via virtual-hosting style.
Even though "." characters are perfectly valid in this DNS
naming scheme, we are going to punt on any name containing a
"." character because these will cause SSL cert validation
problems if we try to use virtual-hosting style addressing.
"""
if '.' in bucket_name:
return False
n = len(bucket_name)
if n < 3 or n > 63:
# Wrong length
return False
if n == 1:
if not bucket_name.isalnum():
return False
match = LABEL_RE.match(bucket_name)
if match is None or match.end() != len(bucket_name):
return False
return True
def fix_s3_host(request, signature_version, region_name,
default_endpoint_url=None, **kwargs):
"""
This handler looks at S3 requests just before they are signed.
If there is a bucket name on the path (true for everything except
ListAllBuckets) it checks to see if that bucket name conforms to
the DNS naming conventions. If it does, it alters the request to
use ``virtual hosting`` style addressing rather than ``path-style``
addressing.
"""
if request.context.get('use_global_endpoint', False):
default_endpoint_url = 's3.amazonaws.com'
try:
switch_to_virtual_host_style(
request, signature_version, default_endpoint_url)
except InvalidDNSNameError as e:
bucket_name = e.kwargs['bucket_name']
logger.debug('Not changing URI, bucket is not DNS compatible: %s',
bucket_name)
def switch_to_virtual_host_style(request, signature_version,
default_endpoint_url=None, **kwargs):
"""
This is a handler to force virtual host style s3 addressing no matter
the signature version (which is taken into consideration for the default
case). If the bucket is not DNS compatible, an InvalidDNSNameError is raised.
:param request: An AWSRequest object that is about to be sent.
:param signature_version: The signature version to sign with
:param default_endpoint_url: The endpoint to use when switching to a
virtual style. If None is supplied, the virtual host will be
constructed from the url of the request.
"""
if request.auth_path is not None:
# The auth_path has already been applied (this may be a
# retried request). We don't need to perform this
# customization again.
return
elif _is_get_bucket_location_request(request):
# For the GetBucketLocation response, we should not be using
# the virtual host style addressing so we can avoid any sigv4
# issues.
logger.debug("Request is GetBucketLocation operation, not checking "
"for DNS compatibility.")
return
parts = urlsplit(request.url)
request.auth_path = parts.path
path_parts = parts.path.split('/')
# Retrieve what the endpoint we will be prepending the bucket name to.
if default_endpoint_url is None:
default_endpoint_url = parts.netloc
if len(path_parts) > 1:
bucket_name = path_parts[1]
if not bucket_name:
# If the bucket name is empty we should not be checking for
# dns compatibility.
return
logger.debug('Checking for DNS compatible bucket for: %s',
request.url)
if check_dns_name(bucket_name):
# If the operation is on a bucket, the auth_path must be
# terminated with a '/' character.
if len(path_parts) == 2:
if request.auth_path[-1] != '/':
request.auth_path += '/'
path_parts.remove(bucket_name)
# At the very least the path must be a '/', such as with the
# CreateBucket operation when DNS style is being used. If this
# is not used you will get an empty path which is incorrect.
path = '/'.join(path_parts) or '/'
global_endpoint = default_endpoint_url
host = bucket_name + '.' + global_endpoint
new_tuple = (parts.scheme, host, path,
parts.query, '')
new_uri = urlunsplit(new_tuple)
request.url = new_uri
logger.debug('URI updated to: %s', new_uri)
else:
raise InvalidDNSNameError(bucket_name=bucket_name)
def _is_get_bucket_location_request(request):
return request.url.endswith('?location')
def instance_cache(func):
"""Method decorator for caching method calls to a single instance.
**This is not a general purpose caching decorator.**
In order to use this, you *must* provide an ``_instance_cache``
attribute on the instance.
This decorator is used to cache method calls. The cache is only
scoped to a single instance though such that multiple instances
will maintain their own cache. In order to keep things simple,
this decorator requires that you provide an ``_instance_cache``
attribute on your instance.
"""
func_name = func.__name__
@functools.wraps(func)
def _cache_guard(self, *args, **kwargs):
cache_key = (func_name, args)
if kwargs:
kwarg_items = tuple(sorted(kwargs.items()))
cache_key = (func_name, args, kwarg_items)
result = self._instance_cache.get(cache_key)
if result is not None:
return result
result = func(self, *args, **kwargs)
self._instance_cache[cache_key] = result
return result
return _cache_guard
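# Editor's illustration (assumed class, not part of the original module):
# instance_cache requires an _instance_cache dict on the instance; repeated
# calls with the same arguments then reuse the previously computed result.
class _ExampleCachedClient(object):
    def __init__(self):
        self._instance_cache = {}
    @instance_cache
    def lookup(self, name):
        return {'name': name}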
def switch_host_s3_accelerate(request, operation_name, **kwargs):
"""Switches the current s3 endpoint with an S3 Accelerate endpoint"""
# Note that when registered the switching of the s3 host happens
# before it gets changed to virtual. So we are not concerned with ensuring
# that the bucket name is translated to the virtual style here and we
# can hard code the Accelerate endpoint.
parts = urlsplit(request.url).netloc.split('.')
parts = [p for p in parts if p in S3_ACCELERATE_WHITELIST]
endpoint = 'https://s3-accelerate.'
if len(parts) > 0:
endpoint += '.'.join(parts) + '.'
endpoint += 'amazonaws.com'
if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
return
_switch_hosts(request, endpoint, use_new_scheme=False)
def switch_host_with_param(request, param_name):
"""Switches the host using a parameter value from a JSON request body"""
request_json = json.loads(request.data.decode('utf-8'))
if request_json.get(param_name):
new_endpoint = request_json[param_name]
_switch_hosts(request, new_endpoint)
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
final_endpoint = _get_new_endpoint(
request.url, new_endpoint, use_new_scheme)
request.url = final_endpoint
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
new_endpoint_components = urlsplit(new_endpoint)
original_endpoint_components = urlsplit(original_endpoint)
scheme = original_endpoint_components.scheme
if use_new_scheme:
scheme = new_endpoint_components.scheme
final_endpoint_components = (
scheme,
new_endpoint_components.netloc,
original_endpoint_components.path,
original_endpoint_components.query,
''
)
final_endpoint = urlunsplit(final_endpoint_components)
logger.debug('Updating URI from %s to %s' % (
original_endpoint, final_endpoint))
return final_endpoint
def deep_merge(base, extra):
"""Deeply two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence.
"""
for key in extra:
# If the key represents a dict on both given dicts, merge the sub-dicts
if key in base and isinstance(base[key], dict)\
and isinstance(extra[key], dict):
deep_merge(base[key], extra[key])
continue
# Otherwise, set the key on the base to be the value of the extra.
base[key] = extra[key]
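# Editor's illustration (assumed sample data, not part of the original module):
# keys from ``extra`` take precedence, but dictionaries present in both
# arguments are merged recursively rather than replaced wholesale.
def _example_deep_merge():
    base = {'retries': {'max_attempts': 3}, 'region': 'us-east-1'}
    extra = {'retries': {'mode': 'standard'}, 'region': 'us-west-2'}
    deep_merge(base, extra)
    assert base == {'retries': {'max_attempts': 3, 'mode': 'standard'},
                    'region': 'us-west-2'}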
def hyphenize_service_id(service_id):
"""Translate the form used for event emitters.
:param service_id: The service_id to convert.
"""
return service_id.replace(' ', '-').lower()
class S3RegionRedirector(object):
def __init__(self, endpoint_bridge, client, cache=None):
self._endpoint_resolver = endpoint_bridge
self._cache = cache
if self._cache is None:
self._cache = {}
# This needs to be a weak ref in order to prevent memory leaks on
# python 2.6
self._client = weakref.proxy(client)
def register(self, event_emitter=None):
emitter = event_emitter or self._client.meta.events
emitter.register('needs-retry.s3', self.redirect_from_error)
emitter.register('before-call.s3', self.set_request_url)
emitter.register('before-parameter-build.s3',
self.redirect_from_cache)
def redirect_from_error(self, request_dict, response, operation, **kwargs):
"""
An S3 request sent to the wrong region will return an error that
contains the endpoint the request should be sent to. This handler
will add the redirect information to the signing context and then
redirect the request.
"""
if response is None:
# This could be none if there was a ConnectionError or other
# transport error.
return
if request_dict.get('context', {}).get('s3_redirected'):
logger.debug(
'S3 request was previously redirected, not redirecting.')
return
error = response[1].get('Error', {})
error_code = error.get('Code')
response_metadata = response[1].get('ResponseMetadata', {})
# We have to account for 400 responses because
# if we sign a Head* request with the wrong region,
# we'll get a 400 Bad Request but we won't get a
# body saying it's an "AuthorizationHeaderMalformed".
is_special_head_object = (
error_code in ['301', '400'] and
operation.name == 'HeadObject'
)
is_special_head_bucket = (
error_code in ['301', '400'] and
operation.name == 'HeadBucket' and
'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {})
)
is_wrong_signing_region = (
error_code == 'AuthorizationHeaderMalformed' and
'Region' in error
)
is_redirect_status = response[0] is not None and \
response[0].status_code in [301, 302, 307]
is_permanent_redirect = error_code == 'PermanentRedirect'
if not any([is_special_head_object, is_wrong_signing_region,
is_permanent_redirect, is_special_head_bucket,
is_redirect_status]):
return
bucket = request_dict['context']['signing']['bucket']
client_region = request_dict['context'].get('client_region')
new_region = self.get_bucket_region(bucket, response)
if new_region is None:
logger.debug(
"S3 client configured for region %s but the bucket %s is not "
"in that region and the proper region could not be "
"automatically determined." % (client_region, bucket))
return
logger.debug(
"S3 client configured for region %s but the bucket %s is in region"
" %s; Please configure the proper region to avoid multiple "
"unnecessary redirects and signing attempts." % (
client_region, bucket, new_region))
endpoint = self._endpoint_resolver.resolve('s3', new_region)
endpoint = endpoint['endpoint_url']
signing_context = {
'region': new_region,
'bucket': bucket,
'endpoint': endpoint
}
request_dict['context']['signing'] = signing_context
self._cache[bucket] = signing_context
self.set_request_url(request_dict, request_dict['context'])
request_dict['context']['s3_redirected'] = True
# Return 0 so it doesn't wait to retry
return 0
def get_bucket_region(self, bucket, response):
"""
There are multiple potential sources for the new region to redirect to,
but they aren't all universally available for use. This will try to
find region from response elements, but will fall back to calling
HEAD on the bucket if all else fails.
:param bucket: The bucket to find the region for. This is necessary if
the region is not available in the error response.
:param response: A response representing a service request that failed
due to incorrect region configuration.
"""
# First try to source the region from the headers.
service_response = response[1]
response_headers = service_response['ResponseMetadata']['HTTPHeaders']
if 'x-amz-bucket-region' in response_headers:
return response_headers['x-amz-bucket-region']
# Next, check the error body
region = service_response.get('Error', {}).get('Region', None)
if region is not None:
return region
# Finally, HEAD the bucket. No other choice sadly.
try:
response = self._client.head_bucket(Bucket=bucket)
headers = response['ResponseMetadata']['HTTPHeaders']
except ClientError as e:
headers = e.response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region', None)
return region
def set_request_url(self, params, context, **kwargs):
endpoint = context.get('signing', {}).get('endpoint', None)
if endpoint is not None:
params['url'] = _get_new_endpoint(params['url'], endpoint, False)
def redirect_from_cache(self, params, context, **kwargs):
"""
This handler retrieves a given bucket's signing context from the cache
and adds it into the request context.
"""
bucket = params.get('Bucket')
signing_context = self._cache.get(bucket)
if signing_context is not None:
context['signing'] = signing_context
else:
context['signing'] = {'bucket': bucket}
class ContainerMetadataFetcher(object):
TIMEOUT_SECONDS = 2
RETRY_ATTEMPTS = 3
SLEEP_TIME = 1
IP_ADDRESS = '169.254.170.2'
_ALLOWED_HOSTS = [IP_ADDRESS, 'localhost', '127.0.0.1']
def __init__(self, session=None, sleep=time.sleep):
if session is None:
session = botocore.httpsession.URLLib3Session(
timeout=self.TIMEOUT_SECONDS
)
self._session = session
self._sleep = sleep
def retrieve_full_uri(self, full_url, headers=None):
"""Retrieve JSON metadata from container metadata.
:type full_url: str
:param full_url: The full URL of the metadata service.
This should include the scheme as well, e.g
"http://localhost:123/foo"
"""
self._validate_allowed_url(full_url)
return self._retrieve_credentials(full_url, headers)
def _validate_allowed_url(self, full_url):
parsed = botocore.compat.urlparse(full_url)
is_whitelisted_host = self._check_if_whitelisted_host(
parsed.hostname)
if not is_whitelisted_host:
raise ValueError(
"Unsupported host '%s'. Can only "
"retrieve metadata from these hosts: %s" %
(parsed.hostname, ', '.join(self._ALLOWED_HOSTS)))
def _check_if_whitelisted_host(self, host):
if host in self._ALLOWED_HOSTS:
return True
return False
def retrieve_uri(self, relative_uri):
"""Retrieve JSON metadata from ECS metadata.
:type relative_uri: str
:param relative_uri: A relative URI, e.g "/foo/bar?id=123"
:return: The parsed JSON response.
"""
full_url = self.full_url(relative_uri)
return self._retrieve_credentials(full_url)
def _retrieve_credentials(self, full_url, extra_headers=None):
headers = {'Accept': 'application/json'}
if extra_headers is not None:
headers.update(extra_headers)
attempts = 0
while True:
try:
return self._get_response(
full_url, headers, self.TIMEOUT_SECONDS)
except MetadataRetrievalError as e:
logger.debug("Received error when attempting to retrieve "
"container metadata: %s", e, exc_info=True)
self._sleep(self.SLEEP_TIME)
attempts += 1
if attempts >= self.RETRY_ATTEMPTS:
raise
def _get_response(self, full_url, headers, timeout):
try:
AWSRequest = botocore.awsrequest.AWSRequest
request = AWSRequest(method='GET', url=full_url, headers=headers)
response = self._session.send(request.prepare())
response_text = response.content.decode('utf-8')
if response.status_code != 200:
raise MetadataRetrievalError(
error_msg=(
"Received non 200 response (%s) from ECS metadata: %s"
) % (response.status_code, response_text))
try:
return json.loads(response_text)
except ValueError:
error_msg = (
"Unable to parse JSON returned from ECS metadata services"
)
logger.debug('%s:%s', error_msg, response_text)
raise MetadataRetrievalError(error_msg=error_msg)
except RETRYABLE_HTTP_ERRORS as e:
error_msg = ("Received error when attempting to retrieve "
"ECS metadata: %s" % e)
raise MetadataRetrievalError(error_msg=error_msg)
def full_url(self, relative_uri):
return 'http://%s%s' % (self.IP_ADDRESS, relative_uri)
def get_environ_proxies(url):
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
# NOTE: the requests library allows ip/cidr entries in the no_proxy env var,
# which we don't currently support, as urllib only checks the DNS suffix
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
if proxy_bypass(urlparse(url).netloc):
return True
except (TypeError, socket.gaierror):
pass
return False
def get_encoding_from_headers(headers, default='ISO-8859-1'):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:param default: default encoding if the content-type is text
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return default
| 36.451707 | 82 | 0.637327 |
08507dcdc11e3bed20ac7864cd0821afaf3c43e6 | 15,576 | py | Python | src/prefect/agent/docker/agent.py | szelenka/prefect | 839b02a2a458b89fb0c1ec7f78192c88e710fe30 | [ "ECL-2.0", "Apache-2.0" ] | 1 | 2020-05-10T14:32:32.000Z | 2020-05-10T14:32:32.000Z | src/prefect/agent/docker/agent.py | szelenka/prefect | 839b02a2a458b89fb0c1ec7f78192c88e710fe30 | [ "ECL-2.0", "Apache-2.0" ] | null | null | null | src/prefect/agent/docker/agent.py | szelenka/prefect | 839b02a2a458b89fb0c1ec7f78192c88e710fe30 | [ "ECL-2.0", "Apache-2.0" ] | null | null | null |
import re
import multiprocessing
import ntpath
import posixpath
from sys import platform
from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
from prefect import config, context
from prefect.agent import Agent
from prefect.environments.storage import Docker
from prefect.serialization.storage import StorageSchema
from prefect.utilities.graphql import GraphQLResult
if TYPE_CHECKING:
import docker
class DockerAgent(Agent):
"""
Agent which deploys flow runs locally as Docker containers. Information on using the
Docker Agent can be found at https://docs.prefect.io/cloud/agents/docker.html
Environment variables may be set on the agent to be provided to each flow run's container:
```
prefect agent start docker --env MY_SECRET_KEY=secret --env OTHER_VAR=$OTHER_VAR
```
The default Docker daemon may be overridden by providing a different `base_url`:
```
prefect agent start docker --base-url "tcp://0.0.0.0:2375"
```
Args:
- name (str, optional): An optional name to give this agent. Can also be set through
the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to "agent"
- labels (List[str], optional): a list of labels, which are arbitrary string identifiers used by Prefect
Agents when polling for work
- env_vars (dict, optional): a dictionary of environment variables and values that will be set
on each flow run that this agent submits for execution
- max_polls (int, optional): maximum number of times the agent will poll Prefect Cloud for flow runs;
defaults to infinite
- base_url (str, optional): URL for a Docker daemon server. Defaults to
`unix:///var/run/docker.sock` however other hosts such as
`tcp://0.0.0.0:2375` can be provided
- no_pull (bool, optional): Flag on whether or not to pull flow images.
Defaults to `False` if not provided here or in context.
- show_flow_logs (bool, optional): a boolean specifying whether the agent should re-route Flow run logs
to stdout; defaults to `False`
- volumes (List[str], optional): a list of Docker volume mounts to be attached to any and all created containers.
"""
def __init__(
self,
name: str = None,
labels: Iterable[str] = None,
env_vars: dict = None,
max_polls: int = None,
base_url: str = None,
no_pull: bool = None,
volumes: List[str] = None,
show_flow_logs: bool = False,
) -> None:
super().__init__(
name=name, labels=labels, env_vars=env_vars, max_polls=max_polls
)
if platform == "win32":
default_url = "npipe:////./pipe/docker_engine"
else:
default_url = "unix://var/run/docker.sock"
self.logger.debug(
"Platform {} and default docker daemon {}".format(platform, default_url)
)
# Determine Daemon URL
self.base_url = base_url or context.get("base_url", default_url)
self.logger.debug("Base docker daemon url {}".format(self.base_url))
# Determine pull specification
self.no_pull = no_pull or context.get("no_pull", False)
self.logger.debug("no_pull set to {}".format(self.no_pull))
# Resolve volumes from specs
(
self.named_volumes,
self.container_mount_paths,
self.host_spec,
) = self._parse_volume_spec(volumes or [])
self.failed_connections = 0
self.docker_client = self._get_docker_client()
self.show_flow_logs = show_flow_logs
self.processes = [] # type: List[multiprocessing.Process]
# Ping Docker daemon for connection issues
try:
self.logger.debug("Pinging docker daemon")
self.docker_client.ping()
except Exception as exc:
self.logger.exception(
"Issue connecting to the Docker daemon. Make sure it is running."
)
raise exc
def _get_docker_client(self) -> "docker.APIClient":
# 'import docker' is expensive time-wise, we should do this just-in-time to keep
# the 'import prefect' time low
import docker
return docker.APIClient(base_url=self.base_url, version="auto")
def heartbeat(self) -> None:
try:
if not self.docker_client.ping():
raise RuntimeError("Unexpected Docker ping result")
if self.failed_connections > 0:
self.logger.info("Reconnected to Docker daemon")
self.failed_connections = 0
except Exception as exc:
self.logger.warning("Failed heartbeat: {}".format(repr(exc)))
self.failed_connections += 1
if self.failed_connections >= 6:
self.logger.error(
"Cannot reconnect to Docker daemon. Agent is shutting down."
)
raise SystemExit()
def on_shutdown(self) -> None:
"""
Cleanup any child processes created for streaming logs. This is to prevent
logs from displaying on the terminal after the agent exits.
"""
for proc in self.processes:
if proc.is_alive():
proc.terminate()
def _is_named_volume_unix(self, canditate_path: str) -> bool:
if not canditate_path:
return False
return not canditate_path.startswith((".", "/", "~"))
def _is_named_volume_win32(self, canditate_path: str) -> bool:
result = self._is_named_volume_unix(canditate_path)
return (
result
and not re.match(r"^[A-Za-z]\:\\.*", canditate_path)
and not canditate_path.startswith("\\")
)
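# Editor's illustration (assumed sample paths, not part of the original agent):
# bare names such as 'my-volume' are treated as Docker named volumes, while
# absolute, relative, drive-letter and UNC paths are treated as bind mounts.
def _example_named_volume_checks(self):
    assert self._is_named_volume_unix('my-volume')
    assert not self._is_named_volume_unix('/host/data')
    assert not self._is_named_volume_win32('C:\\host\\data')
    assert not self._is_named_volume_win32('\\\\server\\share')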
def _parse_volume_spec(
self, volume_specs: List[str]
) -> Tuple[Iterable[str], Iterable[str], Dict[str, Dict[str, str]]]:
if platform == "win32":
return self._parse_volume_spec_win32(volume_specs)
return self._parse_volume_spec_unix(volume_specs)
def _parse_volume_spec_win32(
self, volume_specs: List[str]
) -> Tuple[Iterable[str], Iterable[str], Dict[str, Dict[str, str]]]:
named_volumes = [] # type: List[str]
container_mount_paths = [] # type: List[str]
host_spec = {} # type: Dict[str, Dict[str, str]]
for volume_spec in volume_specs:
fields = volume_spec.split(":")
if fields[-1] in ("ro", "rw"):
mode = fields.pop()
else:
mode = "rw"
if len(fields) == 3 and len(fields[0]) == 1:
# C:\path1:/path2 <-- external and internal path
external = ntpath.normpath(":".join(fields[0:2]))
internal = posixpath.normpath(fields[2])
elif len(fields) == 2:
combined_path = ":".join(fields)
(drive, path) = ntpath.splitdrive(combined_path)
if drive:
# C:\path1 <-- assumed container path of /path1
external = ntpath.normpath(combined_path)
# C:\path1 --> /c/path1
path = str("/" + drive.lower().rstrip(":") + path).replace(
"\\", "/"
)
internal = posixpath.normpath(path)
else:
# /path1:\path2 <-- external and internal path (relative to current drive)
# C:/path2 <-- valid named volume
external = ntpath.normpath(fields[0])
internal = posixpath.normpath(fields[1])
elif len(fields) == 1:
# \path1 <-- assumed container path of /path1 (relative to current drive)
external = ntpath.normpath(fields[0])
internal = external
else:
raise ValueError(
"Unable to parse volume specification '{}'".format(volume_spec)
)
container_mount_paths.append(internal)
if external and self._is_named_volume_win32(external):
named_volumes.append(external)
if mode != "rw":
raise ValueError(
"Named volumes can only have 'rw' mode, provided '{}'".format(
mode
)
)
else:
if not external:
# no internal container path given, assume the host path is the same as the internal path
external = internal
host_spec[external] = {
"bind": internal,
"mode": mode,
}
return named_volumes, container_mount_paths, host_spec
def _parse_volume_spec_unix(
self, volume_specs: List[str]
) -> Tuple[Iterable[str], Iterable[str], Dict[str, Dict[str, str]]]:
named_volumes = [] # type: List[str]
container_mount_paths = [] # type: List[str]
host_spec = {} # type: Dict[str, Dict[str, str]]
for volume_spec in volume_specs:
fields = volume_spec.split(":")
if len(fields) > 3:
raise ValueError(
"Docker volume format is invalid: {} (should be 'external:internal[:mode]')".format(
volume_spec
)
)
if len(fields) == 1:
external = None
internal = posixpath.normpath(fields[0].strip())
else:
external = posixpath.normpath(fields[0].strip())
internal = posixpath.normpath(fields[1].strip())
mode = "rw"
if len(fields) == 3:
mode = fields[2]
container_mount_paths.append(internal)
if external and self._is_named_volume_unix(external):
named_volumes.append(external)
if mode != "rw":
raise ValueError(
"Named volumes can only have 'rw' mode, provided '{}'".format(
mode
)
)
else:
if not external:
# no internal container path given, assume the host path is the same as the internal path
external = internal
host_spec[external] = {
"bind": internal,
"mode": mode,
}
return named_volumes, container_mount_paths, host_spec
def deploy_flow(self, flow_run: GraphQLResult) -> str:
"""
Deploy flow runs on your local machine as Docker containers
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
Returns:
- str: Information about the deployment
Raises:
- ValueError: if deployment attempted on unsupported Storage type
"""
self.logger.info(
"Deploying flow run {}".format(flow_run.id) # type: ignore
)
# 'import docker' is expensive time-wise, we should do this just-in-time to keep
# the 'import prefect' time low
import docker
storage = StorageSchema().load(flow_run.flow.storage)
        if not isinstance(storage, Docker):
self.logger.error(
"Storage for flow run {} is not of type Docker.".format(flow_run.id)
)
raise ValueError("Unsupported Storage type")
env_vars = self.populate_env_vars(flow_run=flow_run)
if not self.no_pull and storage.registry_url:
self.logger.info("Pulling image {}...".format(storage.name))
pull_output = self.docker_client.pull(
storage.name, stream=True, decode=True
)
for line in pull_output:
self.logger.debug(line)
self.logger.info("Successfully pulled image {}...".format(storage.name))
# Create any named volumes (if they do not already exist)
for named_volume_name in self.named_volumes:
try:
self.docker_client.inspect_volume(name=named_volume_name)
except docker.errors.APIError:
self.logger.debug("Creating named volume {}".format(named_volume_name))
self.docker_client.create_volume(
name=named_volume_name,
driver="local",
labels={"prefect_created": "true"},
)
# Create a container
self.logger.debug("Creating Docker container {}".format(storage.name))
container_mount_paths = self.container_mount_paths
if not container_mount_paths:
host_config = None
else:
host_config = self.docker_client.create_host_config(binds=self.host_spec)
container = self.docker_client.create_container(
storage.name,
command="prefect execute cloud-flow",
environment=env_vars,
volumes=container_mount_paths,
host_config=host_config,
)
# Start the container
self.logger.debug(
"Starting Docker container with ID {}".format(container.get("Id"))
)
self.docker_client.start(container=container.get("Id"))
if self.show_flow_logs:
proc = multiprocessing.Process(
target=self.stream_container_logs,
kwargs={"container_id": container.get("Id")},
)
proc.start()
self.processes.append(proc)
self.logger.debug("Docker container {} started".format(container.get("Id")))
return "Container ID: {}".format(container.get("Id"))
def stream_container_logs(self, container_id: str) -> None:
"""
Stream container logs back to stdout
Args:
- container_id (str): ID of a container to stream logs
"""
for log in self.docker_client.logs(
container=container_id, stream=True, follow=True
):
print(str(log, "utf-8").rstrip())
def populate_env_vars(self, flow_run: GraphQLResult) -> dict:
"""
Populate metadata and variables in the environment variables for a flow run
Args:
- flow_run (GraphQLResult): A flow run object
Returns:
- dict: a dictionary representing the populated environment variables
"""
if "localhost" in config.cloud.api:
api = "http://host.docker.internal:{}".format(config.server.port)
else:
api = config.cloud.api
return {
"PREFECT__CLOUD__API": api,
"PREFECT__CLOUD__AUTH_TOKEN": config.cloud.agent.auth_token,
"PREFECT__CLOUD__AGENT__LABELS": str(self.labels),
"PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id, # type: ignore
"PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
"PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
"PREFECT__LOGGING__LEVEL": "DEBUG",
"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
**self.env_vars,
}
if __name__ == "__main__":
DockerAgent().start()
| 38.459259 | 121 | 0.576977 |
395dd362e484cffff38cfebf805c5a0d6370f7de | 4,340 | py | Python | tests/test_ress.py | gferraro2019/python-meegkit | aed858dc3603a3b71e620df3f29da6ae1a8f68da | ["BSD-3-Clause"] | null | null | null | tests/test_ress.py | gferraro2019/python-meegkit | aed858dc3603a3b71e620df3f29da6ae1a8f68da | ["BSD-3-Clause"] | null | null | null | tests/test_ress.py | gferraro2019/python-meegkit | aed858dc3603a3b71e620df3f29da6ae1a8f68da | ["BSD-3-Clause"] | null | null | null |
"""Test RESS."""
import matplotlib.pyplot as plt
import numpy as np
import pytest
import scipy.signal as ss
from meegkit import ress
from meegkit.utils import fold, rms, unfold, snr_spectrum, matmul3d
def create_data(n_times, n_chans=10, n_trials=20, freq=12, sfreq=250,
noise_dim=8, SNR=.8, t0=100, show=False):
"""Create synthetic data.
Returns
-------
noisy_data: array, shape=(n_times, n_channels, n_trials)
        Simulated data with oscillatory component starting at t0.
"""
# source
source = np.sin(2 * np.pi * freq * np.arange(n_times - t0) / sfreq)[None].T
s = source * np.random.randn(1, n_chans)
s = s[:, :, np.newaxis]
s = np.tile(s, (1, 1, n_trials))
signal = np.zeros((n_times, n_chans, n_trials))
signal[t0:, :, :] = s
# noise
noise = np.dot(
unfold(np.random.randn(n_times, noise_dim, n_trials)),
np.random.randn(noise_dim, n_chans))
noise = fold(noise, n_times)
# mix signal and noise
signal = SNR * signal / rms(signal.flatten())
noise = noise / rms(noise.flatten())
noisy_data = signal + noise
if show:
f, ax = plt.subplots(3)
ax[0].plot(signal[:, 0, 0], label='source')
ax[1].plot(noise[:, 1, 0], label='noise')
ax[2].plot(noisy_data[:, 1, 0], label='mixture')
ax[0].legend()
ax[1].legend()
ax[2].legend()
plt.show()
return noisy_data, signal
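# Illustrative call of create_data() above, using its defaults:
#   data, source = create_data(n_times=1000)
#   both arrays have shape (1000, 10, 20) == (samples, channels, trials), and the
#   12 Hz target component is only present from sample t0=100 onwards.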
@pytest.mark.parametrize('target', [12, 15, 20])
@pytest.mark.parametrize('n_trials', [16, 20])
@pytest.mark.parametrize('peak_width', [.5, 1])
@pytest.mark.parametrize('neig_width', [.5, 1])
@pytest.mark.parametrize('neig_freq', [.5, 1])
def test_ress(target, n_trials, peak_width, neig_width, neig_freq, show=False):
"""Test RESS."""
sfreq = 250
data, source = create_data(n_times=1000, n_trials=n_trials, freq=target,
sfreq=sfreq, show=False)
out = ress.RESS(data, sfreq=sfreq, peak_freq=target, neig_freq=neig_freq,
peak_width=peak_width, neig_width=neig_width)
nfft = 500
bins, psd = ss.welch(out.squeeze(1), sfreq, window="boxcar",
nperseg=nfft / (peak_width * 2),
noverlap=0, axis=0, average='mean')
# psd = np.abs(np.fft.fft(out, nfft, axis=0))
# psd = psd[0:psd.shape[0] // 2 + 1]
# bins = np.linspace(0, sfreq // 2, psd.shape[0])
# print(psd.shape)
# print(bins[:10])
psd = psd.mean(axis=-1, keepdims=True) # average over trials
snr = snr_spectrum(psd + psd.max() / 20, bins, skipbins=1, n_avg=2)
# snr = snr.mean(1)
if show:
f, ax = plt.subplots(2)
ax[0].plot(bins, snr, ':o')
ax[0].axhline(1, ls=':', c='grey', zorder=0)
ax[0].axvline(target, ls=':', c='grey', zorder=0)
ax[0].set_ylabel('SNR (a.u.)')
ax[0].set_xlabel('Frequency (Hz)')
ax[0].set_xlim([0, 40])
ax[0].set_ylim([0, 10])
ax[1].plot(bins, psd)
ax[1].axvline(target, ls=':', c='grey', zorder=0)
ax[1].set_ylabel('PSD')
ax[1].set_xlabel('Frequency (Hz)')
ax[1].set_xlim([0, 40])
# plt.show()
assert snr[bins == target] > 10
assert (snr[(bins <= target - 2) | (bins >= target + 2)] < 2).all()
# test multiple components
out, maps = ress.RESS(data, sfreq=sfreq, peak_freq=target,
neig_freq=neig_freq, peak_width=peak_width,
neig_width=neig_width, n_keep=1, return_maps=True)
_ = ress.RESS(data, sfreq=sfreq, peak_freq=target, n_keep=2)
_ = ress.RESS(data, sfreq=sfreq, peak_freq=target, n_keep=-1)
proj = matmul3d(out, maps.T)
assert proj.shape == data.shape
if show:
f, ax = plt.subplots(data.shape[1], 2, sharey='col')
for c in range(data.shape[1]):
ax[c, 0].plot(data[:, c].mean(-1), lw=.5, label='data')
ax[c, 1].plot(proj[:, c].mean(-1), lw=.5, label='projection')
if c < data.shape[1]:
ax[c, 0].set_xticks([])
ax[c, 1].set_xticks([])
ax[0, 0].set_title('Before')
ax[0, 1].set_title('After')
plt.legend()
plt.show()
if __name__ == '__main__':
import pytest
pytest.main([__file__])
# test_ress(12, 20, show=True)
| 34.444444 | 79 | 0.571659 |
e7628c834c7dd31643f557ddaf1fb25c44e70a4b | 7,266 | py | Python | bdantic/models/directives.py | jmgilman/bdantic | 3caa66d681da7a0cf0dbd6481c3f9005a8f2d8b9 | ["MIT"] | 3 | 2022-02-02T19:38:59.000Z | 2022-02-16T03:39:50.000Z | bdantic/models/directives.py | jmgilman/bdantic | 3caa66d681da7a0cf0dbd6481c3f9005a8f2d8b9 | ["MIT"] | null | null | null | bdantic/models/directives.py | jmgilman/bdantic | 3caa66d681da7a0cf0dbd6481c3f9005a8f2d8b9 | ["MIT"] | null | null | null |
"""Provides models for all beancount directives."""
from __future__ import annotations
from decimal import Decimal
from typing import Any, Dict, List, Literal, Optional, Set, Union
from beancount.core import data
from .base import Base, BaseDirective, Meta # noqa: F401
from .data import Account, Amount, Cost, CostSpec, Currency, Flag
class Balance(BaseDirective):
"""A model representing a `beancount.core.data.Balance`.
Attributes:
ty: A string literal identifying this model.
account: The account whose balance to check at the given date.
amount: The number of expected units for the account at the given date.
diff_amount: The difference between the expected and actual amounts.
tolerance: The amount of tolerance to use in the verification.
"""
_sibling = data.Balance
ty: Literal["Balance"] = "Balance"
account: Account
amount: Amount
tolerance: Optional[Decimal] = None
diff_amount: Optional[Amount] = None
class Close(BaseDirective):
"""A model representing a `beancount.core.data.Close`.
Attributes:
ty: A string literal identifying this model.
account: The name of the account being closed.
"""
_sibling = data.Close
ty: Literal["Close"] = "Close"
account: Account
class Commodity(BaseDirective):
"""A model representing a `beancount.core.data.Commodity`.
Attributes:
ty: A string literal identifying this model.
currency: The commodity under consideration.
"""
_sibling = data.Commodity
ty: Literal["Commodity"] = "Commodity"
currency: str
class Custom(BaseDirective):
"""A model representing a `beancount.core.data.Custom`.
Attributes:
ty: A string literal identifying this model.
type: The type of this custom directive.
values: A list of values of simple types supported by the grammar.
"""
_sibling = data.Custom
ty: Literal["Custom"] = "Custom"
type: str
values: List[Any]
class Document(BaseDirective):
"""A model representing a `beancount.core.data.Document`.
Attributes:
ty: A string literal identifying this model.
account: The account the document is associated with.
filename: The absolute filename of the document.
tags: A set of tag strings.
links: A set of link strings.
"""
_sibling = data.Document
ty: Literal["Document"] = "Document"
account: Account
filename: str
tags: Optional[Set] = None
links: Optional[Set] = None
class Event(BaseDirective):
"""A model representing a `beancount.core.data.Event`.
Attributes:
ty: A string literal identifying this model.
type: A unique string identifying this event.
description: The value of the above type at the given date.
"""
_sibling = data.Event
ty: Literal["Event"] = "Event"
type: str
description: str
class Note(BaseDirective):
"""A model representing a `beancount.core.data.Note`.
Attributes:
ty: A string literal identifying this model.
account: The account this note is attached to.
comment: The string contents of the note.
"""
_sibling = data.Note
ty: Literal["Note"] = "Note"
account: Account
comment: str
class Open(BaseDirective):
"""A model representing a `beancount.core.data.Open`.
Attributes:
ty: A string literal identifying this model.
account: The name of the account being opened.
currencies: Currencies that are allowed in this account.
booking: Booking method used to disambiguate postings to this account.
"""
_sibling = data.Open
ty: Literal["Open"] = "Open"
account: Account
currencies: Optional[List[Currency]] = None
booking: Optional[data.Booking] = None
class Pad(BaseDirective):
"""A model representing a `beancount.core.data.Pad`.
Attributes:
ty: A string literal identifying this model.
account: The name of the account which needs to be filled.
source_account: The name of the account used for debiting.
"""
_sibling = data.Pad
ty: Literal["Pad"] = "Pad"
account: Account
source_account: Account
class Posting(Base):
"""A model representing a `beancount.core.data.Posting`.
Attributes:
ty: A string literal identifying this model.
account: The account that is modified by this posting.
units: The units of the position.
cost: The cost of the position.
price: The optional price at which the position took place.
flag: An optional flag to associate with the posting.
meta: Optional metadata attached to the posting.
"""
_sibling = data.Posting
ty: Literal["Posting"] = "Posting"
account: Account
units: Optional[Amount] = None
cost: Optional[Union[Cost, CostSpec]] = None
price: Optional[Amount] = None
flag: Optional[str] = None
meta: Optional[Dict[str, Any]] = None
class Price(BaseDirective):
"""A model representing a `beancount.core.data.Price`.
Attributes:
ty: A string literal identifying this model.
currency: The currency that is being priced.
amount: The value of the currency.
"""
_sibling = data.Price
ty: Literal["Price"] = "Price"
currency: Currency
amount: Amount
class Query(BaseDirective):
"""A model representing a `beancount.core.data.Query`.
Attributes:
ty: A string literal identifying this model.
name: The unique identifier for the query.
query_string: The SQL query string to run or be made available.
"""
_sibling = data.Query
ty: Literal["Query"] = "Query"
name: str
query_string: str
class Transaction(BaseDirective):
"""A model representing a `beancount.core.data.Transaction`.
Attributes:
ty: A string literal identifying this model.
flag: A flag denoting the state of the transaction.
payee: The payee of the transaction.
narration: A description of the transaction.
tags: A set of tag strings.
links: A set of link strings.
postings: A list of postings attached to this transaction.
"""
_sibling = data.Transaction
ty: Literal["Transaction"] = "Transaction"
flag: Flag
payee: Optional[str] = None
narration: str
tags: Optional[Set[str]] = None
links: Optional[Set[str]] = None
postings: List[Posting]
class TxnPosting(Base):
"""A model representing a `beancount.core.data.TxnPosting`.
Attributes:
ty: A string literal identifying this model.
txn: The parent transaction instance.
posting: The posting instance.
"""
_sibling = data.TxnPosting
ty: Literal["TxnPosting"] = "TxnPosting"
txn: Transaction
posting: Posting
# Update forward references
Balance.update_forward_refs()
Close.update_forward_refs()
Commodity.update_forward_refs()
Custom.update_forward_refs()
Document.update_forward_refs()
Event.update_forward_refs()
Note.update_forward_refs()
Open.update_forward_refs()
Pad.update_forward_refs()
Price.update_forward_refs()
Query.update_forward_refs()
Transaction.update_forward_refs()
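# Rough usage sketch (values are illustrative; the required fields inherited from
# BaseDirective, such as the entry date and metadata, are defined in .base and not shown here):
#   Posting(account="Assets:Cash")  # `account` is the only required Posting field above
#   Transaction(flag="*", narration="example", postings=[...], ...)  # plus the BaseDirective fields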
| 26.713235 | 79 | 0.674099 |
f94354f8424f7b1fb3f4379a87cef82422d7644b | 940 | py | Python | coldtype/animation/timeline.py | beesandbombs/coldtype | d02c7dd36bf1576fa37dc8c50d5c1a6e47b1c5ea | ["Apache-2.0"] | 1 | 2021-04-04T15:25:06.000Z | 2021-04-04T15:25:06.000Z | coldtype/animation/timeline.py | beesandbombs/coldtype | d02c7dd36bf1576fa37dc8c50d5c1a6e47b1c5ea | ["Apache-2.0"] | null | null | null | coldtype/animation/timeline.py | beesandbombs/coldtype | d02c7dd36bf1576fa37dc8c50d5c1a6e47b1c5ea | ["Apache-2.0"] | null | null | null |
from coldtype.animation import Timeable
class Timeline(Timeable):
__name__ = "Generic"
def __init__(self, duration, fps=30, storyboard=None, tracks=None):
self.fps = fps
self.start = 0
self.end = duration
self.tracks = tracks or []
if not storyboard:
self.storyboard = [0]
else:
self.storyboard = storyboard
if len(self.storyboard) == 0:
self.storyboard.append(0)
self.storyboard.sort()
def __str__(self):
return "<coldtype.animation.timeline({:s}):{:04d}f@{:02.2f}fps[{:s}]>".format(self.__name__, self.duration, self.fps, ",".join([str(i) for i in self.storyboard]))
def __getitem__(self, item):
if isinstance(item, str):
for t in self.tracks:
if hasattr(t, "name") and t.name == item:
return t
else:
            return self.tracks[item]
 | 32.413793 | 170 | 0.56383 |
894444a88ab8f3dfc1c4be9a3046bc6cbafb1f73 | 2,088 | py | Python | empirical/bay-area/edge_report.py | veg/edge-filtering | dc1c214efd5b3203c12b87ff1acfd79a9ed97b97 | ["MIT"] | null | null | null | empirical/bay-area/edge_report.py | veg/edge-filtering | dc1c214efd5b3203c12b87ff1acfd79a9ed97b97 | ["MIT"] | 5 | 2019-05-21T13:22:38.000Z | 2019-10-01T19:35:48.000Z | empirical/bay-area/edge_report.py | veg/edge-filtering | dc1c214efd5b3203c12b87ff1acfd79a9ed97b97 | ["MIT"] | null | null | null |
import json
def get_json(fn):
with open(fn) as f:
return json.loads(f.read())
def consolidate_edge_reports(pairs, input, output):
report = {}
# for each edge report, create key based on filename
for pair, fn in zip(pairs, input):
report[pair] = get_json(fn)
with open(output, 'w') as jsonfile:
json.dump(report, jsonfile)
return
def generate_edge_report(test_filter_results, no_filter_results):
    report = {}
    # How many clusters are there?
    num_clusters = len(test_filter_results["Cluster sizes"])
    num_edges = test_filter_results["Network Summary"]["Edges"]
    num_nodes = test_filter_results["Network Summary"]["Nodes"]
    # edges removed by filtering, measured against the unfiltered network
    num_edges_removed = no_filter_results["Network Summary"]["Edges"] - num_edges
    report['num_edges'] = num_edges
    report['num_nodes'] = num_nodes
    report['num_edges_removed'] = num_edges_removed
    edges = [tuple(e["sequences"]) for e in test_filter_results["Edges"]]
    flipped_edges = [tuple(reversed(e)) for e in edges]
    return report
def edge_report(results_json, no_filter_json, cycle_json, cycle_report, output_fn):
## has been filtered ##
results = ''
## has NOT been filtered ##
no_filter_results = ''
with open(results_json) as f:
results = json.loads(f.read())
with open(no_filter_json) as f:
no_filter_results = json.loads(f.read())
with open(cycle_json) as f:
filter_cycle_results = json.loads(f.read())
# Only read the first line of the cycle report
with open(cycle_report) as f:
cycle_report = json.loads(f.readline())
results = results["trace_results"]
no_filter_results = no_filter_results["trace_results"]
filter_cycle_results = filter_cycle_results["trace_results"]
report = {}
    report['filter-report'] = generate_edge_report(results, no_filter_results)
report['cycles'] = cycle_report['cycles']
    report['cycle-report'] = generate_edge_report(filter_cycle_results, no_filter_results)
with open(output_fn, 'w') as jsonfile:
json.dump(report, jsonfile)
return
| 28.216216 | 110 | 0.689655 |
307e101d15efd65ac9b175b63778982dad76a08c | 2,101 | py | Python | parlai/scripts/display_data.py | kifish/ParlAI | 93a0f31f3d6b03a97c1a081927427dbe1eb1242e | ["MIT"] | 55 | 2020-09-16T02:11:28.000Z | 2022-01-27T01:03:19.000Z | parlai/scripts/display_data.py | kifish/ParlAI | 93a0f31f3d6b03a97c1a081927427dbe1eb1242e | ["MIT"] | 14 | 2020-03-13T19:08:56.000Z | 2020-05-12T07:38:41.000Z | parlai/scripts/display_data.py | kifish/ParlAI | 93a0f31f3d6b03a97c1a081927427dbe1eb1242e | ["MIT"] | 7 | 2020-09-21T14:06:27.000Z | 2021-07-20T10:01:32.000Z |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Basic example which iterates through the tasks specified and prints them out. Used for
verification of data loading and iteration.
For example, to make sure that bAbI task 1 (1k exs) loads, one can run the
command below and see a few of the examples:
Examples
--------
.. code-block:: shell
python display_data.py -t babi:task1k:1
"""
from parlai.core.params import ParlaiParser
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.worlds import create_task
import random
def setup_args(parser=None):
if parser is None:
parser = ParlaiParser(True, True, 'Display data from a task')
# Get command line arguments
parser.add_argument('-n', '-ne', '--num-examples', type=int, default=10)
parser.add_argument('-mdl', '--max-display-len', type=int, default=1000)
parser.add_argument('--display-ignore-fields', type=str, default='agent_reply')
parser.set_defaults(datatype='train:stream')
return parser
def display_data(opt):
# create repeat label agent and assign it to the specified task
agent = RepeatLabelAgent(opt)
world = create_task(opt, agent)
# Show some example dialogs.
for _ in range(opt['num_examples']):
world.parley()
# NOTE: If you want to look at the data from here rather than calling
# world.display() you could access world.acts[0] directly
print(world.display() + '\n~~')
if world.epoch_done():
print('EPOCH DONE')
break
try:
# print dataset size if available
print(
'[ loaded {} episodes with a total of {} examples ]'.format(
world.num_episodes(), world.num_examples()
)
)
except Exception:
pass
if __name__ == '__main__':
random.seed(42)
# Get command line arguments
parser = setup_args()
opt = parser.parse_args()
display_data(opt)
| 28.391892 | 86 | 0.669681 |
62b85f2836287c77a1c2f3599adbf96518d0dbcc | 11,873 | py | Python | models/mongodb/basemodel.py | pythononwheels/redmonty | 6255bb0c48575e29c0234a143ad05eba72a6b8c6 | ["MIT"] | 3 | 2019-09-29T07:05:00.000Z | 2019-11-13T06:50:33.000Z | models/mongodb/basemodel.py | pythononwheels/redmonty | 6255bb0c48575e29c0234a143ad05eba72a6b8c6 | ["MIT"] | 2 | 2019-09-28T21:10:13.000Z | 2019-09-28T21:13:20.000Z | models/mongodb/basemodel.py | pythononwheels/redmonty | 6255bb0c48575e29c0234a143ad05eba72a6b8c6 | ["MIT"] | null | null | null |
from redmonty.database.mongodblib import db, client
from redmonty.powlib import pluralize
import datetime
import xmltodict
import simplejson as json
import datetime, decimal
from redmonty.config import myapp
from redmonty.powlib import merge_two_dicts
from redmonty.encoders import pow_json_serializer
from redmonty.models.modelobject import ModelObject
from bson.json_util import dumps
import pymongo
import uuid
class MongoBaseModel(ModelObject):
"""
The Raw BaseModel Class
"""
def init_on_load(self, *args, **kwargs):
"""
basic setup for all mongoDB models.
"""
#print("executin init_on_load")
super().init_on_load()
self.basic_schema = {
"id" : { "type" : "string", "default" : None },
"_uuid" : { "type" : "string", "default" : None },
"created_at" : { "type" : "datetime", "default" : None },
"last_updated" : { "type" : "datetime", "default" : None },
}
#create an index for our own id field.
self.setup_instance_schema()
#
# if there is a schema (cerberus) set it in the instance
#
# if "schema" in self.__class__.__dict__:
# #print(" .. found a schema for: " +str(self.__class__.__name__) + " in class dict")
# self.schema = merge_two_dicts(
# self.__class__.__dict__["schema"],
# self.__class__.basic_schema)
#print(" .. Schema is now: " + str(self.schema))
# setup the instance attributes from schema
#for key in self.schema.keys():
# if self.schema[key].get("default", None) != None:
# setattr(self,key,self.schema[key].get("default"))
# self.schema[key].pop("default", None)
# else:
# #print("no default for: " + str(self.schema[key]))
# setattr(self, key, None)
self.setup_instance_values()
#
# setup values from kwargs or from init_from_<format> if format="someformat"
# example: m = Model( data = { 'test' : 1 }, format="json")
# will call m.init_from_json(data)
#
if "format" in kwargs:
# set the format and call the according init_from_<format> method
# which initializes the instance with the given vaules (from data)
# e.g. Model(format=json, data={data})
f = getattr(self, "init_from_" + kwargs["format"], None)
if f:
f(kwargs)
else:
# initializes the instanmce with the given kwargs values:
# e.g.: Model(test="sometext", title="sometitle")
for key in kwargs.keys():
#if key in self.__class__.__dict__:
if key in self.schema:
setattr(self, key, kwargs[key])
self.table = db[pluralize(self.__class__.__name__.lower())]
self.collection = self.table
self.table.create_index([('id', pymongo.ASCENDING)], unique=True)
self.tablename = pluralize(self.__class__.__name__.lower())
#self.table = self.__class__.table
self._id = None
self.id = str(uuid.uuid4())
self._uuid = self.id
#print("new id is: " + self.id)
self.init_observers()
#self.setup_dirty_model()
#
# These Methods should be implemented by every subclass
#
def get(self, name):
return getattr(self,name)
def to_json(self):
""" just dump to json formatted string
parameter: res must be pymongo cursor.
Example: res = self.table.find()
"""
# uses bson.json_util dumps
from bson.json_util import DEFAULT_JSON_OPTIONS
DEFAULT_JSON_OPTIONS.datetime_representation = 2
return dumps(self.to_dict())
# def init_from_json(self, data, ignore=False):
# """
# makes a py dict from input json and
# sets the instance attributes
# """
# from bson.json_util import loads
# print(data)
# try:
# d=loads(data)
# except Exception as e:
# print("Ex1 : " + str(e))
# try:
# d=loads(data.decode("utf-8") )
# except Exception as e:
# print("E2: " + str(e))
# raise e
# print(d)
# print(str(type(d)))
# return self.init_from_dict(d, ignore)
def json_result_to_object(self, res):
"""
returns a list of objects from a given json list (string)
"""
raise NotImplementedError("Subclasses should overwrite this Method.")
def _get_next_object(self, cursor):
"""
return a generator that creates a Model object
for each next call.
"""
for elem in cursor:
m=self.__class__()
m.init_from_dict(elem)
yield m
def _return_find(self, res):
"""
returns a list of models from a given cursor.
parameter: res can be pymongo cursor or is handled as a single document (dict).
Example: res = self.table.find()
returns: a sinlge Model or a [Models]
"""
# single result element
if not isinstance(res, (pymongo.cursor.Cursor)):
m=self.__class__()
m.init_from_dict(res)
#print("returning: " +str(m))
#print(" type: " + str(type(m)))
return m
# return the generator function.
return self._get_next_object(res)
        # handle cursor (many result elements)
# reslist = []
# for elem in res:
# m=self.__class__()
# m.init_from_dict(elem)
# #print("appending: " +str(m))
# #print(" type: " + str(type(m)))
# reslist.append(m)
# return reslist
def print_full(self):
""" Subclasses should overwrite this Method.
prints every attribute including related objects in FULL
lenghty but you see everything.
"""
raise NotImplementedError("Subclasses should overwrite this Method.")
def json_load_from_db(self, data, keep_id=False):
""" refresh the object from db and return json """
raise NotImplementedError("Subclasses should overwrite this Method.")
def print_db_schema(self):
""" Subclasses should overwrite this Method.
Shows the schema as returned by the db
"""
raise NotImplementedError("Subclasses should overwrite this Method.")
def get_relationships(self):
""" Subclasses should overwrite this Method.
Shows all related classes
"""
raise NotImplementedError("Subclasses should overwrite this Method.")
def get_relations(self):
""" Subclasses should overwrite this Method.
Shows all related classes
"""
raise NotImplementedError("Subclasses should overwrite this Method.")
def create_table(self):
"""
create the physical table in the DB
"""
raise NotImplementedError("creat_table is not implemented, yet")
def drop_table(self):
"""
drop the physical table in the DB
"""
raise NotImplementedError("drop_table is not implemented, yet.")
def upsert(self):
""" insert or update intelligently """
#self.last_updated = datetime.datetime.utcnow().strftime(myapp["datetime_format"])
if self.observers_initialized:
for observer in self.observers:
try:
observer.before_upsert(self)
except:
pass
        self.last_updated = datetime.datetime.utcnow()
if self._id == None:
#print("** insert **")
# insert. so set created at
self.created_at = datetime.datetime.utcnow().strftime(myapp["datetime_format"])
self.last_updated = self.created_at
ior = self.table.insert_one(self.to_dict())
self._id = ior.inserted_id
            # clean dirty marks before returning
            self.dirty = {}
            self.is_dirty = False
            return self._id
else:
# update
#print("** update **")
#print(self.to_dict())
self.last_updated = datetime.datetime.utcnow().strftime(myapp["datetime_format"])
ior = self.table.update_one({"_id" : self._id}, {"$set": self.to_dict()}, upsert=False )
            # clean dirty marks before returning
            self.dirty = {}
            self.is_dirty = False
            return ior
def delete(self, filter=None, many=False):
""" delete item """
if filter == None:
filter = {"id" : self.id }
# clean dirty marks
self.dirty = {}
self.is_dirty = False
if not many:
return self.table.delete_one(filter)
else:
return self.table.delete_many(filter)
def find_by_id(self, id, use_object_id=False):
""" return result by id (only)
parameter: use_object_id if true find by MongoDB ObjectID
else use the PoW id (uuid4)
"""
if use_object_id:
return self.find_one({"_id": id})
else:
return self.find_one({"id": id})
def from_statement(self, statement):
""" execute a given DB statement raw """
raise NotImplementedError("from_statement is not available for mongoDB.")
def page(self, filter={}, page=0, page_size=None):
""" return the next page of results. See config["myapp"].page_size
            actually means (in SQL terms):
page === offset
limit === limit
"""
if page_size == None:
page_size = myapp["page_size"]
return self._return_find(self.table.find(filter).skip(page*page_size).limit(page_size))
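    # Illustrative paging: `page` is a page index, not a raw document offset, so
    #   model.page(page=2, page_size=50) skips 2 * 50 documents and returns the next 50.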
def find(self,filter={}, raw=False):
""" Find something given a query or criterion
filter = { "key" : value, ..}
"""
#print("Find parameter:" + str(filter))
if raw:
return self.table.find(filter)
return self._return_find(self.table.find(filter))
def find_all(self, filter=None, raw=False, limit=0, offset=0):
""" Find something given a query or criterion and parameters """
if (limit>0) or (offset>0):
            # page() takes a page index and a page size; map offset/limit onto them
            return self.page(filter=filter, page=offset, page_size=limit)
else:
return self.find(filter)
def get_all(self):
""" just a synonym for find_all . but without any filters or limits. """
return self.find_all()
def find_one(self, filter={}):
""" find only one result. Raise Excaption if more than one was found"""
res = self.table.find_one(filter)
if res != None:
return self._return_find(res)
else:
return None
def find_first(self, filter={}):
""" return the first hit, or None"""
raise NotImplementedError("Not available for MongoDB")
def q(self):
""" return a raw query so the user can do
everything the DB offers without limitations
for sqlalchemy: return session.query(self.__class__)
for elastic: return Q
for tinyDB return Query
for MongoDB: not implemented
"""
return self.table
| 35.547904 | 100 | 0.556388 |
4d092f34372b8d9ab7753e8e30f2e7f5a4c6bd01 | 3,139 | py | Python | data/p2DJ/New/R2/benchmark/startQiskit_QC127.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p2DJ/New/R2/benchmark/startQiskit_QC127.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | data/p2DJ/New/R2/benchmark/startQiskit_QC127.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
# qubit number=2
# total number=10
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
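# For the f defined in __main__ below (f(rep) = rep[-1]) and n = 2, this oracle flips
# the target qubit exactly for the inputs '01' and '11', i.e. whenever the last bit is 1.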
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=4
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.x(input_qubit[1]) # number=2
prog.cx(input_qubit[0],input_qubit[1]) # number=7
prog.x(input_qubit[1]) # number=8
prog.cx(input_qubit[0],input_qubit[1]) # number=9
prog.cx(input_qubit[1],input_qubit[0]) # number=5
prog.cx(input_qubit[1],input_qubit[0]) # number=6
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_QC127.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 28.279279 | 82 | 0.624721 |
0d88a3153bdafa22f3dce0d5ed68558fa642612d | 3,440 | py | Python | deprecated/acnet/acnet_fusion.py | sysu-shey/ACNet | 6d967d3fff2d79a37f85799b78a21ffbd9001bd2 | ["MIT"] | 767 | 2019-10-08T01:32:47.000Z | 2022-03-27T14:39:44.000Z | deprecated/acnet/acnet_fusion.py | ShawnDing1994/ACNet | 9586a269d7065805aafb8f1d69d425e84cec55f1 | ["MIT"] | 48 | 2019-11-04T12:05:15.000Z | 2021-11-28T06:50:30.000Z | deprecated/acnet/acnet_fusion.py | ShawnDing1994/ACNet | 9586a269d7065805aafb8f1d69d425e84cec55f1 | ["MIT"] | 140 | 2019-10-29T07:49:24.000Z | 2022-03-27T13:01:22.000Z |
from utils.misc import read_hdf5, save_hdf5
import numpy as np
SQUARE_KERNEL_KEYWORD = 'square_conv.weight'
def _fuse_kernel(kernel, gamma, std):
b_gamma = np.reshape(gamma, (kernel.shape[0], 1, 1, 1))
b_gamma = np.tile(b_gamma, (1, kernel.shape[1], kernel.shape[2], kernel.shape[3]))
b_std = np.reshape(std, (kernel.shape[0], 1, 1, 1))
b_std = np.tile(b_std, (1, kernel.shape[1], kernel.shape[2], kernel.shape[3]))
return kernel * b_gamma / b_std
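# BatchNorm folding used above: for each output channel c, the affine transform
#   y_c = gamma_c * (conv_c(x) - mean_c) / std_c + beta_c
# is absorbed into the convolution, giving a fused kernel of kernel_c * gamma_c / std_c
# and (in convert_acnet_weights below) a fused bias of beta_c - mean_c * gamma_c / std_c,
# summed over the square, vertical and horizontal branches.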
def _add_to_square_kernel(square_kernel, asym_kernel):
asym_h = asym_kernel.shape[2]
asym_w = asym_kernel.shape[3]
square_h = square_kernel.shape[2]
square_w = square_kernel.shape[3]
square_kernel[:, :, square_h // 2 - asym_h // 2: square_h // 2 - asym_h // 2 + asym_h,
square_w // 2 - asym_w // 2 : square_w // 2 - asym_w // 2 + asym_w] += asym_kernel
def convert_acnet_weights(train_weights, deploy_weights, eps):
train_dict = read_hdf5(train_weights)
print(train_dict.keys())
deploy_dict = {}
square_conv_var_names = [name for name in train_dict.keys() if SQUARE_KERNEL_KEYWORD in name]
for square_name in square_conv_var_names:
square_kernel = train_dict[square_name]
square_mean = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'square_bn.running_mean')]
square_std = np.sqrt(train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'square_bn.running_var')] + eps)
square_gamma = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'square_bn.weight')]
square_beta = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'square_bn.bias')]
ver_kernel = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'ver_conv.weight')]
ver_mean = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'ver_bn.running_mean')]
ver_std = np.sqrt(train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'ver_bn.running_var')] + eps)
ver_gamma = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'ver_bn.weight')]
ver_beta = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'ver_bn.bias')]
hor_kernel = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'hor_conv.weight')]
hor_mean = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'hor_bn.running_mean')]
hor_std = np.sqrt(train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'hor_bn.running_var')] + eps)
hor_gamma = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'hor_bn.weight')]
hor_beta = train_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'hor_bn.bias')]
fused_bias = square_beta + ver_beta + hor_beta - square_mean * square_gamma / square_std \
- ver_mean * ver_gamma / ver_std - hor_mean * hor_gamma / hor_std
fused_kernel = _fuse_kernel(square_kernel, square_gamma, square_std)
_add_to_square_kernel(fused_kernel, _fuse_kernel(ver_kernel, ver_gamma, ver_std))
_add_to_square_kernel(fused_kernel, _fuse_kernel(hor_kernel, hor_gamma, hor_std))
deploy_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'fused_conv.weight')] = fused_kernel
deploy_dict[square_name.replace(SQUARE_KERNEL_KEYWORD, 'fused_conv.bias')] = fused_bias
for k, v in train_dict.items():
if 'hor_' not in k and 'ver_' not in k and 'square_' not in k:
deploy_dict[k] = v
save_hdf5(deploy_dict, deploy_weights)
| 54.603175 | 122 | 0.71686 |
35c67c703ecbea0c8f7d87d5dfb7d15c31babdce | 3,290 | py | Python | app.py | aws-samples/aws-autonomous-driving-data-lake-ros-bag-scene-detection-pipeline | 2e4011f0964becd6b5507c70eb298b90399acc42 | ["MIT-0"] | 6 | 2021-07-30T17:46:54.000Z | 2022-03-01T12:23:03.000Z | app.py | aws-samples/aws-autonomous-driving-data-lake-ros-bag-scene-detection-pipeline | 2e4011f0964becd6b5507c70eb298b90399acc42 | ["MIT-0"] | null | null | null | app.py | aws-samples/aws-autonomous-driving-data-lake-ros-bag-scene-detection-pipeline | 2e4011f0964becd6b5507c70eb298b90399acc42 | ["MIT-0"] | 1 | 2022-03-01T08:46:24.000Z | 2022-03-01T08:46:24.000Z |
#!/usr/bin/env python3
from aws_cdk import core
from infrastructure.ecs_stack import Fargate
import os
import json
from infrastructure.emr_launch.cluster_definition import EMRClusterDefinition
from infrastructure.emr_orchestration.stack import StepFunctionStack
from infrastructure.emr_trigger.stack import EmrTriggerStack
# Load config
project_dir = os.path.dirname(os.path.abspath(__file__))
app = core.App()
config = app.node.try_get_context('config')
stack_id = config["stack-id"]
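# Expected shape of the "config" context (keys are the ones looked up in this file;
# the values shown are placeholders only):
#   {
#     "stack-id": "...",
#     "fargate": {"image-name": "...", "ecr-repository-name": "...", "cpu": 2048,
#                 "memory-limit-mib": 4096, "timeout-minutes": 30, "s3-filters": [...],
#                 "environment-variables": {...}, "input-bucket-name": "...",
#                 "output-bucket-name": "...", "topics-to-extract": [...], "glue-db-name": "..."},
#     "emr": {"CLUSTER_NAME": "...", "MASTER_INSTANCE_TYPE": "...", "CORE_INSTANCE_TYPE": "...",
#             "CORE_INSTANCE_COUNT": 1, "CORE_INSTANCE_MARKET": "...", "TASK_INSTANCE_TYPE": "...",
#             "TASK_INSTANCE_COUNT": 1, "TASK_INSTANCE_MARKET": "...", "RELEASE_LABEL": "...",
#             "APPLICATIONS": [...], "CONFIGURATION": {...}}
#   }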
def fargate(config, stack_id):
image_name = config["image-name"]
ecr_repository_name = config["ecr-repository-name"]
cpu = config["cpu"]
memory_limit_mib = config["memory-limit-mib"]
timeout_minutes = config["timeout-minutes"]
s3_filters = config["s3-filters"]
default_environment_vars = config["environment-variables"]
input_bucket_name = config["input-bucket-name"]
output_bucket_name = config["output-bucket-name"]
topics_to_extract = ",".join(config["topics-to-extract"])
fargate_stack = Fargate(
app,
stack_id,
image_name=image_name,
environment_vars=default_environment_vars,
ecr_repository_name=ecr_repository_name,
cpu=cpu,
memory_limit_mib=memory_limit_mib,
timeout_minutes=timeout_minutes,
s3_filters=s3_filters,
input_bucket_name=input_bucket_name,
output_bucket_name=output_bucket_name,
topics_to_extract=topics_to_extract,
glue_db_name=config["glue-db-name"],
)
return fargate_stack
def emr(config, input_buckets: [str]):
environment_variables = [
"CLUSTER_NAME",
"MASTER_INSTANCE_TYPE",
"CORE_INSTANCE_TYPE",
"CORE_INSTANCE_COUNT",
"CORE_INSTANCE_MARKET",
"TASK_INSTANCE_TYPE",
"TASK_INSTANCE_COUNT",
"TASK_INSTANCE_MARKET",
"RELEASE_LABEL",
"APPLICATIONS",
"CONFIGURATION",
]
list_vars = ["APPLICATIONS"]
int_vars = [
"CORE_INSTANCE_COUNT",
"TASK_INSTANCE_COUNT",
]
json_vars = ["CONFIGURATION"]
clean_config = {"INPUT_BUCKETS": input_buckets}
for v in environment_variables:
val = config[v]
clean_config[v] = val
return EMRClusterDefinition(
app, id=config["CLUSTER_NAME"] + "-stack", config=clean_config
)
fargate_stack = fargate(config["fargate"], stack_id)
print("Output bucket: " + fargate_stack.output_bucket.bucket_arn)
emr_cluster_stack = emr(
config["emr"], input_buckets=[fargate_stack.output_bucket.bucket_arn]
)
emr_orchestration_stack = StepFunctionStack(
app,
id=f"{stack_id}-emr-orchestration",
emr_launch_stack=emr_cluster_stack,
artifact_bucket=emr_cluster_stack.artifact_bucket,
synchronized_bucket=emr_cluster_stack.synchronized_bucket,
scenes_bucket=emr_cluster_stack.scenes_bucket,
glue_db_name=config["fargate"]["glue-db-name"],
)
emr_trigger_stack = EmrTriggerStack(
app,
id=f"{stack_id}-emr-trigger",
target_step_function_arn=emr_orchestration_stack.state_machine.state_machine_arn,
source_bucket_sns=fargate_stack.new_files_topic,
dynamo_table=emr_orchestration_stack.dynamo_table,
num_rosbag_topics=len(config["fargate"]["topics-to-extract"]),
)
app.synth()
| 28.119658 | 85 | 0.718237 |
5311e34d65435b5eb4f8a76dc0b4c88c6326a2bf | 8,776 | py | Python | downloaded_kernels/house_sales/converted_notebooks/kernel_30.py | josepablocam/common-code-extraction | a6978fae73eee8ece6f1db09f2f38cf92f03b3ad | ["MIT"] | null | null | null | downloaded_kernels/house_sales/converted_notebooks/kernel_30.py | josepablocam/common-code-extraction | a6978fae73eee8ece6f1db09f2f38cf92f03b3ad | ["MIT"] | null | null | null | downloaded_kernels/house_sales/converted_notebooks/kernel_30.py | josepablocam/common-code-extraction | a6978fae73eee8ece6f1db09f2f38cf92f03b3ad | ["MIT"] | 2 | 2021-07-12T00:48:08.000Z | 2021-08-11T12:53:05.000Z |
#!/usr/bin/env python
# coding: utf-8
# # Feature engineering for a score .99 using CatBoost and geohash clus
# ## This is achieved with the following assumptions
# 1. Price is always predicted for future date. Thus train with 80% is data from available dates and we are trying to predict for the future dates
# 2. We calculate price per squarefeet for given location for train data. Additional features derived also will be calculated in train data and copied to test data. This prevents data leaks from test to train. This in general is a known fact when anyone tries to buy a property.
#
# Feature ranking is adapted from Anisotropic (https://www.kaggle.com/arthurtok/feature-ranking-rfe-random-forest-linear-models)
# In[ ]:
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.relativedelta import relativedelta
#import geohash
from catboost import CatBoostRegressor
import catboost
# # Feature engineering helpers
# In[ ]:
def returnYear(row):
if row['yr_renovated']!=0:
return datetime.strptime(str(row['yr_renovated']),'%Y')
else:
return row['yr_built']
def deltaInYearsAge(row):
difference = relativedelta(row['date'], row['yr_built'])
years = difference.years
return years
def deltaInYearsRenovated(row):
difference = relativedelta(row['yr_renovated'], row['yr_built'])
years = difference.years
return years
# # Since kaggle does not support geohash libraries, using one from git
# Original source https://github.com/vinsci/geohash/blob/master/Geohash/geohash.py
# The libraies gave a much better result of 0.96
# In[ ]:
from math import log10
__base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
def geohashEncode(latitude, longitude, precision=12):
"""
Encode a position given in float arguments latitude, longitude to
a geohash which will have the character count precision.
"""
lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)
geohash = []
bits = [ 16, 8, 4, 2, 1 ]
bit = 0
ch = 0
even = True
while len(geohash) < precision:
if even:
mid = (lon_interval[0] + lon_interval[1]) / 2
if longitude > mid:
ch |= bits[bit]
lon_interval = (mid, lon_interval[1])
else:
lon_interval = (lon_interval[0], mid)
else:
mid = (lat_interval[0] + lat_interval[1]) / 2
if latitude > mid:
ch |= bits[bit]
lat_interval = (mid, lat_interval[1])
else:
lat_interval = (lat_interval[0], mid)
even = not even
if bit < 4:
bit += 1
else:
geohash += __base32[ch]
bit = 0
ch = 0
return ''.join(geohash)
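# A precision-4 geohash cell is roughly 39 km x 19.5 km, so grouping listings by
# geohashEncode(lat, long, precision=4) (as done below) pools sales from a wide
# neighborhood when computing the price-per-sqft statistics.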
# # Load Data and define the target variable
# In[ ]:
house = pd.read_csv('../input/housesalesprediction/kc_house_data.csv')
print (house.shape)
house.drop_duplicates('id',inplace=True)
print(house.shape)
targetVariableColumnName = 'price'
# In[ ]:
house.columns
# # creating features based on location. Geohash with different accuracies is handy for clustering/grouping
# In[ ]:
house['date'] = pd.to_datetime(house['date'])
house.sort_values('date',inplace=True)
house['yr_built'] = house.yr_built.apply(lambda x:datetime.strptime(str(x),'%Y') )
house['yr_renovated'] = house.apply(returnYear,axis=1)
house['age']=house.apply(deltaInYearsAge,axis=1)
house['renovatedAge']=house.apply(deltaInYearsRenovated,axis=1)
house['geohash']=house.apply(lambda points: geohashEncode(points.lat, points.long,precision=4),axis = 1)
house['pricepersqft']=house['price']/house['sqft_living']
# In[ ]:
house.shape[0]*0.8
# In[ ]:
train = house.head(17148)
# # Group-by features to capture neighborhood price statistics
# In[ ]:
train=train.join(train.groupby(['geohash'])['pricepersqft'].mean(),on='geohash',rsuffix='priceaverage600m')
train=train.join(train.groupby(['geohash'])['pricepersqft'].min(),on='geohash',rsuffix='pricemin600m')
train=train.join(train.groupby(['geohash'])['pricepersqft'].max(),on='geohash',rsuffix='pricemax600m')
# In[ ]:
pd.set_option('display.float_format', lambda x: '%.0f' % x)
print (train.shape)
train.drop_duplicates('id',inplace=True)
print (train.shape)
train.describe().T
# In[ ]:
test = house.tail(4465)
test.to_csv('original_test.csv')
currentIds=set(test['id'].values)
print (test.shape)
test=pd.merge(test, train[['geohash','pricepersqftpriceaverage600m','pricepersqftpricemin600m', 'pricepersqftpricemax600m']], on="geohash")
test.drop_duplicates('id',inplace=True)
test.to_csv('merged_test.csv')
currentIds1=set(test['id'].values)
print (currentIds.difference(currentIds1))
print (test.shape)
# # Now drop the columns already covered by the added features
# zip code, lat and long are covered by the additional location-based features
# year renovated and year built are captured as age and renovated age
# other dropped columns: log price, geohash, ...
# In[ ]:
columns=list(train.columns.values)
columns.remove(targetVariableColumnName)
columns=[item for item in columns if item not in ['zipcode', 'lat','long','id','yr_renovated','yr_built','date','geohash','geohash_70m','Log_price']]
print (columns)
# # Feature ranking
#
# In[ ]:
# First extract the target variable which is our House prices
Y = train.price.values
# Drop price from the house dataframe and create a matrix out of the house data
X = train[columns].as_matrix()
# Store the column/feature names into a list "colnames"
colnames = columns
# In[ ]:
ranks = {}
# Create our function which stores the feature rankings to the ranks dictionary
def ranking(ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
ranks = map(lambda x: round(x,2), ranks)
return dict(zip(names, ranks))
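# Worked example of the scaling above (illustrative):
#   ranking([3, 1, 2], ['a', 'b', 'c']) -> {'a': 1.0, 'b': 0.0, 'c': 0.5}
# i.e. raw scores are min-max scaled to [0, 1] and rounded to two decimals.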
# In[ ]:
from sklearn.feature_selection import RFE, f_regression
from sklearn.linear_model import (LinearRegression, Ridge, Lasso, RandomizedLasso)
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
# In[ ]:
rf = RandomForestRegressor(n_jobs=-1, n_estimators=50, verbose=3)
rf.fit(X,Y)
ranks["RF"] = ranking(rf.feature_importances_, colnames);
# In[ ]:
# Finally let's run our Selection Stability method with Randomized Lasso
rlasso = RandomizedLasso(alpha=0.04)
rlasso.fit(X, Y)
ranks["rlasso/Stability"] = ranking(np.abs(rlasso.scores_), colnames)
print('finished')
# In[ ]:
# Construct our Linear Regression model
lr = LinearRegression(normalize=True)
lr.fit(X,Y)
#stop the search when only the last feature is left
rfe = RFE(lr, n_features_to_select=1, verbose =3 )
rfe.fit(X,Y)
ranks["RFE"] = ranking(list(map(float, rfe.ranking_)), colnames, order=-1)
# In[ ]:
# Using Linear Regression
lr = LinearRegression(normalize=True)
lr.fit(X,Y)
ranks["LinReg"] = ranking(np.abs(lr.coef_), colnames)
# Using Ridge
ridge = Ridge(alpha = 7)
ridge.fit(X,Y)
ranks['Ridge'] = ranking(np.abs(ridge.coef_), colnames)
# Using Lasso
lasso = Lasso(alpha=.05)
lasso.fit(X, Y)
ranks["Lasso"] = ranking(np.abs(lasso.coef_), colnames)
# In[ ]:
# Create empty dictionary to store the mean value calculated from all the scores
r = {}
for name in colnames:
r[name] = round(np.mean([ranks[method][name]
for method in ranks.keys()]), 2)
methods = sorted(ranks.keys())
ranks["Mean"] = r
methods.append("Mean")
# In[ ]:
# Put the mean scores into a Pandas dataframe
meanplot = pd.DataFrame(list(r.items()), columns= ['Feature','Mean Ranking'])
# Sort the dataframe
meanplot = meanplot.sort_values('Mean Ranking', ascending=False)
# In[ ]:
import seaborn as sns
# Let's plot the ranking of the features
sns.factorplot(x="Mean Ranking", y="Feature", data = meanplot, kind="bar",
size=11)
# # Let's Predict with CatBoost Library
# In[ ]:
cbc = CatBoostRegressor(random_seed=0).fit(train[columns].values,train[targetVariableColumnName].values)
# In[ ]:
test['predictionsCatBoost'] = cbc.predict(test[columns])
# In[ ]:
from sklearn.metrics import explained_variance_score,median_absolute_error
print (explained_variance_score(test['price'], test['predictionsCatBoost']),median_absolute_error(test['price'], test['predictionsCatBoost']))
# In[ ]:
test['predictionsCatBoost']=test['predictionsCatBoost'].apply(lambda x: int(round(x)))
test[['price','predictionsCatBoost','age','id']].head()
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[ ]:
import matplotlib
matplotlib.pyplot.scatter(test['predictionsCatBoost'],test[targetVariableColumnName])
# In[ ]:
| 25.002849 | 279 | 0.697015 |
23e0631bc21a082a3bb705e8ce8d49113a913390 | 10,415 | py | Python | source/ppi_traj/mdtraj_utils/trajectory_utils.py | PolyachenkoYA/masif_2021 | 93ff3395696d2f2515f569c2d0218af251168e34 | ["Apache-2.0"] | null | null | null | source/ppi_traj/mdtraj_utils/trajectory_utils.py | PolyachenkoYA/masif_2021 | 93ff3395696d2f2515f569c2d0218af251168e34 | ["Apache-2.0"] | null | null | null | source/ppi_traj/mdtraj_utils/trajectory_utils.py | PolyachenkoYA/masif_2021 | 93ff3395696d2f2515f569c2d0218af251168e34 | ["Apache-2.0"] | null | null | null |
import numpy as np
import mdtraj as md
def join_trajectories(traj_list, selection="all"):
# align trajectories
ids_sim = align(traj_list[0][0], *traj_list[1:], selection=selection)
# topology checks
df_topo_ref = traj_list[0].topology.to_dataframe()[0].iloc[ids_sim[:,0]][['name', 'resName']]
for k in range(1,len(traj_list)):
assert np.all(df_topo_ref.values == traj_list[k].topology.to_dataframe()[0].iloc[ids_sim[:,k]][['name', 'resName']].values)
# create new trajectory
xyz = np.concatenate([traj_list[k].xyz[:,ids_sim[:,k],:] for k in range(len(traj_list))], axis=0)
topology = traj_list[0].atom_slice(ids_sim[:,0]).topology
return md.Trajectory(xyz, topology=topology)
# alignment
def get_atoms_per_chain(traj, selection='all'):
# define filter for atom type
return [np.array([a.index for a in chain.atoms]) for chain in traj.topology.chains]
def chain_atom_indices(traj, chain_id):
return np.array([a.index for a in traj.topology.chain(chain_id).atoms])
def chain_atom_names(traj, chain_id):
return np.array([a.name for a in traj.topology.chain(chain_id).atoms])
def compare_chains_in_trajs(traj1, traj2, chain_id1=0, chain_id2=0, traj_type='traj'):
chainA_1_names = chain_atom_names(traj1, chain_id=chain_id1)
chainA_2_names = chain_atom_names(traj2, chain_id=chain_id2)
N_atoms = len(chainA_1_names)
res = None
if(len(chainA_2_names) == N_atoms):
for i in range(N_atoms):
if(chainA_1_names[i] != chainA_2_names[i]):
res = i
break
else:
res = -1
return res
def unwrap_pbc(traj):
# setup meshgrid for PBC repetitions
dgrid = np.array([0.0, 1.0, -1.0])
dX, dY, dZ = np.meshgrid(dgrid, dgrid, dgrid)
dV = np.stack([dX.ravel(), dY.ravel(), dZ.ravel()], -1)
# get indices of atoms for each molecules
ids_mol_l = get_atoms_per_chain(traj)
# compute center of mass of each molecule and its images
pcm_rep_mol = np.zeros((len(ids_mol_l), 27, traj.xyz.shape[0], 3))
for i in range(len(ids_mol_l)):
# compute center of mass
pcm = md.geometry.distance.compute_center_of_mass(traj.atom_slice(ids_mol_l[i]))
# compute CM for all nearest periodic images
for k in range(dV.shape[0]):
pcm_rep_mol[i][k] = (pcm + traj.unitcell_lengths * dV[k].reshape(1,-1))
# choose reference molecule with CM in reference cell
pcm_ref = pcm_rep_mol[0][0]
# make copy of trajectory
traj_fix = traj[:]
# for each other molecule
for i in range(1,pcm_rep_mol.shape[0]):
# compute distance of all images with reference molecule
dcm_rep = np.sqrt(np.sum(np.square(pcm_rep_mol[i] - np.expand_dims(pcm_ref,0)), axis=2))
# find molecule image closest to reference molecule
ids_img = np.argmin(dcm_rep, axis=0)
# update position of molecule
traj_fix.xyz[:,ids_mol_l[i],:] += np.expand_dims(traj.unitcell_lengths * dV[ids_img],1)
return traj_fix
def identify(top_a, top_b):
# identify similar and mutated atoms pairs
ids_sim_l = []
ids_mut_l = []
chain_a_used = set()
chain_b_used = set()
for chain_a in top_a.chains:
# get number of residue of chain from molecule a
n_res_a = len(chain_a._residues)
for chain_b in top_b.chains:
# get number of residue of chain from molecule b
n_res_b = len(chain_b._residues)
# length check
if (n_res_a == n_res_b) and (chain_a.index not in chain_a_used) and (chain_b.index not in chain_b_used):
# single residue chains (molecules, ions)
if n_res_a == 1:
if list(chain_a.residues)[0].name.lower() != list(chain_b.residues)[0].name.lower():
continue
# sequence check
for res_a, res_b in zip(chain_a.residues, chain_b.residues):
# mutation warning
if res_a.name.lower() != res_b.name.lower():
print("WARNING: [{}]{} != [{}]{}".format(chain_a.index, res_a, chain_b.index, res_b))
# get indices of matching residues
for ra, rb in zip(chain_a.residues, chain_b.residues):
if ra.name.lower() == rb.name.lower():
# get all atoms of corresponding residues
ra_atoms = [a for a in ra.atoms]
rb_atoms = [b for b in rb.atoms]
# check that the two residues have the same number of atoms
if (len(ra_atoms) != len(rb_atoms)):
# if not same number of atoms -> nothing to do
print("ERROR: different number of atoms for {}({}) : {}({})".format(ra, len(ra_atoms), rb, len(rb_atoms)))
else:
# try to find unique ordering of atoms
a_names = [a.name for a in ra.atoms]
b_names = [b.name for b in rb.atoms]
# if not unique -> nothing to do
if ((len(a_names) != len(np.unique(a_names))) or (len(b_names) != len(np.unique(b_names)))):
print("ERROR: non-unique atoms mismatch for {} : {}".format(ra, rb))
elif np.all([a_name==b_name for a_name,b_name in zip(a_names,b_names)]):
for a, b in zip(ra.atoms, rb.atoms):
ids_sim_l.append([a.index, b.index])
else:
print("INFO: reordering atoms mismatch for {} : {}".format(ra, rb))
# find unique ordering
ids_reo_a = np.argsort(a_names)
ids_reo_b = np.argsort(b_names)
# get corresponding reordered atom indices
a_ids = np.array([a.index for a in ra.atoms])[ids_reo_a]
b_ids = np.array([b.index for b in rb.atoms])[ids_reo_b]
for ia, ib in zip(a_ids, b_ids):
ids_sim_l.append([ia, ib])
else:
ids_mut_l.append(([a.index for a in ra.atoms], [b.index for b in rb.atoms]))
# history chain used
chain_a_used.add(chain_a.index)
chain_b_used.add(chain_b.index)
return np.array(ids_sim_l), ids_mut_l
def align(traj_ref, *trajs, selection="all"):
# reference trajectory with selection
ids_sel_ref = traj_ref.topology.select(selection)
traj_sel_ref = traj_ref[0].atom_slice(ids_sel_ref)
# for each input trajectory
ids_sim_l = []
for traj in trajs:
# selected trajectory
ids_sel = traj.topology.select(selection)
traj_sel = traj[0].atom_slice(ids_sel)
# identify to reference trajectory
ids_sim_sel, _ = identify(traj_sel_ref.topology, traj_sel.topology)
# get indices for input and not selected subset
ids_sim_l.append(np.stack([ids_sel_ref[ids_sim_sel[:,0]], ids_sel[ids_sim_sel[:,1]]], axis=-1))
# find common atoms between all trajectories
ids_sim = ids_sim_l[0].copy()
for k in range(1, len(ids_sim_l)):
# intersection masks
m0 = np.in1d(ids_sim[:,0], ids_sim_l[k][:,0])
m1 = np.in1d(ids_sim_l[k][:,0], ids_sim[:,0])
# filter previous indices and insert new indices
ids_sim = np.concatenate([ids_sim[m0], ids_sim_l[k][m1,1].reshape(-1,1)], axis=1)
return ids_sim
def center(traj):
traj_c = traj[:]
traj_c.xyz = (traj_c.xyz - np.expand_dims(np.mean(traj_c.xyz,axis=1),1))
return traj_c
def superpose_transform(xyz_ref, xyz):
# copy data
p = xyz.copy()
p_ref = xyz_ref.copy()
# centering
t = np.expand_dims(np.mean(p,axis=1),1)
t_ref = np.expand_dims(np.mean(p_ref,axis=1),1)
# SVD decomposition
U, S, Vt = np.linalg.svd(np.matmul(np.swapaxes(p_ref-t_ref,1,2), p-t))
# reflection matrix
Z = np.zeros(U.shape) + np.expand_dims(np.eye(U.shape[1], U.shape[2]),0)
Z[:,-1,-1] = np.linalg.det(U) * np.linalg.det(Vt)
R = np.matmul(np.swapaxes(Vt,1,2), np.matmul(Z, np.swapaxes(U,1,2)))
return t, R, t_ref # np.matmul(xyz - t, R) + t_ref
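# This is the Kabsch/SVD superposition: U, S, Vt come from the covariance of the
# centered coordinates, Z flips the last singular direction whenever det(U) * det(Vt) < 0
# so that R is a proper rotation (no reflection), and the fit is applied as
# np.matmul(xyz - t, R) + t_ref (see superpose below).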
def superpose(traj_ref, *trajs, selection='name CA'):
# identify same chains
ids_sim = align(traj_ref, *trajs, selection=selection)
# get reference positions
xyz_ref = traj_ref.xyz[:,ids_sim[:,0],:]
# align each input trajectory to reference
traj_sup_l = []
for k in range(len(trajs)):
# get positions
xyz = trajs[k].xyz[:,ids_sim[:,k+1],:]
# compute the alignment transformation
t, R, t_ref = superpose_transform(xyz_ref, xyz)
# superpose trajectory to reference
traj_sup_l.append(trajs[k][:])
traj_sup_l[-1].xyz = np.matmul(traj_sup_l[-1].xyz-t, R) + t_ref
return tuple(traj_sup_l + [ids_sim])
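# Example (illustrative sketch; assumes mdtraj is installed and the file names
# are placeholders): superpose two trajectories onto a reference using their
# C-alpha atoms. The last returned value is the table of matched atom indices.
def _example_superpose():
    import mdtraj as md
    ref = md.load("reference.pdb")
    traj_a = md.load("traj_a.xtc", top="traj_a.pdb")
    traj_b = md.load("traj_b.xtc", top="traj_b.pdb")
    traj_a_sup, traj_b_sup, ids_sim = superpose(ref, traj_a, traj_b, selection="name CA")
    return traj_a_sup, traj_b_sup, ids_sim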
def atoms_to_residue_contacts(topology, ic_l, dc_l):
# get all resids
resids = np.array([a.residue.index for a in topology.atoms])
# set up a mapping between residue ids and atom ids
mr = np.isclose(resids.reshape(-1,1), np.unique(resids).reshape(1,-1))
# find residue-residue contacts
resids_int_l = []
dmin_rr_l = []
for k in range(len(ic_l)):
if len(ic_l[k]) > 0:
# map interface atoms on sides A and B to their residue indices
resids_ia = np.where(mr[ic_l[k][:,0]])[1]
resids_ib = np.where(mr[ic_l[k][:,1]])[1]
# get unique residue-residue contacts
resids_int, ids_inv = np.unique(np.stack([resids_ia, resids_ib], axis=1), return_inverse=True, axis=0)
# find minimum distances for each residue-residue contact
dmin_rr = np.zeros(resids_int.shape[0], dtype=np.float32)
for i in np.unique(ids_inv):
dmin_rr[i] = np.min(dc_l[k][np.where(ids_inv == i)[0]])
else:
resids_int = np.array([])
dmin_rr = np.array([])
# store data
resids_int_l.append(resids_int)
dmin_rr_l.append(dmin_rr)
return resids_int_l, dmin_rr_l
| 38.290441 | 134 | 0.585502 |
fc6ddb312ef64a36dd7f600f2c4a2b8e29ade2f4 | 16,049 | py | Python | typer/core.py | madkinsz/typer | a1520dcda685220a9a796288f5eaaebd00d68845 | [
"MIT"
] | 7,615 | 2019-12-24T13:08:20.000Z | 2022-03-31T22:07:53.000Z | typer/core.py | madkinsz/typer | a1520dcda685220a9a796288f5eaaebd00d68845 | [
"MIT"
] | 351 | 2019-12-24T22:17:54.000Z | 2022-03-31T15:35:08.000Z | typer/core.py | jina-ai/typer | 8b5e14b25ddf0dd777403015883301b17bedcee0 | [
"MIT"
] | 360 | 2019-12-24T15:29:59.000Z | 2022-03-30T20:33:10.000Z | import inspect
import os
import sys
from gettext import gettext as _
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
import click
import click.core
import click.formatting
import click.parser
import click.types
from .utils import _get_click_major
if TYPE_CHECKING: # pragma: no cover
import click.shell_completion
# TODO: when deprecating Click 7, remove this
def _typer_param_shell_complete(
self: click.core.Parameter, ctx: click.Context, incomplete: str
) -> List["click.shell_completion.CompletionItem"]:
if self._custom_shell_complete is not None:
results = self._custom_shell_complete(ctx, self, incomplete)
if results and isinstance(results[0], str):
from click.shell_completion import CompletionItem
results = [CompletionItem(c) for c in results]
return cast(List["click.shell_completion.CompletionItem"], results)
return self.type.shell_complete(ctx, self, incomplete)
def _typer_param_setup_autocompletion_compat(
self: click.Parameter,
*,
autocompletion: Optional[
Callable[[click.Context, List[str], str], List[Union[Tuple[str, str], str]]]
] = None,
) -> None:
if autocompletion is not None and self._custom_shell_complete is None:
import warnings
warnings.warn(
"'autocompletion' is renamed to 'shell_complete'. The old name is"
" deprecated and will be removed in Click 8.1. See the docs about"
" 'Parameter' for information about new behavior.",
DeprecationWarning,
stacklevel=2,
)
def compat_autocompletion(
ctx: click.Context, param: click.core.Parameter, incomplete: str
) -> List["click.shell_completion.CompletionItem"]:
from click.shell_completion import CompletionItem
out = []
for c in autocompletion(ctx, [], incomplete): # type: ignore
if isinstance(c, tuple):
c = CompletionItem(c[0], help=c[1])
elif isinstance(c, str):
c = CompletionItem(c)
if c.value.startswith(incomplete):
out.append(c)
return out
self._custom_shell_complete = compat_autocompletion
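# Example (illustrative sketch; the completion values are placeholders): an old
# Click-7-style `autocompletion` callback of the kind the shim above adapts to
# Click 8's `shell_complete` protocol. It may return plain strings or
# (value, help) tuples.
def _example_autocompletion(ctx, args, incomplete):
    names = [("alice", "default user"), "bob", "carol"]
    return [
        n for n in names
        if (n[0] if isinstance(n, tuple) else n).startswith(incomplete)
    ]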
class TyperArgument(click.core.Argument):
def __init__(
self,
*,
# Parameter
param_decls: List[str],
type: Optional[Any] = None,
required: Optional[bool] = None,
default: Optional[Any] = None,
callback: Optional[Callable[..., Any]] = None,
nargs: Optional[int] = None,
metavar: Optional[str] = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: Optional[Union[str, List[str]]] = None,
shell_complete: Optional[
Callable[
[click.Context, click.Parameter, str],
Union[List["click.shell_completion.CompletionItem"], List[str]],
]
] = None,
autocompletion: Optional[Callable[..., Any]] = None,
# TyperArgument
show_default: Union[bool, str] = True,
show_choices: bool = True,
show_envvar: bool = True,
help: Optional[str] = None,
hidden: bool = False,
):
self.help = help
self.show_default = show_default
self.show_choices = show_choices
self.show_envvar = show_envvar
self.hidden = hidden
kwargs: Dict[str, Any] = {
"param_decls": param_decls,
"type": type,
"required": required,
"default": default,
"callback": callback,
"nargs": nargs,
"metavar": metavar,
"expose_value": expose_value,
"is_eager": is_eager,
"envvar": envvar,
}
if _get_click_major() > 7:
kwargs["shell_complete"] = shell_complete
else:
kwargs["autocompletion"] = autocompletion
super().__init__(**kwargs)
if _get_click_major() > 7:
_typer_param_setup_autocompletion_compat(
self, autocompletion=autocompletion
)
def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]:
# Modified version of click.core.Option.get_help_record()
# to support Arguments
if self.hidden:
return None
name = self.make_metavar()
help = self.help or ""
extra = []
if self.show_envvar:
envvar = self.envvar
# allow_from_autoenv is currently not supported in Typer for CLI Arguments
if envvar is not None:
var_str = (
", ".join(str(d) for d in envvar)
if isinstance(envvar, (list, tuple))
else envvar
)
extra.append(f"env var: {var_str}")
if self.default is not None and (self.show_default or ctx.show_default):
if isinstance(self.show_default, str):
default_string = f"({self.show_default})"
elif isinstance(self.default, (list, tuple)):
default_string = ", ".join(str(d) for d in self.default)
elif inspect.isfunction(self.default):
default_string = "(dynamic)"
else:
default_string = str(self.default)
extra.append(f"default: {default_string}")
if self.required:
extra.append("required")
if extra:
extra_str = ";".join(extra)
help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
return name, help
def make_metavar(self) -> str:
# Modified version of click.core.Argument.make_metavar()
# to include Argument name
if self.metavar is not None:
return self.metavar
var = (self.name or "").upper()
if not self.required:
var = "[{}]".format(var)
type_var = self.type.get_metavar(self)
if type_var:
var += f":{type_var}"
if self.nargs != 1:
var += "..."
return var
def shell_complete(
self, ctx: click.Context, incomplete: str
) -> List["click.shell_completion.CompletionItem"]:
return _typer_param_shell_complete(self, ctx=ctx, incomplete=incomplete)
class TyperOption(click.core.Option):
def __init__(
self,
*,
# Parameter
param_decls: List[str],
type: Optional[Union[click.types.ParamType, Any]] = None,
required: Optional[bool] = None,
default: Optional[Any] = None,
callback: Optional[Callable[..., Any]] = None,
nargs: Optional[int] = None,
metavar: Optional[str] = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: Optional[Union[str, List[str]]] = None,
shell_complete: Optional[
Callable[
[click.Context, click.Parameter, str],
Union[List["click.shell_completion.CompletionItem"], List[str]],
]
] = None,
autocompletion: Optional[Callable[..., Any]] = None,
# Option
show_default: Union[bool, str] = False,
prompt: Union[bool, str] = False,
confirmation_prompt: Union[bool, str] = False,
prompt_required: bool = True,
hide_input: bool = False,
is_flag: Optional[bool] = None,
flag_value: Optional[Any] = None,
multiple: bool = False,
count: bool = False,
allow_from_autoenv: bool = True,
help: Optional[str] = None,
hidden: bool = False,
show_choices: bool = True,
show_envvar: bool = False,
):
# TODO: when deprecating Click 7, remove custom kwargs with prompt_required
# and call super().__init__() directly
kwargs: Dict[str, Any] = {
"param_decls": param_decls,
"type": type,
"required": required,
"default": default,
"callback": callback,
"nargs": nargs,
"metavar": metavar,
"expose_value": expose_value,
"is_eager": is_eager,
"envvar": envvar,
"show_default": show_default,
"prompt": prompt,
"confirmation_prompt": confirmation_prompt,
"hide_input": hide_input,
"is_flag": is_flag,
"flag_value": flag_value,
"multiple": multiple,
"count": count,
"allow_from_autoenv": allow_from_autoenv,
"help": help,
"hidden": hidden,
"show_choices": show_choices,
"show_envvar": show_envvar,
}
if _get_click_major() > 7:
kwargs["prompt_required"] = prompt_required
kwargs["shell_complete"] = shell_complete
else:
kwargs["autocompletion"] = autocompletion
super().__init__(**kwargs)
if _get_click_major() > 7:
_typer_param_setup_autocompletion_compat(
self, autocompletion=autocompletion
)
def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]:
# Click 7.x was not breaking this use case, so in that case, re-use its logic
if _get_click_major() < 8:
return super().get_help_record(ctx)
# Duplicate all of Click's logic only to modify a single line, to allow boolean
# flags with only names for False values as it's currently supported by Typer
# Ref: https://typer.tiangolo.com/tutorial/parameter-types/bool/#only-names-for-false
if self.hidden:
return None
any_prefix_is_slash = False
def _write_opts(opts: Sequence[str]) -> str:
nonlocal any_prefix_is_slash
rv, any_slashes = click.formatting.join_options(opts)
if any_slashes:
any_prefix_is_slash = True
if not self.is_flag and not self.count:
rv += f" {self.make_metavar()}"
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ""
extra = []
if self.show_envvar:
envvar = self.envvar
if envvar is None:
if (
self.allow_from_autoenv
and ctx.auto_envvar_prefix is not None
and self.name is not None
):
envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
if envvar is not None:
var_str = (
envvar
if isinstance(envvar, str)
else ", ".join(str(d) for d in envvar)
)
extra.append(_("env var: {var}").format(var=var_str))
# Temporarily enable resilient parsing to avoid type casting
# failing for the default. Might be possible to extend this to
# help formatting in general.
resilient = ctx.resilient_parsing
ctx.resilient_parsing = True
try:
default_value = self.get_default(ctx, call=False)
finally:
ctx.resilient_parsing = resilient
show_default_is_str = isinstance(self.show_default, str)
if show_default_is_str or (
default_value is not None and (self.show_default or ctx.show_default)
):
if show_default_is_str:
default_string = f"({self.show_default})"
elif isinstance(default_value, (list, tuple)):
default_string = ", ".join(str(d) for d in default_value)
elif callable(default_value):
default_string = _("(dynamic)")
elif self.is_bool_flag and self.secondary_opts:
# For boolean flags that have distinct True/False opts,
# use the opt without prefix instead of the value.
# Typer override, original commented
# default_string = click.parser.split_opt(
# (self.opts if self.default else self.secondary_opts)[0]
# )[1]
if self.default:
if self.opts:
default_string = click.parser.split_opt(self.opts[0])[1]
else:
default_string = str(default_value)
else:
default_string = click.parser.split_opt(self.secondary_opts[0])[1]
# Typer override end
elif self.is_bool_flag and not self.secondary_opts and not default_value:
default_string = ""
else:
default_string = str(default_value)
if default_string:
extra.append(_("default: {default}").format(default=default_string))
if isinstance(self.type, click.types._NumberRangeBase):
range_str = self.type._describe_range()
if range_str:
extra.append(range_str)
if self.required:
extra.append(_("required"))
if extra:
extra_str = "; ".join(extra)
help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
return ("; " if any_prefix_is_slash else " / ").join(rv), help
def shell_complete(
self, ctx: click.Context, incomplete: str
) -> List["click.shell_completion.CompletionItem"]:
return _typer_param_shell_complete(self, ctx=ctx, incomplete=incomplete)
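# Example (illustrative sketch following the pattern from the Typer docs page
# referenced above; names are placeholders): a boolean flag that only defines
# a CLI name for the False value, which the get_help_record override above
# keeps rendering correctly.
def _example_only_false_name():
    import typer
    app = typer.Typer()

    @app.command()
    def main(in_prod: bool = typer.Option(True, " /--demo", " /-d")):
        print("production" if in_prod else "demo")

    return app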
def _typer_format_options(
self: click.core.Command, *, ctx: click.Context, formatter: click.HelpFormatter
) -> None:
args = []
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
if param.param_type_name == "argument":
args.append(rv)
elif param.param_type_name == "option":
opts.append(rv)
# TODO: explore adding Click's gettext support, e.g.:
# from gettext import gettext as _
# with formatter.section(_("Options")):
# ...
if args:
with formatter.section("Arguments"):
formatter.write_dl(args)
if opts:
with formatter.section("Options"):
formatter.write_dl(opts)
def _typer_main_shell_completion(
self: click.core.Command,
*,
ctx_args: Dict[str, Any],
prog_name: str,
complete_var: Optional[str] = None,
) -> None:
if complete_var is None:
complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper()
instruction = os.environ.get(complete_var)
if not instruction:
return
from .completion import shell_complete
rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction)
sys.exit(rv)
class TyperCommand(click.core.Command):
def format_options(
self, ctx: click.Context, formatter: click.HelpFormatter
) -> None:
_typer_format_options(self, ctx=ctx, formatter=formatter)
def _main_shell_completion(
self,
ctx_args: Dict[str, Any],
prog_name: str,
complete_var: Optional[str] = None,
) -> None:
_typer_main_shell_completion(
self, ctx_args=ctx_args, prog_name=prog_name, complete_var=complete_var
)
class TyperGroup(click.core.Group):
def format_options(
self, ctx: click.Context, formatter: click.HelpFormatter
) -> None:
_typer_format_options(self, ctx=ctx, formatter=formatter)
self.format_commands(ctx, formatter)
def _main_shell_completion(
self,
ctx_args: Dict[str, Any],
prog_name: str,
complete_var: Optional[str] = None,
) -> None:
_typer_main_shell_completion(
self, ctx_args=ctx_args, prog_name=prog_name, complete_var=complete_var
)
| 34.07431 | 93 | 0.578042 |
df20bef13ed2ede685f3ae7fcbb95005e4e6edd0 | 50,454 | py | Python | functions.py | KasaiKonoru/Auto-Voice-Channels | 0f0023050774f8a8e136f1819341c7e71f0472b5 | [
"MIT"
] | null | null | null | functions.py | KasaiKonoru/Auto-Voice-Channels | 0f0023050774f8a8e136f1819341c7e71f0472b5 | [
"MIT"
] | null | null | null | functions.py | KasaiKonoru/Auto-Voice-Channels | 0f0023050774f8a8e136f1819341c7e71f0472b5 | [
"MIT"
] | null | null | null | import asyncio
import traceback
from datetime import datetime
from copy import deepcopy
from math import ceil
from random import choice, seed
from statistics import mean
from time import time
import cfg
import discord
import translate
import utils
from utils import log
try:
import patreon_info
except ImportError:
patreon_info = None
@utils.func_timer()
def lock_channel_request(channel, offset=0):
cfg.CURRENT_REQUESTS[channel.id] = time() + offset
# print("Locking", channel.id, cfg.CURRENT_REQUESTS)
@utils.func_timer()
def channel_is_requested(channel):
# print("Checking", channel.id, cfg.CURRENT_REQUESTS)
channel_age = datetime.utcnow().timestamp() - channel.created_at.timestamp()
if channel_age < 5:
return True
if channel.id in cfg.CURRENT_REQUESTS:
if time() - cfg.CURRENT_REQUESTS[channel.id] < 5:
return True
return False
@utils.func_timer()
def unlock_channel_request(channel):
try:
del cfg.CURRENT_REQUESTS[channel.id]
except KeyError:
pass
# print("Unlocking", channel.id, cfg.CURRENT_REQUESTS)
@utils.func_timer()
def lock_user_request(user, offset=0):
cfg.USER_REQUESTS[user.id] = time() + offset
@utils.func_timer()
def user_request_is_locked(user):
if user.id in cfg.USER_REQUESTS:
if time() - cfg.USER_REQUESTS[user.id] < 2:
return True
return False
@utils.func_timer()
def detect_abuse(user):
if user.id in cfg.USER_REQUESTS:
v = 1 if user.id not in cfg.USER_ABUSE_EVENTS else cfg.USER_ABUSE_EVENTS[user.id] + 1
cfg.USER_ABUSE_EVENTS[user.id] = v
return v
return False
@utils.func_timer()
def esc_md(text):
return discord.utils.escape_markdown(text)
@utils.func_timer()
def user_hash(user):
return esc_md(user.name) + '#' + user.discriminator
@utils.func_timer()
def check_primary_permissions(channel, me):
perms = channel.permissions_for(me)
perms_required = [
perms.manage_channels,
perms.read_messages,
perms.send_messages,
perms.move_members,
]
if channel.category:
perms = channel.category.permissions_for(me)
perms_required += [
perms.manage_channels,
perms.read_messages,
perms.send_messages,
perms.move_members,
]
return all(perms_required)
@utils.func_timer()
def set_template(guild, chid, template):
settings = utils.get_serv_settings(guild)
for p in settings['auto_channels']:
for sid in settings['auto_channels'][p]['secondaries']:
if sid == chid:
settings['auto_channels'][p]['template'] = template
utils.set_serv_settings(guild, settings)
return
@utils.func_timer()
async def set_default_limit(guild, c, limit):
chid = c.id
await c.edit(user_limit=limit)
settings = utils.get_serv_settings(guild)
for p in settings['auto_channels']:
for sid in settings['auto_channels'][p]['secondaries']:
if sid == chid:
settings['auto_channels'][p]['limit'] = limit
utils.set_serv_settings(guild, settings)
pc = guild.get_channel(int(p))
if pc.user_limit:
await pc.edit(user_limit=0)
return
@utils.func_timer()
def toggle_position(guild, chid):
settings = utils.get_serv_settings(guild)
for p in settings['auto_channels']:
for sid in settings['auto_channels'][p]['secondaries']:
if sid == chid:
above = True
if 'above' in settings['auto_channels'][p]:
above = settings['auto_channels'][p]['above']
settings['auto_channels'][p]['above'] = not above
utils.set_serv_settings(guild, settings)
above = not above
return "above" if above else "below"
return "error"
@utils.func_timer()
def get_channel_games(channel):
settings = utils.get_serv_settings(channel.guild)
general = ["General"] if 'general' not in settings else [settings['general']]
games = {}
for m in sorted(channel.members, key=lambda x: x.display_name.lower()):
if not m.bot:
for act in [a for a in m.activities if a.type == discord.ActivityType.playing]:
gname = act.name
if gname == "Custom Status":
continue
if gname in games:
games[gname] += 1
else:
games[gname] = 1
if not games:
return general
games_l = list((x, games[x]) for x in games) # Convert dict to 2D list
games_l.sort(key=lambda c: c[1], reverse=True) # Sort by most players
biggest_game, most_players = games_l[0]
gnames = [biggest_game]
games_l = games_l[1:] # remaining games (excluding most popular one)
for gn, gp in games_l:
if gp == most_players:
gnames.append(gn)
if len(gnames) > 2:
# More than 2 games with the same number of players
return general
else:
return gnames
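# Example (illustrative sketch using plain data instead of Discord members;
# assumes a non-empty mapping): the tie-breaking rule above keeps up to two
# games that share the highest player count and otherwise falls back to the
# general name.
def _example_pick_games(games, general="General"):
    games_l = sorted(games.items(), key=lambda c: c[1], reverse=True)
    biggest_game, most_players = games_l[0]
    gnames = [biggest_game] + [gn for gn, gp in games_l[1:] if gp == most_players]
    return [general] if len(gnames) > 2 else gnames
# _example_pick_games({"Apex": 2, "Dota 2": 2}) -> ["Apex", "Dota 2"]
# _example_pick_games({"Apex": 1, "Dota 2": 1, "Rust": 1}) -> ["General"]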
@utils.func_timer()
def get_alias(g, settings):
std_aliases = {
"League of Legends": "LoL",
"Counter-Strike: Global Offensive": "CS:GO",
"Team Fortress 2": "TF2",
"Grand Theft Auto V": "GTAV",
"PLAYERUNKNOWN'S BATTLEGROUNDS": "PUBG",
"MONSTER HUNTER: WORLD": "MH:W",
"The Elder Scrolls V: Skyrim": "Skyrim",
"The Elder Scrolls V: Skyrim Special Edition": "Skyrim",
"The Elder Scrolls Online": "ESO",
"Tom Clancy's Rainbow Six Siege": "Rainbow Six Siege",
"FINAL FANTASY XIV": "FFXIV",
"FINAL FANTASY XIV Online": "FFXIV",
"Warhammer End Times Vermintide": "Vermintide 1",
"Warhammer: Vermintide 2": "Vermintide 2",
"World of Warcraft Classic": "WoW Classic",
"World of Warcraft": "WoW",
"Call of Dutyː Modern Warfare": "CoDːMW",
"Call of Duty®️ː Modern Warfare®️": "CoDːMW",
}
if g in settings['aliases']:
g = settings['aliases'][g]
elif g in std_aliases:
g = std_aliases[g]
return g
@utils.func_timer()
def get_game_name(channel, games):
settings = utils.get_serv_settings(channel.guild)
general = ["General"] if 'general' not in settings else [settings['general']]
if games == general:
return games[0]
for i, g in enumerate(games):
games[i] = get_alias(g, settings)
tmp = games
games = []
for g in tmp:
if g not in games:
games.append(g)
return ', '.join(games)
@utils.func_timer()
def get_party_info(channel, game, asip, default=""):
settings = utils.get_serv_settings(channel.guild)
parties = {}
states = {}
details = {}
num_playing = {}
sizes = {}
sneakies = 0
for m in channel.members:
act = m.activity
act_name = get_alias(act.name, settings) if act else None
if act and act_name == game:
pid = -1
if hasattr(act, 'party') and act.party:
if 'id' in act.party:
pid = act.party['id']
if pid == -1:
# No party ID is given, so we make our own based on other info
pid = act_name
if hasattr(act, 'party') and act.party:
if 'size' in act.party:
pid += ('/'.join(str(v) for v in act.party['size']))
if hasattr(act, 'state') and act.state:
pid += act.state
if hasattr(act, 'details') and act.details:
pid += act.details
if hasattr(act, 'state') and act.state:
states[pid] = act.state
if hasattr(act, 'details') and act.details:
details[pid] = act.details
if hasattr(act, 'party') and act.party:
if 'size' in act.party:
num_playing[pid] = str(act.party['size'][0])
try:
sizes[pid] = str(act.party['size'][1])
except IndexError:
sizes[pid] = "0"
parties[pid] = parties[pid] + 1 if pid in parties else 1
elif not act and asip:
sneakies += 1
biggest_party = [None, 0]
for p, v in parties.items():
if v > biggest_party[1]:
biggest_party = [p, v]
pid, players = biggest_party
info = {
'state': default,
'details': default,
'rich': False,
'sneakies': "0",
'num_playing': "0",
'size': "0",
}
if pid is not None:
info['state'] = states[pid] if pid in states else default
info['details'] = details[pid] if pid in details else default
info['rich'] = pid in states or pid in details
info['sneakies'] = sneakies
if pid in num_playing:
info['num_playing'] = num_playing[pid]
else:
info['num_playing'] = str(players + sneakies)
if pid in sizes:
info['size'] = sizes[pid]
elif channel.user_limit:
info['size'] = str(channel.user_limit)
return info
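# Example (illustrative sketch using plain data): how the largest party is
# selected above - the party id reported by the most members in the channel
# wins, and its state/details/size fields are the ones exposed in `info`.
def _example_biggest_party(parties):
    biggest_party = [None, 0]
    for pid, count in parties.items():
        if count > biggest_party[1]:
            biggest_party = [pid, count]
    return biggest_party
# _example_biggest_party({"party-a": 3, "party-b": 1}) -> ["party-a", 3]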
@utils.func_timer()
async def update_bitrate(channel, settings, user_left=None, reset=False):
if 'custom_bitrates' not in settings:
return False
custom_bitrates = []
for m in channel.members:
if str(m.id) in settings['custom_bitrates']:
custom_bitrates.append(settings['custom_bitrates'][str(m.id)])
if not custom_bitrates:
if reset or (user_left and str(user_left.id) in settings['custom_bitrates']):
p = utils.get_primary_channel(channel.guild, settings, channel)
bitrate = p.bitrate
else:
return False
else:
bitrate = min(channel.guild.bitrate_limit, mean(custom_bitrates) * 1000)
if bitrate == channel.bitrate:
return False
await channel.edit(bitrate=bitrate)
return bitrate
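# Example (illustrative numeric sketch; the limit value is a placeholder):
# the applied bitrate is the mean of the members' custom bitrates (in kbps),
# converted to bps and capped at the guild's limit, as computed above.
def _example_mixed_bitrate(custom_bitrates_kbps, guild_bitrate_limit=96000):
    from statistics import mean
    return min(guild_bitrate_limit, mean(custom_bitrates_kbps) * 1000)
# _example_mixed_bitrate([64, 128]) -> 96000 (capped)
# _example_mixed_bitrate([48, 64]) -> 56000.0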
@utils.func_timer()
async def update_text_channel_role(guild, member, channel, mode):
if mode == 'leave' and len(channel.members) <= 0:
return # Last person leaving, channel will be deleted, no need to update roles
settings = utils.get_serv_settings(guild)
for p, pv in settings['auto_channels'].items():
for s, sv in pv['secondaries'].items():
if s == channel.id:
if 'tcr' in sv:
r = guild.get_role(sv['tcr'])
if r:
if mode == 'join':
await member.add_roles(r)
elif mode == 'leave':
try:
await member.remove_roles(r)
except discord.errors.NotFound:
pass # It's possible someone joined too quickly and the role doesn't exist yet.
# Ensure existing members have the role in case they joined too quickly
members = [m for m in channel.members if m != member]
for m in members:
if r not in m.roles:
await m.add_roles(r)
return
@utils.func_timer()
async def dm_user(user, msg, embed=None, error=True):
if user is None:
log("Failed to DM unknown user.")
return
if user.dm_channel is None:
await user.create_dm()
try:
last_message = await user.dm_channel.history(limit=1).flatten()
except discord.errors.Forbidden:
log("Forbidden to get user dm_history {}".format(user.id))
return
if len(last_message) > 0:
last_message = last_message[0]
else:
last_message = None
if error and last_message and last_message.id in cfg.DM_ERROR_MESSAGES:
return
try:
m = await user.dm_channel.send(content=msg, embed=embed)
if error:
cfg.DM_ERROR_MESSAGES[m.id] = time()
except discord.errors.Forbidden:
log("Forbidden to DM user {}".format(user.id))
@utils.func_timer()
async def echo(msg, channel, user=None):
max_chars = 1950 # Discord has a character limit of 2000 per message. Use 1950 to be safe.
msg = str(msg)
if len(msg) > max_chars:
chunks = list([msg[i:i + max_chars] for i in range(0, len(msg), max_chars)])
else:
chunks = [msg]
for c in chunks:
try:
await channel.send(c)
except discord.errors.Forbidden:
log("Forbidden to echo", channel.guild)
if user:
await dm_user(
user,
"I don't have permission to send messages in the "
"`#{}` channel of **{}**.".format(channel.name, channel.guild.name)
)
return False
except Exception:
log("Failed to echo", channel.guild)
print(traceback.format_exc())
return False
return True
@utils.func_timer()
async def blind_echo(msg, guild):
settings = utils.get_serv_settings(guild)
msg_channel = None
last_message = None
if 'last_channel' in settings:
msg_channel = guild.get_channel(settings['last_channel'])
if msg_channel:
last_message = msg_channel.last_message
if not msg_channel:
server_contact = guild.get_member(settings['server_contact'])
if server_contact is not None:
if server_contact.dm_channel is None:
await server_contact.create_dm()
msg_channel = server_contact.dm_channel
last_message = await msg_channel.history(limit=1).flatten()
if len(last_message) > 0:
last_message = last_message[0]
if msg_channel:
if last_message and last_message.id in cfg.ERROR_MESSAGES:
# Don't spam multiple error messages in a row
return
try:
m = await msg_channel.send(msg)
except:
settings['last_channel'] = 0 # Don't try use this channel in future
utils.set_serv_settings(guild, settings)
return
cfg.ERROR_MESSAGES[m.id] = time()
@utils.func_timer()
async def admin_log(msg, client, important=False):
admin = client.get_user(cfg.CONFIG['admin_id'])
if admin.dm_channel is None:
await admin.create_dm()
mention = admin.mention
if important and len(msg + "\n" + mention) <= 2000:
msg = msg + "\n" + mention
admin_channel = admin.dm_channel
if 'admin_channel' in cfg.CONFIG:
admin_channel = client.get_channel(cfg.CONFIG['admin_channel'])
await admin_channel.send(msg)
@utils.func_timer()
async def server_log(guild, msg, msg_level, settings=None):
if settings is None:
settings = utils.get_serv_settings(guild)
if 'logging' not in settings or settings['logging'] is False:
return
log_level = settings['log_level']
if msg_level > log_level:
return
if not is_gold(guild):
return
if msg_level == 3 and not is_sapphire(guild):
return
try:
channel = guild.get_channel(settings['logging'])
except:
# Channel no longer exists, or we can't get it, either way we can't log anything.
return
try:
msg = msg.replace('➕', '+') # Make the default plus sign more visible
await channel.send(msg)
except discord.errors.Forbidden:
log("Forbidden to log", guild)
except Exception:
log("Failed to log", guild)
print(traceback.format_exc())
return
@utils.func_timer()
async def check_patreon(force_update=False, client=None):
if patreon_info is None:
return
print("Checking Patreon...{}".format(" (F)" if force_update else ""))
previous_patrons = deepcopy(cfg.PATRONS)
patrons = patreon_info.fetch_patrons(force_update=force_update)
if client and previous_patrons and patrons != previous_patrons:
for p, r in patrons.items():
if p not in previous_patrons:
pu = client.get_user(p)
try:
pn = pu.display_name
except AttributeError:
pn = "<UNKNOWN>"
important = True if r in ['sapphire', 'diamond'] else False
await admin_log("🎉 New {} patron! **{}** (`{}`)".format(cfg.TIER_ICONS[r], pn, p), client, important)
msg = ("🎉 **Thanks for your support!** 🎉\nTo activate your patron-exclusive features, "
"simply run `vc/power-overwhelming` in your server.")
if r in ['sapphire', 'diamond']:
msg += "\n\nGive me a few hours to set up your private "
msg += "bot" if r == 'sapphire' else "server"
msg += ", and then I'll contact you in the support server to make the switch."
if r == 'diamond':
msg += ("\nPlease let me know whether you prefer a server in Europe, "
"North America or Asia by replying to this message.")
await dm_user(pu, msg)
for p, r in previous_patrons.items():
if p not in patrons:
pu = client.get_user(p)
try:
pn = pu.display_name
except AttributeError:
pn = "<UNKNOWN>"
important = True if r in ['sapphire', 'diamond'] else False
await admin_log("😱 Lost {} patron! **{}** (`{}`)".format(cfg.TIER_ICONS[r], pn, p), client, important)
patreon_info.update_patron_servers(patrons)
print("{} patrons".format(len(patrons)))
@utils.func_timer()
def is_gold(guild):
if patreon_info is None:
return True
gold_servers = [
607246684367618049, # T4
]
if isinstance(guild, int):
guild_id = guild
else:
guild_id = guild.id
gold_servers += cfg.GOLD_SERVERS
return guild_id in gold_servers or is_sapphire(guild_id)
@utils.func_timer()
def is_sapphire(guild):
if patreon_info is None:
return True
sapphire_servers = [
332246283601313794, # Salt Sanc
601015720200896512, # Dots Bots
460459401086763010, # T1
607246539101831168, # T2
]
if isinstance(guild, int):
guild_id = guild
else:
guild_id = guild.id
sapphire_servers += cfg.SAPPHIRE_SERVERS
return guild_id in sapphire_servers
@utils.func_timer()
def get_sapphire_id(guild):
for s, sv in cfg.CONFIG["sapphires"].items():
if guild.id in sv["servers"]:
return int(s)
return None
@utils.func_timer()
def get_guilds(client):
guilds = []
am_sapphire_bot = cfg.SAPPHIRE_ID is not None
for g in client.guilds:
if g is not None and g.name is not None:
if am_sapphire_bot:
if is_sapphire(g) and g.id in cfg.CONFIG["sapphires"][str(cfg.SAPPHIRE_ID)]["servers"]:
guilds.append(g)
else:
if not is_sapphire(g) or get_sapphire_id(g) is None:
guilds.append(g)
return guilds
@utils.func_timer()
async def react(message, r):
try:
await message.add_reaction(r)
except discord.errors.Forbidden:
return False
except discord.errors.NotFound:
return False
return True
@utils.func_timer()
async def custom_name(guild, c, u, n):
settings = utils.get_serv_settings(guild)
for p, pv in settings['auto_channels'].items():
for s, sv in pv['secondaries'].items():
if s == c.id:
if n.lower() == 'reset':
del settings['auto_channels'][p]['secondaries'][s]['name']
else:
if 'uniquenames' in settings and settings['uniquenames']:
existing_names = []
for t_p, t_pv in settings['auto_channels'].items():
for t_s, t_sv in t_pv['secondaries'].items():
if 'name' in t_sv and t_s != c.id:
existing_names.append(t_sv['name'])
if n in existing_names:
return False, "That name is already used by another channel, please pick another."
settings['auto_channels'][p]['secondaries'][s]['name'] = n
utils.set_serv_settings(guild, settings)
await server_log(
guild,
":regional_indicator_n: {} (`{}`) changed the channel (`{}`) name to \"{}\"".format(
user_hash(u), u.id, c.id, esc_md(n)
), 2, settings)
return True, None
@utils.func_timer()
async def set_creator(guild, cid, creator):
settings = utils.get_serv_settings(guild)
for p, pv in settings['auto_channels'].items():
for s, sv in pv['secondaries'].items():
if s == cid:
settings['auto_channels'][p]['secondaries'][s]['creator'] = creator.id
try:
jc = guild.get_channel(settings['auto_channels'][p]['secondaries'][s]['jc'])
await jc.edit(name="⇧ Join {}".format(creator.display_name))
except (KeyError, AttributeError):
pass
if s in cfg.PRIV_CHANNELS:
cfg.PRIV_CHANNELS[s]['creator'] = creator
break
utils.set_serv_settings(guild, settings)
return True
@utils.func_timer(1.5)
async def rename_channel(guild, channel, settings, primary_id, templates=None, i=-1, ignore_lock=False):
if not settings:
settings = utils.get_serv_settings(guild)
if ignore_lock and not channel.members:
# Sometimes channel.members doesn't update immediately after moving user into it.
await asyncio.sleep(1)
channel = guild.get_channel(channel.id)
if not templates:
templates = {}
if "template" in settings['auto_channels'][primary_id]:
try:
templates[channel.id] = settings['auto_channels'][primary_id]['template']
except AttributeError:
return # channel has no ID
if channel.members and (ignore_lock or not channel_is_requested(channel)):
if channel.id in templates:
cname = templates[channel.id]
else:
cname = settings['channel_name_template']
guild_is_gold = is_gold(guild)
guild_is_sapphire = is_sapphire(guild)
has_expression = '{{' in cname and '}}' in cname and cname.count('{{') == cname.count('}}') and guild_is_gold
is_private = settings['priv'] if 'priv' in settings else False
cname = cname.replace("@@num_players@@", "@@num_playing@@") # Common mistake
if '@@game_name@@' in cname or '@@party_' in cname or '@@num_playing@@' in cname or has_expression:
games = get_channel_games(channel)
gname = get_game_name(channel, games)
if '@@party_' in cname or '@@num_playing@@' in cname or has_expression:
party = get_party_info(channel, gname, settings['asip'] if 'asip' in settings else False)
if ('@@creator@@' in cname or '@@num_others@@' in cname or '@@stream_name@@' in cname or
has_expression or is_private):
creator = None
creator_name = "Unknown"
creator_id = utils.get_creator_id(settings, channel)
if creator_id:
creator_found = False
for m in channel.members:
if m.id == creator_id:
creator_found = True
creator = m
creator_name = utils.get_display_name(settings, m)
break
if not creator_found: # Creator not in channel anymore, use top member
members = [m for m in channel.members if not m.bot]
if members:
creator = sorted(members, key=lambda x: x.display_name.lower())[0]
await set_creator(guild, channel.id, creator)
creator_name = utils.get_display_name(settings, creator)
creator_id = creator.id
else:
# Only time we can get here is if a bot is the last one in the channel,
# meaning it'll be deleted very soon and we can skip renaming it.
return
i_str = str(i + 1)
if i == -1:
i_str = "?"
cname = cname.replace('##', '#' + i_str)
for x in range(5):
cname = cname.replace('${}#'.format('0' * x), i_str.zfill(x + 1))
random_set = 0
while (guild_is_gold and
'[[' in cname and
']]' in cname and
('/' in cname.split('[[', 1)[1].split(']]', 1)[0] or
'\\' in cname.split('[[', 1)[1].split(']]', 1)[0])):
seed_c = channel.id + random_set
seed_d = cfg.SEED + channel.id + random_set
b, m = cname.split('[[', 1)
m, e = m.split(']]', 1)
if '\\' in m:
words = m.split('\\')
seed(seed_d)
m = choice(words)
else:
words = m.split('/')
seed(seed_c)
m = choice(words)
cname = b + m + e
random_set += 1
if '@@nato@@' in cname and guild_is_gold:
nato = ['Alpha', 'Bravo', 'Charlie', 'Delta', 'Echo', 'Foxtrot', 'Golf', 'Hotel', 'India', 'Juliett',
'Kilo', 'Lima', 'Mike', 'November', 'Oscar', 'Papa', 'Quebec', 'Romeo', 'Sierra', 'Tango',
'Uniform', 'Victor', 'Whiskey', 'X Ray', 'Yankee', 'Zulu']
if i < len(nato):
nato = nato[i]
else:
nato = nato[i % len(nato)] + " " + str(ceil((i + 1) / len(nato)))
cname = cname.replace('@@nato@@', nato)
if '@@num@@' in cname:
members = [m for m in channel.members if not m.bot]
cname = cname.replace('@@num@@', str(len(members)))
if '@@num_playing@@' in cname and guild_is_sapphire:
cname = cname.replace('@@num_playing@@', party['num_playing'])
if '@@party_size@@' in cname and guild_is_sapphire:
cname = cname.replace('@@party_size@@', party['size'])
if '@@party_state@@' in cname and guild_is_sapphire:
cname = cname.replace('@@party_state@@', party['state'])
if '@@party_details@@' in cname and guild_is_sapphire:
cname = cname.replace('@@party_details@@', party['details'])
others = -1
if '@@num_others@@' in cname:
others = len([m for m in channel.members if (
not m.bot and
m.id != creator_id
)])
cname = cname.replace('@@num_others@@', str(others))
while ('<<' in cname and
'>>' in cname and
('/' in cname.split('<<', 1)[1].split('>>', 1)[0] or
'\\' in cname.split('<<', 1)[1].split('>>', 1)[0])):
b, m = cname.split('<<', 1)
m, e = m.split('>>', 1)
c = None
if m.count('/') == 1:
c = '/'
n = len([m for m in channel.members if not m.bot])
elif m.count('\\') == 1:
c = '\\'
if others == -1:
n = len([m for m in channel.members if (
not m.bot and
m.id != utils.get_creator_id(settings, channel)
)])
else:
n = others
if c is not None:
s, p = m.split(c, 1)
if n == 1:
m = s
else:
m = p
cname = b + m + e
if '@@bitrate@@' in cname and guild_is_gold:
cname = cname.replace('@@bitrate@@', "{}kbps".format(round(channel.bitrate / 1000)))
while '{{' in cname and '}}' in cname and cname.count('{{') == cname.count('}}') and guild_is_gold:
m, e = cname.split('}}', 1)
sections = m.split('{{')
b = '{{'.join(sections[:-1])
m = sections[-1]
m = utils.eval_expression(m, guild_is_sapphire, creator, party, gname)
cname = b + m + e
if '@@game_name@@' in cname:
cname = cname.replace('@@game_name@@', gname)
if '@@creator@@' in cname:
cname = cname.replace('@@creator@@', creator_name)
if '@@stream_name@@' in cname:
stream_name = ""
for act in creator.activities:
if act.type == discord.ActivityType.streaming:
stream_name = act.name
break
cname = cname.replace('@@stream_name@@', stream_name)
while '""' in cname and cname.count('""') % 2 == 0 and ':' in cname.split('""', 1)[1].split('""')[0]:
b, m = cname.split('""', 1)
m, e = m.split('""', 1)
m, s = m.split(':', 1)
s = s.strip()
modes = m.split('+')
ops = {
'caps': str.upper,
'upper': str.upper,
'lower': str.lower,
'title': utils.capitalize,
'swap': str.swapcase,
'rand': utils.random_case,
'usd': utils.upsidedown,
'acro': utils.acronym,
'remshort': utils.remove_short_words,
'spaces': utils.full_strip,
'uwu': translate.uwu,
'scaps': translate.small_caps,
'bold': translate.bold,
'italic': translate.italic,
'bolditalic': translate.bolditalic,
'script': translate.script,
'boldscript': translate.boldscript,
'fraktur': translate.fraktur,
'boldfraktur': translate.boldfraktur,
'double': translate.double,
'sans': translate.sans,
'boldsans': translate.boldsans,
'italicsans': translate.italicsans,
'bolditalicsans': translate.bolditalicsans,
'mono': translate.mono,
}
for mode in modes:
mode = mode.lower().strip()
if mode in ops:
s = ops[mode](s)
continue
if mode.endswith('w') and len(mode) <= 3:
try:
n = mode[:-1].strip()
n = int(n)
except ValueError:
pass
else:
s = utils.first_n_words(s, n)
cname = b + s + e
cname = cname.strip()[:100] # Discord has a character limit of 100 for voice channel names
if not cname: # Can't have empty channel name
cname = "-"
if channel.id in cfg.ATTEMPTED_CHANNEL_NAMES:
previously_unsuccessful_name = cfg.ATTEMPTED_CHANNEL_NAMES[channel.id]
else:
previously_unsuccessful_name = channel.name
if cname != previously_unsuccessful_name and cname != channel.name:
log("{0} Renaming {1} to {2}".format(str(channel.id)[-4:], channel.name, cname), guild)
try:
await channel.edit(name=cname)
except discord.errors.Forbidden:
log("Cannot rename channel {}: Missing permissions".format(channel.id), guild)
await blind_echo(":warning: **Error!** I don't have permission to rename channel `{}`{}".format(
channel.id, " in the \"{}\" category".format(channel.category) if channel.category else ""), guild)
except discord.errors.HTTPException as e:
log("Cannot rename channel {}: {}".format(channel.id, e.text), guild)
if channel.name != cname:
# Template/game/user name contains illegal characters, store attempted name for future comparison.
cfg.ATTEMPTED_CHANNEL_NAMES[channel.id] = cname
else:
if channel.id in cfg.ATTEMPTED_CHANNEL_NAMES:
del cfg.ATTEMPTED_CHANNEL_NAMES[channel.id]
return channel.name
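# Example (illustrative sketch with a made-up template): how the '##' and
# zero-padded '$0#'-style numbering placeholders handled above expand for the
# third channel of a primary (i == 2).
def _example_number_placeholders(cname="Room ## ($00#)", i=2):
    i_str = str(i + 1)
    cname = cname.replace('##', '#' + i_str)
    for x in range(5):
        cname = cname.replace('${}#'.format('0' * x), i_str.zfill(x + 1))
    return cname  # -> "Room #3 (003)"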
@utils.func_timer()
def get_secondaries(guild, settings=None, include_jc=False):
if not settings:
settings = utils.get_serv_settings(guild)
secondaries = []
for p in settings['auto_channels']:
for s, sv in settings['auto_channels'][p]['secondaries'].items():
secondaries.append(s)
if include_jc and 'jc' in sv:
secondaries.append(sv['jc'])
return secondaries
@utils.func_timer()
def get_join_channels(guild, settings=None):
if not settings:
settings = utils.get_serv_settings(guild)
jcs = {}
for p in settings['auto_channels']:
for s, sv in settings['auto_channels'][p]['secondaries'].items():
if 'jc' in sv:
sv['vc'] = s
jcs[sv['jc']] = sv
return jcs
@utils.func_timer()
def get_voice_context_channel_ids(guild, settings=None):
if not settings:
settings = utils.get_serv_settings(guild)
channel_ids = []
for p in settings['auto_channels']:
for s, sv in settings['auto_channels'][p]['secondaries'].items():
if 'tc' in sv:
channel_ids.append(sv['tc'])
return channel_ids
@utils.func_timer()
async def create_primary(guild, cname, author):
overwrites = {
guild.me: discord.PermissionOverwrite(read_messages=True,
connect=True,
manage_channels=True,
move_members=True)
}
c = await guild.create_voice_channel(cname, overwrites=overwrites)
settings = utils.get_serv_settings(guild)
settings['auto_channels'][c.id] = {"secondaries": {}}
settings['server_contact'] = author.id
utils.set_serv_settings(guild, settings)
await server_log(
guild,
"🆕 {} (`{}`) created a new primary channel channel (`{}`)".format(
user_hash(author), author.id, c.id
), 1, settings
)
return c
@utils.func_timer(2.5)
async def create_secondary(guild, primary, creator, private=False):
# Create voice channel above/below primary one and return it
settings = utils.get_serv_settings(guild)
# Double-check the creator is still in the primary channel, in an attempt to solve the infinite creation bug.
if creator not in primary.members:
log("{} no longer in primary".format(creator.display_name), guild)
return
# Check we're allowed to make the channel
if user_request_is_locked(creator):
return
elif not check_primary_permissions(primary, guild.me):
lock_user_request(creator)
log("{} ({}) tried creating a channel where I don't have permissions".format(creator.display_name,
creator.id), guild)
msg = "{} ❌ You tried creating a channel where I don't have the right permissions.".format(creator.mention)
server_contact = guild.get_member(settings['server_contact'])
msg += "\n\nPlease make sure I have the following permissions"
if primary.category:
msg += " in the \"{}\" category:\n".format(primary.category.name)
else:
msg += ":\n"
msg += "- **Manage Channel**\n"
msg += "- **Read Text Channels & See Voice Channels**\n"
msg += "- **Send Messages**\n"
msg += "- **Connect** *(under voice channel permissions)*\n"
msg += "- **Move members**\n\n"
if server_contact is not None and server_contact != creator:
msg += "If you are not an admin/manager of this server, "
msg += "{} might be able to help you.\n".format(server_contact.mention)
msg += "If you need any help, you can join my support server: <https://discord.gg/qhMrz6u>.\n\n"
msg += "This message will repeat every 5 minutes if the problem is not resolved. "
msg += "To stop this, either fix the issue or leave the voice channel causing the problem."
await blind_echo(msg, guild)
try:
await creator.move_to(None) # Kick them from voice channel
except discord.errors.Forbidden:
# If we can't create channels, we probably also don't have permission to kick people.
pass
return
else:
abuse_count = detect_abuse(creator)
if abuse_count >= cfg.ABUSE_THRESHOLD:
if abuse_count == cfg.ABUSE_THRESHOLD:
log("{} ({}) is creating channels too quickly".format(creator.display_name, creator.id), guild)
await dm_user(
creator,
":warning: **Please slow down.** :warning:\n"
"You are trying to create voice channels in **{}** too quickly "
"and have been placed on cooldown for 15 seconds.\n"
"It's perfectly okay to stress test me initially, but continued abuse or any deliberate attempt at "
"sabotage may eventually result in you being blacklisted and ignored.".format(guild.name)
)
await server_log(
guild,
"⚠ {} (`{}`) tried creating channels too quickly and has entered cooldown".format(
user_hash(creator), creator.id
), 1, settings
)
return
lock_user_request(creator, offset=20) # Add offset in case creating the channel takes more than 3s
# Find what the channel position is supposed to be
# Channel.position is relative to channels of any type, but Channel.edit(position) is relative to
# channels of that type. So we need to find that first.
c_position = 0
voice_channels = [x for x in guild.channels if isinstance(x, type(primary))]
voice_channels.sort(key=lambda ch: ch.position)
above = True
if ('above' in settings['auto_channels'][primary.id] and
settings['auto_channels'][primary.id]['above'] is False):
above = False
if above:
for x in voice_channels:
if x.id == primary.id:
break
c_position += 1
else:
secondaries = []
for p in settings['auto_channels']:
for s in settings['auto_channels'][primary.id]['secondaries']:
secondaries.append(s)
past_primary = False
for x in voice_channels:
if x.id == primary.id:
past_primary = True
elif past_primary and x.id not in secondaries:
break
c_position += 1
# Copy stuff from primary channel
user_limit = 0
if primary.user_limit:
user_limit = primary.user_limit
elif 'limit' in settings['auto_channels'][primary.id]:
user_limit = settings['auto_channels'][primary.id]['limit']
bitrate = primary.bitrate
try:
bitrate = min(guild.bitrate_limit, settings['custom_bitrates'][str(creator.id)] * 1000)
except KeyError:
pass
perms_source = (settings['auto_channels'][primary.id]['inheritperms']
if 'inheritperms' in settings['auto_channels'][primary.id]
else 'PRIMARY')
overwrites = primary.overwrites
if perms_source == 'CATEGORY':
if primary.category:
overwrites = primary.category.overwrites
elif isinstance(perms_source, int):
try:
overwrites = guild.get_channel(perms_source).overwrites
except (discord.errors.Forbidden, AttributeError):
pass
if private:
k = guild.default_role
v = overwrites[k] if k in overwrites else discord.PermissionOverwrite()
v.update(connect=False)
overwrites[k] = v
k = guild.me
v = overwrites[k] if k in overwrites else discord.PermissionOverwrite()
v.update(read_messages=True, connect=True, manage_channels=True, move_members=True)
overwrites[k] = v
# Let there be sound
try:
c = await guild.create_voice_channel(
"⌛",
category=primary.category,
position=c_position,
bitrate=bitrate,
user_limit=user_limit,
overwrites=overwrites
)
except discord.errors.HTTPException as e:
if "Maximum number of channels in category reached" in e.text:
log("Failed to create channel for {}: Max channels reached".format(creator.display_name), guild)
await dm_user(
creator,
":warning: Sorry, I was unable to create a channel for you as the maximum number of channels in that "
"category has been reached. Please let an admin of the server **{}** know about this issue so that "
"they can make another category for voice channels.".format(esc_md(guild.name)))
await creator.move_to(None) # Kick them from voice channel
lock_user_request(creator)
return
else:
raise e
log("{} Creating channel for {}".format(str(c.id)[-4:], creator.display_name), guild)
utils.permastore_secondary(c.id)
lock_channel_request(c)
settings = utils.get_serv_settings(guild)
sv = {"creator": creator.id}
if private:
sv['priv'] = True
settings['auto_channels'][primary.id]['secondaries'][c.id] = sv
settings['left'] = False # Just in case a returning guild's "on_guild_join" call wasn't caught.
settings['last_activity'] = int(time())
utils.set_serv_settings(guild, settings)
try:
await c.edit(position=c_position) # Set position again, sometimes create_voice_channel gets it wrong.
except discord.errors.Forbidden:
# Harmless error, no idea why it sometimes throws this, seems like a bug.
pass
# Move user
try:
await creator.move_to(c)
except discord.errors.HTTPException as e:
log("Failed to move user {}: {}".format(creator.display_name, e.text), guild)
lock_user_request(creator)
return c
lock_user_request(creator, 5) # Lock again just to remove the 20s offset used earlier
# Rename channel
num_siblings = len([s for s in settings['auto_channels'][primary.id]['secondaries'] if s != c.id])
name = await rename_channel(
guild=guild,
channel=c,
settings=None,
primary_id=primary.id,
i=num_siblings,
ignore_lock=True
)
# Logging
log_msg = "✅ {} (`{}`) created \"**{}**\" (`{}`) using \"**{}**\" (`{}`)".format(
user_hash(creator), creator.id,
"None" if not name else esc_md(name), c.id,
esc_md(primary.name), primary.id
)
if bitrate != primary.bitrate:
log_msg += " [{}kbps]".format(round(bitrate / 1000))
await server_log(guild, log_msg, 1, settings)
# Text Channel
settings = utils.get_serv_settings(guild)
if 'text_channels' in settings and settings['text_channels'] and is_gold(guild):
try:
r = await guild.create_role(name="🎤🤖vc {}".format(c.id))
except discord.errors.Forbidden:
return c
await creator.add_roles(r)
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True),
r: discord.PermissionOverwrite(read_messages=True),
}
if 'stct' in settings:
showto_r = guild.get_role(settings['stct'])
if showto_r:
overwrites[showto_r] = discord.PermissionOverwrite(read_messages=True)
tc = await guild.create_text_channel(
utils.nice_cname(settings['text_channel_name']) if 'text_channel_name' in settings else "voice context",
category=primary.category,
overwrites=overwrites,
topic=(":eye: This channel is only visible to members of your voice channel, "
"and admins of this server. It will be deleted when everyone leaves. VC ID: {}".format(c.id)))
settings = utils.get_serv_settings(guild)
settings['auto_channels'][primary.id]['secondaries'][c.id]['tc'] = tc.id
settings['auto_channels'][primary.id]['secondaries'][c.id]['tcr'] = r.id
utils.set_serv_settings(guild, settings)
if creator in c.members:
unlock_channel_request(c)
else:
log("{} Still trying to move {}".format(str(c.id)[-4:], creator.display_name), guild)
lock_channel_request(c, 20)
lock_user_request(creator, 20)
return c
@utils.func_timer()
async def delete_secondary(guild, channel):
if channel_is_requested(channel):
return
lock_channel_request(channel)
log("{} Deleting {}".format(str(channel.id)[-4:], channel.name), guild)
cid = channel.id
try:
await channel.delete()
except discord.errors.NotFound:
pass
except discord.errors.Forbidden:
log("Forbidden to delete channel {} in guild {}".format(channel.id, guild.id), guild)
await blind_echo(":warning: **Error!** I don't have permission to delete channel `{}`{}".format(
channel.id, " in the \"{}\" category".format(channel.category) if channel.category else ""), guild)
lock_channel_request(channel, 10)
except Exception:
log("Failed to delete channel {} in guild {}".format(channel.id, guild.id), guild)
lock_channel_request(channel, 10)
print(traceback.format_exc())
else:
settings = utils.get_serv_settings(guild)
for p in settings['auto_channels']:
tmp = settings['auto_channels'][p]['secondaries'].copy()
for s, sv in tmp.items():
if s == cid:
if 'jc' in sv:
jc = guild.get_channel(sv['jc'])
if jc:
try:
await jc.delete()
except discord.errors.NotFound:
# Small chance of channel disappearing before we can delete it
pass
if 'tc' in sv:
tc = guild.get_channel(sv['tc'])
if tc:
try:
await tc.delete()
except discord.errors.NotFound:
# Small chance of channel disappearing before we can delete it
pass
if 'tcr' in sv:
tcr = guild.get_role(sv['tcr'])
if tcr:
try:
await tcr.delete()
except discord.errors.NotFound:
# Small chance of role disappearing before we can delete it
pass
del settings['auto_channels'][p]['secondaries'][s]
utils.set_serv_settings(guild, settings)
if channel.id in cfg.ATTEMPTED_CHANNEL_NAMES:
del cfg.ATTEMPTED_CHANNEL_NAMES[channel.id]
unlock_channel_request(channel)
await server_log(
guild,
"❌ \"**{}**\" (`{}`) was deleted".format(
esc_md(channel.name), channel.id
), 2, settings
)
@utils.func_timer()
async def remove_broken_channels(guild):
voice_channels = [x for x in guild.channels if isinstance(x, discord.VoiceChannel)]
for v in voice_channels:
if v.name in ['⌛', '⚠'] and not channel_is_requested(v):
if not v.members:
lock_channel_request(v)
try:
await v.delete()
except discord.errors.Forbidden:
log("Forbidden to delete channel {} in guild {}".format(v.id, guild.id), guild)
await blind_echo(":warning: **Error!** I don't have permission to delete channel `{}`{}".format(
v.id, " in the \"{}\" category".format(v.category) if v.category else ""), guild)
lock_channel_request(v, 10)
except discord.errors.NotFound:
log("Failed to delete channel {} in guild {} (NotFound)".format(v.id, guild.id), guild)
lock_channel_request(v, 10)
except Exception:
log("Failed to delete channel {} in guild {}".format(v.id, guild.id), guild)
lock_channel_request(v, 10)
print(traceback.format_exc())
unlock_channel_request(v)
# TODO gold only
text_channels = [x for x in guild.channels if isinstance(x, discord.TextChannel)]
for c in text_channels:
front = (":eye: This channel is only visible to members of your voice channel, "
"and admins of this server. It will be deleted when everyone leaves. VC ID: ")
if c.topic and c.topic.startswith(front):
try:
vcid = int(c.topic.split(front)[1])
except ValueError:
continue
vc = guild.get_channel(vcid)
if not vc:
try:
await c.delete()
except discord.errors.NotFound:
pass
for r in guild.roles:
front = "🎤🤖vc "
if r.name.startswith("🎤🤖vc "):
try:
vcid = int(r.name.split(front)[1])
except ValueError:
continue
vc = guild.get_channel(vcid)
if not vc:
try:
await r.delete()
except discord.errors.NotFound:
pass
| 37.849962 | 120 | 0.558945 |
1f2aa6da91f2bd6e8d325474c416eba4e1c4e3d0 | 13,999 | py | Python | melodic/lib/python2.7/dist-packages/roslaunch/remoteprocess.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | null | null | null | melodic/lib/python2.7/dist-packages/roslaunch/remoteprocess.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | 1 | 2021-07-08T10:26:06.000Z | 2021-07-08T10:31:11.000Z | melodic/lib/python2.7/dist-packages/roslaunch/remoteprocess.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | null | null | null | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""
Process handler for launching ssh-based roslaunch child processes.
"""
import os
import socket
import traceback
try:
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
import rosgraph
from roslaunch.core import printlog, printerrlog
import roslaunch.pmon
import roslaunch.server
import logging
_logger = logging.getLogger("roslaunch.remoteprocess")
# #1975 timeout for creating ssh connections
TIMEOUT_SSH_CONNECT = 30.
def ssh_check_known_hosts(ssh, address, port, username=None, logger=None):
"""
Validation routine for loading the host keys and making sure that
they are configured properly for the desired SSH. The behavior of
this routine can be modified by the ROSLAUNCH_SSH_UNKNOWN
environment variable, which enables the paramiko.AutoAddPolicy.
:param ssh: paramiko SSH client, :class:`paramiko.SSHClient`
:param address: SSH IP address, ``str``
:param port: SSH port, ``int``
:param username: optional username to include in error message if check fails, ``str``
:param logger: (optional) logger to record tracebacks to, :class:`logging.Logger`
:returns: error message if improperly configured, or ``None``. ``str``
"""
import paramiko
try:
try:
if os.path.isfile('/etc/ssh/ssh_known_hosts'): #default ubuntu location
ssh.load_system_host_keys('/etc/ssh/ssh_known_hosts')
except IOError:
pass
ssh.load_system_host_keys() #default user location
except:
if logger:
logger.error(traceback.format_exc())
# as seen in #767, base64 raises generic Error.
#
# A corrupt pycrypto build can also cause this, though
# the cause of the corrupt builds has been fixed.
return "cannot load SSH host keys -- your known_hosts file may be corrupt"
# #3158: resolve the actual host using the user's ~/.ssh/config
ssh_config = paramiko.SSHConfig()
try:
with open(os.path.join(os.path.expanduser('~'), '.ssh', 'config')) as f:
ssh_config.parse(f)
config_lookup = ssh_config.lookup(address)
resolved_address = config_lookup['hostname'] if 'hostname' in config_lookup else address
except:
resolved_address = address
# #1849: paramiko will raise an SSHException with an 'Unknown
# server' message if the address is not in the known_hosts
# file. This causes a lot of confusion to users, so we try
# and diagnose this in advance and offer better guidance
# - ssh.get_host_keys() does not return the system host keys
hk = ssh._system_host_keys
override = os.environ.get('ROSLAUNCH_SSH_UNKNOWN', 0)
if override == '1':
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
elif hk.lookup(resolved_address) is None:
port_str = user_str = ''
if port != 22:
port_str = "-p %s "%port
if username:
user_str = username+'@'
return """%s is not in your SSH known_hosts file.
Please manually:
ssh %s%s%s
then try roslaunching again.
If you wish to configure roslaunch to automatically recognize unknown
hosts, please set the environment variable ROSLAUNCH_SSH_UNKNOWN=1"""%(resolved_address, port_str, user_str, resolved_address)
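# A minimal usage sketch for ssh_check_known_hosts() (added for illustration,
# not part of the original module). It assumes paramiko is installed and uses
# a placeholder address "remote-host"; kept as comments so importing this
# module stays side-effect free:
#
#   import paramiko
#   client = paramiko.SSHClient()
#   err = ssh_check_known_hosts(client, 'remote-host', 22, username='ros', logger=_logger)
#   if err:
#       printerrlog(err)   # host key missing or known_hosts unreadable
#   else:
#       client.connect('remote-host', 22, 'ros', timeout=TIMEOUT_SSH_CONNECT)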
class SSHChildROSLaunchProcess(roslaunch.server.ChildROSLaunchProcess):
"""
Process wrapper for launching and monitoring a child roslaunch process over SSH
"""
def __init__(self, run_id, name, server_uri, machine, master_uri=None):
"""
:param machine: Machine instance. Must be fully configured.
machine.env_loader is required to be set.
"""
if not machine.env_loader:
raise ValueError("machine.env_loader must have been assigned before creating ssh child instance")
args = [machine.env_loader, 'roslaunch', '-c', name, '-u', server_uri, '--run_id', run_id]
# env is always empty dict because we only use env_loader
super(SSHChildROSLaunchProcess, self).__init__(name, args, {})
self.machine = machine
self.master_uri = master_uri
self.ssh = self.sshin = self.sshout = self.ssherr = None
self.started = False
self.uri = None
# self.is_dead is a flag set by is_alive that affects whether or not we
# log errors during a stop().
self.is_dead = False
def _ssh_exec(self, command, address, port, username=None, password=None):
"""
:returns: (ssh pipes, message). If error occurs, returns (None, error message).
"""
if self.master_uri:
env_command = 'env %s=%s' % (rosgraph.ROS_MASTER_URI, self.master_uri)
command = '%s %s' % (env_command, command)
try:
import paramiko
except ImportError as e:
_logger.error("cannot use SSH: paramiko is not installed")
return None, "paramiko is not installed"
#load user's ssh configuration
config_block = {'hostname': None, 'user': None, 'identityfile': None}
ssh_config = paramiko.SSHConfig()
try:
with open(os.path.join(os.path.expanduser('~'), '.ssh','config')) as f:
ssh_config.parse(f)
config_block.update(ssh_config.lookup(address))
except:
pass
address = config_block['hostname'] or address
username = username or config_block['user']
identity_file = None
if config_block.get('identityfile', None):
if isinstance(config_block['identityfile'], list):
identity_file = [os.path.expanduser(f) for f in config_block['identityfile']]
else:
identity_file = os.path.expanduser(config_block['identityfile'])
#load ssh client and connect
ssh = paramiko.SSHClient()
err_msg = ssh_check_known_hosts(ssh, address, port, username=username, logger=_logger)
if not err_msg:
username_str = '%s@'%username if username else ''
try:
if password is None: #use SSH agent
ssh.connect(address, port, username, timeout=TIMEOUT_SSH_CONNECT, key_filename=identity_file)
else: #use SSH with login/pass
ssh.connect(address, port, username, password, timeout=TIMEOUT_SSH_CONNECT)
except paramiko.BadHostKeyException:
_logger.error(traceback.format_exc())
err_msg = "Unable to verify host key for remote computer[%s:%s]"%(address, port)
except paramiko.AuthenticationException:
_logger.error(traceback.format_exc())
err_msg = "Authentication to remote computer[%s%s:%s] failed.\nA common cause of this error is a missing key in your authorized_keys file."%(username_str, address, port)
except paramiko.SSHException as e:
_logger.error(traceback.format_exc())
if str(e).startswith("Unknown server"):
pass
err_msg = "Unable to establish ssh connection to [%s%s:%s]: %s"%(username_str, address, port, e)
except socket.error as e:
# #1824
if e.args[0] == 111:
err_msg = "network connection refused by [%s:%s]"%(address, port)
else:
err_msg = "network error connecting to [%s:%s]: %s"%(address, port, str(e))
if err_msg:
return None, err_msg
else:
printlog("launching remote roslaunch child with command: [%s]"%(str(command)))
sshin, sshout, ssherr = ssh.exec_command(command)
return (ssh, sshin, sshout, ssherr), "executed remotely"
def start(self):
"""
Start the remote process. This will create an SSH connection
to the remote host.
"""
self.started = False #won't set to True until we are finished
self.ssh = self.sshin = self.sshout = self.ssherr = None
with self.lock:
name = self.name
m = self.machine
if m.user is not None:
printlog("remote[%s]: creating ssh connection to %s:%s, user[%s]"%(name, m.address, m.ssh_port, m.user))
else:
printlog("remote[%s]: creating ssh connection to %s:%s"%(name, m.address, m.ssh_port))
_logger.info("remote[%s]: invoking with ssh exec args [%s]"%(name, ' '.join(self.args)))
sshvals, msg = self._ssh_exec(' '.join(self.args), m.address, m.ssh_port, m.user, m.password)
if sshvals is None:
printerrlog("remote[%s]: failed to launch on %s:\n\n%s\n\n"%(name, m.name, msg))
return False
self.ssh, self.sshin, self.sshout, self.ssherr = sshvals
printlog("remote[%s]: ssh connection created"%name)
self.started = True
return True
def getapi(self):
"""
:returns: ServerProxy to remote client XMLRPC server, `ServerProxy`
"""
if self.uri:
return ServerProxy(self.uri)
else:
return None
def is_alive(self):
"""
:returns: ``True`` if the process is alive. is_alive needs to be
called periodically as it drains the SSH buffer, ``bool``
"""
if self.started and not self.ssh:
return False
elif not self.started:
return True #not started is equivalent to alive in our logic
s = self.ssherr
s.channel.settimeout(0)
try:
#drain the pipes
data = s.read(2048)
if not len(data):
self.is_dead = True
return False
            # #2012 i18n: ssh *should* be UTF-8, but often isn't
# (e.g. Japan)
data = data.decode('utf-8')
printerrlog("remote[%s]: %s"%(self.name, data))
except socket.timeout:
pass
except IOError:
return False
except UnicodeDecodeError:
# #2012: soft fail, printing is not essential. This occurs
# with older machines that don't send utf-8 over ssh
pass
s = self.sshout
s.channel.settimeout(0)
try:
#drain the pipes
#TODO: write to log file
data = s.read(2048)
if not len(data):
self.is_dead = True
return False
except socket.timeout:
pass
except IOError:
return False
return True
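    # Hedged illustration (added comment, not original code): because
    # is_alive() drains the ssh stdout/stderr channels, a supervising monitor
    # (e.g. roslaunch.pmon) is expected to poll it periodically, roughly:
    #
    #   while proc.is_alive():      # `proc` is a started SSHChildROSLaunchProcess
    #       time.sleep(0.5)         # assumes `time` is imported by the caller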
def stop(self, errors=None):
"""
Terminate this process, including the SSH connection.
"""
if errors is None:
errors = []
with self.lock:
if not self.ssh:
return
# call the shutdown API first as closing the SSH connection
# won't actually kill the process unless it triggers SIGPIPE
try:
api = self.getapi()
if api is not None:
#TODO: probably need a timeout on this
api.shutdown()
except socket.error:
# normal if process is already dead
address, port = self.machine.address, self.machine.ssh_port
if not self.is_dead:
printerrlog("remote[%s]: unable to contact [%s] to shutdown remote processes!"%(self.name, address))
else:
printlog("remote[%s]: unable to contact [%s] to shutdown cleanly. The remote roslaunch may have exited already."%(self.name, address))
except:
# temporary: don't really want to log here as this
# may occur during shutdown
traceback.print_exc()
_logger.info("remote[%s]: closing ssh connection", self.name)
self.sshin.close()
self.sshout.close()
self.ssherr.close()
self.ssh.close()
self.sshin = None
self.sshout = None
self.ssherr = None
self.ssh = None
_logger.info("remote[%s]: ssh connection closed", self.name)
| 42.421212 | 185 | 0.615044 |
85b229b06a1822db33306dabe12e22ee9b07d599 | 530 | py | Python | determined_ai_sphinx_theme/__init__.py | determined-ai/pedl_sphinx_theme | 9edfa7c6ce6926def9fc69b8ddd7666f3419a907 | [
"MIT"
] | null | null | null | determined_ai_sphinx_theme/__init__.py | determined-ai/pedl_sphinx_theme | 9edfa7c6ce6926def9fc69b8ddd7666f3419a907 | [
"MIT"
] | 2 | 2020-03-10T00:15:46.000Z | 2020-04-04T19:39:15.000Z | determined_ai_sphinx_theme/__init__.py | determined-ai/pedl_sphinx_theme | 9edfa7c6ce6926def9fc69b8ddd7666f3419a907 | [
"MIT"
] | null | null | null | """Determined AI Sphinx theme.
From https://github.com/determined-ai/pedl_sphinx_theme.
"""
from os import path
__version__ = '0.0.25'
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
app.add_html_theme('determined_ai_sphinx_theme', path.abspath(path.dirname(__file__)))
| 26.5 | 96 | 0.749057 |
e3a0cc511c877a7b7d6a9128228d05331ea8e0f6 | 6,268 | py | Python | src/encode_task_idr.py | CollinsLabBioComp/chip-seq-pipeline2 | 61f77dd94b3afe39bd718b30f1a9a6a7b9676c30 | [
"MIT"
] | 1 | 2019-12-08T08:04:15.000Z | 2019-12-08T08:04:15.000Z | src/encode_task_idr.py | leezx/chip-seq-pipeline2 | 61f77dd94b3afe39bd718b30f1a9a6a7b9676c30 | [
"MIT"
] | null | null | null | src/encode_task_idr.py | leezx/chip-seq-pipeline2 | 61f77dd94b3afe39bd718b30f1a9a6a7b9676c30 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# ENCODE DCC IDR wrapper
# Author: Jin Lee (leepc12@gmail.com)
import sys
import os
import argparse
import math
from encode_lib_common import (
assert_file_not_empty, log, ls_l, mkdir_p, rm_f, run_shell_cmd)
from encode_lib_genomic import (
peak_to_bigbed, peak_to_hammock)
from encode_lib_blacklist_filter import blacklist_filter
from encode_lib_frip import frip, frip_shifted
def parse_arguments():
parser = argparse.ArgumentParser(
prog='ENCODE DCC IDR.',
description='NarrowPeak or RegionPeak only.')
parser.add_argument('peak1', type=str,
help='Peak file 1.')
parser.add_argument('peak2', type=str,
help='Peak file 2.')
parser.add_argument('peak_pooled', type=str,
help='Pooled peak file.')
parser.add_argument('--prefix', default='idr', type=str,
help='Prefix basename for output IDR peak.')
parser.add_argument('--peak-type', type=str, required=True,
choices=['narrowPeak', 'regionPeak',
'broadPeak', 'gappedPeak'],
help='Peak file type.')
parser.add_argument('--idr-thresh', default=0.1, type=float,
help='IDR threshold.')
parser.add_argument('--idr-rank', default='p.value', type=str,
choices=['p.value', 'q.value', 'signal.value'],
help='IDR ranking method.')
parser.add_argument('--blacklist', type=str,
help='Blacklist BED file.')
parser.add_argument('--regex-bfilt-peak-chr-name',
help='Keep chromosomes matching this pattern only '
'in .bfilt. peak files.')
parser.add_argument('--ta', type=str,
help='TAGALIGN file for FRiP.')
parser.add_argument('--chrsz', type=str,
help='2-col chromosome sizes file.')
parser.add_argument('--fraglen', type=int, default=0,
help='Fragment length for TAGALIGN file. \
If given, do shifted FRiP (for ChIP-Seq).')
parser.add_argument('--out-dir', default='', type=str,
help='Output directory.')
parser.add_argument('--log-level', default='INFO',
                        choices=['NOTSET', 'DEBUG', 'INFO',
                                 'WARNING', 'ERROR', 'CRITICAL'],
help='Log level')
args = parser.parse_args()
if args.blacklist is None or args.blacklist.endswith('null'):
args.blacklist = ''
log.setLevel(args.log_level)
log.info(sys.argv)
return args
def get_npeak_col_by_rank(rank):
if rank == 'signal.value':
return 7
elif rank == 'p.value':
return 8
elif rank == 'q.value':
return 9
else:
raise Exception('Invalid score ranking method')
# only for narrowPeak (or regionPeak) type
def idr(basename_prefix, peak1, peak2, peak_pooled, peak_type,
thresh, rank, out_dir):
prefix = os.path.join(out_dir, basename_prefix)
prefix += '.idr{}'.format(thresh)
idr_peak = '{}.{}.gz'.format(prefix, peak_type)
idr_out_gz = '{}.unthresholded-peaks.txt.gz'.format(prefix)
idr_plot = '{}.unthresholded-peaks.txt.png'.format(prefix)
idr_stdout = '{}.log'.format(prefix)
# temporary
idr_12col_bed = '{}.12-col.bed.gz'.format(peak_type)
idr_out = '{}.unthresholded-peaks.txt'.format(prefix)
cmd1 = 'idr --samples {} {} --peak-list {} --input-file-type narrowPeak '
cmd1 += '--output-file {} --rank {} --soft-idr-threshold {} '
cmd1 += '--plot --use-best-multisummit-IDR --log-output-file {}'
cmd1 = cmd1.format(
peak1,
peak2,
peak_pooled,
idr_out,
rank,
thresh,
idr_stdout)
run_shell_cmd(cmd1)
col = get_npeak_col_by_rank(rank)
neg_log10_thresh = -math.log10(thresh)
# LC_COLLATE=C
cmd2 = 'awk \'BEGIN{{OFS="\\t"}} $12>={} '
cmd2 += '{{if ($2<0) $2=0; '
cmd2 += 'print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12}}\' {} '
cmd2 += '| sort | uniq | sort -grk{},{} | gzip -nc > {}'
cmd2 = cmd2.format(
neg_log10_thresh,
idr_out,
col,
col,
idr_12col_bed)
run_shell_cmd(cmd2)
cmd3 = 'zcat {} | '
cmd3 += 'awk \'BEGIN{{OFS="\\t"}} '
cmd3 += '{{print $1,$2,$3,$4,$5,$6,$7,$8,$9,$10}}\' | '
cmd3 += 'gzip -nc > {}'
cmd3 = cmd3.format(
idr_12col_bed,
idr_peak)
run_shell_cmd(cmd3)
cmd4 = 'gzip -f {}'.format(idr_out)
run_shell_cmd(cmd4)
rm_f([idr_out, idr_12col_bed])
rm_f('{}.*.noalternatesummitpeaks.png'.format(prefix))
return idr_peak, idr_plot, idr_out_gz, idr_stdout
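# Worked note (added for clarity, not in the original script): with the default
# --idr-thresh of 0.1, neg_log10_thresh = -log10(0.1) = 1.0, so the awk filter
# in cmd2 keeps only rows whose column 12 (the IDR score on a -log10 scale) is
# >= 1.0. A quick sanity check of that conversion:
#
#   >>> import math
#   >>> -math.log10(0.1)
#   1.0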
def main():
# read params
args = parse_arguments()
log.info('Initializing and making output directory...')
mkdir_p(args.out_dir)
log.info('Do IDR...')
idr_peak, idr_plot, idr_out_gz, idr_stdout = idr(
args.prefix,
args.peak1, args.peak2, args.peak_pooled, args.peak_type,
args.idr_thresh, args.idr_rank, args.out_dir)
log.info('Blacklist-filtering peaks...')
bfilt_idr_peak = blacklist_filter(
idr_peak, args.blacklist, args.regex_bfilt_peak_chr_name, args.out_dir)
log.info('Checking if output is empty...')
assert_file_not_empty(bfilt_idr_peak)
log.info('Converting peak to bigbed...')
peak_to_bigbed(bfilt_idr_peak, args.peak_type, args.chrsz,
args.out_dir)
log.info('Converting peak to hammock...')
peak_to_hammock(bfilt_idr_peak, args.out_dir)
if args.ta: # if TAG-ALIGN is given
if args.fraglen: # chip-seq
log.info('Shifted FRiP with fragment length...')
frip_shifted(args.ta, bfilt_idr_peak,
args.chrsz, args.fraglen, args.out_dir)
else: # atac-seq
log.info('FRiP without fragment length...')
frip(args.ta, bfilt_idr_peak, args.out_dir)
log.info('List all files in output directory...')
ls_l(args.out_dir)
log.info('All done.')
if __name__ == '__main__':
main()
| 34.629834 | 79 | 0.582642 |
423a55f507416848cd152b3a2acbed2f9d8f5bb8 | 2,607 | py | Python | clay/cli.py | lucuma/Clay | 07923ddb5b710cdea21fd7c4667e4189ebc0640e | [
"Apache-2.0"
] | 35 | 2015-02-12T15:34:47.000Z | 2021-09-27T17:16:04.000Z | clay/cli.py | lucuma/Clay | 07923ddb5b710cdea21fd7c4667e4189ebc0640e | [
"Apache-2.0"
] | 11 | 2015-01-11T20:48:49.000Z | 2020-12-23T18:52:22.000Z | clay/cli.py | lucuma/Clay | 07923ddb5b710cdea21fd7c4667e4189ebc0640e | [
"Apache-2.0"
] | 10 | 2015-02-27T19:48:46.000Z | 2018-06-24T09:39:27.000Z | import hecto
from pyceo import Manager
from pyceo import option
from pyceo import param
from .main import Clay, BLUEPRINT
from .server import make_app
from .version import __version__
cli = Manager(f"<b>Clay v{__version__}", catch_errors=False)
@cli.command(help="Creates a new Clay project at `dest`.")
@param("dest", help="Where to create the new project.")
@option("tmpl", help="Optional template to use to create the project.")
@option("quiet", help="Supress the status output.")
def new(dest, tmpl=BLUEPRINT, quiet=False):
"""The `clay new` command creates a new Clay project with a default
structure at the path you specify.
You can also specify an optional project template as can be an absolute or
relative path or a git URL. For the URLs, "gh:" works as a shortcut of
"https://github.com/" and "gl:"as a shortcut of "https://gitlab.com/".
Examples:
# Render a default project to the "myapp/" folder
clay new myapp
# Custom template from an absolute or relative path.
clay new myapp /path/to/project/template
# Custom template from GitHub repo. Note the ".git" postfix.
clay new myapp https://github.com/lucuma/clay-template.git
# Custom template from the same GitHub repo with shortcut
clay new myapp gh:/lucuma/clay-template.git
"""
hecto.copy(tmpl, dest, quiet=quiet)
print(f"\n Done! Now go to the `{dest}` folder")
print(" and do `clay run` to start the server.\n")
@cli.command(help="Run Clay’s development server.")
@option("host", help="0.0.0.0 by default")
@option("port", type=int, help="8080 by default")
@option("source", help="Where to find the project. By default in the current folder.")
def run(host="0.0.0.0", port=8080, source="."): # pragma: no cover
clay = Clay(source)
app = make_app(clay)
app.run(host, port)
@cli.command(help="Generates a static copy of the project in a `build` folder.")
@option("source", help="Where to find the project. By default in the current folder.")
@option("quiet", help="Supress the status output.")
def build(source=".", quiet=False):
clay = Clay(source)
clay.build(quiet=quiet)
print("\n Done! You'll find a static version of your ")
print(f" project in the `build` folder.\n")
@cli.command(help="Return a list of the available pages")
@option("source", help="Where to find the project. By default in the current folder.")
def pages(source="."):
clay = Clay(source)
pages = clay.list_pages()
for page in pages:
print(page)
def run_cli(): # pragma: no cover
cli.run()
| 33.423077 | 86 | 0.682394 |
2e8df986933b149da24aa4ddf86b3f284c06117b | 9,873 | py | Python | embeddingWord.py | MrJohnsson77/Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction | f88b1a127613169442b021802876d5b8f806bee0 | [
"MIT"
] | 1 | 2018-01-06T11:51:51.000Z | 2018-01-06T11:51:51.000Z | embeddingWord.py | shaz13/Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction | f88b1a127613169442b021802876d5b8f806bee0 | [
"MIT"
] | null | null | null | embeddingWord.py | shaz13/Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction | f88b1a127613169442b021802876d5b8f806bee0 | [
"MIT"
] | 1 | 2020-10-19T06:05:53.000Z | 2020-10-19T06:05:53.000Z | #!/usr/bin/python
import os
import json
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
import en
import operator
from datetime import datetime
from sklearn.utils import shuffle
from nltk.corpus import reuters
# reference https://github.com/lazyprogrammer/machine_learning_examples/blob/master/nlp_class2/glove.py
class Glove:
def __init__(self, D, V, context_sz):
self.D = D
self.V = V
self.context_sz = context_sz
def fit(self, sentences, cc_matrix=None, learning_rate=10e-5, reg=0.1, xmax=100, alpha=0.75, epochs=10, gd=False, use_theano=True):
# build co-occurrence matrix
# paper calls it X, so we will call it X, instead of calling
# the training data X
# TODO: would it be better to use a sparse matrix?
t0 = datetime.now()
V = self.V
D = self.D
if os.path.exists(cc_matrix):
X = np.load(cc_matrix)
else:
X = np.zeros((V, V))
N = len(sentences)
print "number of sentences to process:", N
it = 0
for sentence in sentences:
it += 1
if it % 10000 == 0:
print "processed", it, "/", N
n = len(sentence)
for i in xrange(n):
wi = sentence[i]
start = max(0, i - self.context_sz)
end = min(n, i + self.context_sz)
# we can either choose only one side as context, or both
# here we are doing both
# make sure "start" and "end" tokens are part of some context
# otherwise their f(X) will be 0 (denominator in bias update)
if i - self.context_sz < 0:
points = 1.0 / (i + 1)
X[wi,0] += points
X[0,wi] += points
if i + self.context_sz > n:
points = 1.0 / (n - i)
X[wi,1] += points
X[1,wi] += points
for j in xrange(start, i):
if j == i: continue
wj = sentence[j]
points = 1.0 / abs(i - j) # this is +ve
X[wi,wj] += points
X[wj,wi] += points
# save the cc matrix because it takes forever to create
np.save(cc_matrix, X)
print "max in X:", X.max()
# weighting
fX = np.zeros((V, V))
fX[X < xmax] = (X[X < xmax] / float(xmax)) ** alpha
fX[X >= xmax] = 1
print "max in f(X):", fX.max()
# target
logX = np.log(X + 1)
print "max in log(X):", logX.max()
print "time to build co-occurrence matrix:", (datetime.now() - t0)
# initialize weights
W = np.random.randn(V, D) / np.sqrt(V + D)
b = np.zeros(V)
U = np.random.randn(V, D) / np.sqrt(V + D)
c = np.zeros(V)
mu = logX.mean()
if gd and use_theano:
thW = theano.shared(W)
thb = theano.shared(b)
thU = theano.shared(U)
thc = theano.shared(c)
thLogX = T.matrix('logX')
thfX = T.matrix('fX')
params = [thW, thb, thU, thc]
thDelta = thW.dot(thU.T) + T.reshape(thb, (V, 1)) + T.reshape(thc, (1, V)) + mu - thLogX
thCost = ( thfX * thDelta * thDelta ).sum()
grads = T.grad(thCost, params)
updates = [(p, p - learning_rate*g) for p, g in zip(params, grads)]
train_op = theano.function(
inputs=[thfX, thLogX],
updates=updates,
)
costs = []
sentence_indexes = range(len(sentences))
for epoch in xrange(epochs):
delta = W.dot(U.T) + b.reshape(V, 1) + c.reshape(1, V) + mu - logX
cost = ( fX * delta * delta ).sum()
costs.append(cost)
print "epoch:", epoch, "cost:", cost
if gd:
# gradient descent method
if use_theano:
train_op(fX, logX)
W = thW.get_value()
b = thb.get_value()
U = thU.get_value()
c = thc.get_value()
else:
# update W
oldW = W.copy()
for i in xrange(V):
W[i] -= learning_rate*(fX[i,:]*delta[i,:]).dot(U)
W -= learning_rate*reg*W
# update b
for i in xrange(V):
b[i] -= learning_rate*fX[i,:].dot(delta[i,:])
b -= learning_rate*reg*b
# update U
for j in xrange(V):
U[j] -= learning_rate*(fX[:,j]*delta[:,j]).dot(oldW)
U -= learning_rate*reg*U
# update c
for j in xrange(V):
c[j] -= learning_rate*fX[:,j].dot(delta[:,j])
c -= learning_rate*reg*c
else:
# ALS method
# update W
# fast way
# t0 = datetime.now()
for i in xrange(V):
# matrix = reg*np.eye(D) + np.sum((fX[i,j]*np.outer(U[j], U[j]) for j in xrange(V)), axis=0)
matrix = reg*np.eye(D) + (fX[i,:]*U.T).dot(U)
# assert(np.abs(matrix - matrix2).sum() < 10e-5)
vector = (fX[i,:]*(logX[i,:] - b[i] - c - mu)).dot(U)
W[i] = np.linalg.solve(matrix, vector)
# print "fast way took:", (datetime.now() - t0)
# update b
for i in xrange(V):
denominator = fX[i,:].sum()
# assert(denominator > 0)
numerator = fX[i,:].dot(logX[i,:] - W[i].dot(U.T) - c - mu)
# for j in xrange(V):
# numerator += fX[i,j]*(logX[i,j] - W[i].dot(U[j]) - c[j])
b[i] = numerator / denominator / (1 + reg)
# print "updated b"
# update U
for j in xrange(V):
matrix = reg*np.eye(D) + (fX[:,j]*W.T).dot(W)
vector = (fX[:,j]*(logX[:,j] - b - c[j] - mu)).dot(W)
U[j] = np.linalg.solve(matrix, vector)
# update c
for j in xrange(V):
denominator = fX[:,j].sum()
numerator = fX[:,j].dot(logX[:,j] - W.dot(U[j]) - b - mu)
c[j] = numerator / denominator / (1 + reg)
self.W = W
self.U = U
plt.plot(costs)
plt.show()
def save(self, fn):
# function word_analogies expects a (V,D) matrx and a (D,V) matrix
arrays = [self.W, self.U.T]
np.savez(fn, *arrays)
def unify_word(word): # went -> go, apples -> apple, BIG -> big
try: word = en.verb.present(word) # unify tense
except: pass
try: word = en.noun.singular(word) # unify noun
except: pass
return word.lower()
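# Illustrative expected behaviour of unify_word (added comment, not original
# code); the exact output depends on the `en` (NodeBox English linguistics)
# package used above:
#
#   unify_word('went')   -> 'go'
#   unify_word('apples') -> 'apple'
#   unify_word('BIG')    -> 'big'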
def get_reuters_data(n_vocab):
# return variables
sentences = []
word2idx = {'START': 0, 'END': 1}
idx2word = ['START', 'END']
current_idx = 2
word_idx_count = {0: float('inf'), 1: float('inf')}
tag = 0
for field in reuters.fileids():
sentence = reuters.words(field)
tokens = [unify_word(t) for t in sentence]
for t in tokens:
if t not in word2idx:
word2idx[t] = current_idx
idx2word.append(t)
current_idx += 1
idx = word2idx[t]
word_idx_count[idx] = word_idx_count.get(idx, 0) + 1
sentence_by_idx = [word2idx[t] for t in tokens]
sentences.append(sentence_by_idx)
tag += 1
print(tag)
# restrict vocab size
sorted_word_idx_count = sorted(word_idx_count.items(), key=operator.itemgetter(1), reverse=True)
word2idx_small = {}
new_idx = 0
idx_new_idx_map = {}
for idx, count in sorted_word_idx_count[:n_vocab]:
word = idx2word[idx]
print word, count
word2idx_small[word] = new_idx
idx_new_idx_map[idx] = new_idx
new_idx += 1
# let 'unknown' be the last token
word2idx_small['UNKNOWN'] = new_idx
unknown = new_idx
# map old idx to new idx
sentences_small = []
for sentence in sentences:
if len(sentence) > 1:
new_sentence = [idx_new_idx_map[idx] if idx in idx_new_idx_map else unknown for idx in sentence]
sentences_small.append(new_sentence)
return sentences_small, word2idx_small
def main(we_file, w2i_file, sen):
cc_matrix = "./input/cc_matrix.npy"
if not os.path.isfile(w2i_file):
sentences, word2idx = get_reuters_data(n_vocab=2000)
with open(w2i_file, 'w') as f:
json.dump(word2idx, f)
with open(sen, 'w') as f:
json.dump(sentences, f)
else:
with open(w2i_file) as data_file:
word2idx = json.load(data_file)
with open(sen) as data_file:
sentences = json.load(data_file)
V = len(word2idx)
model = Glove(50, V, 10)
# model.fit(sentences, cc_matrix=cc_matrix, epochs=20) # ALS
model.fit(
sentences,
cc_matrix=cc_matrix,
learning_rate=3*10e-5,
reg=0.01,
epochs=2000,
gd=True,
use_theano=True
) # gradient descent
model.save(we_file)
if __name__ == '__main__':
we = './input/glove_model_50.npz'
w2i = './input/word2idx.json'
sen = './input/sentences.json'
main(we, w2i, sen)
| 33.696246 | 135 | 0.482629 |
5bf4fa5cbca978f7162823457788dc8aed0954ed | 6,287 | py | Python | python/opscore/utility/astrotime.py | sdss/opscore | dd4f2b2ad525fe3dfe3565463de2c079a7e1232e | [
"BSD-3-Clause"
] | null | null | null | python/opscore/utility/astrotime.py | sdss/opscore | dd4f2b2ad525fe3dfe3565463de2c079a7e1232e | [
"BSD-3-Clause"
] | 1 | 2021-08-17T21:08:14.000Z | 2021-08-17T21:08:14.000Z | python/opscore/utility/astrotime.py | sdss/opscore | dd4f2b2ad525fe3dfe3565463de2c079a7e1232e | [
"BSD-3-Clause"
] | null | null | null | """
Enhancements to the standard datetime package for astronomical applications.
"""
# Created 29-Jul-2008 by David Kirkby (dkirkby@uci.edu)
from math import floor
from datetime import tzinfo, datetime, timedelta
class AstroTimeException(Exception):
pass
class AstroTime(datetime):
"""
Enhanced version of datetime suitable for astronomical applications.
Wraps the datetime class to add support for leap-second adjustments
specified via the timezone and conversion to/from Modified Julian
Date formats.
"""
# a datetime whose MJD is exactly 50000.
mjdEpoch = datetime(1995, 10, 10)
def __new__(cls, *args, **kargs):
"""
Extends constructor to support up-casting from a datetime object.
AstroTime(datetime=dt)
AstroTime(datetime=dt,deltasecs=+33)
"""
if (
len(args) == 0
and "datetime" in kargs
and (len(kargs) == 1 or (len(kargs) == 2 and "deltasecs" in kargs))
):
if not isinstance(kargs["datetime"], datetime):
raise AstroTimeException("expect datetime instance")
deltasecs = kargs["deltasecs"] if "deltasecs" in kargs else 0
dt = kargs["datetime"] + timedelta(seconds=deltasecs)
return datetime.__new__(
cls,
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
dt.microsecond,
dt.tzinfo,
)
else:
return datetime.__new__(cls, *args, **kargs)
@staticmethod
def now(tz=None):
"""Identical to datetime.now() but returns an AstroTime"""
dt = AstroTime(datetime=datetime.now(tz))
delta = dt.__leapseconds(tz)
return AstroTime(datetime=dt, deltasecs=delta)
@staticmethod
def fromtimestamp(timestamp, tz=None):
"""Identical to datetime.fromtimestamp() but returns an AstroTime"""
dt = AstroTime(datetime=datetime.fromtimestamp(timestamp, tz))
delta = dt.__leapseconds(tz)
return AstroTime(datetime=dt, deltasecs=delta)
@staticmethod
def combine(date, time):
"""Identical to datetime.combine() but returns an AstroTime"""
return AstroTime(datetime=datetime.combine(date, time))
def __leapseconds(self, tz, default=0):
"""Returns the leap-second adjustment of tz or default if none is available"""
result = default
try:
result = tz.leapseconds(self)
except AttributeError:
pass
return result
def utcoffset(self):
"""Returns our offset from UTC, including any leap-second adjustments."""
return datetime.utcoffset(self) + timedelta(
seconds=self.__leapseconds(self.tzinfo)
)
def utctimetuple(self):
dt = self - timedelta(seconds=self.__leapseconds(self.tzinfo))
return dt.utctimetuple()
def astimezone(self, tz):
"""
Identical to datetime.astimezone() but returns an AstroTime.
Performs leap-second adjustments if necessary.
"""
delta = self.__leapseconds(tz) - self.__leapseconds(self.tzinfo)
return AstroTime(datetime=datetime.astimezone(self, tz), deltasecs=delta)
def timetz(self):
delta = self.__leapseconds(self.tzinfo, 0)
if not delta == 0:
raise AstroTimeException("time.time does not support leap seconds")
return datetime.timetz(self)
def MJD(self):
"""Returns the Modified Julian Date corresponding to our date and time."""
if self.year <= 0:
raise AstroTimeException("MJD calculations not supported for BC dates")
(y, m, d) = (self.year, self.month, self.day)
jd = (
367 * y
- floor(7 * (y + floor((m + 9) / 12)) / 4)
- floor(3 * (floor((y + (m - 9) / 7) / 100) + 1) / 4)
+ floor(275 * m / 9)
+ d
+ 1721028.5
)
mjd = jd - 2400000.5
(h, m, s, us) = (self.hour, self.minute, self.second, self.microsecond)
mjd += (h + (m + (s + us / 1000000.0) / 60.0) / 60.0) / 24.0
return mjd
@staticmethod
def fromMJD(mjd, tz=None):
"""
Returns an AstroTime initialized from an MJD value.
No timezone or leap-second adjustments are made since the MJD
value is assumed to already be in the specified time zone.
"""
dt = AstroTime.mjdEpoch.replace(tzinfo=tz) + timedelta(days=mjd - 50000.0)
return AstroTime(datetime=dt)
def __str__(self):
formatted = datetime.__str__(self)
delta = self.__leapseconds(self.tzinfo, None)
if delta is not None:
formatted += "%+03d" % self.tzinfo.leapseconds(self)
formatted += " MJD %.6f" % self.MJD()
if self.tzname() is not None:
formatted += " %s" % self.tzname()
return formatted
def __repr__(self):
return datetime.__repr__(self).replace(
"datetime.datetime", self.__class__.__name__
)
ZERO = timedelta(0)
class CoordinatedUniversalTime(tzinfo):
"""
A timezone class for tagging a datetime as being in UTC.
"""
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def leapseconds(self, dt):
return int(0)
UTC = CoordinatedUniversalTime()
class InternationalAtomicTime(CoordinatedUniversalTime):
"""
A timezone class for tagging a datetime as being in TAI and converting to/from TAI.
Leapseconds are documented at ftp://maia.usno.navy.mil/ser7/tai-utc.dat
"""
def tzname(self, dt):
return "TAI"
def leapseconds(self, dt):
if dt.year < 1999:
raise AstroTimeException("Leap seconds not tabulated before 1999")
elif dt.year < 2006:
# leap second added 31 Dec 1999
return int(+32)
elif dt.year < 2009:
# leap second added 31 Dec 2005
return int(+33)
else:
# leap second added 31 Dec 2008
return int(+34)
TAI = InternationalAtomicTime()
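# A short, hedged usage sketch (added; not part of the original module),
# using only the public API defined above:
#
#   utc_now = AstroTime.now(UTC)           # UTC-tagged current time
#   tai_now = AstroTime.now(TAI)           # shifted by the leap-second table above
#   mjd = AstroTime(2008, 7, 29, tzinfo=UTC).MJD()
#   back = AstroTime.fromMJD(mjd, tz=UTC)  # round-trips to the same date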
| 30.818627 | 87 | 0.594719 |
c9913930468d140d8afc1bdab0c5b2e7985ca515 | 3,183 | py | Python | time_series_prediction/LSTM/example2.py | 18279406017/code-of-csdn | 0c22f3abda9605f9a46e4f639739904ed271e6d7 | [
"MIT"
] | 71 | 2019-01-24T09:47:09.000Z | 2020-08-08T11:45:44.000Z | time_series_prediction/LSTM/example2.py | qinyao2016/code-of-csdn | 0c22f3abda9605f9a46e4f639739904ed271e6d7 | [
"MIT"
] | 1 | 2021-05-30T07:11:07.000Z | 2021-05-30T07:11:07.000Z | time_series_prediction/LSTM/example2.py | qinyao2016/code-of-csdn | 0c22f3abda9605f9a46e4f639739904ed271e6d7 | [
"MIT"
] | 151 | 2019-01-31T01:20:45.000Z | 2020-08-12T11:48:28.000Z | import numpy as np
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import time
def normalise_windows(window_data):  # divide every value in a window by the window's first value, then subtract one
normalised_data = []
for window in window_data:
normalised_window = [((float(p) / float(window[0])) - 1) for p in window]
normalised_data.append(normalised_window)
return normalised_data
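# Worked example of the normalisation above (added for clarity; the numbers are
# illustrative only): a window [100, 105, 95] becomes
# [100/100 - 1, 105/100 - 1, 95/100 - 1] = [0.0, 0.05, -0.05].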
def load_data(filename, seq_len, normalise_window):
    f = open(filename, 'r').read()  # read the raw data from the file
    data = f.split('\n')  # split the string into a list, one entry per line
    sequence_length = seq_len + 1  # build vectors of length seq_len+1; the last element is the label
    result = []
    for index in range(len(data)-sequence_length):
        result.append(data[index : index+sequence_length])  # slice the data into overlapping windows
    if normalise_window:
        result = normalise_windows(result)
    result = np.array(result)  # shape (4121, 51): 4121 rows, 51 = seq_len+1 for this dataset
    row = round(0.9*result.shape[0])  # round to the nearest integer
    train = result[:int(row), :]  # first 90% as the training set
    np.random.shuffle(train)  # shuffle the training rows in place
    x_train = train[:, :-1]  # first 50 columns are the training inputs
    y_train = train[:, -1]  # last column is the label
    x_test = result[int(row):, :-1]  # first 50 columns of the remaining 10% form the test set
    y_test = result[int(row):, -1]  # last column of the remaining 10% are the test labels
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))  # trailing dimension of 1 is the per-step feature size
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
return [x_train, y_train, x_test, y_test]
x_train, y_train, x_test, y_test = load_data('./sp500.csv', 50, True)
model = Sequential()
model.add(LSTM(input_dim=1, output_dim=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(output_dim=1))
model.add(Activation('linear'))
start = time.time()
model.compile(loss='mse', optimizer='rmsprop')
print ('compilation time : ', time.time() - start)
model.fit(x_train, y_train, batch_size= 512, nb_epoch=1, validation_split=0.05)
import warnings
warnings.filterwarnings("ignore")
from numpy import newaxis
def predict_sequences_multiple(model, data, window_size, prediction_len):
prediction_seqs = []
    for i in range(int(len(data) / prediction_len)):  # choose the start of each prediction window
        curr_frame = data[i * prediction_len]
        predicted = []
        for j in range(prediction_len):  # step forward one prediction at a time, sliding the window
predicted.append(model.predict(curr_frame[newaxis, :, :])[0, 0])
curr_frame = curr_frame[1:]
curr_frame = np.insert(curr_frame, [window_size - 1], predicted[-1], axis=0)
prediction_seqs.append(predicted)
return prediction_seqs
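# Hedged shape note (added, not original): with window_size=50 and
# prediction_len=50, each outer iteration seeds curr_frame with one 50x1 test
# window and then predicts 50 steps, feeding each prediction back in, so
# prediction_seqs ends up holding len(data)//50 lists of 50 predicted values.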
predictions = predict_sequences_multiple(model, x_test, 50, 50)
import matplotlib.pylab as plt
def plot_results_multiple(predicted_data, true_data, prediction_len):
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(true_data, label='True Data')
for i, data in enumerate(predicted_data):
padding = [None for p in range(i * prediction_len)]
plt.plot(padding + data, label='Prediction')
plt.legend()
plt.show()
plot_results_multiple(predictions, y_test, 50) | 44.208333 | 93 | 0.69934 |
2f0d79a79936c3ad4b75b83358f71706ce9d46b5 | 6,537 | py | Python | samples/client/petstore/python-experimental/petstore_api/models/additional_properties_class.py | yasammez/openapi-generator | cbc12543a9c949ca0eacc73db5f8c383bf3d0a75 | [
"Apache-2.0"
] | 1 | 2021-03-29T03:25:42.000Z | 2021-03-29T03:25:42.000Z | samples/client/petstore/python-experimental/petstore_api/models/additional_properties_class.py | yasammez/openapi-generator | cbc12543a9c949ca0eacc73db5f8c383bf3d0a75 | [
"Apache-2.0"
] | null | null | null | samples/client/petstore/python-experimental/petstore_api/models/additional_properties_class.py | yasammez/openapi-generator | cbc12543a9c949ca0eacc73db5f8c383bf3d0a75 | [
"Apache-2.0"
] | 1 | 2020-02-03T11:29:20.000Z | 2020-02-03T11:29:20.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class AdditionalPropertiesClass(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'map_string': ({str: (str,)},), # noqa: E501
'map_number': ({str: (float,)},), # noqa: E501
'map_integer': ({str: (int,)},), # noqa: E501
'map_boolean': ({str: (bool,)},), # noqa: E501
'map_array_integer': ({str: ([int],)},), # noqa: E501
'map_array_anytype': ({str: ([bool, date, datetime, dict, float, int, list, str],)},), # noqa: E501
'map_map_string': ({str: ({str: (str,)},)},), # noqa: E501
'map_map_anytype': ({str: ({str: (bool, date, datetime, dict, float, int, list, str,)},)},), # noqa: E501
'anytype_1': (bool, date, datetime, dict, float, int, list, str,), # noqa: E501
'anytype_2': (bool, date, datetime, dict, float, int, list, str,), # noqa: E501
'anytype_3': (bool, date, datetime, dict, float, int, list, str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
'map_string': 'map_string', # noqa: E501
'map_number': 'map_number', # noqa: E501
'map_integer': 'map_integer', # noqa: E501
'map_boolean': 'map_boolean', # noqa: E501
'map_array_integer': 'map_array_integer', # noqa: E501
'map_array_anytype': 'map_array_anytype', # noqa: E501
'map_map_string': 'map_map_string', # noqa: E501
'map_map_anytype': 'map_map_anytype', # noqa: E501
'anytype_1': 'anytype_1', # noqa: E501
'anytype_2': 'anytype_2', # noqa: E501
'anytype_3': 'anytype_3', # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set([
'_data_store',
'_check_type',
'_from_server',
'_path_to_item',
'_configuration',
])
def __init__(self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs): # noqa: E501
"""additional_properties_class.AdditionalPropertiesClass - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
map_string ({str: (str,)}): [optional] # noqa: E501
map_number ({str: (float,)}): [optional] # noqa: E501
map_integer ({str: (int,)}): [optional] # noqa: E501
map_boolean ({str: (bool,)}): [optional] # noqa: E501
map_array_integer ({str: ([int],)}): [optional] # noqa: E501
map_array_anytype ({str: ([bool, date, datetime, dict, float, int, list, str],)}): [optional] # noqa: E501
map_map_string ({str: ({str: (str,)},)}): [optional] # noqa: E501
map_map_anytype ({str: ({str: (bool, date, datetime, dict, float, int, list, str,)},)}): [optional] # noqa: E501
anytype_1 (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
anytype_2 (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
anytype_3 (bool, date, datetime, dict, float, int, list, str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
setattr(self, var_name, var_value)
| 41.373418 | 174 | 0.595227 |
61339f3363f349ec97662004430e330176955579 | 3,505 | py | Python | tests/distro/test_protocol_util.py | luthes/azureNoDev | de88c490061f970ddf787764d3fba9071e1ce318 | [
"Apache-2.0"
] | 1 | 2018-12-14T10:04:34.000Z | 2018-12-14T10:04:34.000Z | tests/distro/test_protocol_util.py | luthes/azureNoDev | de88c490061f970ddf787764d3fba9071e1ce318 | [
"Apache-2.0"
] | null | null | null | tests/distro/test_protocol_util.py | luthes/azureNoDev | de88c490061f970ddf787764d3fba9071e1ce318 | [
"Apache-2.0"
] | 1 | 2018-05-09T13:05:48.000Z | 2018-05-09T13:05:48.000Z | # Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
from tests.tools import *
from azurelinuxagent.distro.loader import get_distro
from azurelinuxagent.exception import *
from azurelinuxagent.distro.default.protocolUtil import *
@patch("time.sleep")
class TestProtocolUtil(AgentTestCase):
@distros()
@patch("azurelinuxagent.distro.default.protocolUtil.MetadataProtocol")
@patch("azurelinuxagent.distro.default.protocolUtil.WireProtocol")
def test_detect_protocol(self, distro_name, distro_version, distro_full_name,
WireProtocol, MetadataProtocol, _, *distro_args):
WireProtocol.return_value = MagicMock()
MetadataProtocol.return_value = MagicMock()
distro = get_distro(distro_name, distro_version, distro_full_name)
distro.dhcp_handler = MagicMock()
distro.dhcp_handler.endpoint = "foo.bar"
#Test wire protocol is available
protocol = distro.protocol_util.detect_protocol()
self.assertEquals(WireProtocol.return_value, protocol)
#Test wire protocol is not available
distro.protocol_util.protocol = None
WireProtocol.side_effect = ProtocolError()
protocol = distro.protocol_util.detect_protocol()
self.assertEquals(MetadataProtocol.return_value, protocol)
#Test no protocol is available
distro.protocol_util.protocol = None
WireProtocol.side_effect = ProtocolError()
MetadataProtocol.side_effect = ProtocolError()
self.assertRaises(ProtocolError, distro.protocol_util.detect_protocol)
@distros()
def test_detect_protocol_by_file(self, distro_name, distro_version,
distro_full_name, _):
distro = get_distro(distro_name, distro_version, distro_full_name)
protocol_util = distro.protocol_util
protocol_util._detect_wire_protocol = Mock()
protocol_util._detect_metadata_protocol = Mock()
tag_file = os.path.join(self.tmp_dir, TAG_FILE_NAME)
#Test tag file doesn't exist
protocol_util.detect_protocol_by_file()
protocol_util._detect_wire_protocol.assert_any_call()
protocol_util._detect_metadata_protocol.assert_not_called()
#Test tag file exists
protocol_util.protocol = None
protocol_util._detect_wire_protocol.reset_mock()
protocol_util._detect_metadata_protocol.reset_mock()
with open(tag_file, "w+") as tag_fd:
tag_fd.write("")
protocol_util.detect_protocol_by_file()
protocol_util._detect_metadata_protocol.assert_any_call()
protocol_util._detect_wire_protocol.assert_not_called()
if __name__ == '__main__':
unittest.main()
| 38.944444 | 82 | 0.723538 |
cce7e81eb9b962756741a0a1602868191c834c3b | 1,458 | py | Python | namespaces/pods/containers.py | jcpowermac/aos-api-examples | 5b056127a09ba041d8fe10611933822685e8ef99 | [
"Apache-2.0"
] | null | null | null | namespaces/pods/containers.py | jcpowermac/aos-api-examples | 5b056127a09ba041d8fe10611933822685e8ef99 | [
"Apache-2.0"
] | null | null | null | namespaces/pods/containers.py | jcpowermac/aos-api-examples | 5b056127a09ba041d8fe10611933822685e8ef99 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from subprocess import Popen, PIPE
import requests
import pprint
import json
import yaml
def gettoken():
p = Popen(['oc', 'whoami', '-t'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
rc = p.returncode
return output.rstrip('\n')
def aosget_text(url, token):
headers = {'Authorization': 'Bearer %s' % token, 'Accept': 'application/yaml'}
req = requests.get(url, headers=headers, verify=False)
if req.status_code == 200:
text = req.text
return text
return ""
def aosget_yaml(url, token):
headers = {'Authorization': 'Bearer %s' % token, 'Accept': 'application/yaml'}
req = requests.get(url, headers=headers, verify=False)
if req.status_code == 200:
text = req.text
return yaml.load(text)
return {}
def aosget(url, token):
headers = {'Authorization': 'Bearer %s' % token }
req = requests.get(url, headers=headers, verify=False)
if req.status_code == 200:
print "here"
json = req.json()
return json
print req.text
return {}
def main():
pp = pprint.PrettyPrinter(indent=4)
token = gettoken()
url = "https://origin-master1.virtomation.com:8443/api/v1/namespaces/cake/pods"
#url = "https://origin-master1.virtomation.com:8443/oapi/v1/projects/cake/pods"
results = aosget_text(url, token)
print results
if __name__ == '__main__':
main()
| 22.090909 | 83 | 0.631687 |
76c7b0d6e5c96a4dbc22f2cc4953011dc6b06ed4 | 3,310 | py | Python | wuyudong/UTDALLAS/UTDALLAS.py | doge-search/webdoge | 443e758b5c1f962d5c2fe792cdbed01e1208b1cb | [
"Unlicense"
] | null | null | null | wuyudong/UTDALLAS/UTDALLAS.py | doge-search/webdoge | 443e758b5c1f962d5c2fe792cdbed01e1208b1cb | [
"Unlicense"
] | null | null | null | wuyudong/UTDALLAS/UTDALLAS.py | doge-search/webdoge | 443e758b5c1f962d5c2fe792cdbed01e1208b1cb | [
"Unlicense"
] | null | null | null | __author__ = 'cutylewiwi'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
class Prof:
def __init__(self, name, photoUrl, pUrl, title, area, office, phone, email):
self.name = name
self.photoUrl = photoUrl
self.pUrl = pUrl
self.title = title
self.area = area
self.office = office
self.phone = phone
self.email = email
class ProfList:
def __init__(self, baseUrl):
self.baseUrl = baseUrl
self.profs = []
def getPage(self):
try:
url = self.baseUrl
request = urllib2.Request(url)
response = urllib2.urlopen(request)
# print response.read()
return response
except urllib2.URLError, e:
if hasattr(e,"reason"):
print u"Faild to get Prof List at University of Delaware",e.reason
return None
def getProfList(self):
page = self.getPage()
#regex = '<div class=\"wdn-grid-set\">.*?<a class=\"wdn-button\" title=\"Web page for.*?\" href=\"(.*?)\".*?<strong>(.*?)</strong>.*?<div class=\"gs-fac-rsch\">(.*?)(<br />)?</div>'
regex = '<table width="100%" class="table " style="table-striped table-bordered">.*?<tbody>(.*?)</tbody>.*?</table>'
myItems = re.findall(regex, page.read(), re.S)
tmpStr = myItems[0]
regex = '<tr>.*?<td>.*?<a.*?href="(.*?)".*?>(.*?)</a>.*?</td>.*?<td>.*?<a.*?href="mailto:.*?".*?>(.*?)</a>.*?</td>.*?</tr>'
myItems = re.findall(regex, tmpStr, re.S)
for item in myItems:
ProfName = item[1]
ProfPhotoUrl = ""
ProfPUrl = item[0]
ProfTitle = ""
ProfArea = ""
ProfOffice = ""
ProfPhone = ""
ProfEmail = item[2]
# print ProfName
# print ProfPhotoUrl
# print ProfPUrl
# print ProfTitle
# print ProfArea
# print ProfOffice
# print ProfPhone
# print ProfEmail
# print " "
self.profs.append(Prof(ProfName, ProfPhotoUrl, ProfPUrl, ProfTitle, ProfArea, ProfOffice, ProfPhone, ProfEmail))
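    # Hedged illustration (added comment): the second regex above expects table
    # rows shaped roughly like the following hypothetical markup, capturing the
    # profile URL, the display name and the e-mail text in that order:
    #
    #   <tr><td><a href="https://cs.utdallas.edu/people/faculty/jane-doe/">Jane Doe</a></td>
    #       <td><a href="mailto:jane.doe@utdallas.edu">jane.doe@utdallas.edu</a></td></tr>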
def outPutProf(self):
result = "<?xml version=\"1.0\" ?>\n\t<institution>\n"
self.getProfList()
for prof in self.profs:
result += "\t\t<professor>\n"
result += "\t\t\t<name>%s</name>\n" % (prof.name)
result += "\t\t\t<title>%s</title>\n" % (prof.title)
result += "\t\t\t<office>%s</office>\n" % (prof.office)
result += "\t\t\t<email>%s</email>\n" % (prof.email)
result += "\t\t\t<phone>%s</phone>\n" % (prof.phone)
result += "\t\t\t<website>%s</website>\n" % (prof.pUrl)
result += "\t\t\t<image>%s</image>\n" % (prof.photoUrl)
result += "\t\t</professor>\n"
result += "\t</institution>\n"
# print result
fileName = "UTDALLAS.xml"
# outputDir = "result"
file = open(fileName,"w")
file.writelines(result)
baseURL = 'http://cs.utdallas.edu/people/faculty/'
pl = ProfList(baseURL)
pl.outPutProf()
# pl.getPage()
# pl.getProfList()
| 35.591398 | 190 | 0.503021 |
adce7a9d5be0c5e04ca4da71b16228e1e48e5a46 | 9,808 | py | Python | heat/engine/resources/openstack/manila/share_network.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 265 | 2015-01-02T09:33:22.000Z | 2022-03-26T23:19:54.000Z | heat/engine/resources/openstack/manila/share_network.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 8 | 2015-09-01T15:43:19.000Z | 2021-12-14T05:18:23.000Z | heat/engine/resources/openstack/manila/share_network.py | noironetworks/heat | 7cdadf1155f4d94cf8f967635b98e4012a7acfb7 | [
"Apache-2.0"
] | 295 | 2015-01-06T07:00:40.000Z | 2021-09-06T08:05:06.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import translation
class ManilaShareNetwork(resource.Resource):
"""A resource that stores network information for share servers.
Stores network information that will be used by share servers,
where shares are hosted.
"""
support_status = support.SupportStatus(version='5.0.0')
PROPERTIES = (
NAME, NEUTRON_NETWORK, NEUTRON_SUBNET, NOVA_NETWORK,
DESCRIPTION, SECURITY_SERVICES,
) = (
'name', 'neutron_network', 'neutron_subnet', 'nova_network',
'description', 'security_services',
)
ATTRIBUTES = (
SEGMENTATION_ID, CIDR, IP_VERSION, NETWORK_TYPE,
) = (
'segmentation_id', 'cidr', 'ip_version', 'network_type',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the share network.'),
update_allowed=True
),
NEUTRON_NETWORK: properties.Schema(
properties.Schema.STRING,
_('Neutron network id.'),
update_allowed=True,
constraints=[constraints.CustomConstraint('neutron.network')]
),
NEUTRON_SUBNET: properties.Schema(
properties.Schema.STRING,
_('Neutron subnet id.'),
update_allowed=True,
constraints=[constraints.CustomConstraint('neutron.subnet')]
),
NOVA_NETWORK: properties.Schema(
properties.Schema.STRING,
_('Nova network id.'),
update_allowed=True,
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Share network description.'),
update_allowed=True
),
SECURITY_SERVICES: properties.Schema(
properties.Schema.LIST,
_('A list of security services IDs or names.'),
schema=properties.Schema(
properties.Schema.STRING
),
update_allowed=True,
default=[]
)
}
attributes_schema = {
SEGMENTATION_ID: attributes.Schema(
_('VLAN ID for VLAN networks or tunnel-id for GRE/VXLAN '
'networks.'),
type=attributes.Schema.STRING
),
CIDR: attributes.Schema(
_('CIDR of subnet.'),
type=attributes.Schema.STRING
),
IP_VERSION: attributes.Schema(
_('Version of IP address.'),
type=attributes.Schema.STRING
),
NETWORK_TYPE: attributes.Schema(
_('The physical mechanism by which the virtual network is '
'implemented.'),
type=attributes.Schema.STRING
),
}
default_client_name = 'manila'
entity = 'share_networks'
def _request_network(self):
return self.client().share_networks.get(self.resource_id)
def _resolve_attribute(self, name):
if self.resource_id is None:
return
network = self._request_network()
return getattr(network, name, None)
def validate(self):
super(ManilaShareNetwork, self).validate()
if (self.properties[self.NEUTRON_NETWORK] and
self.properties[self.NOVA_NETWORK]):
raise exception.ResourcePropertyConflict(self.NEUTRON_NETWORK,
self.NOVA_NETWORK)
if (self.properties[self.NOVA_NETWORK] and
self.properties[self.NEUTRON_SUBNET]):
raise exception.ResourcePropertyConflict(self.NEUTRON_SUBNET,
self.NOVA_NETWORK)
if self.is_using_neutron() and self.properties[self.NOVA_NETWORK]:
msg = _('With Neutron enabled you need to pass Neutron network '
'and Neutron subnet instead of Nova network')
raise exception.StackValidationFailed(message=msg)
if (self.properties[self.NEUTRON_NETWORK] and not
self.properties[self.NEUTRON_SUBNET]):
raise exception.ResourcePropertyDependency(
prop1=self.NEUTRON_NETWORK, prop2=self.NEUTRON_SUBNET)
if (self.properties[self.NEUTRON_NETWORK] and
self.properties[self.NEUTRON_SUBNET]):
plg = self.client_plugin('neutron')
subnet_id = plg.find_resourceid_by_name_or_id(
plg.RES_TYPE_SUBNET, self.properties[self.NEUTRON_SUBNET])
net_id = plg.network_id_from_subnet_id(subnet_id)
provided_net_id = plg.find_resourceid_by_name_or_id(
plg.RES_TYPE_NETWORK, self.properties[self.NEUTRON_NETWORK])
if net_id != provided_net_id:
msg = (_('Provided %(subnet)s does not belong '
'to provided %(network)s.')
% {'subnet': self.NEUTRON_SUBNET,
'network': self.NEUTRON_NETWORK})
raise exception.StackValidationFailed(message=msg)
def translation_rules(self, props):
neutron_client_plugin = self.client_plugin('neutron')
translation_rules = [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.NEUTRON_NETWORK],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_NETWORK
),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.NEUTRON_SUBNET],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_SUBNET
)
]
return translation_rules
def handle_create(self):
neutron_subnet_id = self.properties[self.NEUTRON_SUBNET]
neutron_net_id = self.properties[self.NEUTRON_NETWORK]
if neutron_subnet_id and not neutron_net_id:
neutron_net_id = self.client_plugin(
'neutron').network_id_from_subnet_id(neutron_subnet_id)
network = self.client().share_networks.create(
name=self.properties[self.NAME],
neutron_net_id=neutron_net_id,
neutron_subnet_id=neutron_subnet_id,
nova_net_id=self.properties[self.NOVA_NETWORK],
description=self.properties[self.DESCRIPTION])
self.resource_id_set(network.id)
for service in self.properties.get(self.SECURITY_SERVICES):
self.client().share_networks.add_security_service(
self.resource_id,
self.client_plugin().get_security_service(service).id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if self.SECURITY_SERVICES in prop_diff:
services = prop_diff.pop(self.SECURITY_SERVICES)
s_curr = set([self.client_plugin().get_security_service(s).id
for s in self.properties.get(
self.SECURITY_SERVICES)])
s_new = set([self.client_plugin().get_security_service(s).id
for s in services])
for service in s_curr - s_new:
self.client().share_networks.remove_security_service(
self.resource_id, service)
for service in s_new - s_curr:
self.client().share_networks.add_security_service(
self.resource_id, service)
if prop_diff:
neutron_subnet_id = prop_diff.get(self.NEUTRON_SUBNET)
neutron_net_id = prop_diff.get(self.NEUTRON_NETWORK)
if neutron_subnet_id and not neutron_net_id:
neutron_net_id = self.client_plugin(
'neutron').network_id_from_subnet_id(neutron_subnet_id)
self.client().share_networks.update(
self.resource_id,
name=prop_diff.get(self.NAME),
neutron_net_id=neutron_net_id,
neutron_subnet_id=neutron_subnet_id,
nova_net_id=prop_diff.get(self.NOVA_NETWORK),
description=prop_diff.get(self.DESCRIPTION))
def parse_live_resource_data(self, resource_properties, resource_data):
result = super(ManilaShareNetwork, self).parse_live_resource_data(
resource_properties, resource_data)
sec_list = self.client().security_services.list(
search_opts={'share_network_id': self.resource_id})
result.update({
self.NOVA_NETWORK: resource_data.get('nova_net_id'),
self.NEUTRON_NETWORK: resource_data.get('neutron_net_id'),
self.NEUTRON_SUBNET: resource_data.get('neutron_subnet_id'),
self.SECURITY_SERVICES: [service.id for service in sec_list]}
)
return result
def resource_mapping():
return {'OS::Manila::ShareNetwork': ManilaShareNetwork}
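# Illustrative HOT template snippet (not part of this module; property keys are
# inferred from the resource's schema and may need adjusting):
#
#   resources:
#     share_network:
#       type: OS::Manila::ShareNetwork
#       properties:
#         name: my_share_network
#         neutron_network: my_net
#         neutron_subnet: my_subnet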
| 40.196721 | 78 | 0.622553 |
77417ad5451804639bdf83fd409d9f6652fae04f | 6,028 | py | Python | not-used/autoloadfut.py | EJOOSTEROP/twint | 1564fcf11194af31208c8399711dcff0570f644b | ["MIT"] | null | null | null | not-used/autoloadfut.py | EJOOSTEROP/twint | 1564fcf11194af31208c8399711dcff0570f644b | ["MIT"] | null | null | null | not-used/autoloadfut.py | EJOOSTEROP/twint | 1564fcf11194af31208c8399711dcff0570f644b | ["MIT"] | null | null | null |
import twint
import schedule
import time
import json
from datetime import datetime
import os
TWT_FILE_NAME = 'cibc.json'
TWT_LIMIT_RESULT = 5000
TWT_SINCE_RESULT = "2019-01-01"
TWT_SEARCH_FOR = '@cibc'
TWT_HIDE_OUTPUT = True
def jobsone():
'''First search.
Since only a limited number of tweets are returned (seemingly random in number),
subsequent jobs are required to get the full set of results.
'''
print ("Fetching Initial Set of Tweets")
c = twint.Config()
# choose username (optional)
#c.Username = "insert username here"
# choose search term (optional)
c.Search = TWT_SEARCH_FOR
# choose beginning time (narrow results)
c.Since = TWT_SINCE_RESULT
# set limit on total tweets
c.Limit = TWT_LIMIT_RESULT
# no idea, but makes the csv format properly
#c.Store_csv = True
# format of the csv
#c.Custom = ["date", "time", "username", "tweet", "link", "likes", "retweets", "replies", "mentions", "hashtags"]
c.Store_json = True
# change the name of the output file
c.Output = TWT_FILE_NAME
c.Hide_output = TWT_HIDE_OUTPUT
twint.run.Search(c)
def jobstwo():
'''Subsequent search.
Since only a limited number of tweets are returned (seemingly random in number),
subsequent jobs are required to get the full set of results.
'''
#CONCERN: This never stops
print ("Fetching Subsequent Tweets")
c = twint.Config()
# choose username (optional)
#c.Username = "insert username here"
# choose search term (optional)
c.Search = TWT_SEARCH_FOR
# choose beginning time (narrow results)
c.Until = str(earliest_tweet_in_file())
c.Since = TWT_SINCE_RESULT
# set limit on total tweets
c.Limit = TWT_LIMIT_RESULT
# no idea, but makes the csv format properly
#c.Store_csv = True
# format of the csv
#c.Custom = ["date", "time", "username", "tweet", "link", "likes", "retweets", "replies", "mentions", "hashtags"]
c.Store_json = True
# change the name of the output file
c.Output = TWT_FILE_NAME
c.Hide_output = TWT_HIDE_OUTPUT
print("---Fetching until: ", c.Until)
twint.run.Search(c)
print("---Done.")
def earliest_tweet_in_file():
'''Find earliest tweet captured in file.
'''
#CONCERN: not optimized; hard coded file name and likely other elements; no error catching
tweetsmetad = []
earliest_tweet_dt = datetime.now()
for line in open(TWT_FILE_NAME, 'r', encoding="utf8"): # without this encoding french characters don't show right; also causes errors for others
tweetsmetad.append(json.loads(line))
if datetime.strptime(tweetsmetad[-1]['created_at'], '%Y-%m-%d %H:%M:%S %Z')<earliest_tweet_dt:
earliest_tweet_dt = datetime.strptime(tweetsmetad[-1]['created_at'], '%Y-%m-%d %H:%M:%S %Z')
print("...Teets in file before search: ", len(tweetsmetad))
return(earliest_tweet_dt)
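# Note (illustrative, added for clarity): each line of the JSON output file is one
# tweet object whose 'created_at' field is parsed with '%Y-%m-%d %H:%M:%S %Z'; a
# hypothetical value such as "2019-06-01 12:34:56 UTC" would be accepted by the
# parsers in earliest_tweet_in_file() above and latest_tweet_in_file() below.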
'''
if not os.path.isfile(TWT_FILE_NAME):
jobsone()
schedule.every(2).minutes.do(jobstwo)
#schedule.every().hour.do(jobstwo)
# schedule.every().day.at("10:30").do(jobstwo)
# schedule.every().monday.do(jobstwo)
# schedule.every().wednesday.at("13:15").do(jobstwo)
while True:
schedule.run_pending()
time.sleep(1)
'''
#####################################
def jobsthree(filename_str, search_str):
'''Subsequent search.
Since only a limited number of tweets are returned (seemingly random in number),
subsequent jobs are required to get the full set of results.
'''
#CONCERN: This never stops
print ("Fetching Subsequent Tweets")
c = twint.Config()
# choose username (optional)
#c.Username = "insert username here"
# choose search term (optional)
c.Search = search_str
# choose beginning time (narrow results)
#c.Until = str(earliest_tweet_in_file())
c.Since = str(latest_tweet_in_file(filename_str))
# set limit on total tweets
c.Limit = TWT_LIMIT_RESULT
# no idea, but makes the csv format properly
#c.Store_csv = True
# format of the csv
#c.Custom = ["date", "time", "username", "tweet", "link", "likes", "retweets", "replies", "mentions", "hashtags"]
c.Store_json = True
# change the name of the output file
c.Output = filename_str
c.Hide_output = TWT_HIDE_OUTPUT
print("---", search_str)
print("---Fetching from: ", c.Since)
twint.run.Search(c)
print("---Done.")
def jobsfour():
'''Appends recent tweets to existing file
    Quick shortcut code. Many shortcomings, incl.:
- exits with error when file does not exist
- no guarantee that there will be no gaps between existing tweets in file and new tweets
(as twint seems to return an arbitrary number of tweets, with most recent first; though
I am not sure this will even be consistent.)
'''
jobsthree('cibc.json', '@cibc')
jobsthree('bmo.json', '@BMO')
jobsthree('national.json', '@nationalbank')
jobsthree('rbc.json', '@RBC')
jobsthree('td.json', '@TD_Canada')
jobsthree('scotia.json', '@scotiabank')
print("Going to sleep")
def latest_tweet_in_file(filename_str):
    '''Find the latest (most recent) tweet captured in the file.
'''
#CONCERN: not optimized; hard coded file name and likely other elements; no error catching
tweetsmetad = []
#latest_tweet_dt = datetime.now()
print("---Starting ", filename_str)
    latest_tweet_dt = datetime(1990, 5, 17) # arbitrary, but Twitter did not exist at this date
for line in open(filename_str, 'r', encoding="utf8"): # without this encoding french characters don't show right; also causes errors for others
tweetsmetad.append(json.loads(line))
if datetime.strptime(tweetsmetad[-1]['created_at'], '%Y-%m-%d %H:%M:%S %Z')>latest_tweet_dt:
latest_tweet_dt = datetime.strptime(tweetsmetad[-1]['created_at'], '%Y-%m-%d %H:%M:%S %Z')
print("...Teets in file before search: ", len(tweetsmetad))
return(latest_tweet_dt)
#schedule.every(1).minutes.do(jobsfour)
schedule.every(1).hour.do(jobsfour)
# schedule.every().day.at("10:30").do(jobstwo)
# schedule.every().monday.do(jobstwo)
# schedule.every().wednesday.at("13:15").do(jobstwo)
while True:
schedule.run_pending()
time.sleep(1)
| 34.843931 | 147 | 0.695255 |
d2f081e841689c6d5d3e947f4488741a5122039a | 11,325 | py | Python | code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_index_membership.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_index_membership.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/StocksAPIforDigitalPortals/v2/fds/sdk/StocksAPIforDigitalPortals/model/stock_notation_screener_search_data_index_membership.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | ["Apache-2.0"] | null | null | null |
"""
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.StocksAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.StocksAPIforDigitalPortals.exceptions import ApiAttributeError
class StockNotationScreenerSearchDataIndexMembership(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
              and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
            and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('ids',): {
'max_items': 20,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'ids': ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'ids': 'ids', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""StockNotationScreenerSearchDataIndexMembership - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
ids ([str]): List of index instrument identifiers.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""StockNotationScreenerSearchDataIndexMembership - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
ids ([str]): List of index instrument identifiers.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 43.725869 | 124 | 0.57404 |
9518c707fb6b17d50510696df529dd7e9dd7a9b1 | 17,167 | py | Python | smarts/core/utils/math.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | ["MIT"] | null | null | null | smarts/core/utils/math.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | ["MIT"] | null | null | null | smarts/core/utils/math.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | ["MIT"] | null | null | null |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
from dataclasses import dataclass
from math import factorial
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
@dataclass(frozen=True)
class CubicPolynomial:
"""A cubic polynomial."""
a: float
b: float
c: float
d: float
@classmethod
def from_list(cls, coefficients: List[float]):
"""Generates CubicPolynomial.
Args:
coefficients: The list of coefficients [a, b, c, d]
Returns:
A new CubicPolynomial.
"""
return cls(
a=coefficients[0],
b=coefficients[1],
c=coefficients[2],
d=coefficients[3],
)
def eval(self, ds: float) -> float:
""" Evaluate a value along the polynomial."""
return self.a + self.b * ds + self.c * ds * ds + self.d * ds * ds * ds
def constrain_angle(angle: float) -> float:
"""Constrain an angle within the inclusive range [-pi, pi]"""
angle %= 2 * math.pi
if angle > math.pi:
angle -= 2 * math.pi
return angle
def batches(list_, n):
"""Split an indexable container into `n` batches.
Args:
list_:
The iterable to split into parts
n:
The number of batches
"""
for i in range(0, len(list_), n):
yield list_[i : i + n]
def yaw_from_quaternion(quaternion) -> float:
"""Converts a quaternion to the yaw value.
Args:
        quaternion (np.ndarray): np.array([x, y, z, w])
Returns:
A float angle in radians.
"""
assert len(quaternion) == 4, f"len({quaternion}) != 4"
siny_cosp = 2 * (quaternion[0] * quaternion[1] + quaternion[3] * quaternion[2])
cosy_cosp = (
quaternion[3] ** 2
+ quaternion[0] ** 2
- quaternion[1] ** 2
- quaternion[2] ** 2
)
yaw = np.arctan2(siny_cosp, cosy_cosp)
return yaw
def fast_quaternion_from_angle(angle: float) -> np.ndarray:
"""Converts a float to a quaternion.
Args:
angle: An angle in radians.
Returns:
np.ndarray: np.array([x, y, z, w])
"""
half_angle = angle * 0.5
return np.array([0, 0, math.sin(half_angle), math.cos(half_angle)])
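# Illustrative round trip (follows from the formulas above): for an angle a in
# (-pi, pi], yaw_from_quaternion(fast_quaternion_from_angle(a)) == a, since the
# quaternion built here encodes a pure z-axis (yaw) rotation.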
def mult_quat(q1, q2):
"""Specialized quaternion multiplication as required by the unique attributes of quaternions.
Returns:
The product of the quaternions.
"""
q3 = np.copy(q1)
q3[0] = q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2] - q1[3] * q2[3]
q3[1] = q1[0] * q2[1] + q1[1] * q2[0] + q1[2] * q2[3] - q1[3] * q2[2]
q3[2] = q1[0] * q2[2] - q1[1] * q2[3] + q1[2] * q2[0] + q1[3] * q2[1]
q3[3] = q1[0] * q2[3] + q1[1] * q2[2] - q1[2] * q2[1] + q1[3] * q2[0]
return q3
def rotate_quat(quat, vect):
"""Rotate a vector with the rotation defined by a quaternion."""
# Transform a vector into an quaternion
vect = np.append([0], vect)
# Normalize it
norm_vect = np.linalg.norm(vect)
vect /= norm_vect
# Computes the conjugate of quat
quat_ = np.append(quat[0], -quat[1:])
# The result is given by: quat * vect * quat_
res = mult_quat(quat, mult_quat(vect, quat_)) * norm_vect
return res[1:]
def clip(val, min_val, max_val):
"""Constrain a value between a min and max by clamping exterior values to the extremes. """
assert (
min_val <= max_val
), f"min_val({min_val}) must be less than max_val({max_val})"
return min_val if val < min_val else max_val if val > max_val else val
def get_linear_segments_for_range(
s_start: float, s_end: float, segment_size: float
) -> List[float]:
"""Given a range from s_start to s_end, give a linear segment of size segment_size."""
num_segments = int((s_end - s_start) / segment_size) + 1
return [s_start + seg * segment_size for seg in range(num_segments)]
def squared_dist(a, b) -> float:
"""Computes the squared distance between a and b.
Args:
a, b: same dimension numpy.array([..])
Returns:
float: dist**2
"""
delta = b - a
return np.dot(delta, delta)
def signed_dist_to_line(point, line_point, line_dir_vec) -> float:
"""Computes the signed distance to a directed line
    The sign of the distance is:
- negative if point is on the right of the line
- positive if point is on the left of the line
>>> import numpy as np
>>> signed_dist_to_line(np.array([2, 0]), np.array([0, 0]), np.array([0, 1.]))
-2.0
>>> signed_dist_to_line(np.array([-1.5, 0]), np.array([0, 0]), np.array([0, 1.]))
1.5
"""
p = vec_2d(point)
p1 = line_point
p2 = line_point + line_dir_vec
u = abs(
line_dir_vec[1] * p[0] - line_dir_vec[0] * p[1] + p2[0] * p1[1] - p2[1] * p1[0]
)
d = u / np.linalg.norm(line_dir_vec)
line_normal = np.array([-line_dir_vec[1], line_dir_vec[0]])
_sign = np.sign(np.dot(p - p1, line_normal))
return d * _sign
def vec_2d(v) -> np.ndarray:
"""Converts a higher order vector to a 2D vector."""
assert len(v) >= 2
return np.array(v[:2])
def sign(x) -> int:
"""Finds the sign of a numeric type.
Args:
x: A signed numeric type
Returns:
The sign [-1|1] of the input number
"""
return 1 - (x < 0) * 2
def lerp(a, b, p):
"""Linear interpolation between a and b with p
.. math:: a * (1.0 - p) + b * p
Args:
a, b: interpolated values
p: [0..1] float describing the weight of a to b
"""
assert 0 <= p and p <= 1
return a * (1.0 - p) + b * p
def low_pass_filter(
input_value,
previous_filter_state,
filter_constant,
time_step,
lower_bound=-1,
raw_value=0,
):
"""Filters out large value jumps by taking a filter state and returning a filter state.
This is generally intended for filtering out high frequencies from raw signal values.
Args:
input_value: The raw signal value.
previous_filter_state: The last generated value from the filter.
filter_constant: The scale of the filter
time_step: The length of time between the previously processed signal and the current signal.
lower_bound: The lowest possible value allowed.
raw_value: A scalar addition to the signal value.
Returns:
The processed raw signal value.
"""
previous_filter_state += (
time_step * filter_constant * (input_value - previous_filter_state)
)
previous_filter_state = np.clip(previous_filter_state + raw_value, lower_bound, 1)
return previous_filter_state
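# Illustrative use of low_pass_filter (hypothetical values): smoothing a raw
# throttle signal once per step:
#   throttle_state = low_pass_filter(raw_throttle, throttle_state,
#                                    filter_constant=2.0, time_step=0.1)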
def radians_to_vec(radians) -> np.ndarray:
"""Convert a radian value to a unit directional vector."""
# +y = 0 rad.
angle = (radians + math.pi * 0.5) % (2 * math.pi)
return np.array((math.cos(angle), math.sin(angle)))
def vec_to_radians(v) -> float:
"""Converts a vector to a radian value."""
# See: https://stackoverflow.com/a/15130471
assert len(v) == 2, f"Vector must be 2D: {repr(v)}"
x, y = v
r = math.atan2(abs(y), abs(x))
# Adjust angle based on quadrant where +y = 0 rad.
if x < 0:
if y < 0:
return (r + 0.5 * math.pi) % (2 * math.pi) # quad 3
return (0.5 * math.pi - r) % (2 * math.pi) # quad 2
elif y < 0:
return (1.5 * math.pi - r) % (2 * math.pi) # quad 4
return (r - 0.5 * math.pi) % (2 * math.pi) # quad 1
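# Illustrative values for the heading convention above (+y maps to 0 rad):
#   radians_to_vec(0.0) ~= np.array([0.0, 1.0])
#   vec_to_radians((0, 1)) == 0.0 and vec_to_radians((1, 0)) == 1.5 * math.pi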
def is_close(a: float, b: float, rel_tol: float = 1e-09, abs_tol: float = 0.0) -> bool:
"""Determines if two values are close as defined by the inputs.
Args:
a: The first value.
b: The other value.
rel_tol: Difference required to be close relative to the magnitude
        abs_tol: Absolute difference allowed to still be considered close.
Returns:
If the two values are "close".
"""
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def euclidean_distance(p1: Tuple[float], p2: Tuple[float]) -> float:
"""The distance taking measuring a direct line between p1 and p2."""
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
return math.sqrt(dx * dx + dy * dy)
def position_at_offset(
p1: Tuple[float], p2: Tuple[float], offset: float
) -> Optional[Tuple[float]]:
"""A point between p1 and p2 given an offset less than the distance between p1 and p2."""
if is_close(offset, 0.0): # for pathological cases with dist == 0 and offset == 0
return p1
dist = euclidean_distance(p1, p2)
if is_close(dist, offset):
return p2
if offset > dist:
return None
return p1[0] + (p2[0] - p1[0]) * (offset / dist), p1[1] + (p2[1] - p1[1]) * (
offset / dist
)
def offset_along_shape(
point: Tuple[float], shape: List[Tuple[float]]
) -> Union[float, int]:
"""An offset on a shape defined as a vector path determined by the closest location on the
path to the point.
"""
if point not in shape:
return polygon_offset_with_minimum_distance_to_point(point, shape)
offset = 0
for i in range(len(shape) - 1):
if shape[i] == point:
break
offset += euclidean_distance(shape[i], shape[i + 1])
return offset
def position_at_shape_offset(
shape: List[Tuple[float]], offset: float
) -> Optional[Tuple[float]]:
"""A point defined as the offset into a shape defined as vector path."""
seen_length = 0
curr = shape[0]
for next_p in shape[1:]:
next_length = euclidean_distance(curr, next_p)
if seen_length + next_length > offset:
return position_at_offset(curr, next_p, offset - seen_length)
seen_length += next_length
curr = next_p
return shape[-1]
def line_offset_with_minimum_distance_to_point(
point: Tuple[float],
line_start: Tuple[float],
line_end: Tuple[float],
perpendicular: bool = False,
) -> Union[float, int]:
"""Return the offset from line (line_start, line_end) where the distance to
point is minimal"""
p = point
p1 = line_start
p2 = line_end
d = euclidean_distance(p1, p2)
u = ((p[0] - p1[0]) * (p2[0] - p1[0])) + ((p[1] - p1[1]) * (p2[1] - p1[1]))
if d == 0.0 or u < 0.0 or u > d * d:
if perpendicular:
return -1
if u < 0.0:
return 0.0
return d
return u / d
def polygon_offset_with_minimum_distance_to_point(
point: Tuple[float], polygon: List[Tuple[float]]
) -> Union[float, int]:
"""Return the offset and the distance from the polygon start where the distance to the point is minimal"""
p = point
s = polygon
seen = 0
min_dist = 1e400
min_offset = -1
for i in range(len(s) - 1):
p_offset = line_offset_with_minimum_distance_to_point(p, s[i], s[i + 1])
dist = (
min_dist
if p_offset == -1
else euclidean_distance(p, position_at_offset(s[i], s[i + 1], p_offset))
)
if dist < min_dist:
min_dist = dist
min_offset = p_offset + seen
seen += euclidean_distance(s[i], s[i + 1])
return min_offset
def distance_point_to_line(
point: Tuple[float],
line_start: Tuple[float],
line_end: Tuple[float],
perpendicular: bool = False,
) -> Union[float, int]:
"""Return the minimum distance between point and the line (line_start, line_end)"""
p1 = line_start
p2 = line_end
offset = line_offset_with_minimum_distance_to_point(
point, line_start, line_end, perpendicular
)
if offset == -1:
return -1
if offset == 0:
return euclidean_distance(point, p1)
u = offset / euclidean_distance(line_start, line_end)
intersection = (p1[0] + u * (p2[0] - p1[0]), p1[1] + u * (p2[1] - p1[1]))
return euclidean_distance(point, intersection)
def distance_point_to_polygon(
point: Tuple[float], polygon: List[Tuple[float]], perpendicular: bool = False
) -> Union[float, int]:
"""Return the minimum distance between point and polygon"""
p = point
s = polygon
min_dist = None
for i in range(len(s) - 1):
dist = distance_point_to_line(p, s[i], s[i + 1], perpendicular)
if dist == -1 and perpendicular and i != 0:
# distance to inner corner
dist = euclidean_distance(point, s[i])
if dist != -1:
if min_dist is None or dist < min_dist:
min_dist = dist
if min_dist is not None:
return min_dist
return -1
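# Illustrative example (hypothetical values): for the unit-square outline
# [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],
# distance_point_to_polygon((2, 0.5), square) returns 1.0 (the closest approach
# is the midpoint of the right edge).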
def rotate_around_point(point, radians, origin=(0, 0)) -> np.ndarray:
"""Rotate a point around a given origin."""
x, y = point
ox, oy = origin
qx = ox + math.cos(radians) * (x - ox) + math.sin(radians) * (y - oy)
qy = oy + -math.sin(radians) * (x - ox) + math.cos(radians) * (y - oy)
return np.array([qx, qy])
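# Note on the sign convention above: a positive angle rotates the point clockwise
# in the x-y plane, e.g. rotate_around_point((1, 0), math.pi / 2) ~= np.array([0.0, -1.0]).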
def min_angles_difference_signed(first, second) -> float:
"""The minimum signed difference between angles(radians)."""
return ((first - second) + math.pi) % (2 * math.pi) - math.pi
def position_to_ego_frame(position, ego_position, ego_heading):
"""
Get the position in ego vehicle frame given the pose (of either a vehicle or some point) in global frame.
Egocentric frame: The ego position becomes origin, and ego heading direction is positive x-axis.
Args:
position: [x,y,z]
ego_position: Ego vehicle [x,y,z]
ego_heading: Ego vehicle heading in radians
Returns:
new_pose: The pose [x,y,z] in egocentric view
"""
transform_matrix = np.eye(3)
ego_rel_position = np.asarray(position) - np.asarray(ego_position)
transform_matrix[0, 0] = np.cos(-ego_heading)
transform_matrix[0, 1] = -np.sin(-ego_heading)
transform_matrix[1, 0] = np.sin(-ego_heading)
transform_matrix[1, 1] = np.cos(-ego_heading)
new_position = np.matmul(transform_matrix, ego_rel_position.T).T
return new_position.tolist()
def comb(n, k):
"""Binomial coefficient"""
return factorial(n) // (factorial(k) * factorial(n - k))
def get_bezier_curve(points):
"""Get the curve function given a series of points.
Returns:
A curve function that takes a normalized offset [0:1] into the curve.
"""
n = len(points) - 1
return lambda t: sum(
comb(n, i) * t ** i * (1 - t) ** (n - i) * points[i] for i in range(n + 1)
)
def evaluate_bezier(points, total):
"""Generate the approximated points of a bezier curve given a series of control points.
Args:
points: The bezier control points.
total: The number of points generated from approximating the curve.
Returns:
An approximation of the bezier curve.
"""
bezier = get_bezier_curve(points)
new_points = np.array([bezier(t) for t in np.linspace(0, 1, total)])
return new_points[:, 0], new_points[:, 1]
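# Illustrative use (hypothetical control points): approximate a quadratic Bezier
# curve with 50 sample points:
#   xs, ys = evaluate_bezier(np.array([[0.0, 0.0], [1.0, 2.0], [2.0, 0.0]]), total=50)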
def inplace_unwrap(wp_array):
"""Unwraps an array in place."""
## minor optimization hack adapted from
## https://github.com/numpy/numpy/blob/v1.20.0/numpy/lib/function_base.py#L1492-L1546
## to avoid unnecessary (slow) np array copy
## (as seen in profiling).
p = np.asarray(wp_array)
dd = np.subtract(p[1:], p[:-1])
ddmod = np.mod(dd + math.pi, 2 * math.pi) - math.pi
np.copyto(ddmod, math.pi, where=(ddmod == -math.pi) & (dd > 0))
ph_correct = ddmod - dd
np.copyto(ph_correct, 0, where=abs(dd) < math.pi)
p[1:] += ph_correct.cumsum(axis=-1)
return p
def round_param_for_dt(dt: float) -> int:
"""for a given dt, returns what to pass as the second parameter
to the `round()` function in order to not lose precision.
Note that for whole numbers, like 100, the result will be negative.
For example, `round_param_for_dt(100) == -2`,
such that `round(190, -2) = 200`."""
strep = np.format_float_positional(dt)
decimal = strep.find(".")
if decimal >= len(strep) - 1:
return 1 - decimal
return len(strep) - decimal - 1
def rounder_for_dt(dt: float) -> Callable[[float], float]:
"""return a rounding function appropriate for timestepping."""
rp = round_param_for_dt(dt)
return lambda f: round(f, rp)
| 32.451796 | 110 | 0.624396 |
32d24f479e3d738a9c0f15796a5376a53792712f | 58,440 | py | Python | code/Experiments/neon-master/neon/backends/abstract_backend.py | matthijsvk/convNets | 7e65db7857a4e6abfbcab264953eb7741319de6c | ["Apache-2.0"] | 53 | 2017-04-18T10:06:20.000Z | 2021-12-29T21:26:07.000Z | code/Experiments/neon-master/neon/backends/abstract_backend.py | matthijsvk/convNets | 7e65db7857a4e6abfbcab264953eb7741319de6c | ["Apache-2.0"] | null | null | null | code/Experiments/neon-master/neon/backends/abstract_backend.py | matthijsvk/convNets | 7e65db7857a4e6abfbcab264953eb7741319de6c | ["Apache-2.0"] | 20 | 2017-05-03T03:27:09.000Z | 2022-03-24T07:07:45.000Z |
# ----------------------------------------------------------------------------
# Copyright 2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Defines interface that any backend must implement
"""
import abc
from future.utils import with_metaclass
class Backend_ABC_Meta(abc.ABCMeta):
"""
metaclass for the backend objects
takes care of registering all the backend subclasses
"""
def __init__(self, name, bases, dict_):
if not hasattr(self, 'backends'):
self.backends = {}
else:
name = getattr(self, 'backend_name', name)
if name not in ['Backend']:
self.backends[name] = self
super(Backend_ABC_Meta, self).__init__(name, bases, dict_)
class AbstractBackend(with_metaclass(Backend_ABC_Meta, object)):
def __del__(self):
self.cleanup_backend()
@abc.abstractmethod
def cleanup_backend(self):
"""Release any resources that have been acquired by this backend."""
raise NotImplementedError()
@abc.abstractmethod
def gen_rng(self, seed=None):
"""
Setup the random number generator(s) and store the state
in self.init_rng_state.
Arguments:
seed (int or None): RNG seed, if the seed is None,
then a seed will be randomly chosen
Returns:
np.random.RandomState: numpy RNG
"""
raise NotImplementedError()
@abc.abstractmethod
def rng_get_state(self, state):
"""
Get the random number generator state to a specific state.
Returns a tuple since some backends have multiple RNG states
(e.g. on-host and on-device)
Returns:
tuple: array of numpy ndarray which defines the current
state of the RNGs
"""
raise NotImplementedError()
@abc.abstractmethod
def rng_reset(self):
"""
Reset the random state to the state where the Backend is first
initialized.
"""
raise NotImplementedError()
@abc.abstractmethod
def rng_set_state(self, state):
"""
Set the random number generator state to a specific state.
Arguments:
state (np.array): array which is used to define the RNG
state
"""
raise NotImplementedError()
@abc.abstractmethod
def empty(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, without
initializing element values. This is slightly faster than
:py:func:`~neon.backends.Backend.array`,
:py:func:`~neon.backends.Backend.ones`,
:py:func:`~neon.backends.Backend.zeros`, but the values will be
random.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
            name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.array`,
:py:func:`~neon.backends.backend.Backend.zeros`,
:py:func:`~neon.backends.backend.Backend.ones`
"""
raise NotImplementedError()
@abc.abstractmethod
def array(self, ary, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
elements based on ary values.
Arguments:
ary (array_like): input array object to construct from. Can be
built-in python scalar or list (of lists), or a
numpy.ndarray
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
            name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.zeros`,
:py:func:`~neon.backends.backend.Backend.ones`
"""
raise NotImplementedError()
@abc.abstractmethod
def zeros(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
each element with a value of 0.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
            name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
raise NotImplementedError()
@abc.abstractmethod
def ones(self, shape, dtype=None, name=None, persist_values=True,
parallel=False, distributed=False):
"""
Instantiate a new instance of this backend's Tensor class, populating
each element with a value of 1.
Arguments:
shape (int, list): length of each dimension of the Tensor.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
            name (str, optional): name identifying the tensor (used in printing).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
parallel (bool, optional): If True and using multi-GPU backend,
replicate copies of this tensor across
devices. Defaults to False, and has no
effect on CPU, or (single) GPU backends.
distributed (bool, optional): If True and using multi-GPU backend,
this tensor is fragmented and
partitioned across devices. Defaults
to False, and has no effect on CPU,
or (single) GPU backends.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.zeros`,
:py:func:`~neon.backends.backend.Backend.array`
"""
raise NotImplementedError()
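    # Illustrative usage (hypothetical concrete backend instance `be` and host
    # array `numpy_weights`; not part of the abstract specification):
    #   x = be.zeros((128, 32))                      # 128 x 32 tensor of zeros
    #   w = be.array(numpy_weights, name="weights")  # copy a host array onto the backend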
@abc.abstractmethod
def empty_like(self, other_ary, name=None, persist_values=True):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from other_ary.
Arguments:
other_ary (tensor object): Tensor to inherit the dimensions of.
            name (str, optional): name identifying the tensor (used in printing).
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
raise NotImplementedError()
@abc.abstractmethod
def zeros_like(self, other_ary, name=None, persist_values=True):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from other_ary and populating each element with a value of 0.
Arguments:
other_ary (tensor object): Tensor to inherit the dimensions of.
            name (str, optional): name identifying the tensor (used in printing).
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls.
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
raise NotImplementedError()
@abc.abstractmethod
def dot(self, a, b, out=None):
"""
Dot product of two Tensors.
Arguments:
a (Tensor): left-hand side operand.
b (Tensor): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Note that this object should differ from
left and right.
Returns:
OpTreeNode: the resulting op-tree from this operation.
"""
raise NotImplementedError()
@abc.abstractmethod
def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False):
"""
Perform one of the following operations (* is dot product)
C = alpha * A * B + beta * C
C = alpha * A.T * B + beta * C
C = alpha * A * B.T + beta * C.
relu: if true, applied before output (and prior to beta addition)
The operation will be short-circuited to: out <- alpha * left * right
if beta has value 0 (the default).
Arguments:
A (Tensor): left-hand side operand.
B (Tensor): right-hand side operand.
C (Tensor): output operand
alpha (float. optional): scale A*B term
beta (float, optional): scale C term before sum
relu (bool, optional): If True apply ReLu non-linearity before
output. Defaults to False.
"""
raise NotImplementedError()
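    # Illustrative calls (hypothetical tensors A, B, C of compatible shapes; not
    # part of the abstract specification):
    #   be.compound_dot(A, B, C)              # C <- A . B
    #   be.compound_dot(A.T, B, C, beta=1.0)  # C <- A.T . B + C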
@abc.abstractmethod
def batched_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False):
"""
Perform one of the following operations:
1 For fprop: A(K, C), B(X,C,N), C(X,K,N) --> call batched_dot(A, B, C)
2 For bprop: A(K, C), B(X,K,N), C(X,C,N) --> call batched_dot(A.T, B, C)
3 For update: A(X,K,N), B(X,C,N), C(K,C) --> call batched_dot(A, B.T, C)
Arguments:
A (Tensor): left-hand input operand
B (Tensor): right-hand input operand
C (Tensor): output operand
alpha (float. optional): scale A*B term
beta (float, optional): scale C term before sum
relu (bool, optional): If True apply ReLu non-linearity before
output. Defaults to False.
"""
raise NotImplementedError()
@abc.abstractmethod
def make_binary_mask(self, out, keepthresh=0.5):
"""
Create a binary mask for dropout layers.
Arguments:
out (Tensor): Output tensor
keepthresh (float, optional): fraction of ones. Defaults to 0.5
"""
raise NotImplementedError()
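    # Illustrative dropout-style usage (hypothetical tensors): keep roughly 80% of units
    #   be.make_binary_mask(mask, keepthresh=0.8)
    #   out[:] = activations * mask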
@abc.abstractmethod
def add(self, a, b, out=None):
"""
Perform element-wise addition on the operands, storing the resultant
values in the out Tensor. Each operand and out must have identical
shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
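    # Note (informational, an assumption about typical neon-style usage): when `out`
    # is None, the element-wise ops in this class return a lazily evaluated op-tree;
    # the op-tree is usually executed by assigning it to a tensor, e.g.
    #   c[:] = be.add(a, b)   # or simply: c[:] = a + b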
@abc.abstractmethod
def subtract(self, a, b, out=None):
"""
Perform element-wise subtraction on the operands, storing the resultant
values in the out Tensor. Each operand and out must have identical
shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def multiply(self, a, b, out=None):
"""
Perform element-wise multiplication on the operands, storing the
resultant values in the out Tensor. Each operand and out must have
identical shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def divide(self, a, b, out=None):
"""
Perform element-wise division on the operands, storing the
resultant values in the out Tensor. Each operand and out must have
identical shape or be broadcastable as such.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def true_divide(self, a, b, out=None):
"""
Here it is an alias of divide.
Instead of the Python traditional 'floor division', this returns a
true division.
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def power(self, a, b, out=None):
"""
Perform element-wise raise of tsr values to specified power,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
b (Tensor, numeric): exponentiated value to be applied to
element. Examples include 2 (square),
                                 0.5 (square root).
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def reciprocal(self, a, out=None):
"""
Perform element-wise reciprocal of Tensor `a`, storing the result in
Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def negative(self, a, out=None):
"""
Perform element-wise negation of Tensor `a`, storing the result in
Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def sgn(self, a, out=None):
"""
Perform element-wise indication of the sign of Tensor `a`, storing the
result in Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def absolute(self, a, out=None):
"""
Perform element-wise absolute value of Tensor `a`, storing the result in
Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def fabs(self, a, out=None):
"""
Perform element-wise absolute value of Tensor `a`, storing the result
in Tensor out. Both Tensor's should have identical shape. Implemented as
an alias of absolute.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def sqrt(self, a, out=None):
"""
Perform element-wise square-root of Tensor `a`, storing the result in
Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def square(self, a, out=None):
"""
Perform element-wise square of Tensor `a`, storing the result in Tensor
out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def exp(self, a, out=None):
"""
Perform element-wise exponential transformation on Tensor `a`, storing
the result in Tensor out. Both Tensor's should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def exp2(self, a, out=None):
"""
Perform element-wise 2-based exponential transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def safelog(self, a, out=None):
"""
Perform element-wise natural logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape. This log function has built in safety for underflow.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def log(self, a, out=None):
"""
Perform element-wise natural logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def log2(self, a, out=None):
"""
Perform element-wise 2-based logarithm transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def sig(self, a, out=None):
"""
Perform element-wise sigmoid transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def sig2(self, a, out=None):
"""
Perform element-wise 2-based sigmoid logarithm transformation on
Tensor `a`, storing the result in Tensor out. Both Tensor's should
have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def tanh(self, a, out=None):
"""
Perform element-wise hyperbolic tangent transformation on Tensor `a`,
storing the result in Tensor out. Both Tensor's should have identical
shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def tanh2(self, a, out=None):
"""
Perform element-wise 2-based hyperbolic tangent transformation on Tensor
`a`, storing the result in Tensor out. Both Tensor's should have
identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def finite(self, a, out=None):
"""
        Perform element-wise test of finiteness (i.e. neither infinity nor NaN)
        on Tensor `a`, storing the result in Tensor out. Both Tensor's
should have identical shape.
Arguments:
a (Tensor): input to be transformed.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def equal(self, a, b, out=None):
"""
Performs element-wise equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def not_equal(self, a, b, out=None):
"""
Performs element-wise non-equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def less(self, a, b, out=None):
"""
Performs element-wise less than testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def less_equal(self, a, b, out=None):
"""
Performs element-wise less than or equal testing on each element of
left and right, storing the result in out. Each operand is assumed to
be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def greater(self, a, b, out=None):
"""
Performs element-wise greater than testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
                None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def greater_equal(self, a, b, out=None):
"""
Performs element-wise greater than or equal testing on each element of
left and right, storing the result in out. Each operand is assumed to
be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def maximum(self, a, b, out=None):
"""
Performs element-wise maximum value assignment based on corresponding
elements of left and right, storing the result in out. Each operand is
assumed to be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def minimum(self, a, b, out=None):
"""
Performs element-wise minimum value assignment based on corresponding
elements of left and right, storing the result in out. Each operand is
assumed to be the same shape (or broadcastable as such).
Arguments:
a (Tensor, numeric): left-hand side operand.
b (Tensor, numeric): right-hand side operand.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def clip(self, a, a_min, a_max, out=None):
"""
Performs element-wise clipping of Tensor `a`, storing the result in out.
The clipped value will be between [a_min, a_max].
Arguments:
            a (Tensor, numeric): input Tensor to be clipped.
            a_min (Tensor, numeric): lower bound for the clipped values.
            a_max (Tensor, numeric): upper bound for the clipped values.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def sum(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the summation of the elements along the specified axis.
Arguments:
a (Tensor): the Tensor on which to perform the sum
axis (int, optional): the dimension along which to compute.
If set to None, we will sum over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
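    # A hedged illustration of the keepdims semantics described above
    # (`be` is a hypothetical concrete backend instance, `a` a 3x4 Tensor):
    #   be.sum(a, axis=0)  -> shape (1, 4)   (reduced axis kept with size 1)
    #   be.sum(a, axis=1)  -> shape (3, 1)
    #   be.sum(a)          -> sum over all dimensions
    # Keeping the reduced axis lets the result broadcast back against `a`.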
@abc.abstractmethod
def max(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the maximal element value along the specified axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take max over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def min(self, a, axis=None, out=None, keepdims=True):
"""
Calculates the minimal element value along the specified axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take min over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def argmax(self, a, axis=1, out=None, keepdims=True):
"""
Calculates the indices of the maximal element value along the specified
axis. If multiple elements contain the maximum, only the indices of
the first are returned.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take argmax over all
dimensions. Defaults to 1
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def argmin(self, a, axis=1, out=None, keepdims=True):
"""
Calculates the indices of the minimal element value along the specified
axis. If multiple elements contain the minimum, only the indices of
the first are returned.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take argmin over all
dimensions. Defaults to 1
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
    def mean(self, a, axis=None, partial=None, out=None, keepdims=True):
"""
Calculates the arithmetic mean of the elements along the specified
axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take mean over all
dimensions. Defaults to None
partial (bool, optional): Not currently used.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def var(self, a, axis=None, partial=None, out=None, keepdims=True):
"""
Calculates the variance of the elements along the specified
axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take var over all
dimensions. Defaults to None
partial (bool, optional): Not currently used.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def std(self, a, axis=None, partial=None, out=None, keepdims=True):
"""
Calculates the standard deviation of the elements along the specified
axes.
Arguments:
a (Tensor): the Tensor on which to perform the operation
axis (int, optional): the dimension along which to compute.
If set to None, we will take std over all
dimensions.
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
partial (bool, optional): Not currently used.
keepdims (bool, optional): Keep the axes being computed over in the
output (with size 1), instead of
collapsing. Defaults to True.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
@abc.abstractmethod
def take(self, a, indices, axis, out=None):
"""
Extract elements based on the indices along a given axis.
Arguments:
a (Tensor): the Tensor on which to perform the operation
            indices (Tensor, numpy ndarray): indices of elements to select
axis (int, optional): the dimension along which to compute.
If set to None, we will extract over all
dimensions (flattened first)
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
"""
raise NotImplementedError()
@abc.abstractmethod
def onehot(self, indices, axis, out=None):
"""
Generate optree for converting `indices` to a onehot representation.
Arguments:
indices (Tensor): Elements must be of numpy integer type for gpu
onehot to work.
axis (int): the axis along the feature length dimension
out (Tensor, optional): where the result will be stored. If out is
None, only the op-tree will be returned.
Returns:
OpTreeNode: the resulting op-tree
"""
raise NotImplementedError()
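    # A hedged sketch of the onehot layout (the concrete names are assumptions;
    # the semantics follow the docstring above): with integer indices [2, 0, 1],
    # a feature length of 4 and axis=0 as the feature axis, the generated
    # op-tree evaluates to
    #   [[0, 1, 0],
    #    [0, 0, 1],
    #    [1, 0, 0],
    #    [0, 0, 0]]
    # i.e. one one-hot column per input index.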
@abc.abstractmethod
def update_fc_bias(self, err, out):
"""
Compute the updated bias gradient for a fully connected network layer.
Arguments:
err (Tensor): backpropagated error
out (Tensor): Where to store the updated gradient value.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_fc_bias(self, inputs, bias):
"""
Add the bias for a fully connected network layer.
Arguments:
inputs (Tensor): the input to update.
bias (Tensor): the amount to increment
"""
self.ng.add(inputs, bias, out=inputs)
@abc.abstractmethod
def conv_layer(self, dtype,
N, C, K,
D=1, H=1, W=1,
T=1, R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1,
relu=False, bsum=False):
"""
Create a new ConvLayer parameter object.
This is then passed as an argument to all the convolution operations.
Arguments:
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
N (int): Number of images in mini-batch
C (int): Number of input feature maps
K (int): Number of output feature maps
D (int, optional): Depth of input image. Defaults to 1
H (int, optional): Height of input image. Defaults to 1
W (int, optional): Width of input image. Defaults to 1
T (int, optional): Depth of filter kernel. Defaults to 1
R (int, optional): Height of filter kernel. Defaults to 1
S (int, optional): Width of filter kernel. Defaults to 1
pad_d (int, optional): amount of zero-padding around the depth edge
Defaults to 0.
pad_h (int, optional): amount of zero-padding around the height edge
Defaults to 0.
pad_w (int, optional): amount of zero-padding around the width edge
Defaults to 0.
str_d (int, optional): factor to step the filters by in the depth
direction. Defaults to 1
            str_h (int, optional): factor to step the filters by in the height
                                   direction. Defaults to 1
            str_w (int, optional): factor to step the filters by in the width
                                   direction. Defaults to 1
relu (bool, optional): apply a relu transform to the output for
fprop or bprop. Defaults to False
bsum (bool, optional): calculate the sum along the batchnorm axis
for fprop or bprop. Outputs an fp32 tensor
of size Kx1. Defaults to False.
"""
raise NotImplementedError()
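    # A hedged usage sketch (the tensor names I, F, O and the backend handle
    # `be` are assumptions; the keyword names come from the signature above):
    #   layer = be.conv_layer(np.float32, N=128, C=3, K=64,
    #                         H=32, W=32, R=3, S=3, pad_h=1, pad_w=1)
    #   be.fprop_conv(layer, I, F, O)   # I: inputs, F: filters, O: outputs
    # The same layer object is then reused by bprop_conv and update_conv.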
@abc.abstractmethod
def fprop_conv(self, layer, I, F, O, alpha=1.0, relu=False, repeat=1):
"""
Forward propagate the inputs of a convolutional network layer to
produce output.
Arguments:
layer: the conv layer as a parameter object
I (Tensor): inputs
F (Tensor): the weights (filters)
O (Tensor): outputs
alpha (float, optional): linear scaling. Defaults to 1.0
relu (bool, optional): apply ReLu before output. Default not to.
repeat (int, optional): Repeat this operation the specified number
of times. Defaults to 1.
"""
raise NotImplementedError()
@abc.abstractmethod
def bprop_conv(self, layer, F, E, grad_I, alpha=1.0, repeat=1):
"""
Backward propagate the error through a convolutional network layer.
Arguments:
layer: the conv layer as a parameter object
F (Tensor): the weights (filters)
E (Tensor): errors
grad_I (Tensor): gradient to inputs (output delta)
alpha (float, optional): linear scaling. Defaults to 1.0
repeat (int, optional): Repeat this operation the specified number
of times. Defaults to 1.
"""
raise NotImplementedError()
@abc.abstractmethod
def update_conv(self, layer, I, E, grad_F, alpha=1.0, repeat=1):
"""
Compute the updated gradient for a convolutional network layer.
Arguments:
layer: the conv layer as a parameter object
I (Tensor): the inputs
E (Tensor): the errors
grad_F (Tensor): filter gradients (weights) to update.
alpha (float, optional): linear scaling. Defaults to 1.0
repeat (int, optional): Repeat this operation the specified number
of times. Defaults to 1.
"""
raise NotImplementedError()
@abc.abstractmethod
def deconv_layer(self, dtype,
N, C, K,
P, Q,
R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1):
"""
Create a new Deconvolution parameter object.
This then is passed as an argument to all deconvolution kernels.
Arguments:
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
N (int): Number of images in mini-batch
C (int): Number of input feature maps
K (int): Number of output feature maps
P (int): Height of output
Q (int): Width of output
R (int, optional): Height of filter kernel. Defaults to 1
S (int, optional): Width of filter kernel. Defaults to 1
pad_d (int, optional): amount of zero-padding around the depth edge
Defaults to 0.
pad_h (int, optional): amount of zero-padding around the height edge
Defaults to 0.
pad_w (int, optional): amount of zero-padding around the width edge
Defaults to 0.
str_d (int, optional): factor to step the filters by in the depth
direction. Defaults to 1
            str_h (int, optional): factor to step the filters by in the height
                                   direction. Defaults to 1
            str_w (int, optional): factor to step the filters by in the width
                                   direction. Defaults to 1
Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.
"""
raise NotImplementedError()
@abc.abstractmethod
def pool_layer(self, dtype,
op, N, C,
D=1, H=1, W=1,
J=1, T=1, R=1, S=1,
pad_j=0, pad_d=0, pad_h=0, pad_w=0,
str_j=None, str_d=None, str_h=None, str_w=None):
"""
Create a new PoolLayer parameter object.
This then is passed as an argument to all pooling kernels.
Arguments:
op (str): "max", "avg", "l2" pooling (currently bprop only supports
max, but not avg and l2)
N (int): Number of images in mini-batch
C (int): Number of input feature maps
D (int, optional): Depth of input image. Defaults to 1
H (int, optional): Height of input image. Defaults to 1
W (int, optional): Width of input image. Defaults to 1
J (int, optional): Size of feature map pooling window
(maxout n_pieces). Defaults to 1
T (int, optional): Depth of pooling window. Defaults to 1
R (int, optional): Height of pooling window. Defaults to 1
S (int, optional): Width of pooling window. Defaults to 1
pad_j (int, optional): amount of zero-padding around the fm pooling
window edge. Defaults to 0.
pad_d (int, optional): amount of zero-padding around the depth edge
Defaults to 0.
pad_h (int, optional): amount of zero-padding around the height edge
Defaults to 0.
pad_w (int, optional): amount of zero-padding around the width edge
Defaults to 0.
str_j (int, optional): factor to step the filters by in the fm
pooling window direction. Defaults to 1
str_d (int, optional): factor to step the filters by in the depth
direction. Defaults to 1
            str_h (int, optional): factor to step the filters by in the height
                                   direction. Defaults to 1
            str_w (int, optional): factor to step the filters by in the width
                                   direction. Defaults to 1
Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.
"""
raise NotImplementedError()
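    # A hedged companion sketch for pooling (hypothetical `be`, I and O;
    # keyword names follow the signature above):
    #   layer = be.pool_layer(np.float32, "max", N=128, C=64,
    #                         H=32, W=32, R=2, S=2, str_h=2, str_w=2)
    #   be.fprop_pool(layer, I, O)
    # The op string picks max/avg/l2 pooling as documented above.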
@abc.abstractmethod
def fprop_pool(self, layer, I, O):
"""
Forward propagate pooling layer.
Arguments:
layer (PoolLayer): The pool layer object, different backends have
different pool layers.
I (Tensor): Input tensor.
O (Tensor): output tensor.
"""
raise NotImplementedError()
@abc.abstractmethod
def bprop_pool(self, layer, I, E, grad_I):
"""
Backward propagate pooling layer.
Arguments:
layer (PoolLayer): The pool layer object. Different backends have
different pool layers.
I (Tensor): Input tensor.
E (Tensor): Error tensor.
grad_I (Tensor): Gradient tensor (delta)
"""
raise NotImplementedError()
@abc.abstractmethod
def compound_bprop_lut(self, nin, inputs, error, error_t, dW, pad_idx, alpha=1.0, beta=0):
"""
Backward propagate lookup table layer.
Arguments:
nin (int): Number of input word_ids.
inputs (Tensor): Input tensor.
error (Tensor): Error tensor.
error_t (Tensor): Transposed error tensor.
dW (Tensor): Gradient tensor (delta).
pad_idx (int):
alpha (float):
beta (float):
"""
raise NotImplementedError()
# ---- env/Lib/site-packages/plotly/validators/scattergeo/legendgrouptitle/font/_color.py (from andresgreen-byte/Laboratorio-1--Inversion-de-Capital, MIT license) ----
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="scattergeo.legendgrouptitle.font",
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs
)
# ---- mlprodict/asv_benchmark/template/skl_model_multi_classifier.py (from xadupre/mlprodict, MIT license) ----
"""
A template to benchmark a model
with :epkg:`asv`. The benchmark can be run through
file :epkg:`run_asv.sh` on Linux or :epkg:`run_asv.bat` on
Windows.
.. warning::
On Windows, you should avoid cloning the repository
on a folder with a long full name. Visual Studio tends to
abide by the rule of the maximum path length even though
the system is told otherwise.
"""
import numpy # pylint: disable=W0611
from mlprodict.tools.asv_options_helper import get_opset_number_from_onnx
# Import specific to this model.
from sklearn.tree import DecisionTreeClassifier # pylint: disable=C0411
from mlprodict.asv_benchmark import _CommonAsvSklBenchmarkMultiClassifier # pylint: disable=C0412
from mlprodict.onnx_conv import to_onnx # pylint: disable=W0611, C0412
from mlprodict.onnxrt import OnnxInference # pylint: disable=W0611, C0412
class TemplateBenchmarkMultiClassifier(_CommonAsvSklBenchmarkMultiClassifier):
"""
:epkg:`asv` example for a classifier,
Full template can be found in
`common_asv_skl.py <https://github.com/sdpython/mlprodict/
blob/master/mlprodict/asv_benchmark/common_asv_skl.py>`_.
"""
params = [
['skl', 'pyrtc', 'ort'], # values for runtime
[1, 10, 100, 1000, 10000, 100000], # values for N
[4, 20], # values for nf
[get_opset_number_from_onnx()], # values for opset
['float', 'double'], # values for dtype
[None], # values for optim
]
# additional parameters
def setup_cache(self): # pylint: disable=W0235
super().setup_cache()
def _create_model(self):
return DecisionTreeClassifier()
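    # Illustrative note (my reading of the params grid above, not from the
    # source): asv times every combination of these values, e.g. runtime='ort',
    # N=10000, nf=20, the default opset, dtype='float', optim=None is one
    # benchmarked case.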
# ---- azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/verification_ip_flow_result_py3.py (from JonathanGailliez/azure-sdk-for-python, MIT license) ----
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowResult(Model):
"""Results of IP flow verification on the target resource.
:param access: Indicates whether the traffic is allowed or denied.
Possible values include: 'Allow', 'Deny'
:type access: str or ~azure.mgmt.network.v2018_12_01.models.Access
:param rule_name: Name of the rule. If input is not matched against any
security rule, it is not displayed.
:type rule_name: str
"""
_attribute_map = {
'access': {'key': 'access', 'type': 'str'},
'rule_name': {'key': 'ruleName', 'type': 'str'},
}
def __init__(self, *, access=None, rule_name: str=None, **kwargs) -> None:
super(VerificationIPFlowResult, self).__init__(**kwargs)
self.access = access
self.rule_name = rule_name
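# A hedged construction sketch (the values are made up; the keyword-only
# parameters come from the __init__ signature above):
#   result = VerificationIPFlowResult(access='Allow', rule_name='AllowHTTPS')
#   result.access     -> 'Allow'
#   result.rule_name  -> 'AllowHTTPS'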
# ---- tensorflow/contrib/seq2seq/__init__.py (from xincao79/tensorflow, Apache-2.0 license) ----
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network seq2seq decoders and losses.
See the @{$python/contrib.seq2seq} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import *
from tensorflow.contrib.seq2seq.python.ops.basic_decoder import *
from tensorflow.contrib.seq2seq.python.ops.beam_search_decoder import *
from tensorflow.contrib.seq2seq.python.ops.beam_search_ops import *
from tensorflow.contrib.seq2seq.python.ops.decoder import *
from tensorflow.contrib.seq2seq.python.ops.helper import *
from tensorflow.contrib.seq2seq.python.ops.loss import *
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,wildcard-import,line-too-long
_allowed_symbols = [
"sequence_loss",
"Decoder",
"dynamic_decode",
"BasicDecoder",
"BasicDecoderOutput",
"BeamSearchDecoder",
"BeamSearchDecoderOutput",
"BeamSearchDecoderState",
"Helper",
"CustomHelper",
"FinalBeamSearchDecoderOutput",
"gather_tree",
"GreedyEmbeddingHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"TrainingHelper",
"BahdanauAttention",
"LuongAttention",
"hardmax",
"AttentionWrapperState",
"AttentionWrapper",
"AttentionMechanism",
"tile_batch"]
remove_undocumented(__name__, _allowed_symbols)
# ---- Parabola/prop1.py (from pdcxs/ManimProjects, WTFPL license) ----
from manimlib.imports import *
from ManimProjects.utils.Parabola import Parabola
from ManimProjects.utils.geometry import CText
# To run this code, you should modify manim source files.
# In manimlib/mobject/mobject.py, add "plot_depth": 0 in the CONFIG of Mobject class
# In manimlib/camera/camera.py
# change the return line in the extract_mobject_family_members method to
# return remove_list_redundancies(list(it.chain(*[method(m) for m in (mobjects.sort(key=lambda m:m.plot_depth) if len(mobjects)>0 else [])])))
class OpenScene(Scene):
def construct(self):
text1 = CText('这是一条重要的性质')
text2 = CText('后面会经常用到')
group = VGroup(text1, text2)
group.arrange(DOWN)
self.play(Write(text1))
self.wait()
self.play(Write(text2))
self.wait()
self.play(FadeOut(group))
class Prop1(Parabola):
CONFIG = {
'x_min': -4,
'focus': 2
}
def construct(self):
self.adjust_x_range()
graph = self.get_graph(color=LIGHT_BROWN)
focus = Dot(self.get_focus()).set_fill(DARK_BLUE)
directrix = self.get_directrix()
focusLabel = TexMobject('F').scale(0.75)
focusLabel.next_to(focus, RIGHT, buff=SMALL_BUFF)
self.directrix = directrix
focus.plot_depth = 1
sub1 = CText('在抛物线上任取不同的两点P1,P2').scale(0.4)
sub1.to_corner(RIGHT+DOWN)
p1_y = ValueTracker(7)
p2_y = ValueTracker(2)
p1 = Dot()
p1.add_updater(lambda m: m.move_to(self.value_to_point(p1_y.get_value())))
p1.plot_depth = 1
p2 = Dot()
p2.add_updater(lambda m: m.move_to(self.value_to_point(p2_y.get_value())))
p2.plot_depth = 1
p1.set_fill(RED)
p2.set_fill(RED)
p1Label = TexMobject('P_1').scale(0.75)
p2Label = TexMobject('P_2').scale(0.75)
p1Label.add_updater(lambda m:\
m.next_to(p1, RIGHT, buff=SMALL_BUFF))
p2Label.add_updater(lambda m:\
m.next_to(p2, LEFT+UP, buff=SMALL_BUFF))
sub2 = CText('连接两点,延长交准线于点K')
sub2.scale(0.4).to_corner(RIGHT+DOWN)
ppLine = Line()
ppLine.add_updater(lambda m:\
m.put_start_and_end_on(p1.get_center(),\
self.get_directrix_point(p1, p2)))
k = Dot()
k.plot_depth = 1
k.set_fill(DARK_BLUE)
k.add_updater(lambda m: m.move_to(ppLine.points[-1]))
kLabel = TexMobject('K').scale(0.75)
kLabel.add_updater(lambda m:\
m.next_to(k, LEFT, buff=SMALL_BUFF))
sub3 = CText('分别连接P1F, P2F')
sub3.scale(0.4).to_corner(RIGHT+DOWN)
p1f = Line()
p2f = Line()
p1f.add_updater(lambda m:\
m.put_start_and_end_on(p1.get_center(),\
focus.get_center()))
p2f.add_updater(lambda m:\
m.put_start_and_end_on(p2.get_center(),\
focus.get_center()))
sub4 = CText('延长P1F交准线于D')
sub4.scale(0.4).to_corner(RIGHT+DOWN)
p1fd = Line()
p1fd.add_updater(lambda m:\
m.put_start_and_end_on(\
focus.get_center(),\
self.get_directrix_point(p1, focus)))
d = Dot()
d.plot_depth = 1
d.set_fill(DARK_BLUE)
d.add_updater(lambda m: m.move_to(p1fd.points[-1]))
dLabel = TexMobject('D')
dLabel.scale(0.75)
dLabel.add_updater(lambda m:\
m.next_to(d, LEFT, buff=SMALL_BUFF))
sub5 = CText('连接KF')
sub5.scale(0.4).to_corner(RIGHT+DOWN)
kf = Line()
kf.add_updater(lambda m:\
m.put_start_and_end_on\
(k.get_center(), focus.get_center()))
ang1 = ArcBetweenPoints(*self.get_arc_point(
p2.get_center(),
focus.get_center(),
k.get_center()
))
ang1.add_updater(lambda m:\
m.put_start_and_end_on(
*self.get_arc_point(
p2.get_center(),
focus.get_center(),
k.get_center()
)))
ang1Label = TexMobject('1').scale(0.5)
ang1Label.add_updater(lambda m:\
m.move_to(ang1.get_center()))
ang2 = ArcBetweenPoints(*self.get_arc_point(
k.get_center(),
focus.get_center(),
d.get_center()
))
ang2.add_updater(lambda m:\
m.put_start_and_end_on(
*self.get_arc_point(
k.get_center(),
focus.get_center(),
d.get_center()
)))
ang2Label = TexMobject('2').scale(0.5)
ang2Label.add_updater(lambda m:\
m.move_to(ang2.get_center()))
ang1Value = DecimalNumber(self.get_angle(
p2.get_center(),
focus.get_center(),
k.get_center()) / DEGREES, num_decimal_places=2)
ang1Value.add_updater(lambda m:\
m.set_value(self.get_angle(
p2.get_center(),
focus.get_center(),
k.get_center()
) / DEGREES))
ang2Value = DecimalNumber(self.get_angle(
k.get_center(),
focus.get_center(),
d.get_center()) / DEGREES, num_decimal_places=2)
ang2Value.add_updater(lambda m:\
m.set_value(self.get_angle(
k.get_center(),
focus.get_center(),
d.get_center()
) / DEGREES))
ang1Tail = TexMobject('^{\circ}')
ang2Tail = TexMobject('^{\circ}')
ang1head = TexMobject('\\angle 1=')
ang1Text = VGroup(ang1head, ang1Value, ang1Tail)
ang1Text.arrange(buff=SMALL_BUFF)
ang1Tail.shift(0.15*UP)
ang2head = TexMobject('\\angle 2=')
ang2Text = VGroup(ang2head, ang2Value, ang2Tail)
ang2Text.arrange(buff=SMALL_BUFF)
ang2Tail.shift(0.15*UP)
angs = VGroup(ang1Text, ang2Text)
angs.arrange(buff=MED_SMALL_BUFF)
angs.shift(3*RIGHT + 2*UP)
m1 = Dot()
m1.plot_depth = 1
m1.set_fill(ORANGE)
m1.add_updater(lambda m:\
m.move_to(self.coords_to_point(
-self.focus, p1_y.get_value()
)))
m2 = Dot()
m2.plot_depth = 1
m2.set_fill(ORANGE)
m2.add_updater(lambda m:\
m.move_to(self.coords_to_point(
-self.focus, p2_y.get_value()
)))
m1Label = TexMobject('M_1').scale(0.75)
m2Label = TexMobject('M_2').scale(0.75)
m1Label.add_updater(lambda m:\
m.next_to(m1, LEFT, buff=SMALL_BUFF))
m2Label.add_updater(lambda m:\
m.next_to(m2, LEFT, buff=SMALL_BUFF))
m1p1 = DashedLine()
m1p1.add_updater(lambda m:\
m.put_start_and_end_on(
m1.get_center(), p1.get_center()
))
m2p2 = DashedLine()
m2p2.add_updater(lambda m:\
m.put_start_and_end_on(
m2.get_center(), p2.get_center()
))
fracs = TexMobject('{KP_2', '\\over', 'KP_1}', '=',
'{M_2P_2', '\\over', 'M_1P_1}', '=',
'{FP_2', '\\over', 'FP_1}')
fracs.shift(3*RIGHT + 2*UP)
fracs2 = TexMobject('{KP_2', '\\over', 'KP_1}', '=',
'{FP_2', '\\over', 'FP_1}')
fracs2.move_to(fracs.get_center())
fracs2.align_to(fracs, LEFT)
fracs3 = TexMobject('{KP_2', '\\over', 'FP_2}', '=',
'{KP_1', '\\over', 'FP_1}')
fracs3.move_to(fracs2.get_center())
explain = CText('由正弦定理').scale(0.4)
explain.next_to(fracs3, DOWN)
explain.align_to(fracs3, LEFT)
fracs4 = TexMobject('{\\sin\\angle 1', '\\over', '\\sin\\angle K}', '=',
'{\\sin\\angle KFP_1', '\\over', '\\sin\\angle K}')
fracs4.next_to(explain, DOWN)
fracs4.align_to(explain, LEFT)
form = TexMobject('\\sin \\angle 1 = \\sin \\angle KFP_1')
form.next_to(fracs4, DOWN)
form.align_to(fracs4, LEFT)
form2 = TexMobject('\\angle1 < \\angle KFP_1 < \\pi')
form2.next_to(form, DOWN)
form2.align_to(form, LEFT)
form3 = TexMobject('\\angle1 = \pi - \\angle KFP_1 = \\angle 2')
form3.next_to(form2, DOWN)
form3.align_to(form2, LEFT)
remover = Rectangle(height=FRAME_HEIGHT, width=FRAME_WIDTH)
remover.set_color(BLACK)
remover.set_fill(BLACK, opacity=1)
remover.plot_depth = 2
# kp2 = Line()
# kp2.add_updater(lambda m:\
# m.put_start_and_end_on(k.get_center(),
# p2.get_center()))
# kp2.set_color(YELLOW)
############################################
# Animation part #
############################################
self.play(ShowCreation(focus), ShowCreation(directrix))
self.play(ShowCreation(graph), Write(focusLabel))
self.play(Write(sub1))
self.play(*[ShowCreation(e) for e in\
[p1, p2, p1Label, p2Label]])
self.wait()
self.play(FadeOut(sub1))
self.play(Write(sub2))
self.wait()
self.play(ShowCreation(ppLine))
self.play(ShowCreation(k), Write(kLabel))
self.wait()
self.play(FadeOut(sub2))
self.play(Write(sub3))
self.play(*[ShowCreation(e) for e in [p1f, p2f]])
self.wait()
self.play(FadeOut(sub3))
self.play(Write(sub4))
self.play(ShowCreation(p1fd))
self.play(*[ShowCreation(e) for e in [d, dLabel]])
self.wait()
self.play(FadeOut(sub4))
self.play(Write(sub5))
self.wait()
self.play(ShowCreation(kf))
self.play(ShowCreation(ang1), Write(ang1Label))
self.play(ShowCreation(ang2), Write(ang2Label))
self.play(FadeOut(sub5))
self.play(Write(angs))
self.play(*[ShowCreation(e) for e in\
[m1, m2, m1p1, m2p2, m1Label, m2Label]])
self.play(ApplyMethod(p2_y.set_value, -1), run_time=2)
self.wait()
self.play(ApplyMethod(p1_y.set_value, 5))
self.wait()
self.play(ApplyMethod(p1_y.set_value, 9), run_time=3)
self.wait()
self.play(ApplyMethod(p2_y.set_value, 3), run_time=2)
self.play(FadeOut(angs))
self.wait()
self.play(Write(fracs))
self.wait(5)
#self.play(ReplacementTransform(fracs, fracs2))
self.play(FadeOut(fracs[4:8]))
self.play(*[ApplyMethod(fracs[i].move_to, fracs[i - 4].get_center()) for i in range(8, 11)])
# self.play(FadeOut(fracs), FadeIn(fracs2), run_time=0.1)
# self.wait(5)
# self.play(ReplacementTransform(fracs2, fracs3))
self.wait(5)
pos1 = fracs[2].get_center()
pos2 = fracs[8].get_center()
self.play(ApplyMethod(fracs[2].move_to, pos2),
ApplyMethod(fracs[8].move_to, pos1))
self.wait(5)
self.play(Write(explain))
self.wait(3)
self.play(ShowCreationThenDestruction(Polygon(
k.get_center(), focus.get_center(),
p2.get_center()
).set_fill(DARK_BLUE, opacity=1)), run_time=3)
self.play(Write(fracs4[:3]))
self.play(ShowCreationThenDestruction(Polygon(
k.get_center(), focus.get_center(),
p1.get_center()
).set_fill(DARK_BLUE, opacity=1)), run_time=3)
self.play(Write(fracs4[3:]))
self.wait(3)
self.play(Write(form))
self.wait(3)
self.play(Write(form2))
self.wait(3)
self.play(Write(form3))
self.wait(5)
self.play(FadeIn(remover))
def get_directrix_point(self, p1, p2):
p1c = p1.get_center()
p2c = p2.get_center()
vec = p2c - p1c
vec /= vec[0]
p3 = p2c + (self.directrix.get_center() - p2c)[0] * vec
return p3
def get_arc_point(self, p1, c, p2, radius=0.3):
v1 = normalize(p1 - c)
v2 = normalize(p2 - c)
return [v1 * radius + c, v2 * radius + c]
def get_angle(self, p1, c, p2):
v1 = p1 - c
v2 = p2 - c
v1n = np.sum([x**2 for x in v1])
v2n = np.sum([x**2 for x in v2])
ang = np.arccos(np.dot(v1, v2) /\
np.sqrt(v1n * v2n))
return ang
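    # Hand-checked example for get_angle: with c at the origin and p1, p2 the
    # unit vectors RIGHT and UP, the dot product is 0, so the returned angle
    # is arccos(0) = pi/2 (90 degrees).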
class Summary(Scene):
def construct(self):
text = CText('总结')
text.set_fill(DARK_BROWN)
content1 = CText('抛物线任意弦与准线的交点')
content2 = CText('及该抛物线的焦点所构成的线段,')
content3 = CText('平分该弦两端点与焦点所构成角的外角')
contents = VGroup(content1, content2, content3)
contents.scale(0.7)
contents.arrange(DOWN)
total = VGroup(text, contents)
total.arrange(DOWN, buff=MED_LARGE_BUFF)
self.play(Write(text))
self.wait(2)
self.play(Write(contents))
        self.wait(10)
# ---- pybamm/solvers/casadi_algebraic_solver.py (from anandmy/PyBaMM, BSD-3-Clause license) ----
#
# Casadi algebraic solver class
#
import casadi
import pybamm
import numpy as np
class CasadiAlgebraicSolver(pybamm.BaseSolver):
"""Solve a discretised model which contains only (time independent) algebraic
equations using CasADi's root finding algorithm.
Note: this solver could be extended for quasi-static models, or models in
which the time derivative is manually discretised and results in a (possibly
    nonlinear) algebraic system at each time level.
Parameters
----------
tol : float, optional
The tolerance for the solver (default is 1e-6).
extra_options : dict, optional
Any options to pass to the CasADi rootfinder.
Please consult `CasADi documentation <https://tinyurl.com/y7hrxm7d>`_ for
details.
"""
def __init__(self, tol=1e-6, extra_options=None):
super().__init__()
self.tol = tol
self.name = "CasADi algebraic solver"
self.algebraic_solver = True
self.extra_options = extra_options or {}
pybamm.citations.register("Andersson2019")
@property
def tol(self):
return self._tol
@tol.setter
def tol(self, value):
self._tol = value
def _integrate(self, model, t_eval, inputs=None):
"""
Calculate the solution of the algebraic equations through root-finding
Parameters
----------
model : :class:`pybamm.BaseModel`
The model whose solution to calculate.
t_eval : :class:`numpy.array`, size (k,)
The times at which to compute the solution
inputs : dict, optional
Any input parameters to pass to the model when solving. If any input
parameters that are present in the model are missing from "inputs", then
the solution will consist of `ProcessedSymbolicVariable` objects, which must
be provided with inputs to obtain their value.
"""
# Record whether there are any symbolic inputs
inputs = inputs or {}
has_symbolic_inputs = any(isinstance(v, casadi.MX) for v in inputs.values())
symbolic_inputs = casadi.vertcat(
*[v for v in inputs.values() if isinstance(v, casadi.MX)]
)
# Create casadi objects for the root-finder
inputs = casadi.vertcat(*[v for v in inputs.values()])
y0 = model.y0
# If y0 already satisfies the tolerance for all t then keep it
if has_symbolic_inputs is False and all(
np.all(abs(model.casadi_algebraic(t, y0, inputs).full()) < self.tol)
for t in t_eval
):
pybamm.logger.debug("Keeping same solution at all times")
return pybamm.Solution(t_eval, y0, termination="success")
# The casadi algebraic solver can read rhs equations, but leaves them unchanged
# i.e. the part of the solution vector that corresponds to the differential
# equations will be equal to the initial condition provided. This allows this
# solver to be used for initialising the DAE solvers
if model.rhs == {}:
len_rhs = 0
y0_diff = casadi.DM()
y0_alg = y0
else:
len_rhs = model.concatenated_rhs.size
y0_diff = y0[:len_rhs]
y0_alg = y0[len_rhs:]
y_alg = None
# Set up
t_sym = casadi.MX.sym("t")
y_alg_sym = casadi.MX.sym("y_alg", y0_alg.shape[0])
y_sym = casadi.vertcat(y0_diff, y_alg_sym)
t_and_inputs_sym = casadi.vertcat(t_sym, symbolic_inputs)
alg = model.casadi_algebraic(t_sym, y_sym, inputs)
# Set constraints vector in the casadi format
# Constrain the unknowns. 0 (default): no constraint on ui, 1: ui >= 0.0,
# -1: ui <= 0.0, 2: ui > 0.0, -2: ui < 0.0.
constraints = np.zeros_like(model.bounds[0], dtype=int)
# If the lower bound is positive then the variable must always be positive
constraints[model.bounds[0] >= 0] = 1
# If the upper bound is negative then the variable must always be negative
constraints[model.bounds[1] <= 0] = -1
# Set up rootfinder
roots = casadi.rootfinder(
"roots",
"newton",
dict(x=y_alg_sym, p=t_and_inputs_sym, g=alg),
{
**self.extra_options,
"abstol": self.tol,
"constraints": list(constraints[len_rhs:]),
},
)
for idx, t in enumerate(t_eval):
# Evaluate algebraic with new t and previous y0, if it's already close
# enough then keep it
# We can't do this if there are symbolic inputs
if has_symbolic_inputs is False and np.all(
abs(model.casadi_algebraic(t, y0, inputs).full()) < self.tol
):
pybamm.logger.debug(
"Keeping same solution at t={}".format(t * model.timescale_eval)
)
if y_alg is None:
y_alg = y0_alg
else:
y_alg = casadi.horzcat(y_alg, y0_alg)
# Otherwise calculate new y_sol
else:
t_eval_inputs_sym = casadi.vertcat(t, symbolic_inputs)
# Solve
try:
y_alg_sol = roots(y0_alg, t_eval_inputs_sym)
success = True
message = None
# Check final output
y_sol = casadi.vertcat(y0_diff, y_alg_sol)
fun = model.casadi_algebraic(t, y_sol, inputs)
except RuntimeError as err:
success = False
message = err.args[0]
fun = None
# If there are no symbolic inputs, check the function is below the tol
# Skip this check if there are symbolic inputs
if success and (
has_symbolic_inputs is True or np.all(casadi.fabs(fun) < self.tol)
):
# update initial guess for the next iteration
y0_alg = y_alg_sol
y0 = casadi.vertcat(y0_diff, y0_alg)
# update solution array
if y_alg is None:
y_alg = y_alg_sol
else:
y_alg = casadi.horzcat(y_alg, y_alg_sol)
elif not success:
raise pybamm.SolverError(
"Could not find acceptable solution: {}".format(message)
)
else:
raise pybamm.SolverError(
"""
Could not find acceptable solution: solver terminated
successfully, but maximum solution error ({})
above tolerance ({})
""".format(
casadi.mmax(casadi.fabs(fun)), self.tol
)
)
# Concatenate differential part
y_diff = casadi.horzcat(*[y0_diff] * len(t_eval))
y_sol = casadi.vertcat(y_diff, y_alg)
# Return solution object (no events, so pass None to t_event, y_event)
return pybamm.Solution(t_eval, y_sol, termination="success")
# ---- test/win/gyptest-command-quote.py (from chlorm-forks/gyp, BSD-3-Clause license) ----
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure the program in a command can be a called batch file, or an
application in the path. Specifically, this means not quoting something like
"call x.bat", lest the shell look for a program named "call x.bat", rather
than calling "x.bat".
"""
from __future__ import print_function
import TestGyp
import sys
if sys.platform == 'win32':
print("This test is currently disabled: https://crbug.com/483696.")
sys.exit(0)
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'command-quote'
test.run_gyp('command-quote.gyp', chdir=CHDIR)
test.build('command-quote.gyp', 'test_batch', chdir=CHDIR)
test.build('command-quote.gyp', 'test_call_separate', chdir=CHDIR)
test.build('command-quote.gyp', 'test_with_double_quotes', chdir=CHDIR)
test.build('command-quote.gyp', 'test_with_single_quotes', chdir=CHDIR)
# We confirm that this fails because other generators don't handle spaces in
# inputs so it's preferable to not have it work here.
test.build('command-quote.gyp', 'test_with_spaces', chdir=CHDIR, status=1)
CHDIR = 'command-quote/subdir/and/another'
test.run_gyp('in-subdir.gyp', chdir=CHDIR)
test.build('in-subdir.gyp', 'test_batch_depth', chdir=CHDIR)
test.pass_test()
# ---- python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py (from jerrywgz/Paddle, Apache-2.0 license) ----
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from test_elementwise_add_op import *
'''
Some tests differ from the tests defined in test_elementwise_add_op.py
because MKLDNN does not support 3-dimensional tensors.
Such dimensions cause exceptions in MKLDNN reorder primitive.
'''
class TestMKLDNNElementwiseAddOp(TestElementwiseAddOp):
def init_input_output(self):
self.x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)
self.y = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype)
self.out = np.add(self.x, self.y)
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_scalar(TestElementwiseAddOp_scalar):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
self.y = np.random.rand(1).astype(self.dtype)
self.out = self.x + self.y
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_scalar2(TestElementwiseAddOp_scalar2):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
self.y = np.random.rand(1, 1).astype(self.dtype)
self.out = self.x + self.y
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_Vector(TestElementwiseAddOp_Vector):
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_broadcast_0(TestElementwiseAddOp_broadcast_0):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
self.y = np.random.rand(2).astype(self.dtype)
self.out = self.x + self.y.reshape(2, 1, 1, 1)
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_broadcast_1(TestElementwiseAddOp_broadcast_1):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
self.y = np.random.rand(3).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 3, 1, 1)
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_broadcast_2(TestElementwiseAddOp_broadcast_2):
def init_input_output(self):
self.x = np.random.rand(2, 2, 3, 4).astype(self.dtype)
self.y = np.random.rand(4).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 1, 1, 4)
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_broadcast_3(TestElementwiseAddOp_broadcast_3):
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_broadcast_4(TestElementwiseAddOp_broadcast_4):
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_rowwise_add_0(
TestElementwiseAddOp_rowwise_add_0):
def init_input_output(self):
self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype)
self.y = np.random.rand(3, 4).astype(self.dtype)
self.out = self.x + self.y.reshape(1, 3, 4, 1)
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_rowwise_add_1(
TestElementwiseAddOp_rowwise_add_1):
def init_kernel_type(self):
self.use_mkldnn = True
class TestMKLDNNElementwiseAddOp_channelwise_add(
TestElementwiseAddOp_channelwise_add):
def init_input_output(self):
self.x = np.random.rand(3, 5, 20, 20).astype(self.dtype)
self.y = np.random.rand(3, 1, 1, 1).astype(self.dtype)
self.out = self.x + self.y
def init_kernel_type(self):
self.use_mkldnn = True
if __name__ == '__main__':
unittest.main()
# ---- CalibTracker/SiStripDCS/test/CheckAllIOVs.py (from ckamtsikis/cmssw, Apache-2.0 license) ----
#!/usr/bin/env python
#GBenelli Added the /env above to use the python version of CMSSW and run without having to do python <SCRIPT NAME>
""" This script does the following:
1- reads the list of iovs (by timestamp) in a sqlite file or in the Offline DB
2- creates a cfg for each iov and runs them
3- creates 2 log files per IOV (Summary/Debug) with all the SiStripDetVOff information in ASCII format
It is recommended to redirect the output to a file.
"""
from __future__ import print_function
#3- takes the output of each job and builds a single output with the content of each iov
import os
import re
import sys
import time
""" Helper functions for time conversions """
def pack(high,low):
"""pack high,low 32bit unsigned int to one unsigned 64bit long long
Note:the print value of result number may appear signed, if the sign bit is used.
"""
h=high<<32
return (h|low)
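# Worked example for the packing helpers (hand-checked values):
#   pack(1, 0)         == 1 << 32 == 4294967296
#   unpack(4294967296) == (1, 0)
# i.e. `high` occupies the upper 32 bits of the packed 64-bit value.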
def secondsFromString(i):
"""convert from a string in the format output from timeStamptoDate to a 32bit seconds from the epoch.
The format accepted is \"DD/MM/YYYY HH:MM:SS\". The year must be the full number.
"""
return int(time.mktime(time.strptime(i, "%d/%m/%Y %H:%M:%S")))
def packFromString(i):
"""pack from a string in the format output from timeStamptoUTC to a 64bit timestamp
the format accepted is \"DD/MM/YYYY HH:MM:SS\" . The year must be the full number.
"""
return pack(secondsFromString(i), 0)
def intervalSinceEpoch(i):
""" compute the interval of time is seconds since the Epoch and return the packed 64bit value.
"""
return( packFromString(i) - packFromString("01/01/1970 00:00:00") )
def unpack(i):
"""unpack 64bit unsigned long long into 2 32bit unsigned int, return tuple (high,low)
"""
high=i>>32
low=i&0xFFFFFFFF
return(high,low)
def timeStamptoDate(i):
"""convert 64bit timestamp to local date in string format
"""
#GBenelli Add a try: except: to handle the stop time of the last IOV "end of time"
try:
date=time.ctime(unpack(i)[0])
except:
#Handle the case of last IOV (or any IOV) timestamp being "out of range" by returning -1 instead of the date...
print("Could not unpack time stamp %s, unpacked to %s!"%(i,unpack(i)[0]))
date=-1
return date
# The first parameter is the name of the script
if len(sys.argv) < 3:
print("Please provide the name of the sqlite file and the tag as in: ")
print("./CheckAllIOVs.py dbfile.db SiStripDetVOff_Fake_31X")
print("OR to access directly the Offline DB with a time bracket:")
print("./CheckAllIOVs.py CMS_COND_31X_STRIP SiStripDetVOff_v1_offline DD/MM/YYYY HH:MM:SS DD/MM/YYYY HH:MM:SS")
sys.exit(1)
print("Reading all IOVs")
database= sys.argv[1]
#Offline DB case (e.g. user would write ./CheckAllIOVs.py CMS_COND_31X_STRIP SiStripDetVOff_v1_offline):
if "COND" in database and "STRIP" in database:
DBConnection="frontier://PromptProd/"+database
#SQLite DB case (e.g. user would write ./CheckAllIOVs.py dbfile.db SiStripDetVOff_Fake_31X):
else:
DBConnection="sqlite_file:"+database
tag=sys.argv[2]
#GBenelli commit code from Marco to run check on a time interval:
startFrom = 0
if len(sys.argv) > 3:
startFrom = packFromString(sys.argv[3])
endAt = 0
if len(sys.argv) > 4:
endAt = packFromString(sys.argv[4])
#TODO:
#Should use subprocess.Popen...
#Use cmscond_list_iov command to get the full list of IOVs available in the DB
iovs = os.popen("cmscond_list_iov -c "+DBConnection+" -t "+tag)
cmscond_list_iov_output = iovs.readlines()
for line in cmscond_list_iov_output:
print(line)
if "[DB=" in line:
(start,end)=line.split()[0:2]
if long(startFrom) > long(start):
print("Skipping IOV =", start, " before requested =", startFrom)
continue
if (endAt != 0) and (long(endAt) < long(end)):
print("Skipping IOV =", end, " after requested =", endAt)
continue
# print "start =", start,
# print ", end =", end
if long(startFrom) > long(start):
print("Skipping IOV =", start, " before requested =", startFrom)
continue
if (endAt != 0) and (long(endAt) < long(end)):
print("Skipping IOV =", end, " after requested =", endAt)
continue
##TODO:Should we investigate this issue? Is it going to be an issue in the DB?
if end == "18446744073709551615":
end = str(int(start) + 1)
startDate = timeStamptoDate(int(start))
endDate = timeStamptoDate(int(end))
#GBenelli Handle here the case of "end of time" IOV end time stamp
if endDate==-1:
endDate=timeStamptoDate(int(start)+1)
print("start date = ", startDate, end=' ')
print(", end date = ", endDate)
fullDates="_FROM_"+startDate.replace(" ", "_").replace(":", "_")+"_TO_"+endDate.replace(" ", "_").replace(":", "_")
fileName="DetVOffPrint"+fullDates+"_cfg.py"
CfgFile=open(fileName,"w")
#Until the tag is in the release, CMSSW_RELEASE_BASE should be replaced by CMSSW_BASE...
for cfgline in open(os.path.join(os.getenv("CMSSW_RELEASE_BASE"),"src/CalibTracker/SiStripDCS/test","templateCheckAllIOVs_cfg.py"),"r"):
#Do the template replacements for the relevant variables:
if "STARTTIME" in cfgline:
cfgline=cfgline.replace("STARTTIME",start)
elif "ENDTIME" in cfgline:
cfgline=cfgline.replace("ENDTIME",end)
elif "DATE" in cfgline:
cfgline=cfgline.replace("DATE",fullDates)
elif "DATABASECONNECTION" in cfgline:
cfgline=cfgline.replace("DATABASECONNECTION",DBConnection)
elif "TAG" in cfgline:
cfgline=cfgline.replace("TAG",tag)
#Write the line from the template into the cloned cfg.py file:
CfgFile.write(cfgline)
CfgFile.close()
        #Comment this line if you are debugging and don't want cmsRun to run!
os.system("cmsRun "+fileName+" > /dev/null")
for logline in open("DetVOffReaderDebug_"+fullDates+".log", "r"):
if "IOV" in logline or "OFF" in logline or "ON" in logline:
print(logline.strip("\n"))
else:
print(line)
# ---- test/unit/sort_search/utils_test.py (from dclark87/pytools, MIT license) ----
# test/unit/sort_search/utils_test.py
#
# Author: Daniel Clark, 2016
'''
Unit test module to perform testing on utils module
'''
# Import packages
import unittest
class AMergeBTestCase(unittest.TestCase):
'''
TestCase for the sorting module
'''
# Set up test case
def setUp(self):
'''
Initialize test case with attributes
'''
# Init instance attributes
pass
def test_a_merge_b(self):
'''
Test the a_merge_b function is working properly
'''
# Import packages
from pytools.sort_search import utils
# Init variables
a_arr = [1, 3, 5, 7, None, None, None]
b_arr = [2, 4, 6]
ab_sorted = [1, 2, 3, 4, 5, 6, 7]
# Test they are equal
ab_merged = utils.a_merge_b(a_arr, b_arr)
self.assertEqual(ab_merged, ab_sorted)
# Where b needs to be at beg of a
a_arr = [2, 3, 5, 7, None, None, None]
b_arr = [0, 1, 6]
ab_sorted = [0, 1, 2, 3, 5, 6, 7]
# Test they are equal
ab_merged = utils.a_merge_b(a_arr, b_arr)
self.assertEqual(ab_merged, ab_sorted)
class RotArrSearchTestCase(unittest.TestCase):
'''
TestCase for the sorting module
'''
# Set up test case
def setUp(self):
'''
Initialize test case with attributes
'''
# Init instance attributes
pass
def test_rot_arr_search(self):
'''
Test the rot_arr_search function is working properly
'''
# Import packages
from pytools.sort_search import utils
# Init variables
rot_arr = [15, 16, 19, 20, 25, 1, 3, 4, 5, 7, 10, 14]
elem = 5
# Run function
pos = utils.rot_arr_search(rot_arr, elem)
self.assertEqual(pos, rot_arr.index(elem))
if __name__ == '__main__':
    unittest.main()
# ---- arrapi/raws/base.py (from meisnate12/ArrAPI, MIT license) ----
import logging
from abc import ABC, abstractmethod
from json.decoder import JSONDecodeError
from requests import Session
from requests.exceptions import RequestException
from arrapi import ArrException, ConnectionFailure, NotFound, Unauthorized, Invalid
logger = logging.getLogger(__name__)
class BaseRawAPI(ABC):
@abstractmethod
def __init__(self, url, apikey, v1=False, session=None):
self.url = url.rstrip("/")
self.apikey = apikey
self.session = Session() if session is None else session
self.v1 = v1
self.v3 = True
try:
status = self.get_system_status()
except NotFound:
self.v3 = False
status = self.get_system_status()
if "version" not in status or status["version"] is None:
raise ConnectionFailure(f"Failed to Connect to {self.url}")
if v1 is False:
self.v3 = int(status["version"][0]) > 2
def _get(self, path, **kwargs):
""" process get request. """
return self._request("get", path, **kwargs)
def _delete(self, path, json=None, **kwargs):
""" process delete request. """
return self._request("delete", path, json=json, **kwargs)
def _post(self, path, json=None, **kwargs):
""" process post request. """
return self._request("post", path, json=json, **kwargs)
def _put(self, path, json=None, **kwargs):
""" process put request. """
return self._request("put", path, json=json, **kwargs)
def _request(self, request_type, path, json=None, **kwargs):
""" process request. """
url_params = {"apikey": f"{self.apikey}"}
for kwarg in kwargs:
url_params[kwarg] = kwargs[kwarg]
request_url = f"{self.url}/api{'/v1' if self.v1 else '/v3' if self.v3 else ''}/{path}"
if json is not None:
logger.debug(f"Request JSON {json}")
try:
if request_type == "delete":
response = self.session.delete(request_url, json=json, params=url_params)
elif request_type == "post":
response = self.session.post(request_url, json=json, params=url_params)
elif request_type == "put":
response = self.session.put(request_url, json=json, params=url_params)
else:
response = self.session.get(request_url, params=url_params)
except RequestException:
raise ConnectionFailure(f"Failed to Connect to {self.url}")
try:
response_json = response.json()
except JSONDecodeError:
logger.debug(f"Response ({response.status_code} [{response.reason}]) {response.content}")
if response.status_code >= 400:
raise ArrException(f"({response.status_code} [{response.reason}]) {response.content}")
else:
return None
else:
logger.debug(f"Response ({response.status_code} [{response.reason}]) {response_json}")
if response.status_code == 401:
raise Unauthorized(f"({response.status_code} [{response.reason}]) Invalid API Key {response_json}")
elif response.status_code == 404:
raise NotFound(f"({response.status_code} [{response.reason}]) Item Not Found {response_json}")
elif response.status_code == 500 and "message" in response_json and response_json["message"] == "Sequence contains no matching element":
raise Invalid(f"({response.status_code} [{response.reason}]) Invalid option provided")
elif response.status_code >= 400:
if isinstance(response_json, list) and "errorMessage" in response_json[0]:
raise ArrException(f"({response.status_code} [{response.reason}]) {response_json[0]['errorMessage']}")
else:
raise ArrException(f"({response.status_code} [{response.reason}]) {response_json}")
return response_json
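    # Illustrative note (not part of the original module): every endpoint helper
    # below is a thin wrapper around _request(). A concrete client only needs to
    # supply connection details, e.g. (hypothetical subclass, shown as a sketch):
    #
    #   class ExampleRawAPI(BaseRawAPI):
    #       def __init__(self, url, apikey, session=None):
    #           super().__init__(url, apikey, session=session)
    #
    #   raw = ExampleRawAPI("http://localhost:8989", "your-api-key")
    #   raw.get_system_status()  # GET {url}/api/v3/system/status with the apikey parameter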
def get_tag(self, detail=False):
""" GET /tag and GET /tag/detail """
return self._get("tag/detail" if detail and (self.v1 or self.v3) else "tag")
def post_tag(self, label):
""" POST /tag """
return self._post("tag", json={"label": str(label).lower()})
def get_tag_id(self, tag_id, detail=False):
""" GET /tag/{id} and GET /tag/detail/{id} """
return self._get(f"tag/detail/{tag_id}" if detail and (self.v1 or self.v3) else f"tag/{tag_id}")
def put_tag_id(self, tag_id, label):
""" PUT /tag/{id} """
return self._put(f"tag/{tag_id}", json={"id": tag_id, "label": str(label).lower()})
def delete_tag_id(self, tag_id):
""" DELETE /tag/{id} """
return self._delete(f"tag/{tag_id}")
def get_command(self):
""" GET /command """
return self._get("command")
def get_command_id(self, command_id):
""" GET /command/{id} """
return self._get(f"command/{command_id}")
def post_command(self, command, **kwargs):
""" POST /command """
json = {k: v for k, v in kwargs.items()}
json["name"] = command
return self._post("command", json=json)
def get_qualityProfile(self):
"""" GET /qualityProfile for v3 and GET /profile for v2 """
return self._get("qualityProfile" if self.v1 or self.v3 else "profile")
def get_qualityProfileId(self, qualityProfileId):
"""" GET /qualityProfile/qualityProfileId for v3 and GET /profile/qualityProfileId for v2 """
return self._get(f"qualityProfile/{qualityProfileId}" if self.v1 or self.v3 else f"profile/{qualityProfileId}")
def get_rootFolder(self):
""" GET /rootFolder """
return self._get("rootFolder")
def post_rootFolder(self, json):
""" POST /rootFolder """
return self._post("rootFolder", json=json)
def add_rootFolder(self, path):
return self.post_rootFolder({"path": path})
def delete_rootFolder(self, rootFolderID):
self._delete(f"rootFolder/{rootFolderID}")
def get_remotePathMapping(self):
""" GET /remotePathMapping """
return self._get("remotePathMapping")
def get_system_status(self):
""" GET /system/status """
return self._get("system/status")
class BaseRawV1API(BaseRawAPI):
@abstractmethod
def __init__(self, url, apikey, session=None):
super().__init__(url, apikey, v1=True, session=session)
def get_metadataProfile(self):
""" GET /metadataProfile """
return self._get("metadataProfile") | 41.043478 | 148 | 0.614407 |
5805766db9c13e31a01dee5f11ec2703b1ca5a1f | 1,571 | bzl | Python | internal/common/typescript_mock_lib.bzl | DevVersion/rules_nodejs | 96374fdfa94f69731cc184716d194033818c6587 | [
"Apache-2.0"
] | null | null | null | internal/common/typescript_mock_lib.bzl | DevVersion/rules_nodejs | 96374fdfa94f69731cc184716d194033818c6587 | [
"Apache-2.0"
] | null | null | null | internal/common/typescript_mock_lib.bzl | DevVersion/rules_nodejs | 96374fdfa94f69731cc184716d194033818c6587 | [
"Apache-2.0"
] | 1 | 2018-10-04T23:39:45.000Z | 2018-10-04T23:39:45.000Z | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A mock typescript_lib rule.
Allows testing that jasmine_node_test will work with ts_library from
rules_typescript without introducing a circular dependency between
rules_nodejs and rules_typescript repositories.
"""
def _mock_typescript_lib(ctx):
es5_sources = depset()
transitive_decls = depset()
for s in ctx.attr.srcs:
es5_sources = depset([f for f in s.files if f.path.endswith(".js")], transitive = [es5_sources])
transitive_decls = depset([f for f in s.files if f.path.endswith(".d.ts")], transitive = [transitive_decls])
return struct(
runfiles = ctx.runfiles(collect_default=True, collect_data = True),
typescript = struct(
es5_sources = es5_sources,
transitive_declarations = transitive_decls
),
)
mock_typescript_lib = rule(
implementation = _mock_typescript_lib,
attrs = {
"srcs": attr.label_list(allow_files = True),
"data": attr.label_list(allow_files = True, cfg = "data"),
}
)
| 36.534884 | 112 | 0.734564 |
433892edbf8f67419d6410a456c90495550d83cf | 6,798 | py | Python | awx/__init__.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | [
"Apache-2.0"
] | 1 | 2021-09-07T14:53:57.000Z | 2021-09-07T14:53:57.000Z | awx/__init__.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | [
"Apache-2.0"
] | 2 | 2020-02-04T05:01:38.000Z | 2020-02-18T06:44:52.000Z | awx/__init__.py | Avinesh/awx | 6310a2edd890d6062a9f6bcdeb2b46c4b876c2bf | [
"Apache-2.0"
] | 1 | 2020-01-28T05:34:09.000Z | 2020-01-28T05:34:09.000Z | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
from __future__ import absolute_import, unicode_literals
import os
import sys
import warnings
from pkg_resources import get_distribution
__version__ = get_distribution('awx').version
__all__ = ['__version__']
# Check for the presence/absence of "devonly" module to determine if running
# from a source code checkout or release package.
try:
import awx.devonly # noqa
MODE = 'development'
except ImportError: # pragma: no cover
MODE = 'production'
import hashlib
try:
import django
from django.db.backends.base import schema
from django.db.backends.utils import names_digest
HAS_DJANGO = True
except ImportError:
HAS_DJANGO = False
if HAS_DJANGO is True:
# This line exists to make sure we don't regress on FIPS support if we
# upgrade Django; if you're upgrading Django and see this error,
# update the version check below, and confirm that FIPS still works.
# If operating in a FIPS environment, `hashlib.md5()` will raise a `ValueError`,
# but will support the `usedforsecurity` keyword on RHEL and Centos systems.
# Keep an eye on https://code.djangoproject.com/ticket/28401
target_version = '2.2.4'
if django.__version__ != target_version:
raise RuntimeError(
"Django version other than {target} detected: {current}. "
"Overriding `names_digest` is known to work for Django {target} "
"and may not work in other Django versions.".format(target=target_version,
current=django.__version__)
)
try:
names_digest('foo', 'bar', 'baz', length=8)
except ValueError:
def names_digest(*args, length):
"""
Generate a 32-bit digest of a set of arguments that can be used to shorten
identifying names. Support for use in FIPS environments.
"""
h = hashlib.md5(usedforsecurity=False)
for arg in args:
h.update(arg.encode())
return h.hexdigest()[:length]
schema.names_digest = names_digest
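# Illustrative note (not part of the original module): the override above keeps
# Django's index-name digests usable on FIPS-enabled hosts. Usage is unchanged,
# e.g. (hypothetical table/column names):
#
#   suffix = schema.names_digest('main_jobevent', 'job_id', 'counter', length=8)
#   # -> an 8-character hex fragment used when composing index names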
def find_commands(management_dir):
# Modified version of function from django/core/management/__init__.py.
command_dir = os.path.join(management_dir, 'commands')
commands = []
try:
for f in os.listdir(command_dir):
if f.startswith('_'):
continue
elif f.endswith('.py') and f[:-3] not in commands:
commands.append(f[:-3])
elif f.endswith('.pyc') and f[:-4] not in commands: # pragma: no cover
commands.append(f[:-4])
except OSError:
pass
return commands
def oauth2_getattribute(self, attr):
# Custom method to override
# oauth2_provider.settings.OAuth2ProviderSettings.__getattribute__
from django.conf import settings
val = None
if 'migrate' not in sys.argv:
# certain Django OAuth Toolkit migrations actually reference
# setting lookups for references to model classes (e.g.,
# oauth2_settings.REFRESH_TOKEN_MODEL)
# If we're doing an OAuth2 setting lookup *while running* a migration,
# don't do our usual "Configure Tower in Tower" database setting lookup
val = settings.OAUTH2_PROVIDER.get(attr)
if val is None:
val = object.__getattribute__(self, attr)
return val
def prepare_env():
# Update the default settings environment variable based on current mode.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'awx.settings.%s' % MODE)
# Hide DeprecationWarnings when running in production. Need to first load
# settings to apply our filter after Django's own warnings filter.
from django.conf import settings
if not settings.DEBUG: # pragma: no cover
warnings.simplefilter('ignore', DeprecationWarning)
# Monkeypatch Django find_commands to also work with .pyc files.
import django.core.management
django.core.management.find_commands = find_commands
# Monkeypatch Oauth2 toolkit settings class to check for settings
# in django.conf settings each time, not just once during import
import oauth2_provider.settings
oauth2_provider.settings.OAuth2ProviderSettings.__getattribute__ = oauth2_getattribute
# Use the AWX_TEST_DATABASE_* environment variables to specify the test
# database settings to use when management command is run as an external
# program via unit tests.
for opt in ('ENGINE', 'NAME', 'USER', 'PASSWORD', 'HOST', 'PORT'): # pragma: no cover
if os.environ.get('AWX_TEST_DATABASE_%s' % opt, None):
settings.DATABASES['default'][opt] = os.environ['AWX_TEST_DATABASE_%s' % opt]
# Disable capturing all SQL queries in memory when in DEBUG mode.
if settings.DEBUG and not getattr(settings, 'SQL_DEBUG', True):
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import CursorWrapper
BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper(cursor, self)
# Use the default devserver addr/port defined in settings for runserver.
default_addr = getattr(settings, 'DEVSERVER_DEFAULT_ADDR', '127.0.0.1')
default_port = getattr(settings, 'DEVSERVER_DEFAULT_PORT', 8000)
from django.core.management.commands import runserver as core_runserver
original_handle = core_runserver.Command.handle
def handle(self, *args, **options):
if not options.get('addrport'):
options['addrport'] = '%s:%d' % (default_addr, int(default_port))
elif options.get('addrport').isdigit():
options['addrport'] = '%s:%d' % (default_addr, int(options['addrport']))
return original_handle(self, *args, **options)
core_runserver.Command.handle = handle
def manage():
# Prepare the AWX environment.
prepare_env()
# Now run the command (or display the version).
from django.conf import settings
from django.core.management import execute_from_command_line
if len(sys.argv) >= 2 and sys.argv[1] in ('version', '--version'): # pragma: no cover
sys.stdout.write('%s\n' % __version__)
# If running as a user without permission to read settings, display an
# error message. Allow --help to still work.
elif settings.SECRET_KEY == 'permission-denied':
if len(sys.argv) == 1 or len(sys.argv) >= 2 and sys.argv[1] in ('-h', '--help', 'help'):
execute_from_command_line(sys.argv)
sys.stdout.write('\n')
prog = os.path.basename(sys.argv[0])
sys.stdout.write('Permission denied: %s must be run as root or awx.\n' % prog)
sys.exit(1)
else:
execute_from_command_line(sys.argv)
| 40.951807 | 96 | 0.676964 |
2c81e547ecc22f3939caa196a04c8f3c5ec7ce7f | 2,180 | py | Python | virustotal.py | emr4h/virustotalApi | f53cf97a80d50779ef10aa1c107a34c9bf6f0eaf | [
"MIT"
] | null | null | null | virustotal.py | emr4h/virustotalApi | f53cf97a80d50779ef10aa1c107a34c9bf6f0eaf | [
"MIT"
] | null | null | null | virustotal.py | emr4h/virustotalApi | f53cf97a80d50779ef10aa1c107a34c9bf6f0eaf | [
"MIT"
] | null | null | null | import argparse
from pyfiglet import Figlet
import random
import subprocess
import requests
print("\n\n\n")
fontList = ["big","bulbhead","roman","epic","larry3d","speed","nancyj","stampatello","smslant","slscript","serifcap","rounded","puffy","o8","letters","colossal","basic"]
fontType = random.choice(fontList)
f = Figlet(font=fontType)
print(f.renderText('VirusTotal Api'))
print("by emr4h\n")
parser = argparse.ArgumentParser(prog="virustotalApi\n", description="Virustotal API", usage="\n\n Hash Analysis with Virus Total: python3 virustotal.py -p <file_path> \n ")
parser.add_argument("-p","--path", help = "Path of the file to be analyzed")
args = parser.parse_args()
def hashVirusTotal(file):
hash = subprocess.check_output(["md5",file])
hash = hash.split()
hash = hash[3]
hash = hash.decode()
fileHash = str(hash)
params = {}
apiKey = input("\nIf you have a virustotal account, please enter your apikey, you can find your apikey in your profile for free (recommended). \nIf you don't have the apikey proceed without entering anything. A default apikey will be used, but since this key is free, it may be subject to query limitations. \nPlease Enter Your API Key : ")
if(apiKey == ""):
params = {'apikey': "464660c9da6e6cfd9edc41d92d354f7b8b3bfdd76a01d6cfdabc46d6a575bb3b", 'resource': fileHash}
else :
apiKey = str(apiKey)
params = {'apikey': apiKey, 'resource': fileHash}
responseData = requests.get('https://www.virustotal.com/vtapi/v2/file/report', params=params)
jsonData = responseData.json()
responseData = int(jsonData.get('response_code'))
if(responseData == 0):
print ('\nThe file with the ' + fileHash + ' hash number was not found in virustotal\n')
elif(responseData == 1):
if(int(jsonData.get('positives'))) == 0:
print ('\nThe file with the ' + fileHash + ' is not a malware\n')
else:
print ('\nThe file with the ' + fileHash + ' is a malware\n')
else:
print('\nThe hash could not be searched. Please try again later.\n')
if __name__=='__main__':
if(args.path):
hashVirusTotal(args.path) | 40.37037 | 344 | 0.673853 |
bc0abfafa703f781c5961c155a68da5c9e02e027 | 1,869 | py | Python | nova/tests/functional/v3/test_console_auth_tokens.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | 7 | 2015-09-22T11:27:16.000Z | 2015-11-02T12:33:46.000Z | nova/tests/functional/v3/test_console_auth_tokens.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | 2 | 2015-09-07T22:14:46.000Z | 2020-08-12T08:51:56.000Z | nova/tests/functional/v3/test_console_auth_tokens.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | 4 | 2015-09-09T16:48:56.000Z | 2022-03-15T20:52:57.000Z | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo.serialization import jsonutils
from nova.tests.functional.v3 import test_servers
class ConsoleAuthTokensSampleJsonTests(test_servers.ServersSampleBase):
extension_name = "os-console-auth-tokens"
extra_extensions_to_load = ["os-remote-consoles"]
def _get_console_url(self, data):
return jsonutils.loads(data)["console"]["url"]
def _get_console_token(self, uuid):
response = self._do_post('servers/%s/action' % uuid,
'get-rdp-console-post-req',
{'action': 'os-getRDPConsole'})
url = self._get_console_url(response.content)
return re.match('.+?token=([^&]+)', url).groups()[0]
def test_get_console_connect_info(self):
self.flags(enabled=True, group='rdp')
uuid = self._post_server()
token = self._get_console_token(uuid)
response = self._do_get('os-console-auth-tokens/%s' % token)
subs = self._get_regexes()
subs["uuid"] = uuid
subs["host"] = r"[\w\.\-]+"
subs["port"] = "[0-9]+"
subs["internal_access_path"] = ".*"
self._verify_response('get-console-connect-info-get-resp', subs,
response, 200)
| 35.942308 | 78 | 0.64687 |
c708351cade9cfd963da97629b9820c36d0dedcd | 71 | py | Python | Console/Python/MisionTic/Excercises/test1.py | DavidsDvm/Dev_Exercises | 59e12913e987e710558044e27c07b192576167f3 | [
"Unlicense",
"MIT"
] | 1 | 2021-08-30T21:06:52.000Z | 2021-08-30T21:06:52.000Z | Console/Python/MisionTic/Excercises/test1.py | DavidsDvm/Dev_Exercises | 59e12913e987e710558044e27c07b192576167f3 | [
"Unlicense",
"MIT"
] | 1 | 2021-05-18T05:42:02.000Z | 2021-05-18T05:42:02.000Z | Console/Python/MisionTic/Excercises/test1.py | DavidsDvm/Dev_Exercises | 59e12913e987e710558044e27c07b192576167f3 | [
"Unlicense",
"MIT"
] | null | null | null | """
First project in Python
Hello world test
"""
print("Hello world") | 11.833333 | 23 | 0.704225 |
f9b8cce5a5c3fb3e79fba914185381c5640effc8 | 5,498 | py | Python | demo_man/tests/test_simple_game.py | avanwinkle/mpf-examples | 5606013752436f80d29709bcf6abdd171d32b4b4 | [
"MIT"
] | 6 | 2016-02-16T19:54:38.000Z | 2019-02-09T05:57:39.000Z | demo_man/tests/test_simple_game.py | avanwinkle/mpf-examples | 5606013752436f80d29709bcf6abdd171d32b4b4 | [
"MIT"
] | 11 | 2016-04-14T23:56:13.000Z | 2021-11-29T02:43:35.000Z | demo_man/tests/test_simple_game.py | avanwinkle/mpf-examples | 5606013752436f80d29709bcf6abdd171d32b4b4 | [
"MIT"
] | 13 | 2016-03-21T13:40:56.000Z | 2022-02-23T23:33:24.000Z | import os
from mpfmc.tests.FullMpfMachineTestCase import FullMachineTestCase
class TestSimpleGame(FullMachineTestCase):
def get_machine_path(self):
return os.path.abspath(os.path.join(os.path.realpath(__file__), os.pardir, os.pardir))
def test_single_player_game(self):
self.hit_and_release_switch("s_start")
self.advance_time_and_run(.1)
# game should be running
self.assertIsNotNone(self.machine.game)
self.assertEqual(1, self.machine.game.player.ball)
# playfield expects a ball
self.assertEqual(1, self.machine.playfield.available_balls)
# but its not there yet
self.assertEqual(0, self.machine.playfield.balls)
self.advance_time_and_run(3)
# player presses eject
self.hit_and_release_switch("s_ball_launch")
# after 3 its there
self.advance_time_and_run(2)
self.hit_and_release_switch("s_right_ramp_enter")
self.advance_time_and_run(1)
self.assertEqual(1, self.machine.playfield.balls)
self.assertTextOnTopSlide("BALL 1 FREE PLAY")
# make some points
self.hit_and_release_switch("s_left_jet")
self.hit_and_release_switch("s_left_jet")
self.hit_and_release_switch("s_left_jet")
self.hit_and_release_switch("s_left_jet")
self.advance_time_and_run()
self.assertEqual(4 * 75020, self.machine.game.player.score)
self.assertTextOnTopSlide("300,080")
# test the claw
self.hit_switch_and_run("s_elevator_hold", 1)
self.assertEqual("enabled", self.machine.coils["c_claw_motor_right"].hw_driver.state)
self.hit_switch_and_run("s_claw_position_1", 1)
self.assertEqual("disabled", self.machine.coils["c_claw_motor_right"].hw_driver.state)
self.assertEqual("enabled", self.machine.coils["c_elevator_motor"].hw_driver.state)
self.assertEqual("enabled", self.machine.coils["c_claw_magnet"].hw_driver.state)
self.hit_switch_and_run("s_elevator_index", 1)
self.release_switch_and_run("s_elevator_hold", 1)
self.assertEqual("disabled", self.machine.coils["c_elevator_motor"].hw_driver.state)
self.assertEqual("enabled", self.machine.coils["c_claw_magnet"].hw_driver.state)
self.hit_switch_and_run("s_flipper_lower_left", 1)
self.assertEqual("enabled", self.machine.coils["c_claw_motor_left"].hw_driver.state)
self.assertEqual("disabled", self.machine.coils["c_claw_motor_right"].hw_driver.state)
self.hit_and_release_switch("s_ball_launch")
self.advance_time_and_run()
self.assertEqual("disabled", self.machine.coils["c_claw_magnet"].hw_driver.state)
self.assertEqual("disabled", self.machine.coils["c_claw_motor_left"].hw_driver.state)
self.assertEqual("disabled", self.machine.coils["c_claw_motor_right"].hw_driver.state)
self.release_switch_and_run("s_flipper_lower_left", 1)
# ball drains
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices.trough)
# wait for highscore display
self.advance_time_and_run(10)
self.assertEqual(0, self.machine.playfield.balls)
self.assertEqual(2, self.machine.game.player.ball)
self.advance_time_and_run(5)
# player presses eject
self.hit_and_release_switch("s_ball_launch")
# and it should eject a new ball to the pf
self.advance_time_and_run(2)
self.hit_and_release_switch("s_right_ramp_enter")
self.advance_time_and_run(1)
self.assertEqual(1, self.machine.playfield.balls)
# ball drains again
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices.trough)
# wait for highscore display
self.advance_time_and_run(10)
self.assertEqual(0, self.machine.playfield.balls)
self.assertEqual(3, self.machine.game.player.ball)
self.advance_time_and_run(5)
# player presses eject
self.hit_and_release_switch("s_ball_launch")
# and it should eject a new ball to the pf
self.advance_time_and_run(2)
self.hit_and_release_switch("s_right_ramp_enter")
self.advance_time_and_run(1)
self.assertEqual(1, self.machine.playfield.balls)
# ball drains again. game should end
self.machine.default_platform.add_ball_to_device(self.machine.ball_devices.trough)
self.advance_time_and_run(10)
self.mock_event('text_input_high_score_complete')
# enter high score
self.assertSlideOnTop("high_score_enter_initials")
self.hit_and_release_switch("s_flipper_lower_right")
self.hit_and_release_switch("s_flipper_lower_right")
self.hit_and_release_switch("s_start") # C
self.advance_time_and_run()
self.assertTextOnTopSlide("C")
self.hit_and_release_switch("s_flipper_lower_right")
self.hit_and_release_switch("s_start") # CD
self.advance_time_and_run()
self.assertTextOnTopSlide("CD")
self.hit_and_release_switch("s_flipper_lower_right")
self.hit_and_release_switch("s_start") # CDE
self.advance_time_and_run()
self.assertTextOnTopSlide("CDE")
self.hit_and_release_switch("s_start")
self.advance_time_and_run()
self.assertEventCalled('text_input_high_score_complete')
self.advance_time_and_run(10)
self.assertIsNone(self.machine.game)
| 42.292308 | 94 | 0.706439 |
c3ee3eaf4b643888b2149ba9a8ab3d978157d4f1 | 1,559 | py | Python | Day_5/aoc2021_5.py | daryltanwk/AOC2021 | 52709bc57ded64b8ffc1f4ee05ad5b9d67ab11fc | [
"MIT"
] | null | null | null | Day_5/aoc2021_5.py | daryltanwk/AOC2021 | 52709bc57ded64b8ffc1f4ee05ad5b9d67ab11fc | [
"MIT"
] | null | null | null | Day_5/aoc2021_5.py | daryltanwk/AOC2021 | 52709bc57ded64b8ffc1f4ee05ad5b9d67ab11fc | [
"MIT"
] | null | null | null | # Open file for reading
mapOfSea = [[0 for i in range(1000)] for j in range(1000)]
inputFile = open('input_5', 'rt')
for rawLine in inputFile:
line = rawLine.strip().split(" -> ")
start = [int(line[0].split(",")[0]), int(line[0].split(",")[1])]
end = [int(line[1].split(",")[0]), int(line[1].split(",")[1])]
"""
PUZZLE ONE
"""
xDirection = 1 if start[0] < end[0] else -1
yDirection = 1 if start[1] < end[1] else -1
# Check if line is Horizontal (Y value does not change)
if start[1] == end[1]:
currentX = start[0]
for x in range(abs(start[0]-end[0])+1):
mapOfSea[currentX][start[1]] += 1
currentX += xDirection
# Check if line is Vertical (X value does not change)
elif start[0] == end[0]:
currentY = start[1]
for x in range(abs(start[1]-end[1])+1):
mapOfSea[start[0]][currentY] += 1
currentY += yDirection
###
# PUZZLE TWO START - COMMENT OUT THIS SECTION FOR PUZZLE ONE SOLUTION
###
# Handle Diagonals (none of the above cases)
else:
currentX = start[0]
currentY = start[1]
for x in range(abs(start[0]-end[0])+1):
mapOfSea[currentX][currentY] += 1
currentX += xDirection
currentY += yDirection
###
# PUZZLE TWO END - COMMENT OUT THIS SECTION FOR PUZZLE ONE SOLUTION
###
inputFile.close()
puzzleCounter = 0
for a in mapOfSea:
for b in a:
if b >= 2:
puzzleCounter += 1
print("Puzzle Answer:", puzzleCounter)
| 29.415094 | 73 | 0.561257 |
2687d49c16c0ae16b106f6d99ce74f4f8096468e | 6,854 | py | Python | src/main.py | Muhammad-Dah/SimpleHTR | 571967c8985160ff869d950b0ed3154fe6d0d426 | [
"MIT"
] | null | null | null | src/main.py | Muhammad-Dah/SimpleHTR | 571967c8985160ff869d950b0ed3154fe6d0d426 | [
"MIT"
] | null | null | null | src/main.py | Muhammad-Dah/SimpleHTR | 571967c8985160ff869d950b0ed3154fe6d0d426 | [
"MIT"
] | null | null | null | import argparse
import json
import warnings
import cv2
import editdistance
import matplotlib.pyplot as plt
from path import Path
from DataLoaderIAM import DataLoaderIAM, Batch
from Model import Model, DecoderType
from SamplePreprocessor import word_image_preprocess
from spelling_correction import SpellingCorrector
warnings.simplefilter(action='ignore', category=FutureWarning)
class FilePaths:
"""filenames and paths to data"""
fnCharList = '../model/charList.txt'
fnSummary = '../model/summary.json'
fnInfer = '../data/test.png'
fnLineInfer = '../data/lines/4.png'
fnCorpus = '../data/corpus.txt'
def write_summary(charErrorRates, wordAccuracies):
with open(FilePaths.fnSummary, 'w') as f:
json.dump({'charErrorRates': charErrorRates, 'wordAccuracies': wordAccuracies}, f)
def train(model, loader):
"""train NN"""
epoch = 0 # number of training epochs since start
summaryCharErrorRates = []
summaryWordAccuracies = []
    bestCharErrorRate = float('inf')  # best validation character error rate
    noImprovementSince = 0  # number of epochs in which no improvement of character error rate occurred
earlyStopping = 25 # stop training after this number of epochs without improvement
while True:
epoch += 1
print('Epoch:', epoch)
# train
print('Train NN')
loader.trainSet()
while loader.hasNext():
iterInfo = loader.getIteratorInfo()
batch = loader.getNext()
loss = model.trainBatch(batch)
print(f'Epoch: {epoch} Batch: {iterInfo[0]}/{iterInfo[1]} Loss: {loss}')
# validate
charErrorRate, wordAccuracy = validate_words(model, loader)
# write summary
summaryCharErrorRates.append(charErrorRate)
summaryWordAccuracies.append(wordAccuracy)
write_summary(summaryCharErrorRates, summaryWordAccuracies)
# if best validation accuracy so far, save model parameters
if charErrorRate < bestCharErrorRate:
print('Character error rate improved, save model')
bestCharErrorRate = charErrorRate
noImprovementSince = 0
model.save()
else:
            print(f'Character error rate not improved, best so far: {bestCharErrorRate * 100.0}%')
noImprovementSince += 1
# stop training if no more improvement in the last x epochs
if noImprovementSince >= earlyStopping:
print(f'No more improvement since {earlyStopping} epochs. Training stopped.')
break
def validate_words(model, loader):
"""validate NN"""
print('Validate NN')
loader.validationSet()
numCharErr = 0
numCharTotal = 0
numWordOK = 0
numWordTotal = 0
correcter = SpellingCorrector()
while loader.hasNext():
iterInfo = loader.getIteratorInfo()
print(f'Batch: {iterInfo[0]} / {iterInfo[1]}')
batch = loader.getNext()
(recognized, _) = model.inferBatch(batch)
print('Ground truth -> Recognized')
for i in range(len(recognized)):
corrected_pred = correcter.get_correct(recognized[i])
numWordOK += 1 if batch.gtTexts[i] == corrected_pred else 0
numWordTotal += 1
dist = editdistance.eval(corrected_pred, batch.gtTexts[i])
numCharErr += dist
numCharTotal += len(batch.gtTexts[i])
print('[OK]' if dist == 0 else '[ERR:%d]' % dist, '"' + batch.gtTexts[i] + '"', '->',
'"' + corrected_pred + '"')
# print validation result
charErrorRate = numCharErr / numCharTotal
wordAccuracy = numWordOK / numWordTotal
print(f'Character error rate: {charErrorRate * 100.0}%. Word accuracy: {wordAccuracy * 100.0}%.')
return charErrorRate, wordAccuracy
def infer_word(model, fnImg: str):
"""recognize text in image provided by file path"""
img = word_image_preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)
batch = Batch(None, [img])
(recognized, prob) = model.inferBatch(batch, True)
return SpellingCorrector().get_correct(recognized), prob[0]
def plot_experiment_results():
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 5))
checkpoint_path = '../model/summary.json'
with open(checkpoint_path, 'r') as f:
results = json.load(f)
train_loss = results['charErrorRates']
train_accuracy = results['wordAccuracies']
axs[0].set_title('Training Model Char Error Rates')
axs[0].plot(train_loss)
axs[0].set(xlabel="epoch", ylabel="Loss")
axs[0].grid(True)
axs[1].set_title('Training Model Word Accuracies')
axs[1].plot(train_accuracy)
axs[1].set(xlabel="epoch", ylabel=" Accuracy")
axs[1].grid(True)
plt.show()
def main():
"""main function"""
parser = argparse.ArgumentParser()
parser.add_argument('--train', help='train the NN', action='store_true')
parser.add_argument('--validate', help='validate the NN', action='store_true')
parser.add_argument('--decoder', choices=['bestpath', 'beamsearch'], default='bestpath', help='CTC decoder')
parser.add_argument('--batch_size', help='batch size', type=int, default=100)
parser.add_argument('--data_dir', help='directory containing IAM dataset', type=Path, required=False,
default='../data/words')
parser.add_argument('--dump', help='dump output of NN to CSV file(s)', action='store_true')
args = parser.parse_args()
# set chosen CTC decoder
decoderType = DecoderType.BestPath # if args.decoder == 'bestpath':
if args.decoder == 'beamsearch':
decoderType = DecoderType.BeamSearch
# train or validate on IAM dataset
if args.train or args.validate:
# load training data, create TF model
loader = DataLoaderIAM(args.data_dir, args.batch_size, Model.imgSize, Model.maxTextLen)
# save characters of model for inference mode
open(FilePaths.fnCharList, 'w').write(str().join(loader.charList))
# save img contained in dataset into file
open(FilePaths.fnCorpus, 'w').write(str(' ').join(loader.trainWords + loader.validationWords))
# execute training or validation
if args.train:
model = Model(loader.charList, decoderType)
train(model, loader)
elif args.validate:
model = Model(loader.charList, decoderType, mustRestore=True)
validate_words(model, loader)
# infer text on test image
else:
model = Model(open(FilePaths.fnCharList).read(), decoderType, mustRestore=True, dump=args.dump)
recognized, probability = infer_word(model, FilePaths.fnInfer)
print(f'Recognized: "{recognized}"')
print(f'Probability: {probability}')
if __name__ == '__main__':
main()
# plot_experiment_results()
| 36.457447 | 112 | 0.659031 |
1d08afceb3105e4bf0ea91f9551731bc484ae222 | 2,614 | py | Python | sdk/python/pulumi_azure_nextgen/eventgrid/v20190601/list_topic_shared_access_keys.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/eventgrid/v20190601/list_topic_shared_access_keys.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/eventgrid/v20190601/list_topic_shared_access_keys.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListTopicSharedAccessKeysResult',
'AwaitableListTopicSharedAccessKeysResult',
'list_topic_shared_access_keys',
]
@pulumi.output_type
class ListTopicSharedAccessKeysResult:
"""
Shared access keys of the Topic
"""
def __init__(__self__, key1=None, key2=None):
if key1 and not isinstance(key1, str):
raise TypeError("Expected argument 'key1' to be a str")
pulumi.set(__self__, "key1", key1)
if key2 and not isinstance(key2, str):
raise TypeError("Expected argument 'key2' to be a str")
pulumi.set(__self__, "key2", key2)
@property
@pulumi.getter
def key1(self) -> Optional[str]:
"""
Shared access key1 for the topic.
"""
return pulumi.get(self, "key1")
@property
@pulumi.getter
def key2(self) -> Optional[str]:
"""
Shared access key2 for the topic.
"""
return pulumi.get(self, "key2")
class AwaitableListTopicSharedAccessKeysResult(ListTopicSharedAccessKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListTopicSharedAccessKeysResult(
key1=self.key1,
key2=self.key2)
def list_topic_shared_access_keys(resource_group_name: Optional[str] = None,
topic_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicSharedAccessKeysResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: The name of the resource group within the user's subscription.
:param str topic_name: Name of the topic.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['topicName'] = topic_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/v20190601:listTopicSharedAccessKeys', __args__, opts=opts, typ=ListTopicSharedAccessKeysResult).value
return AwaitableListTopicSharedAccessKeysResult(
key1=__ret__.key1,
key2=__ret__.key2)
| 33.512821 | 162 | 0.667942 |
f9a35f812fae2c1968c50be0a53c3533d0d23888 | 863 | py | Python | fpc-compression/uvint.py | clayne/toys | ec06411e2d3b920403607888d4a573e41390ee5b | [
"BSD-2-Clause"
] | null | null | null | fpc-compression/uvint.py | clayne/toys | ec06411e2d3b920403607888d4a573e41390ee5b | [
"BSD-2-Clause"
] | null | null | null | fpc-compression/uvint.py | clayne/toys | ec06411e2d3b920403607888d4a573e41390ee5b | [
"BSD-2-Clause"
] | null | null | null | def encode(x):
assert x >= 0
buf = b''
while True:
if x < 128:
buf += (x | 0x80).to_bytes(1, "little")
break
else:
buf += (x & 0x7f).to_bytes(1, "little")
x >>= 7
return buf
def decode(buf):
x = 0
i = 0
while True:
v = int(buf[i])
x = ((v & 0x7f) << (7*i)) | x
i += 1
if v & 0x80:
return x, i
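# Illustrative note (not part of the original file): this is a little-endian
# base-128 varint in which the HIGH bit marks the *final* byte (the inverse of
# the usual protobuf continuation convention). Worked example:
#
#   encode(300)          -> b'\x2c\x82'  # low 7 bits (44), then remainder 2 with 0x80 set
#   decode(b'\x2c\x82')  -> (300, 2)     # (decoded value, number of bytes consumed)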
def decode_from_file(file):
x = 0
i = 0
while True:
b = file.read(1)
assert len(b) == 1, len(b)
v = int.from_bytes(b, 'little')
x = ((v & 0x7f) << (7*i)) | x
i += 1
if v & 0x80:
return x
if __name__ == '__main__':
for x in range(0, 100000):
enc = encode(x)
dec, length = decode(enc)
assert x == dec
        assert len(enc) == length
| 19.177778 | 51 | 0.418308 |
99bdb0614214462fdcad569249bdbaa5771370b8 | 1,656 | py | Python | pysim/information/entropy.py | jejjohnson/pysim | 4cd5f0987d3cbdeba1c932ca845df1b0bd9d46bf | [
"MIT"
] | 3 | 2021-04-09T06:20:00.000Z | 2021-05-17T13:46:45.000Z | pysim/information/entropy.py | jejjohnson/pysim | 4cd5f0987d3cbdeba1c932ca845df1b0bd9d46bf | [
"MIT"
] | null | null | null | pysim/information/entropy.py | jejjohnson/pysim | 4cd5f0987d3cbdeba1c932ca845df1b0bd9d46bf | [
"MIT"
] | null | null | null | import numpy as np
from scipy import stats
from typing import Union, Optional, Dict
from .histogram import hist_entropy
from .knn import knn_entropy
from .kde import kde_entropy_uni
from .gaussian import gauss_entropy_uni, gauss_entropy_multi
from sklearn.utils import check_array
def univariate_entropy(X: np.ndarray, method: str = "histogram", **kwargs) -> float:
"""Calculates the entropy given the method initiali"""
# check input array
X = check_array(X, ensure_2d=True)
n_samples, n_features = X.shape
msg = "n_features is greater than 1. Please use Multivariate instead."
assert 1 == n_features, msg
if method == "histogram":
return hist_entropy(X, **kwargs)
elif method == "knn":
return knn_entropy(X, **kwargs)
elif method == "kde":
return kde_entropy_uni(X, **kwargs)
elif method in ["gauss", "gaussian"]:
return gauss_entropy_uni(X)
else:
raise ValueError(f"Unrecognized method: {method}")
def marginal_entropy(X: np.ndarray, method: str = "histogram", **kwargs) -> np.ndarray:
# check input array
X = check_array(X, ensure_2d=True)
n_samples, n_features = X.shape
H_entropy = np.empty(n_features)
for i, ifeature in enumerate(X.T):
H_entropy[i] = univariate_entropy(X.T[i][:, None], method, **kwargs)
return H_entropy
def multivariate_entropy(X: np.ndarray, method: str = "knn", **kwargs) -> float:
if method == "knn":
return knn_entropy(X, **kwargs)
elif method in ["gauss", "gaussian"]:
return gauss_entropy_multi(X)
else:
raise ValueError(f"Unrecognized method: {method}")
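# Illustrative usage sketch (not part of the original module); assumes the
# estimator backends imported above are installed:
#
#   import numpy as np
#   X = np.random.randn(500, 3)
#   h_marginal = marginal_entropy(X, method="histogram")  # one estimate per column
#   h_joint = multivariate_entropy(X, method="knn")       # single joint estimate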
| 29.052632 | 87 | 0.676932 |
12da8659caca2dcbd8e981dd7124b52737bff970 | 7,452 | py | Python | tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 36 | 2016-12-17T15:25:25.000Z | 2022-01-29T21:50:53.000Z | tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py | shekharpalit/tensorflow | 6aa83398ab03bfae822f36772757097bcb98b6ed | [
"Apache-2.0"
] | 59 | 2019-06-17T09:37:49.000Z | 2022-01-19T01:21:34.000Z | tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py | shekharpalit/tensorflow | 6aa83398ab03bfae822f36772757097bcb98b6ed | [
"Apache-2.0"
] | 36 | 2017-07-27T21:12:40.000Z | 2022-02-03T16:45:56.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for registration mechanisms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops.linalg import cholesky_registrations # pylint: disable=unused-import
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import matmul_registrations # pylint: disable=unused-import
from tensorflow.python.platform import test
# pylint: disable=protected-access
_ADJOINTS = linear_operator_algebra._ADJOINTS
_registered_adjoint = linear_operator_algebra._registered_adjoint
_CHOLESKY_DECOMPS = linear_operator_algebra._CHOLESKY_DECOMPS
_registered_cholesky = linear_operator_algebra._registered_cholesky
_INVERSES = linear_operator_algebra._INVERSES
_registered_inverse = linear_operator_algebra._registered_inverse
_MATMUL = linear_operator_algebra._MATMUL
_registered_matmul = linear_operator_algebra._registered_matmul
# pylint: enable=protected-access
class AdjointTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
# Register Adjoint to a lambda that spits out the name parameter
@linear_operator_algebra.RegisterAdjoint(CustomLinOp)
def _adjoint(a): # pylint: disable=unused-argument,unused-variable
return "OK"
self.assertEqual("OK", CustomLinOp(dtype=None).adjoint())
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterAdjoint(CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterAdjoint(CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterAdjoint(CustomLinOp)(lambda a: None)
def testExactAdjointRegistrationsAllMatch(self):
for (k, v) in _ADJOINTS.items():
self.assertEqual(v, _registered_adjoint(k[0]))
class CholeskyTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
# Register Cholesky to a lambda that spits out the name parameter
@linear_operator_algebra.RegisterCholesky(CustomLinOp)
def _cholesky(a): # pylint: disable=unused-argument,unused-variable
return "OK"
with self.assertRaisesRegexp(ValueError, "positive definite"):
CustomLinOp(dtype=None, is_self_adjoint=True).cholesky()
with self.assertRaisesRegexp(ValueError, "self adjoint"):
CustomLinOp(dtype=None, is_positive_definite=True).cholesky()
custom_linop = CustomLinOp(
dtype=None, is_self_adjoint=True, is_positive_definite=True)
self.assertEqual("OK", custom_linop.cholesky())
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterCholesky(CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterCholesky(CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterCholesky(CustomLinOp)(lambda a: None)
def testExactCholeskyRegistrationsAllMatch(self):
for (k, v) in _CHOLESKY_DECOMPS.items():
self.assertEqual(v, _registered_cholesky(k[0]))
class MatmulTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
# Register Matmul to a lambda that spits out the name parameter
@linear_operator_algebra.RegisterMatmul(CustomLinOp, CustomLinOp)
def _matmul(a, b): # pylint: disable=unused-argument,unused-variable
return "OK"
custom_linop = CustomLinOp(
dtype=None, is_self_adjoint=True, is_positive_definite=True)
self.assertEqual("OK", custom_linop.matmul(custom_linop))
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterMatmul(CustomLinOp, CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterMatmul(
CustomLinOp, CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterMatmul(
CustomLinOp, CustomLinOp)(lambda a: None)
def testExactMatmulRegistrationsAllMatch(self):
for (k, v) in _MATMUL.items():
self.assertEqual(v, _registered_matmul(k[0], k[1]))
class InverseTest(test.TestCase):
def testRegistration(self):
class CustomLinOp(linear_operator.LinearOperator):
def _matmul(self, a):
pass
def _shape(self):
return tensor_shape.TensorShape([1, 1])
def _shape_tensor(self):
pass
# Register Inverse to a lambda that spits out the name parameter
@linear_operator_algebra.RegisterInverse(CustomLinOp)
def _inverse(a): # pylint: disable=unused-argument,unused-variable
return "OK"
with self.assertRaisesRegexp(ValueError, "singular"):
CustomLinOp(dtype=None, is_non_singular=False).inverse()
self.assertEqual("OK", CustomLinOp(
dtype=None, is_non_singular=True).inverse())
def testRegistrationFailures(self):
class CustomLinOp(linear_operator.LinearOperator):
pass
with self.assertRaisesRegexp(TypeError, "must be callable"):
linear_operator_algebra.RegisterInverse(CustomLinOp)("blah")
# First registration is OK
linear_operator_algebra.RegisterInverse(CustomLinOp)(lambda a: None)
# Second registration fails
with self.assertRaisesRegexp(ValueError, "has already been registered"):
linear_operator_algebra.RegisterInverse(CustomLinOp)(lambda a: None)
def testExactRegistrationsAllMatch(self):
for (k, v) in _INVERSES.items():
self.assertEqual(v, _registered_inverse(k[0]))
if __name__ == "__main__":
test.main()
| 32.973451 | 96 | 0.743022 |
9010d57bb916e33c0bbef2300298af9962c15bd6 | 1,683 | py | Python | energyquantified/api/periods.py | energyquantified/eq-python-client | 3ca8423ae29c4a3dbd8f6289ab76a64b21dabf1c | [
"Apache-2.0"
] | 4 | 2021-03-02T10:08:37.000Z | 2021-11-12T08:18:18.000Z | energyquantified/api/periods.py | mfsorensen/eq-python-client | 6dc65f5c16881b27fc22c9f461440c327997861e | [
"Apache-2.0"
] | 23 | 2020-07-22T13:41:20.000Z | 2021-10-12T09:03:27.000Z | energyquantified/api/periods.py | mfsorensen/eq-python-client | 6dc65f5c16881b27fc22c9f461440c327997861e | [
"Apache-2.0"
] | 2 | 2020-10-01T20:18:08.000Z | 2021-03-02T10:08:41.000Z | from .base import BaseAPI
from ..metadata import CurveType
from ..parser.periodseries import parse_periodseries
# Tuple of supported values for Curve.curve_type in the periods API
CURVE_TYPES = (CurveType.PERIOD,)
class PeriodsAPI(BaseAPI):
"""
Period-based series API operations. Access these operations via an
instance of the :py:class:`energyquantified.EnergyQuantified` class:
>>> eq = EnergyQuantified(api_key="aaaa-bbbb-cccc-dddd")
>>> eq.periods.load(curve, begin, end)
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def load(
self,
curve,
begin=None,
end=None):
"""
Load period-based series data for a curve.
This operation works for curves with ``curve_type = PERIOD`` only.
:param curve: The curve or curve name
:type curve: :py:class:`energyquantified.metadata.Curve`, str
:param begin: The begin date-time
:type begin: date, datetime, str, required
:param end: The end date-time
:type end: date, datetime, str, required
:return: A period-based series
:rtype: :py:class:`energyquantified.data.Periodseries`
"""
# Build URL
safe_curve = self._urlencode_curve_name(curve, curve_types=CURVE_TYPES)
url = f"/periods/{safe_curve}/"
# Parameters
params = {}
self._add_datetime(params, "begin", begin, required=True)
self._add_datetime(params, "end", end, required=True)
# HTTP request
response = self._get(url, params=params)
return parse_periodseries(response.json())
| 32.365385 | 79 | 0.634581 |
a16b5d59e8b281b74e57defb3ed00ef8e4dae89f | 6,853 | py | Python | wrappers/visualising_network_measures.py | FCYtheFreeman/BrainNetworksInPython | 7a43a8021d43b23498afca481f03a0e319f48a56 | [
"MIT"
] | null | null | null | wrappers/visualising_network_measures.py | FCYtheFreeman/BrainNetworksInPython | 7a43a8021d43b23498afca481f03a0e319f48a56 | [
"MIT"
] | null | null | null | wrappers/visualising_network_measures.py | FCYtheFreeman/BrainNetworksInPython | 7a43a8021d43b23498afca481f03a0e319f48a56 | [
"MIT"
] | 1 | 2020-03-03T04:03:08.000Z | 2020-03-03T04:03:08.000Z | #!/usr/bin/env python
#=============================================================================
# Created by Kirstie Whitaker
# on a day when she really should have been replying to emails
# but wanted to make some pretty network pictures instead, April 2017
# Contact: kw401@cam.ac.uk
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
import os
import sys
import argparse
import textwrap
import numpy as np
import pandas as pd
sys.path.append(os.path.join(os.path.dirname(__file__), '../scripts/'))
import make_graphs as mkg
from corrmat_from_regionalmeasures import corrmat_from_regionalmeasures
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser():
'''
Code to read in arguments from the command line
Also allows you to change some settings
'''
# Build a basic parser.
help_text = (('I AM SOME TEXT\n')+
('I AM SOME MORE TEXT.'))
sign_off = 'Author: Kirstie Whitaker <kw401@cam.ac.uk>'
parser = argparse.ArgumentParser(description=help_text,
epilog=sign_off,
formatter_class=argparse.RawTextHelpFormatter)
# Now add the arguments
parser.add_argument(dest='regional_measures_file_A',
type=str,
metavar='regional_measures_file_A',
help=textwrap.dedent(('CSV file that contains regional values for each participant in group A.\n')+
('Column labels should be the region names or covariate variable\n')+
('names. All participants in the file will be included in the\n')+
('correlation matrix.')))
parser.add_argument(dest='regional_measures_file_B',
type=str,
metavar='regional_measures_file_B',
help=textwrap.dedent(('CSV file that contains regional values for each participant in group B.\n')+
('Column labels should be the region names or covariate variable\n')+
('names. All participants in the file will be included in the\n')+
('correlation matrix.')))
parser.add_argument(dest='names_file',
type=str,
metavar='names_file',
help=textwrap.dedent(('Text file that contains the names of each region, in the same\n')+
('order as the two correlation matrices. One region name on each line.')))
parser.add_argument(dest='centroids_file',
type=str,
metavar='centroids_file',
help=textwrap.dedent(('Text file that contains the x, y, z coordinates of each region,\n')+
('in the same order as the two correlation matrices. One set of three\n')+
('coordinates, tab or space delimited, on each line.')))
parser.add_argument(dest='output_dir',
type=str,
metavar='output_dir',
help=textwrap.dedent(('Location in which to save global and nodal measures for the two groups.')))
parser.add_argument('--nameA',
type=str,
metavar='nameA',
help=textwrap.dedent(('Name of group A')),
default='GroupA')
parser.add_argument('--nameB',
type=str,
metavar='nameA',
help=textwrap.dedent(('Name of group B')),
default='GroupB')
parser.add_argument('-c', '--cost',
type=float,
metavar='cost',
help=textwrap.dedent(('Cost at which to threshold the matrix.\n')+
(' Default: 10.0')),
default=10.0)
parser.add_argument('-n', '--n_perm',
type=int,
metavar='n_perm',
help=textwrap.dedent(('Number of permutations of the data to compare with the real groupings.\n')+
(' Default: 1000')),
default=1000)
parser.add_argument('--names_308_style',
action='store_true',
help=textwrap.dedent(('Include this flag if your names are in the NSPN 308\n')+
('parcellation style (which means you have 41 subcortical regions)\n')+
('that are still in the names and centroids files and that\n')+
('the names are in <hemi>_<DK-region>_<part> format.\n')+
(' Default: False')),
default=False)
arguments = parser.parse_args()
return arguments, parser
if __name__ == "__main__":
# Read in the command line arguments
arg, parser = setup_argparser()
# View the correlation matrix
create_real_corrmats(arg.regional_measures_file_A,
arg.nameA,
arg.regional_measures_file_B,
arg.nameB,
arg.names_file,
arg.covars_file,
arg.output_dir,
arg.names_308_style)
network_analysis_from_corrmat(arg.corr_mat_file,
arg.names_file,
arg.centroids_file,
arg.output_dir,
cost=arg.cost,
n_rand=arg.n_rand,
names_308_style=arg.names_308_style)
#=============================================================================
# Wooo! All done :)
#=============================================================================
| 47.262069 | 127 | 0.43076 |
472ba9e0d2d7a06a37987b293bdf6f56b262544c | 515 | py | Python | tests/test_graphs.py | cjauvin/RavenPy | d9671b5a71004bb0501ab64e0e6efbd06d2fa465 | [
"MIT"
] | 12 | 2020-12-07T23:07:13.000Z | 2022-03-08T20:50:58.000Z | tests/test_graphs.py | cjauvin/RavenPy | d9671b5a71004bb0501ab64e0e6efbd06d2fa465 | [
"MIT"
] | 119 | 2020-08-25T08:17:17.000Z | 2022-03-30T16:12:19.000Z | tests/test_graphs.py | cjauvin/RavenPy | d9671b5a71004bb0501ab64e0e6efbd06d2fa465 | [
"MIT"
] | 3 | 2020-12-02T17:33:13.000Z | 2021-08-31T15:39:26.000Z | import numpy as np
import xarray as xr
from xclim.indicators.land import fit, stats
from ravenpy.utilities import graphs
from ravenpy.utilities.testdata import get_local_testdata
def test_ts_fit_graph():
fn = get_local_testdata(
"hydro_simulations/raven-gr4j-cemaneige-sim_hmets-0_Hydrographs.nc"
)
ds = xr.open_dataset(fn)
ts = stats(ds.q_sim, op="max", freq="M")
p = fit(ts)
np.testing.assert_array_equal(p.isnull(), False)
fig = graphs.ts_fit_graph(ts, p)
return fig
| 24.52381 | 75 | 0.720388 |
8b5a94cbda81d3c5dcae05bbbf7877f8ce1c4658 | 1,672 | py | Python | api/ask.py | qh73xe/HowAboutNatume | 8d994a1e16e2153dc200097d8f8b43713d76a3d5 | [
"MIT"
] | null | null | null | api/ask.py | qh73xe/HowAboutNatume | 8d994a1e16e2153dc200097d8f8b43713d76a3d5 | [
"MIT"
] | 7 | 2020-03-24T15:37:48.000Z | 2021-06-01T22:01:22.000Z | api/ask.py | qh73xe/HowAboutNatume | 8d994a1e16e2153dc200097d8f8b43713d76a3d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*
"""生成されたモデルから類義語を返します."""
from typing import List
from tokenizer import get_noun, get_adjective
from logger import getLogger
LOGGER = getLogger('ASK_MODULE')
def get_model(author: str):
"""モデルパスを返します."""
from os import path
from config import MODEL_DIR
from gensim.models.word2vec import Word2Vec
model_name = '.'.join([author, 'word2vec', 'model'])
model_path = path.join(MODEL_DIR, model_name)
return Word2Vec.load(model_path)
def ask(author: str, querys: List[str]) -> List:
"""Get 5 similar words for querys."""
model = get_model(author)
try:
results = model.most_similar(positive=querys, topn=150)
except Exception as e:
msg = "{author} can't answer. (hint: {hint})".format(
author=author, hint=str(e)
)
LOGGER.error(msg)
else:
noun = []
adjective = []
try:
for result in results:
word = result[0]
noun.extend(get_noun(word))
adjective.extend(get_adjective(word))
except Exception as e:
msg = ' '.join([
"Querys have some problems",
"(querys: {querys}, result: {result})".format(
querys=querys, result=results
)
])
LOGGER.error(msg)
else:
return {
'nouns': noun[:3],
'adjective': adjective[:1]
}
if __name__ == "__main__":
import sys
from tokenizer import get_entity
querys = get_entity(sys.argv[1])
print('querys: {}'.format(querys))
print(ask('夏目漱石', querys))
| 26.967742 | 63 | 0.559211 |
f8f79ce95cd08407f87f58604528329e8a0f748d | 663 | py | Python | tasks.py | Memberships-Affiliate-Management-API/admin-portal | 87ce14a265e7119e4889a37e116f565a00f2bf0d | [
"MIT"
] | null | null | null | tasks.py | Memberships-Affiliate-Management-API/admin-portal | 87ce14a265e7119e4889a37e116f565a00f2bf0d | [
"MIT"
] | 1 | 2021-09-06T10:37:34.000Z | 2021-09-06T10:37:34.000Z | tasks.py | Memberships-Affiliate-Management-API/admin-portal | 87ce14a265e7119e4889a37e116f565a00f2bf0d | [
"MIT"
] | 1 | 2021-08-31T15:26:02.000Z | 2021-08-31T15:26:02.000Z | """
**scheduler**
        used to dynamically add jobs on a separate thread to complete tasks that should not interfere
        with requests, or requests that take a long time to complete
"""
__developer__ = "mobius-crypt"
__email__ = "mobiusndou@gmail.com"
__twitter__ = "@blueitserver"
__github_repo__ = "https://github.com/freelancing-solutions/memberships-and-affiliate-api"
__github_profile__ = "https://github.com/freelancing-solutions/"
from backend.src.scheduler.scheduler import task_scheduler
def run_tasks():
print(f'running tasks...')
task_scheduler.run_all(delay_seconds=5)
task_scheduler.clear()
print('done running tasks')
| 33.15 | 101 | 0.746606 |
2fa3b69ca38531af1117f6a7d2f3d75c4a16bd9c | 2,844 | py | Python | mmcv/ops/corner_pool.py | frankier/mmcv | 0970ae94c21a162472b3d3d2ae75824e0d7c6698 | [
"Apache-2.0"
] | null | null | null | mmcv/ops/corner_pool.py | frankier/mmcv | 0970ae94c21a162472b3d3d2ae75824e0d7c6698 | [
"Apache-2.0"
] | null | null | null | mmcv/ops/corner_pool.py | frankier/mmcv | 0970ae94c21a162472b3d3d2ae75824e0d7c6698 | [
"Apache-2.0"
] | null | null | null | from torch import nn
from torch.autograd import Function
from ..utils import ext_loader
ext_module = ext_loader.load_ext('_ext', [
'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward',
'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward',
'right_pool_forward', 'right_pool_backward'
])
class TopPoolFunction(Function):
@staticmethod
def forward(ctx, input):
output = ext_module.top_pool_forward(input)
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
output = ext_module.top_pool_backward(input, grad_output)
return output
class BottomPoolFunction(Function):
@staticmethod
def forward(ctx, input):
output = ext_module.bottom_pool_forward(input)
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
output = ext_module.bottom_pool_backward(input, grad_output)
return output
class LeftPoolFunction(Function):
@staticmethod
def forward(ctx, input):
output = ext_module.left_pool_forward(input)
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
output = ext_module.left_pool_backward(input, grad_output)
return output
class RightPoolFunction(Function):
@staticmethod
def forward(ctx, input):
output = ext_module.right_pool_forward(input)
ctx.save_for_backward(input)
return output
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
output = ext_module.right_pool_backward(input, grad_output)
return output
class CornerPool(nn.Module):
"""Corner Pooling.
Corner Pooling is a new type of pooling layer that helps a
convolutional network better localize corners of bounding boxes.
Please refer to https://arxiv.org/abs/1808.01244 for more details.
Code is modified from https://github.com/princeton-vl/CornerNet-Lite.
Args:
        mode (str): Pooling orientation for the pooling layer
- 'bottom': Bottom Pooling
- 'left': Left Pooling
- 'right': Right Pooling
- 'top': Top Pooling
Returns:
Feature map after pooling.
"""
pool_functions = {
'bottom': BottomPoolFunction,
'left': LeftPoolFunction,
'right': RightPoolFunction,
'top': TopPoolFunction,
}
def __init__(self, mode):
super(CornerPool, self).__init__()
assert mode in self.pool_functions
self.corner_pool = self.pool_functions[mode]
def forward(self, x):
return self.corner_pool.apply(x)
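# --- Editor's hedged usage sketch (not part of the original mmcv file) ---
# CornerPool takes an N x C x H x W feature map, applies a directional max
# along one spatial axis and returns a tensor of the same shape. Running it
# requires an mmcv build whose compiled `_ext` ops are importable.
#
#     import torch
#     from mmcv.ops import CornerPool
#     feat = torch.rand(2, 8, 16, 16)   # N, C, H, W
#     out = CornerPool('top')(feat)     # same shape as `feat`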
| 26.333333 | 73 | 0.671941 |
a59463a0443aeeebb30b3013e76f0a1aa54d6842 | 2,602 | py | Python | tests/pipfile.py | chachabooboo/king-phisher | 8a91b9043de0f12b5cad9a5f1d64ebd0179a6c4d | [
"BSD-3-Clause"
] | 1,143 | 2015-01-12T15:05:16.000Z | 2020-04-12T16:10:19.000Z | tests/pipfile.py | chachabooboo/king-phisher | 8a91b9043de0f12b5cad9a5f1d64ebd0179a6c4d | [
"BSD-3-Clause"
] | 399 | 2015-01-22T15:20:03.000Z | 2020-04-08T23:01:46.000Z | tests/pipfile.py | chachabooboo/king-phisher | 8a91b9043de0f12b5cad9a5f1d64ebd0179a6c4d | [
"BSD-3-Clause"
] | 351 | 2015-02-02T21:39:38.000Z | 2020-03-21T11:45:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/pipfile.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import os
import unittest
from king_phisher import testing
class PipfileLockTests(testing.KingPhisherTestCase):
pipfile_lock_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'Pipfile.lock'))
def test_blacklisted_packages_are_not_present(self):
with open(self.pipfile_lock_path, 'r') as file_h:
pipfile_lock = json.load(file_h)
meta = pipfile_lock.get('_meta', {})
self.assertEqual(meta.get('pipfile-spec'), 6, msg="incompatible specification version, this test must be reviewed")
packages = pipfile_lock.get('default', {})
self.assertIsNotEmpty(packages)
# a list of packages to blacklist from the default group
blacklisted_package_names = (
'alabaster',
'sphinx',
'sphinx-rtd-theme',
'sphinxcontrib-websupport'
)
for package_name in blacklisted_package_names:
message = "blacklisted package '{}' found in the Pipfile.lock default group".format(package_name)
self.assertNotIn(package_name, packages, msg=message)
if __name__ == '__main__':
unittest.main()
| 42.655738 | 117 | 0.7598 |
e3d11d5f9a3c1365b5508e126be8cc73f8724fa0 | 59 | py | Python | tests/unit/strres/__init__.py | RaenonX/Jelly-Bot-API | c7da1e91783dce3a2b71b955b3a22b68db9056cf | [
"MIT"
] | 5 | 2020-08-26T20:12:00.000Z | 2020-12-11T16:39:22.000Z | tests/unit/strres/__init__.py | RaenonX/Jelly-Bot | c7da1e91783dce3a2b71b955b3a22b68db9056cf | [
"MIT"
] | 234 | 2019-12-14T03:45:19.000Z | 2020-08-26T18:55:19.000Z | tests/unit/strres/__init__.py | RaenonX/Jelly-Bot-API | c7da1e91783dce3a2b71b955b3a22b68db9056cf | [
"MIT"
] | 2 | 2019-10-23T15:21:15.000Z | 2020-05-22T09:35:55.000Z | from .extutils import * # noqa
from .mdb import * # noqa
| 19.666667 | 31 | 0.661017 |
f0546748b4d806348eec388c4393a16659c4f93e | 16,293 | py | Python | build/lib/lsclib/run.py | duncanesmith/lsclib | 4cffeb231610d2ed43270b7ec6bdadd3a3f9a2ba | [
"MIT"
] | 6 | 2021-07-16T06:32:26.000Z | 2022-01-23T03:12:23.000Z | build/lib/lsclib/run.py | duncanesmith/lsclib | 4cffeb231610d2ed43270b7ec6bdadd3a3f9a2ba | [
"MIT"
] | null | null | null | build/lib/lsclib/run.py | duncanesmith/lsclib | 4cffeb231610d2ed43270b7ec6bdadd3a3f9a2ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 11 16:14:00 2019
@author: smithd24
"""
import math
import time
from lsclib import xls_read_interpolate as xlsread
from lsclib import lsc_classes as lsccls
import cProfile, pstats
from lsclib import external_equations as eqn
import pandas as pd
def wedge(trials, Einc = 451.313, light_form = 'direct',
results = 'single', theta_o = .000001, phi_o = .000001):
"""Set up geometry to run trials for a wedge-shaped LSC. Specify
    vertices that make up the LSC. Each vertex will belong to a boundary and
each boundary will belong to a volume. Using the LSC classes that have
been defined, attribute characteristics to each boundary and volume.
Parameters
----------
trials : int
Indicate number of bundles that will be used for the program
Einc : float
Indicates incident irradiance on the LSC. Default is 451.313 based upon
the irradiance of the Newport Solar Simulator used for experiments.
light_form : string
Determines distribution of light entering the LSC.
'direct' - light enters at a fixed angle
'diffuse' - light enters with a Lambertian distribution
'ground' - light enters with a modified Lambertian distribution due to
the relative angle up from the ground
results : string
        Determines whether a single result or a matrix of incidence angle
        combinations is desired
theta_o : float, optional
        Initial polar incidence angle. Default is effectively zero (.000001). This is the polar angle
relative to the LSC normal.
phi_o : float, optional
        Initial azimuthal incidence angle. Default is effectively zero (.000001). This is the azimuth
angle relative to the LSC normal.
Returns
-------
LSC : object
LSC object is returned after program has run. The LSC object will have
the optical efficiency, short circuit current (for a cell within an
LSC and a bare solar cell), and spectral mismatch factor available as
attributes among a variety of other simulation results.
Notes
-----
Having this written as one large script might not be the best. There are
a large variety of inputs, so, instead of inputting them all here, these
could be inputs to particular functions that make up "lsc_main". This would
also demonstrate that a user is not limited to only the configuration
detailed in the main script shown here.
Phosphor particle should be added before iterating through incidence angle
combinations.
    Starting volume/boundary process should be improved.
errorcounts should be replaced by a for loop that runs automatically
"""
# Initialize wedge-shaped geometry. Coordinates of individual vertices are
# determined before they are assigned to individual boundaries.
Height = .007
Film_height = .001
Short_side_pos = 0 # distance up from zero to the bottom point of the
# short side of a wedge-shaped LSC
Top_length = .05
Mirror_gap_length = .000001
W = .022
precision = 16
hypotenuse = math.sqrt(Short_side_pos**2 + Top_length**2)
angle = math.acos(Top_length/hypotenuse)
L0 = 0
H0_0 = 0
H0_1 = Film_height
H0_2 = Height
L1 = Mirror_gap_length*math.cos(angle)
H1_0 = Mirror_gap_length*math.sin(angle)
H1_1 = H1_0 + Film_height
L2 = Top_length
H2_0 = Short_side_pos
H2_1 = H2_0 + Film_height
H2_2 = Height
L1 = round(L1, precision)
H1_0 = round(H1_0, precision)
H1_1 = round(H1_1, precision)
H2_1 = round(H2_1, precision)
L = Top_length
H = Height
# read in various excel data tables
[abs_matrix, EQE_pv, IQE_pv, emi_source,
abs_particle, emi_particle] = xlsread.excel_read()
EQE_pv, EQE_pv_max = xlsread.spline(EQE_pv)
IQE_pv, IQE_pv_max = xlsread.spline(IQE_pv)
emi_source, emi_source_max = xlsread.spline(emi_source)
abs_particle, abs_particle_max = xlsread.spline(abs_particle)
emi_particle, emi_particle_max = xlsread.spline(emi_particle)
# establish particle characteristics
wave_len_min = 270 # minimum wavelength that can be absorbed by a particle
wave_len_max = 500 # maximum wavelength that can be absorbed by a particle
qe = 0.75 # quantum efficiency of a particle
poa = .2 # probability of particle absorption (exp. value)
extinction = 4240 # extinction coefficient (42.4 cm^-1)
# establish matrix characteristics
IoR = eqn.IoR_Sellmeier # set index of refraction as constant or eqn
abs_matrix, abs_matrix_max = xlsread.spline(abs_matrix)
wave_len_min_matrix = 229 # minimum wavelength absorbed by matrix
wave_len_max_matrix = 1100 # maximum wavelength absorbed by matrix
# establish solar cell characteristics
wave_len_min_pv = 350 # minimum wavelength absorbed by pv
wave_len_max_pv = 1100 # maximum wavelength absorbed by pv
# if running a combination of many theta_o and phi_o
if light_form == 'direct' and results == 'matrix':
data = {'': [0.001, 15, 30, 45, 60, 75],
0: [0, 0, 0, 0, 0, 0],
15: [0, 0, 0, 0, 0, 0],
30: [0, 0, 0, 0, 0, 0],
45: [0, 0, 0, 0, 0, 0],
60: [0, 0, 0, 0, 0, 0],
75: [0, 0, 0, 0, 0, 0],
89.999: [0, 0, 0, 0, 0, 0]}
# Convert the dictionary into DataFrame
df = pd.DataFrame(data)
df.set_index('', inplace = True)
theta_loop_count = len(df.index)
phi_loop_count = len(df.columns)
# if expecting just one combination of inputs
if results == 'single':
theta_loop_count = 1
phi_loop_count = 1
for j in range(phi_loop_count):
for i in range(theta_loop_count):
start_time = time.time()
lsc = lsccls.LSC() # initialize LSC class
# add phosphor particle
particle = lsccls.Particle(poa, extinction, wave_len_min,
wave_len_max, qe, abs_particle,
emi_particle, emi_particle_max)
# define dimensions/characteristics of Volume 0 - mirror gap
# input boundaries by setting boundary points of each
bdy0a = [[0, L0, H0_0], [0, L1, H1_0], [W, L1, H1_0], [W, L0, H0_0]]
bdy0b = [[0, L1, H1_0], [0, L1, H1_1], [W, L1, H1_1], [W, L1, H1_0]]
bdy0c = [[0, L0, H0_1], [0, L1, H1_1], [W, L1, H1_1], [W, L0, H0_1]]
bdy0d = [[0, L0, H0_0], [0, L0, H0_1], [W, L0, H0_1], [W, L0, H0_0]]
bdy0e = [[0, L0, H0_0], [0, L1, H1_0], [0, L1, H1_1], [0, L0, H0_1]]
bdy0f = [[W, L0, H0_0], [W, L1, H1_0], [W, L1, H1_1], [W, L0, H0_1]]
bdys0 = [bdy0a, bdy0b, bdy0c, bdy0d, bdy0e, bdy0f]
lsc.vol_list.append(lsccls.AbsorbingVolume(bdys0, 0, lsc, IoR,
abs_matrix,
wave_len_min_matrix,
wave_len_max_matrix))
# add bottom surface
lsc[0].bdy_list.append(lsccls.OpaqueBoundary(bdys0[0], lsc[0],
'specular', .05))
# add right interface with film
lsc[0].bdy_list.append(lsccls.TransparentBoundary(bdys0[1],
lsc[0]))
# add interface with rest of matrix
lsc[0].bdy_list.append(lsccls.TransparentBoundary(bdys0[2],
lsc[0]))
# add left solar cell
lsc[0].bdy_list.append(lsccls.PVBoundary(bdys0[3], lsc[0],
'diffuse', EQE_pv ,
0, wave_len_min_pv,
wave_len_max_pv))
# add front mirror
lsc[0].bdy_list.append(lsccls.OpaqueBoundary(bdys0[4], lsc[0],
'specular', .05))
# add back mirror
lsc[0].bdy_list.append(lsccls.OpaqueBoundary(bdys0[5], lsc[0],
'specular', .05))
# define dimensions/characteristics of Volume 1 - phosphor film
# input boundaries by setting boundary points of each
bdy1a = [[0, L1, H1_0], [0, L2, H2_0], [W, L2, H2_0], [W, L1, H1_0]]
bdy1b = [[0, L2, H2_0], [0, L2, H2_1], [W, L2, H2_1], [W, L2, H2_0]]
bdy1c = [[0, L1, H1_1], [0, L2, H2_1], [W, L2, H2_1], [W, L1, H1_1]]
bdy1d = [[0, L1, H1_0], [0, L1, H1_1], [W, L1, H1_1], [W, L1, H1_0]]
bdy1e = [[0, L1, H1_0], [0, L2, H2_0], [0, L2, H2_1], [0, L1, H1_1]]
bdy1f = [[W, L1, H1_0], [W, L2, H2_0], [W, L2, H2_1], [W, L1, H1_1]]
bdys1 = [bdy1a, bdy1b, bdy1c, bdy1d, bdy1e, bdy1f]
lsc.vol_list.append(lsccls.ParticleVolume(bdys1, 1, lsc, IoR,
abs_matrix, particle,
wave_len_min_matrix,
wave_len_max_matrix))
# add bottom surface
lsc[1].bdy_list.append(lsccls.OpaqueBoundary(bdys1[0], lsc[1],
'specular', .05))
# add right mirror
lsc[1].bdy_list.append(lsccls.OpaqueBoundary(bdys1[1], lsc[1],
'specular', .05))
# add top surface
lsc[1].bdy_list.append(lsccls.TransparentBoundary(bdys1[2], lsc[1]))
# add left interface with mirror gap
lsc[1].bdy_list.append(lsccls.TransparentBoundary(bdys1[3], lsc[1]))
# add front mirror
lsc[1].bdy_list.append(lsccls.OpaqueBoundary(bdys1[4], lsc[1],
'specular', .05))
# add back mirror
lsc[1].bdy_list.append(lsccls.OpaqueBoundary(bdys1[5], lsc[1],
'specular', .05))
# define dimensions/characteristics of Volume 2 - rest of matrix
# input boundaries by setting boundary points of each
bdy2a = [[0, L0, H0_1], [0, L2, H2_1], [W, L2, H2_1], [W, L0, H0_1]]
bdy2b = [[0, L2, H2_1], [0, L2, H2_2], [W, L2, H2_2], [W, L2, H2_1]]
bdy2c = [[0, L0, H0_2], [0, L2, H2_2], [W, L2, H2_2], [W, L0, H0_2]]
bdy2d = [[0, L0, H0_1], [0, L0, H0_2], [W, L0, H0_2], [W, L0, H0_1]]
bdy2e = [[0, L0, H0_1], [0, L2, H2_1], [0, L2, H2_2], [0, L0, H0_2]]
bdy2f = [[W, L0, H0_1], [W, L2, H2_1], [W, L2, H2_2], [W, L0, H0_2]]
bdys2 = [bdy2a, bdy2b, bdy2c, bdy2d, bdy2e, bdy2f]
# define volume
lsc.vol_list.append(lsccls.AbsorbingVolume(bdys2, 2, lsc, IoR,
abs_matrix,
wave_len_min_matrix,
wave_len_max_matrix))
# add interface with mirror gap and phosphor film
lsc[2].bdy_list.append(lsccls.TransparentBoundary(bdys2[0],
lsc[2]))
# add right mirror
lsc[2].bdy_list.append(lsccls.OpaqueBoundary(bdys2[1], lsc[2],
'specular', .05))
# add top surface
lsc[2].bdy_list.append(lsccls.TransparentBoundary(bdys2[2],
lsc[2]))
# add solar cell
lsc[2].bdy_list.append(
lsccls.PVBoundary(bdys2[3], lsc[2], 'diffuse', EQE_pv , 0,
wave_len_min_pv, wave_len_max_pv))
# add front mirror
lsc[2].bdy_list.append(lsccls.OpaqueBoundary(bdys2[4], lsc[2],
'specular', .05))
# add back mirror
lsc[2].bdy_list.append(lsccls.OpaqueBoundary(bdys2[5], lsc[2],
'specular', .05))
# Prepare data inputs for LSC simulation
if light_form == 'direct' and results == 'matrix':
theta_o = df.index[i]
phi_o = df.columns[j]
lsc.matching_pairs()
I = Einc*math.cos(math.radians(theta_o))*(L*W)
theta_o = math.radians(theta_o + 180) # adjust theta to head down
phi_o = math.radians(phi_o + 90) # adjust phi
# Run LSC trials, determining fate of every bundle
starting_vol = len(lsc) - 1
starting_bdy = 2
lsc.main(trials, L, W, H, light_form, theta_o,
phi_o, starting_vol, starting_bdy, I,
emi_source, emi_source_max, particle)
# Process data outputs from all LSC trials
# determine if all bundles in volume 0 are accounted for
errorcount0 = (lsc[0].bundles_absorbed +
lsc[0][0].bundles_absorbed +
lsc[0][1].bundles_reflected +
lsc[0][1].bundles_refracted +
lsc[0][2].bundles_reflected +
lsc[0][2].bundles_refracted +
lsc[0][3].bundles_absorbed +
lsc[0][4].bundles_absorbed +
lsc[0][5].bundles_absorbed)
# determine if all bundles in volume 1 are accounted for
errorcount1 = (lsc[1].bundles_absorbed +
lsc[1][0].bundles_absorbed +
lsc[1][1].bundles_absorbed +
lsc[1][2].bundles_reflected +
lsc[1][2].bundles_refracted +
lsc[1][3].bundles_reflected +
lsc[1][3].bundles_refracted +
lsc[1][4].bundles_absorbed +
lsc[1][5].bundles_absorbed +
particle.bundles_absorbed)
# determine if all bundles in volume 2 are accounted for
errorcount2 = (lsc[2].bundles_absorbed +
lsc[2][0].bundles_reflected +
lsc[2][0].bundles_refracted +
lsc[2][1].bundles_absorbed +
lsc[2][2].bundles_reflected +
lsc[2][2].bundles_refracted +
lsc[2][3].bundles_absorbed +
lsc[2][4].bundles_absorbed +
lsc[2][5].bundles_absorbed)
error = (errorcount0 + errorcount1 + errorcount2)/trials
if error != 1:
print("\nENERGY IS NOT CONSERVED!!!!!")
if results == 'matrix':
df.iloc[i,j] = lsc
if results == 'matrix':
writer = pd.ExcelWriter('LSC_data.xlsx')
df.to_excel(writer,'Sheet1')
writer.save()
lsc = df
else:
print(time.time() - start_time)
print(light_form)
print(lsc.optical_efficiency)
return lsc
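# Editor's note (hedged sketch, not part of the original module): the commented
# block below calls LSC_wedge_main, which is not defined in this module; with
# the current wedge() signature an equivalent direct call would look something
# like
#
#     lsc = wedge(10000, light_form='direct', results='single',
#                 theta_o=15, phi_o=0)
#     print(lsc.optical_efficiency)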
# if __name__ == '__main__':
# LSC_wedge_main(1000)
# pr = cProfile.Profile()
# pr.enable()
# LSC_wedge_main(1000)
# pr.disable()
# pr.dump_stats('prof_data')
# ps = pstats.Stats('prof_data')
# ps.sort_stats(pstats.SortKey.CUMULATIVE)
# ps.print_stats() | 45.511173 | 80 | 0.515682 |
bf09e83dc4252364b9c9f98e0d5481452e0454c3 | 3,089 | py | Python | qtpyvcp/widgets/qtdesigner/designer_plugin.py | Lcvette/qtpyvcp | 4143a4a4e1f557f7d0c8998c886b4a254f0be60b | [
"BSD-3-Clause-LBNL",
"MIT"
] | 1 | 2020-09-27T15:46:26.000Z | 2020-09-27T15:46:26.000Z | qtpyvcp/widgets/qtdesigner/designer_plugin.py | adargel/qtpyvcp | 2fcb9c26616ac4effa8d92befa9e1c00a80daafa | [
"BSD-3-Clause-LBNL",
"MIT"
] | null | null | null | qtpyvcp/widgets/qtdesigner/designer_plugin.py | adargel/qtpyvcp | 2fcb9c26616ac4effa8d92befa9e1c00a80daafa | [
"BSD-3-Clause-LBNL",
"MIT"
] | null | null | null | from qtpy.QtGui import QIcon
from qtpy.QtDesigner import QPyDesignerCustomWidgetPlugin
from plugin_extension import ExtensionFactory, Q_TYPEID
from designer_hooks import DesignerHooks
from rules_editor import RulesEditorExtension
class _DesignerPlugin(QPyDesignerCustomWidgetPlugin):
group_name = None
def __init__(self, parent=None):
super(_DesignerPlugin, self).__init__(parent=parent)
self.initialized = False
self.manager = None
# This MUST be overridden to return the widget class
def pluginClass(self):
raise NotImplementedError()
def designerExtensions(self):
if hasattr(self.pluginClass(), 'RULE_PROPERTIES'):
return [RulesEditorExtension,]
else:
return []
    # Override to set the default widget name used in QtDesigner
def objectName(self):
name = self.name().lower()
if name.startswith('vcp'):
name = name[3:]
return name
    # Override to set the tooltip displayed in the QtDesigner widget box
def toolTip(self):
return ""
    # Override to set the 'what's this' help text in QtDesigner
def whatsThis(self):
return ""
# Override to specify that widgets can be added as children in QtDesigner
def isContainer(self):
return False
# Override to set the icon used for the widget in QtDesigner
def icon(self):
return QIcon()
# Override to set the QtDesigner widget box group heading
def group(self):
if self.group_name is None:
try:
tmp = self.pluginClass().__module__.split('.')[2].split('_')[0].capitalize()
return "QtPyVCP - {}".format(tmp)
except:
return "QtPyVCP - Undefined"
else:
return self.group_name
# Override to set initial QtDesigner property values
def domXml(self):
return '<widget class="{}" name="{}">\n</widget>\n'.format(
self.name(), self.objectName())
#==============================================================================
# These methods should not need to be overridden
#==============================================================================
def initialize(self, form_editor):
if self.initialized:
return
designer_hooks = DesignerHooks()
designer_hooks.form_editor = form_editor
self.manager = form_editor.extensionManager()
if len(self.designerExtensions()) > 0 and self.manager:
factory = ExtensionFactory(parent=self.manager)
self.manager.registerExtensions(factory,
Q_TYPEID['QDesignerTaskMenuExtension'])
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
w = self.pluginClass()(parent)
w.extensions = self.designerExtensions()
return w
def name(self):
return self.pluginClass().__name__
def includeFile(self):
return self.pluginClass().__module__
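# Editor's hedged example (not part of the original qtpyvcp module): a concrete
# plugin only has to override pluginClass(). QLabel is used purely for
# illustration; a real VCP plugin would return one of the qtpyvcp widget
# classes instead.
class _ExampleLabelPlugin(_DesignerPlugin):
    def pluginClass(self):
        from qtpy.QtWidgets import QLabel
        return QLabel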
| 30.89 | 92 | 0.606669 |
5845cc1f638688395696d9c7c4ad1930ae76d24a | 596 | py | Python | uiflow/main.py | stanlaw7/aaatimer | adac362119b0c3be72bc0531564ca59b8ee04863 | [
"CC0-1.0"
] | 2 | 2020-02-17T17:27:21.000Z | 2021-03-26T18:18:55.000Z | uiflow/main.py | hfcs/aaatimer | adac362119b0c3be72bc0531564ca59b8ee04863 | [
"CC0-1.0"
] | null | null | null | uiflow/main.py | hfcs/aaatimer | adac362119b0c3be72bc0531564ca59b8ee04863 | [
"CC0-1.0"
] | null | null | null | from m5stack import *
from m5ui import *
from uiflow import *
setScreenColor(0xffffff)
image0 = M5Img(120, 71, "res/logo.png", True)
labelTimer = M5TextBox(36, 210, "Timer", lcd.FONT_DejaVu18, 0x000000, rotate=0)
labelPar = M5TextBox(202, 210, "Par Time", lcd.FONT_DejaVu18, 0x000000, rotate=0)
def buttonA_wasPressed():
# global params
execfile("apps/timer.py")
pass
btnA.wasPressed(buttonA_wasPressed)
def buttonC_wasPressed():
# global params
execfile("apps/par_time.py")
pass
btnC.wasPressed(buttonC_wasPressed)
labelTimer.show()
labelPar.show()
while True:
wait_ms(2) | 19.225806 | 81 | 0.741611 |
b7a4bbf0351a53fa70ea73228e3d99bf2e128548 | 5,649 | py | Python | docs/conf.py | uutzinger/Adafruit_CircuitPython_HTU21D | afd8d06a7220cc200b57ee03f9c12854a6b1b3fc | [
"Unlicense",
"MIT-0",
"MIT"
] | 2 | 2018-09-30T22:11:12.000Z | 2020-04-24T20:25:01.000Z | docs/conf.py | uutzinger/Adafruit_CircuitPython_HTU21D | afd8d06a7220cc200b57ee03f9c12854a6b1b3fc | [
"Unlicense",
"MIT-0",
"MIT"
] | 7 | 2018-09-25T01:52:29.000Z | 2021-09-27T16:44:58.000Z | docs/conf.py | uutzinger/Adafruit_CircuitPython_HTU21D | afd8d06a7220cc200b57ee03f9c12854a6b1b3fc | [
"Unlicense",
"MIT-0",
"MIT"
] | 9 | 2018-09-25T01:49:44.000Z | 2022-03-28T19:47:47.000Z | # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["digitalio", "busio"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"BusDevice": (
"https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
None,
),
"Register": (
"https://circuitpython.readthedocs.io/projects/register/en/latest/",
None,
),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit HTU21D Library"
copyright = "2018 ktown"
author = "ktown"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitHtu21dLibrarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitHTU21DLibrary.tex",
"AdafruitHTU21D Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitHTU21Dlibrary",
"Adafruit HTU21D Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitHTU21DLibrary",
"Adafruit HTU21D Library Documentation",
author,
"AdafruitHTU21DLibrary",
"One line description of project.",
"Miscellaneous",
),
]
| 29.575916 | 85 | 0.664542 |
475423ec49857c4090a622c89ed1b70e7614a560 | 3,666 | py | Python | libs/sdc_etl_libs/api_helpers/APIFactory.py | darknegma/docker-airflow | 44e3d02d7ac43c8876145ae47acfbbbde67230df | [
"Apache-2.0"
] | null | null | null | libs/sdc_etl_libs/api_helpers/APIFactory.py | darknegma/docker-airflow | 44e3d02d7ac43c8876145ae47acfbbbde67230df | [
"Apache-2.0"
] | 3 | 2021-03-31T19:26:57.000Z | 2021-12-13T20:33:01.000Z | libs/sdc_etl_libs/api_helpers/APIFactory.py | darknegma/docker-airflow | 44e3d02d7ac43c8876145ae47acfbbbde67230df | [
"Apache-2.0"
] | null | null | null |
import logging
class APIFactory:
@staticmethod
def get_api(api_name_):
"""
Generates an instance of a requested API class.
:param api_name_: String. Name of API class to generate from available_apis dict.
:return: API class instance.
"""
if api_name_ in available_apis.keys():
return available_apis[api_name_]()
else:
logging.exception(f"{api_name_} is not a valid API option.")
def generate_newrelic():
try:
from sdc_etl_libs.api_helpers.apis.NewRelic.NewRelic import NewRelic
return NewRelic()
except Exception as e:
logging.error(f"API Factory could not generate an NewRelic API class. {e}")
def generate_truevault():
try:
from sdc_etl_libs.api_helpers.apis.TrueVault.TrueVault import TrueVault
return TrueVault()
except Exception as e:
logging.error(f"API Factory could not generate an TrueVault API class. {e}")
def generate_timecontrol():
try:
from sdc_etl_libs.api_helpers.apis.TimeControl.TimeControl import TimeControl
return TimeControl()
except Exception as e:
logging.error(f"API Factory could not generate an TimeControl API class. {e}")
def generate_exacttarget():
try:
from sdc_etl_libs.api_helpers.apis.ExactTarget.ExactTarget import ExactTarget
return ExactTarget()
except Exception as e:
logging.error(f"API Factory could not generate an ExactTarget API class. {e}")
def generate_podium():
try:
from sdc_etl_libs.api_helpers.apis.Podium.Podium import Podium
return Podium()
except Exception as e:
logging.error(f"API Factory could not generate an Podium API class. {e}")
def generate_ultiprorestapis():
try:
from sdc_etl_libs.api_helpers.apis.Ultipro.UltiproRESTAPIs import UltiproRESTAPIs
return UltiproRESTAPIs()
except Exception as e:
logging.error(f"API Factory could not generate an UltiproRESTAPIs API class. {e}")
def generate_ultiprotimemanagement():
try:
from sdc_etl_libs.api_helpers.apis.Ultipro.UltiproTimeManagement import UltiproTimeManagement
return UltiproTimeManagement()
except Exception as e:
logging.error(f"API Factory could not generate an UltiproTimeManagement API class. {e}")
def generate_ultiproraas():
try:
from sdc_etl_libs.api_helpers.apis.Ultipro.UltiproRaaS import UltiproRaaS
return UltiproRaaS()
except Exception as e:
logging.error(f"API Factory could not generate an UltiproRaaS API class. {e}")
def generate_ultiproservices():
try:
from sdc_etl_libs.api_helpers.apis.Ultipro.UltiproServices import UltiproServices
return UltiproServices()
except Exception as e:
logging.error(f"API Factory could not generate an UltiproServices API class. {e}")
def generate_searchads360():
try:
from sdc_etl_libs.api_helpers.apis.SearchAds360.SearchAds360 import SearchAds360
return SearchAds360()
except Exception as e:
logging.error(f"API Factory could not generate an SearchAds360 API class. {e}")
available_apis = {
'podium': generate_podium,
'exacttarget': generate_exacttarget,
'ultipro-restapis': generate_ultiprorestapis,
'ultipro-timemanagement': generate_ultiprotimemanagement,
'ultipro-services': generate_ultiproservices,
'ultipro-raas': generate_ultiproraas,
'timecontrol': generate_timecontrol,
'new-relic': generate_newrelic,
'truevault': generate_truevault,
'searchads360': generate_searchads360
} | 32.732143 | 101 | 0.703219 |
5ffce228f8157797ded10fbee245600240f3b385 | 241 | py | Python | pyfms/transformers.py | vishalbelsare/PyFactorizationMachines | f4eb14f095af8a99e195119a15b3f33276a6e91a | [
"MIT"
] | 3 | 2020-05-19T20:48:16.000Z | 2021-04-26T16:04:20.000Z | pyfms/transformers.py | vishalbelsare/PyFactorizationMachines | f4eb14f095af8a99e195119a15b3f33276a6e91a | [
"MIT"
] | null | null | null | pyfms/transformers.py | vishalbelsare/PyFactorizationMachines | f4eb14f095af8a99e195119a15b3f33276a6e91a | [
"MIT"
] | 1 | 2020-11-10T00:23:24.000Z | 2020-11-10T00:23:24.000Z | from theano import tensor as T
from . import core
class Linear(core.Transformer):
def transform(self, y_hat):
return y_hat
class Sigmoid(core.Transformer):
def transform(self, y_hat):
return T.nnet.sigmoid(y_hat)
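# Editor's hedged sketch (not part of the original pyfms module): a transformer
# maps the model's raw output y_hat onto the prediction scale, e.g. Sigmoid for
# binary-classification probabilities:
#
#     y_hat = T.dvector('y_hat')
#     prob = Sigmoid().transform(y_hat)   # symbolic sigmoid of the raw scores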
| 18.538462 | 36 | 0.697095 |
ea6f50ef1c78e2885b69b0841abf8bc9069d786f | 13,616 | py | Python | django/db/backends/mysql/base.py | shinshin86/django | 5cc81cd9eb69f5f7a711412c02039b435c393135 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2019-11-17T04:10:38.000Z | 2019-11-17T04:10:38.000Z | django/db/backends/mysql/base.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/backends/mysql/base.py | Blaahborgh/django | c591bc3ccece1514d6b419826c7fa36ada9d9213 | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2020-11-04T08:47:02.000Z | 2020-11-04T08:47:02.000Z | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.org/project/mysqlclient/
"""
import re
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils.functional import cached_property
try:
import MySQLdb as Database
except ImportError as err:
raise ImproperlyConfigured(
'Error loading MySQLdb module.\n'
'Did you install mysqlclient?'
) from err
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
version = Database.version_info
if version < (1, 3, 7):
raise ImproperlyConfigured('mysqlclient 1.3.7 or newer is required; you have %s.' % Database.__version__)
# MySQLdb returns TIME columns as timedelta -- they are more like timedelta in
# terms of actual behavior as they are signed and include days -- and Django
# expects time.
django_conversions = {
**conversions,
**{FIELD_TYPE.TIME: backend_utils.typecast_time},
}
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same).
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
class CursorWrapper:
"""
A thin wrapper around MySQLdb's normal cursor class that catches particular
exception instances and reraises them with the correct types.
Implemented as a wrapper, rather than a subclass, so that it isn't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (
1048, # Column cannot be null
1690, # BIGINT UNSIGNED value is out of range
)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
# args is None means no string interpolation
return self.cursor.execute(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
raise utils.IntegrityError(*tuple(e.args))
raise
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.OperationalError as e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e.args[0] in self.codes_for_integrityerror:
raise utils.IntegrityError(*tuple(e.args))
raise
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
display_name = 'MySQL'
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BigAutoField': 'bigint AUTO_INCREMENT',
'BinaryField': 'longblob',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime(6)',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time(6)',
'UUIDField': 'char(32)',
}
# For these columns, MySQL doesn't:
# - accept default values and implicitly treats these columns as nullable
# - support a database index
_limited_data_types = (
'tinyblob', 'blob', 'mediumblob', 'longblob', 'tinytext', 'text',
'mediumtext', 'longtext', 'json',
)
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
'icontains': "LIKE CONCAT('%%', {}, '%%')",
'startswith': "LIKE BINARY CONCAT({}, '%%')",
'istartswith': "LIKE CONCAT({}, '%%')",
'endswith': "LIKE BINARY CONCAT('%%', {})",
'iendswith': "LIKE CONCAT('%%', {})",
}
isolation_levels = {
'read uncommitted',
'read committed',
'repeatable read',
'serializable',
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def get_connection_params(self):
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
}
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = settings_dict['PASSWORD']
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
# Validate the transaction isolation level, if specified.
options = settings_dict['OPTIONS'].copy()
isolation_level = options.pop('isolation_level', 'read committed')
if isolation_level:
isolation_level = isolation_level.lower()
if isolation_level not in self.isolation_levels:
raise ImproperlyConfigured(
"Invalid transaction isolation level '%s' specified.\n"
"Use one of %s, or None." % (
isolation_level,
', '.join("'%s'" % s for s in sorted(self.isolation_levels))
))
self.isolation_level = isolation_level
kwargs.update(options)
return kwargs
def get_new_connection(self, conn_params):
return Database.connect(**conn_params)
def init_connection_state(self):
assignments = []
if self.features.is_sql_auto_is_null_enabled:
# SQL_AUTO_IS_NULL controls whether an AUTO_INCREMENT column on
# a recently inserted row will return when the field is tested
# for NULL. Disabling this brings this aspect of MySQL in line
# with SQL standards.
assignments.append('SET SQL_AUTO_IS_NULL = 0')
if self.isolation_level:
assignments.append('SET SESSION TRANSACTION ISOLATION LEVEL %s' % self.isolation_level.upper())
if assignments:
with self.cursor() as cursor:
cursor.execute('; '.join(assignments))
def create_cursor(self, name=None):
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit(autocommit)
def disable_constraint_checking(self):
"""
Disable foreign key checks, primarily for use in adding rows with
forward references. Always return True to indicate constraint checks
need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
# Override needs_rollback in case constraint_checks_disabled is
# nested inside transaction.atomic.
self.needs_rollback, needs_rollback = False, self.needs_rollback
try:
self.cursor().execute('SET foreign_key_checks=1')
finally:
self.needs_rollback = needs_rollback
def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
"""
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
""" % (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not "
"have a corresponding value in %s.%s."
% (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def mysql_version(self):
with self.temporary_connection() as cursor:
cursor.execute('SELECT VERSION()')
server_info = cursor.fetchone()[0]
match = server_version_re.match(server_info)
if not match:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
return tuple(int(x) for x in match.groups())
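# Editor's hedged configuration sketch (not part of Django itself): a typical
# settings.DATABASES entry served by this backend. 'isolation_level' is read
# from OPTIONS in get_connection_params() above; database name and credentials
# below are placeholders.
#
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.mysql',
#             'NAME': 'mydb',
#             'USER': 'myuser',
#             'PASSWORD': 'secret',
#             'HOST': '127.0.0.1',
#             'PORT': '3306',
#             'OPTIONS': {'isolation_level': 'repeatable read'},
#         },
#     }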
| 40.165192 | 109 | 0.606713 |
02d27ab29b75458daafc0bd80682a11f3ee152f1 | 16,070 | py | Python | src/pymor/core/defaults.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | 2 | 2022-03-22T11:47:12.000Z | 2022-03-22T11:48:23.000Z | src/pymor/core/defaults.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | 14 | 2022-01-05T09:25:11.000Z | 2022-03-31T17:07:10.000Z | src/pymor/core/defaults.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | 1 | 2022-03-28T10:58:18.000Z | 2022-03-28T10:58:18.000Z | # This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
"""This module contains pyMOR's facilities for handling default values.
A default value in pyMOR is always the default value of some
function argument. To mark the value of an optional function argument
as a user-modifiable default value use the :func:`defaults` decorator.
As an additional feature, if `None` is passed for such an argument,
its default value is used instead of `None`. This is useful
for writing code of the following form::
    @defaults('option')
def algorithm(U, option=42):
...
def method_called_by_user(V, option_for_algorithm=None):
...
algorithm(U, option=option_for_algorithm)
...
If the user does not provide `option_for_algorithm` to
`method_called_by_user`, the default `42` is automatically chosen
without the implementor of `method_called_by_user` having to care
about this.
The user interface for handling default values in pyMOR is provided
by :func:`set_defaults`, :func:`load_defaults_from_file`,
:func:`write_defaults_to_file` and :func:`print_defaults`.
If pyMOR is imported, it will automatically search for a configuration
file named `pymor_defaults.py` in the current working directory.
If found, the file is loaded via :func:`load_defaults_from_file`.
However, as a security precaution, the file will only be loaded if it is
owned by the user running the Python interpreter
(:func:`load_defaults_from_file` uses `exec` to load the configuration).
As an alternative, the environment variable `PYMOR_DEFAULTS` can be
used to specify the path of a configuration file. If empty or set to
`NONE`, no configuration file will be loaded whatsoever.
.. warning::
Note that changing defaults may affect the result of a (cached)
function call. pyMOR will emit a warning, when a result is retrieved
from the cache that has been computed using an earlier set of
|defaults| (see :func:`defaults_changes`).
"""
from collections import defaultdict, OrderedDict
import functools
import importlib
import inspect
import pkgutil
import textwrap
import threading
from pymor.core.exceptions import DependencyMissing
from pymor.tools.table import format_table
_default_container = None
class DefaultContainer:
"""Internal singleton class holding all default values defined in pyMOR.
Not to be used directly.
"""
def __new__(cls):
global _default_container
if _default_container is not None:
raise ValueError('DefaultContainer is a singleton! Use pymor.core.defaults._default_container.')
else:
return object.__new__(cls)
def __init__(self):
self._data = defaultdict(dict)
self.registered_functions = set()
self.changes = 0
self.changes_lock = threading.Lock()
def _add_defaults_for_function(self, func, args):
if func.__doc__ is not None:
new_docstring = inspect.cleandoc(func.__doc__)
new_docstring += '''
Defaults
--------
'''
new_docstring += '\n'.join(textwrap.wrap(', '.join(args), 80)) + '\n(see :mod:`pymor.core.defaults`)'
func.__doc__ = new_docstring
params = OrderedDict(inspect.signature(func).parameters)
argnames = tuple(params.keys())
defaultsdict = {}
for n in args:
p = params.get(n, None)
if p is None:
raise ValueError(f"Decorated function has no argument '{n}'")
if p.default is p.empty:
raise ValueError(f"Decorated function has no default for argument '{n}'")
defaultsdict[n] = p.default
path = func.__module__ + '.' + getattr(func, '__qualname__', func.__name__)
if path in self.registered_functions:
raise ValueError(f'Function with name {path} already registered for default values!')
self.registered_functions.add(path)
for k, v in defaultsdict.items():
self._data[path + '.' + k]['func'] = func
self._data[path + '.' + k]['code'] = v
defaultsdict = {}
for k in self._data:
if k.startswith(path + '.'):
defaultsdict[k.split('.')[-1]] = self.get(k)[0]
func.argnames = argnames
func.defaultsdict = defaultsdict
self._update_function_signature(func)
def _update_function_signature(self, func):
sig = inspect.signature(func)
params = OrderedDict(sig.parameters)
for n, v in func.defaultsdict.items():
params[n] = params[n].replace(default=v)
func.__signature__ = sig.replace(parameters=params.values())
def update(self, defaults, type='user'):
with self.changes_lock:
self.changes += 1
assert type in ('user', 'file')
functions_to_update = set()
for k, v in defaults.items():
k_parts = k.split('.')
func = self._data[k].get('func', None)
if not func:
head = k_parts[:-2]
while head:
try:
importlib.import_module('.'.join(head))
break
except ImportError:
head = head[:-1]
func = self._data[k].get('func', None)
if not func:
del self._data[k]
raise KeyError(k)
self._data[k][type] = v
argname = k_parts[-1]
func.defaultsdict[argname] = v
functions_to_update.add(func)
for func in functions_to_update:
self._update_function_signature(func)
def get(self, key):
values = self._data[key]
if 'user' in values:
return values['user'], 'user'
elif 'file' in values:
return values['file'], 'file'
elif 'code' in values:
return values['code'], 'code'
else:
raise ValueError('No default value matching the specified criteria')
def __getitem__(self, key):
assert isinstance(key, str)
self.get(key)[0]
def keys(self):
return self._data.keys()
def import_all(self):
packages = {k.split('.')[0] for k in self._data.keys()}.union({'pymor'})
for package in packages:
_import_all(package)
_default_container = DefaultContainer()
def defaults(*args):
"""Function decorator for marking function arguments as user-configurable defaults.
If a function decorated with :func:`defaults` is called, the values of the marked
default parameters are set to the values defined via :func:`load_defaults_from_file`
or :func:`set_defaults` in case no value has been provided by the caller of the function.
Moreover, if `None` is passed as a value for a default argument, the argument
is set to its default value, as well. If no value has been specified using
:func:`set_defaults` or :func:`load_defaults_from_file`, the default value provided in
the function signature is used.
If the argument `arg` of function `f` in sub-module `m` of package `p` is
marked as a default value, its value will be changeable by the aforementioned
methods under the path `p.m.f.arg`.
Note that the `defaults` decorator can also be used in user code.
Parameters
----------
args
List of strings containing the names of the arguments of the decorated
function to mark as pyMOR defaults. Each of these arguments has to be
a keyword argument (with a default value).
"""
assert all(isinstance(arg, str) for arg in args)
def the_decorator(decorated_function):
if not args:
return decorated_function
global _default_container
_default_container._add_defaults_for_function(decorated_function, args=args)
def set_default_values(*wrapper_args, **wrapper_kwargs):
for k, v in zip(decorated_function.argnames, wrapper_args):
if k in wrapper_kwargs:
raise TypeError(f"{decorated_function.__name__} got multiple values for argument '{k}'")
wrapper_kwargs[k] = v
wrapper_kwargs = {k: v if v is not None else decorated_function.defaultsdict.get(k, None)
for k, v in wrapper_kwargs.items()}
wrapper_kwargs = dict(decorated_function.defaultsdict, **wrapper_kwargs)
return wrapper_kwargs
# ensure that __signature__ is not copied
@functools.wraps(decorated_function, updated=())
def defaults_wrapper(*wrapper_args, **wrapper_kwargs):
kwargs = set_default_values(*wrapper_args, **wrapper_kwargs)
return decorated_function(**kwargs)
return defaults_wrapper
return the_decorator
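# Editor's hedged usage sketch (not part of the original pyMOR module): marking
# an argument as a default and overriding it later. The function and module
# names below are hypothetical; the key is always '<module path>.<function>.<arg>'.
#
#     @defaults('tol')
#     def my_algorithm(U, tol=1e-4):
#         ...
#
#     set_defaults({'my_package.my_module.my_algorithm.tol': 1e-8})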
def _import_all(package_name='pymor'):
package = importlib.import_module(package_name)
if hasattr(package, '__path__'):
def onerror(name):
from pymor.core.logger import getLogger
logger = getLogger('pymor.core.defaults._import_all')
logger.warning('Failed to import ' + name)
for p in pkgutil.walk_packages(package.__path__, package_name + '.', onerror=onerror):
try:
importlib.import_module(p[1])
except DependencyMissing:
pass
except ImportError:
from pymor.core.logger import getLogger
logger = getLogger('pymor.core.defaults._import_all')
logger.warning('Failed to import ' + p[1])
def print_defaults(import_all=True, shorten_paths=2):
"""Print all |default| values set in pyMOR.
Parameters
----------
import_all
While :func:`print_defaults` will always print all defaults defined in
loaded configuration files or set via :func:`set_defaults`, default
values set in the function signature can only be printed after the
modules containing these functions have been imported. If `import_all`
is set to `True`, :func:`print_defaults` will therefore first import all
of pyMOR's modules, to provide a complete lists of defaults.
shorten_paths
Shorten the paths of all default values by `shorten_paths` components.
The last two path components will always be printed.
"""
if import_all:
_default_container.import_all()
keys, values, comments = [], [], []
for k in sorted(_default_container.keys()):
v, c = _default_container.get(k)
k_parts = k.split('.')
if len(k_parts) >= shorten_paths + 2:
keys.append('.'.join(k_parts[shorten_paths:]))
else:
keys.append('.'.join(k_parts))
values.append(repr(v))
comments.append(c)
key_string = 'path (shortened)' if shorten_paths else 'path'
rows = [[key_string, 'value', 'source']] + list(zip(keys, values, comments))
print(format_table(rows, title='pyMOR defaults'))
print()
def write_defaults_to_file(filename='./pymor_defaults.py', packages=('pymor',)):
"""Write the currently set |default| values to a configuration file.
The resulting file is an ordinary Python script and can be modified
by the user at will. It can be loaded in a later session using
:func:`load_defaults_from_file`.
Parameters
----------
filename
Name of the file to write to.
packages
List of package names.
To discover all default values that have been defined using the
:func:`defaults` decorator, `write_defaults_to_file` will
recursively import all sub-modules of the named packages before
creating the configuration file.
"""
for package in packages:
_import_all(package)
keys, values, as_comment = [], [], []
for k in sorted(_default_container.keys()):
v, c = _default_container.get(k)
keys.append("'" + k + "'")
values.append(repr(v))
as_comment.append(c == 'code')
key_width = max(max([0] + list(map(len, ks))) for ks in keys)
with open(filename, 'wt') as f:
print('''
# pyMOR defaults config file
# This file has been automatically created by pymor.core.defaults.write_defaults_to_file'.
d = {}
'''[1:], file=f)
lks = keys[0].split('.')[:-1] if keys else ''
for c, k, v in zip(as_comment, keys, values):
ks = k.split('.')[:-1]
if lks != ks:
print('', file=f)
lks = ks
comment = '# ' if c else ''
print(f'{comment}d[{k:{key_width}}] = {v}', file=f)
print('Written defaults to file ' + filename)
def load_defaults_from_file(filename='./pymor_defaults.py'):
"""Loads |default| values defined in configuration file.
Suitable configuration files can be created via :func:`write_defaults_to_file`.
The file is loaded via Python's :func:`exec` function, so be very careful
    with configuration files you have not created yourself. You have been
warned!
Parameters
----------
filename
Path of the configuration file.
"""
env = {}
exec(open(filename, 'rt').read(), env)
try:
_default_container.update(env['d'], type='file')
except KeyError as e:
raise KeyError(f'Error loading defaults from file. Key {e} does not correspond to a default')
def _set_defaults(defaults):
from pymor.tools import mpi
if not mpi.rank0:
set_defaults(defaults)
def set_defaults(defaults):
"""Set |default| values.
This method sets the default value of function arguments marked via the
:func:`defaults` decorator, overriding default values specified in the
function signature or set earlier via :func:`load_defaults_from_file` or
previous :func:`set_defaults` calls.
Parameters
----------
defaults
Dictionary of default values. Keys are the full paths of the default
values (see :func:`defaults`).
"""
from pymor.tools import mpi
if mpi._event_loop_running and mpi.rank0:
mpi.call(_set_defaults, defaults)
try:
_default_container.update(defaults, type='user')
except KeyError as e:
raise KeyError(f'Error setting defaults. Key {e} does not correspond to a default')
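# Usage sketch (illustrative, not part of the original module): assuming some
# function decorated with @defaults('tol') lives at pymor.foo.bar, its default
# could be overridden globally as shown below. 'pymor.foo.bar.tol' is a made-up
# path; real paths can be listed with print_defaults().
#
#     from pymor.core.defaults import set_defaults
#     set_defaults({'pymor.foo.bar.tol': 1e-10})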
def get_defaults(user=True, file=True, code=True):
"""Get |default| values.
Returns all |default| values as a dict. The parameters can be set to filter by type.
Parameters
----------
user
If `True`, returned dict contains defaults that have been set by the user
with :func:`set_defaults`.
file
If `True`, returned dict contains defaults that have been loaded from file.
code
If `True`, returned dict contains unmodified default values.
"""
defaults = {}
for k in _default_container.keys():
v, t = _default_container.get(k)
if t == 'user' and user:
defaults[k] = v
if t == 'file' and file:
defaults[k] = v
if t == 'code' and code:
defaults[k] = v
return defaults
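# Illustrative sketch (not part of the original module): get_defaults() can be
# combined with set_defaults() to snapshot and later reapply user-set values.
#
#     snapshot = get_defaults(user=True, file=False, code=False)
#     ...                      # experiment with other defaults here
#     set_defaults(snapshot)   # reapply the snapshot as user defaults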
def defaults_changes():
"""Returns the number of changes made to to pyMOR's global |defaults|.
This methods returns the number of changes made to the state of
pyMOR's global |defaults| via :func:`set_defaults` or
:func:`load_defaults_from_file` since the start of program execution.
Since changing |defaults| may affect the result of a (cached) function
call, this value is used to warn when a result is retrieved from the cache
that has been computed using an earlier set of |defaults|.
.. warning::
Note that when using :mod:`parallelization <pymor.parallel>`,
workers might set different defaults at the same time, resulting
in equal change counts but different states of |defaults| at each
worker.
"""
return _default_container.changes
| 35.870536 | 113 | 0.64692 |
c0ada40c655c14cf1f24546d65b52c829c77275e | 735 | py | Python | climfill/numba_nanmean.py | climachine/climfill | 2bc1ace5d880e18d8f351373921f385a2c0d9bd8 | [
"Apache-2.0"
] | 10 | 2021-07-08T16:09:37.000Z | 2021-12-20T10:22:55.000Z | climfill/numba_nanmean.py | climachine/climfill | c4cd6797d932b7e21004e4172c9c5a05b2a24e47 | [
"Apache-2.0"
] | 1 | 2021-09-30T11:44:25.000Z | 2021-09-30T11:44:25.000Z | climfill/numba_nanmean.py | climachine/climfill | 2bc1ace5d880e18d8f351373921f385a2c0d9bd8 | [
"Apache-2.0"
] | 2 | 2021-04-30T16:29:31.000Z | 2021-12-17T15:39:24.000Z | """
this file contains a fast, numba-based version of the spatiotemporal
filtering of the data.
Adapted after source:
ilovesymposia.com/2017/03/12/scipys-new-lowlevelcallable-is-a-game-changer/
"""
import numpy as np
from numba import carray, cfunc
from numba.types import CPointer, float64, intc, intp, voidptr
# mean of footprint as I need it
@cfunc(intc(CPointer(float64), intp, CPointer(float64), voidptr))
def nbnanmean(values_ptr, len_values, result, data):
values = carray(values_ptr, (len_values,), dtype=float64)
result[0] = np.nan
tmp = 0
i = 0
for v in values:
if ~np.isnan(v):
tmp = tmp + v
i = i + 1
if i != 0:
result[0] = tmp / max(i, 1)
return 1
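# Usage sketch (illustrative, not part of the original module): the cfunc above
# has the callback signature expected by scipy's LowLevelCallable, so it can be
# plugged into scipy.ndimage.generic_filter as in the blog post referenced in
# the module docstring. `data` and `footprint` are placeholder names.
#
#     from scipy import ndimage, LowLevelCallable
#     mean_filter = LowLevelCallable(nbnanmean.ctypes)
#     filled = ndimage.generic_filter(data, mean_filter, footprint=footprint)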
| 26.25 | 75 | 0.659864 |
31726890f6c931fa9889457dde660823e80d3fb9 | 2,908 | py | Python | pymatgen/analysis/tests/test_substrate_analyzer.py | mailhexu/pymatgen | 70da55dd860771eb9d38c306dbcd3f6b074b7a54 | [
"MIT"
] | 18 | 2019-06-15T18:08:21.000Z | 2022-01-30T05:01:29.000Z | ComRISB/pyextern/pymatgen/pymatgen/analysis/tests/test_substrate_analyzer.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | null | null | null | ComRISB/pyextern/pymatgen/pymatgen/analysis/tests/test_substrate_analyzer.py | comscope/Comsuite | b80ca9f34c519757d337487c489fb655f7598cc2 | [
"BSD-3-Clause"
] | 11 | 2019-06-05T02:57:55.000Z | 2021-12-29T02:54:25.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
TODO: Modify unittest doc.
"""
__author__ = "Shyam Dwaraknath"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__date__ = "2/5/16"
import unittest
from pymatgen.analysis.substrate_analyzer import SubstrateAnalyzer, \
ZSLGenerator, fast_norm, reduce_vectors, vec_area, get_factors
from pymatgen.util.testing import PymatgenTest
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.elasticity.elastic import ElasticTensor
class ZSLGenTest(PymatgenTest):
# Clean up test to be based on test structures
def runTest(self):
# Film VO2
film = SpacegroupAnalyzer(self.get_structure("VO2"),
symprec=0.1).get_conventional_standard_structure()
# Substrate TiO2
substrate = SpacegroupAnalyzer(self.get_structure("TiO2"),
symprec=0.1).get_conventional_standard_structure()
z = ZSLGenerator()
self.assertAlmostEqual(fast_norm([3, 2, 1]), 3.7416573867739413)
self.assertArrayEqual(reduce_vectors([1, 0, 0], [2, 2, 0]),
[[1, 0, 0], [0, 2, 0]])
self.assertEqual(vec_area([1, 0, 0], [0, 2, 0]),
2)
self.assertArrayEqual(list(get_factors(18)), [1, 2, 3, 6, 9, 18])
self.assertTrue(z.is_same_vectors([[1.01, 0, 0], [0, 2, 0]],
[[1, 0, 0], [0, 2.01, 0]]))
self.assertFalse(z.is_same_vectors([[1.01, 2, 0], [0, 2, 0]],
[[1, 0, 0], [0, 2.01, 0]]))
matches = list(z.generate(film, substrate))
self.assertEqual(len(matches), 82)
class SubstrateAnalyzerTest(PymatgenTest):
# Clean up test to be based on test structures
def runTest(self):
# Film VO2
film = SpacegroupAnalyzer(self.get_structure("VO2"),
symprec=0.1).get_conventional_standard_structure()
# Substrate TiO2
substrate = SpacegroupAnalyzer(self.get_structure("TiO2"),
symprec=0.1).get_conventional_standard_structure()
film_elac = ElasticTensor.from_voigt([
[324.32, 187.3, 170.92, 0., 0., 0.],
[187.3, 324.32, 170.92, 0., 0., 0.],
[170.92, 170.92, 408.41, 0., 0., 0.],
[0., 0., 0., 150.73, 0., 0.],
[0., 0., 0., 0., 150.73, 0.],
[0., 0., 0., 0., 0., 238.74]])
s = SubstrateAnalyzer()
matches = list(s.calculate(film,substrate,film_elac))
self.assertEqual(len(matches), 82)
if __name__ == '__main__':
unittest.main()
| 34.619048 | 89 | 0.58425 |
471d10b21018df3914d9e28ce5336760cea97b0e | 2,041 | py | Python | examples/elliptic_inverse_field.py | jeremyyu8/deepxde | f3ded90f9cc11521f9e539aef567524c6ce7cea1 | [
"Apache-2.0"
] | 2 | 2021-05-07T03:49:12.000Z | 2021-08-17T16:14:25.000Z | examples/elliptic_inverse_field.py | jeremyyu8/deepxde | f3ded90f9cc11521f9e539aef567524c6ce7cea1 | [
"Apache-2.0"
] | null | null | null | examples/elliptic_inverse_field.py | jeremyyu8/deepxde | f3ded90f9cc11521f9e539aef567524c6ce7cea1 | [
"Apache-2.0"
] | 1 | 2022-02-25T05:21:36.000Z | 2022-02-25T05:21:36.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import deepxde as dde
import matplotlib.pyplot as plt
import numpy as np
from deepxde.backend import tf
# generate num equally-spaced points from -1 to 1
def gen_traindata(num):
xvals = np.linspace(-1, 1, num).reshape(num, 1)
uvals = np.sin(np.pi * xvals)
return xvals, uvals
def main():
def pde(x, y):
u, q = y[:, 0:1], y[:, 1:2]
du_xx = dde.grad.hessian(y, x, component=0, i=0, j=0)
# solution is u(x) = sin(pi*x), q(x) = -pi^2 * sin(pi*x)
return -du_xx + q
def sol(x):
        return np.sin(np.pi * x)
geom = dde.geometry.Interval(-1, 1)
bc = dde.DirichletBC(geom, sol, lambda _, on_boundary: on_boundary, component=0)
ob_x, ob_u = gen_traindata(100)
observe_u = dde.PointSetBC(ob_x, ob_u, component=0)
data = dde.data.PDE(
geom,
pde,
[bc, observe_u],
num_domain=200,
num_boundary=2,
anchors=ob_x,
num_test=1000,
)
net = dde.maps.PFNN([1, [20, 20], [20, 20], [20, 20], 2], "tanh", "Glorot uniform")
model = dde.Model(data, net)
model.compile("adam", lr=0.0001, loss_weights=[1, 100, 1000])
losshistory, train_state = model.train(epochs=20000)
dde.saveplot(losshistory, train_state, issave=True, isplot=True)
# view results
x = geom.uniform_points(500)
yhat = model.predict(x)
uhat, qhat = yhat[:, 0:1], yhat[:, 1:2]
utrue = np.sin(np.pi * x)
print("l2 relative error for u: " + str(dde.metrics.l2_relative_error(utrue, uhat)))
plt.figure()
plt.plot(x, uhat, label="uhat")
plt.plot(x, utrue, label="utrue")
plt.legend()
qtrue = -np.pi ** 2 * np.sin(np.pi * x)
print("l2 relative error for q: " + str(dde.metrics.l2_relative_error(qtrue, qhat)))
plt.figure()
plt.plot(x, qhat, label="qhat")
plt.plot(x, qtrue, label="qtrue")
plt.legend()
plt.show()
if __name__ == "__main__":
main()
| 26.506494 | 88 | 0.611955 |
f7313f100d4294fe5183c05e8c3ad109ceb0c790 | 16,944 | py | Python | pandas/tests/indexes/ranges/test_range.py | mujtahidalam/pandas | 526468c8fe6fc5157aaf2fce327c5ab2a3350f49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-06-17T12:54:33.000Z | 2021-06-17T12:54:33.000Z | pandas/tests/indexes/ranges/test_range.py | mujtahidalam/pandas | 526468c8fe6fc5157aaf2fce327c5ab2a3350f49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/ranges/test_range.py | mujtahidalam/pandas | 526468c8fe6fc5157aaf2fce327c5ab2a3350f49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
from pandas.core.dtypes.common import ensure_platform_int
import pandas as pd
from pandas import (
Float64Index,
Index,
Int64Index,
RangeIndex,
)
import pandas._testing as tm
from pandas.tests.indexes.test_numeric import Numeric
# aliases to make some tests easier to read
RI = RangeIndex
I64 = Int64Index
F64 = Float64Index
OI = Index
class TestRangeIndex(Numeric):
_index_cls = RangeIndex
@pytest.fixture
def simple_index(self) -> Index:
return self._index_cls(start=0, stop=20, step=2)
@pytest.fixture(
params=[
RangeIndex(start=0, stop=20, step=2, name="foo"),
RangeIndex(start=18, stop=-1, step=-2, name="bar"),
],
ids=["index_inc", "index_dec"],
)
def index(self, request):
return request.param
def test_can_hold_identifiers(self, simple_index):
idx = simple_index
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_too_many_names(self, simple_index):
index = simple_index
with pytest.raises(ValueError, match="^Length"):
index.names = ["roger", "harold"]
@pytest.mark.parametrize(
"index, start, stop, step",
[
(RangeIndex(5), 0, 5, 1),
(RangeIndex(0, 5), 0, 5, 1),
(RangeIndex(5, step=2), 0, 5, 2),
(RangeIndex(1, 5, 2), 1, 5, 2),
],
)
def test_start_stop_step_attrs(self, index, start, stop, step):
# GH 25710
assert index.start == start
assert index.stop == stop
assert index.step == step
@pytest.mark.parametrize("attr_name", ["_start", "_stop", "_step"])
def test_deprecated_start_stop_step_attrs(self, attr_name, simple_index):
# GH 26581
idx = simple_index
with tm.assert_produces_warning(FutureWarning):
getattr(idx, attr_name)
def test_copy(self):
i = RangeIndex(5, name="Foo")
i_copy = i.copy()
assert i_copy is not i
assert i_copy.identical(i)
assert i_copy._range == range(0, 5, 1)
assert i_copy.name == "Foo"
def test_repr(self):
i = RangeIndex(5, name="Foo")
result = repr(i)
expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
i = RangeIndex(5, 0, -1)
result = repr(i)
expected = "RangeIndex(start=5, stop=0, step=-1)"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
def test_insert(self):
idx = RangeIndex(5, name="Foo")
result = idx[1:4]
# test 0th element
tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]))
# GH 18295 (test missing)
expected = Float64Index([0, np.nan, 1, 2, 3, 4])
for na in [np.nan, None, pd.NA]:
result = RangeIndex(5).insert(1, na)
tm.assert_index_equal(result, expected)
result = RangeIndex(5).insert(1, pd.NaT)
expected = Index([0, pd.NaT, 1, 2, 3, 4], dtype=object)
tm.assert_index_equal(result, expected)
def test_delete(self):
idx = RangeIndex(5, name="Foo")
expected = idx[1:].astype(int)
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
expected = idx[:-1].astype(int)
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises((IndexError, ValueError), match=msg):
# either depending on numpy version
result = idx.delete(len(idx))
def test_view(self):
i = RangeIndex(0, name="Foo")
i_view = i.view()
assert i_view.name == "Foo"
i_view = i.view("i8")
tm.assert_numpy_array_equal(i.values, i_view)
i_view = i.view(RangeIndex)
tm.assert_index_equal(i, i_view)
def test_dtype(self, simple_index):
index = simple_index
assert index.dtype == np.int64
def test_cache(self):
# GH 26565, GH26617, GH35432
# This test checks whether _cache has been set.
# Calling RangeIndex._cache["_data"] creates an int64 array of the same length
# as the RangeIndex and stores it in _cache.
idx = RangeIndex(0, 100, 10)
assert idx._cache == {}
repr(idx)
assert idx._cache == {}
str(idx)
assert idx._cache == {}
idx.get_loc(20)
assert idx._cache == {}
90 in idx # True
assert idx._cache == {}
91 in idx # False
assert idx._cache == {}
idx.all()
assert idx._cache == {}
idx.any()
assert idx._cache == {}
for _ in idx:
pass
assert idx._cache == {}
idx.format()
assert idx._cache == {}
df = pd.DataFrame({"a": range(10)}, index=idx)
str(df)
assert idx._cache == {}
df.loc[50]
assert idx._cache == {}
with pytest.raises(KeyError, match="51"):
df.loc[51]
assert idx._cache == {}
df.loc[10:50]
assert idx._cache == {}
df.iloc[5:10]
assert idx._cache == {}
# idx._cache should contain a _data entry after call to idx._data
idx._data
assert isinstance(idx._data, np.ndarray)
assert idx._data is idx._data # check cached value is reused
assert len(idx._cache) == 1
expected = np.arange(0, 100, 10, dtype="int64")
tm.assert_numpy_array_equal(idx._cache["_data"], expected)
def test_is_monotonic(self):
index = RangeIndex(0, 20, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is False
index = RangeIndex(4, 0, -1)
assert index.is_monotonic is False
assert index._is_strictly_monotonic_increasing is False
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(2, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
def test_equals_range(self):
equiv_pairs = [
(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
(RangeIndex(0), RangeIndex(1, -1, 3)),
(RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
(RangeIndex(0, -9, -2), RangeIndex(0, -10, -2)),
]
for left, right in equiv_pairs:
assert left.equals(right)
assert right.equals(left)
def test_logical_compat(self, simple_index):
idx = simple_index
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self, simple_index):
index = simple_index
i = Index(index.copy())
assert i.identical(index)
# we don't allow object dtype for RangeIndex
if isinstance(index, RangeIndex):
return
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = index.copy(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
assert same_values.identical(index.copy(dtype=object))
assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
assert not index.copy(dtype=object).identical(index.copy(dtype="int64"))
def test_nbytes(self):
# memory savings vs int index
i = RangeIndex(0, 1000)
assert i.nbytes < i._int64index.nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
assert i.nbytes == i2.nbytes
@pytest.mark.parametrize(
"start,stop,step",
[
# can't
("foo", "bar", "baz"),
# shouldn't
("0", "1", "2"),
],
)
def test_cant_or_shouldnt_cast(self, start, stop, step):
msg = f"Wrong type {type(start)} for value {start}"
with pytest.raises(TypeError, match=msg):
RangeIndex(start, stop, step)
def test_view_index(self, simple_index):
index = simple_index
index.view(Index)
def test_prevent_casting(self, simple_index):
index = simple_index
result = index.astype("O")
assert result.dtype == np.object_
def test_repr_roundtrip(self, simple_index):
index = simple_index
tm.assert_index_equal(eval(repr(index)), index)
def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name="asdf")
assert idx.name == idx[1:].name
def test_has_duplicates(self, index):
assert index.is_unique
assert not index.has_duplicates
def test_extended_gcd(self, simple_index):
index = simple_index
result = index._extended_gcd(6, 10)
assert result[0] == result[1] * 6 + result[2] * 10
assert 2 == result[0]
result = index._extended_gcd(10, 6)
assert 2 == result[1] * 10 + result[2] * 6
assert 2 == result[0]
def test_min_fitting_element(self):
result = RangeIndex(0, 20, 2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(1, 6)._min_fitting_element(1)
assert 1 == result
result = RangeIndex(18, -2, -2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(5, 0, -1)._min_fitting_element(1)
assert 1 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num)
assert big_num == result
def test_max_fitting_element(self):
result = RangeIndex(0, 20, 2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(1, 6)._max_fitting_element(4)
assert 4 == result
result = RangeIndex(18, -2, -2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(5, 0, -1)._max_fitting_element(4)
assert 4 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._max_fitting_element(big_num)
assert big_num == result
def test_pickle_compat_construction(self):
# RangeIndex() is a valid constructor
pass
def test_slice_specialised(self, simple_index):
index = simple_index
index.name = "foo"
# scalar indexing
res = index[1]
expected = 2
assert res == expected
res = index[-1]
expected = 18
assert res == expected
# slicing
# slice value completion
index_slice = index[:]
expected = index
tm.assert_index_equal(index_slice, expected)
# positive slice values
index_slice = index[7:10:2]
expected = Index(np.array([14, 18]), name="foo")
tm.assert_index_equal(index_slice, expected)
# negative slice values
index_slice = index[-1:-5:-2]
expected = Index(np.array([18, 14]), name="foo")
tm.assert_index_equal(index_slice, expected)
# stop overshoot
index_slice = index[2:100:4]
expected = Index(np.array([4, 12]), name="foo")
tm.assert_index_equal(index_slice, expected)
# reverse
index_slice = index[::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-8::-1]
expected = Index(np.array([4, 2, 0]), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[40::-1]
expected = Index(index.values[40::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[10::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
@pytest.mark.parametrize("step", set(range(-5, 6)) - {0})
def test_len_specialised(self, step):
# make sure that our len is the same as np.arange calc
start, stop = (0, 5) if step > 0 else (5, 0)
arr = np.arange(start, stop, step)
index = RangeIndex(start, stop, step)
assert len(index) == len(arr)
index = RangeIndex(stop, start, step)
assert len(index) == 0
@pytest.fixture(
params=[
([RI(1, 12, 5)], RI(1, 12, 5)),
([RI(0, 6, 4)], RI(0, 6, 4)),
([RI(1, 3), RI(3, 7)], RI(1, 7)),
([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)),
([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)),
([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)),
([RI(-4, -8), RI(-8, -12)], RI(0, 0)),
([RI(-4, -8), RI(3, -4)], RI(0, 0)),
([RI(-4, -8), RI(3, 5)], RI(3, 5)),
([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])),
([RI(-2), RI(3, 5)], RI(3, 5)),
([RI(2), RI(2)], I64([0, 1, 0, 1])),
([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)),
([RI(2), RI(3, 5), RI(5, 8, 4)], I64([0, 1, 3, 4, 5])),
([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)),
([RI(3), I64([-1, 3, 15])], I64([0, 1, 2, -1, 3, 15])),
([RI(3), F64([-1, 3.1, 15.0])], F64([0, 1, 2, -1, 3.1, 15.0])),
([RI(3), OI(["a", None, 14])], OI([0, 1, 2, "a", None, 14])),
([RI(3, 1), OI(["a", None, 14])], OI(["a", None, 14])),
]
)
def appends(self, request):
"""Inputs and expected outputs for RangeIndex.append test"""
return request.param
def test_append(self, appends):
# GH16212
indices, expected = appends
result = indices[0].append(indices[1:])
tm.assert_index_equal(result, expected, exact=True)
if len(indices) == 2:
# Append single item rather than list
result2 = indices[0].append(indices[1])
tm.assert_index_equal(result2, expected, exact=True)
def test_engineless_lookup(self):
# GH 16685
# Standard lookup on RangeIndex should not require the engine to be
# created
idx = RangeIndex(2, 10, 3)
assert idx.get_loc(5) == 1
tm.assert_numpy_array_equal(
idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2]))
)
with pytest.raises(KeyError, match="3"):
idx.get_loc(3)
assert "_engine" not in idx._cache
# Different types of scalars can be excluded immediately, no need to
# use the _engine
with pytest.raises(KeyError, match="'a'"):
idx.get_loc("a")
assert "_engine" not in idx._cache
def test_format_empty(self):
# GH35712
empty_idx = self._index_cls(0)
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
@pytest.mark.parametrize(
"RI",
[
RangeIndex(0, -1, -1),
RangeIndex(0, 1, 1),
RangeIndex(1, 3, 2),
RangeIndex(0, -1, -2),
RangeIndex(-3, -5, -2),
],
)
def test_append_len_one(self, RI):
# GH39401
result = RI.append([])
tm.assert_index_equal(result, RI, exact=True)
@pytest.mark.parametrize("base", [RangeIndex(0, 2), Index([0, 1])])
def test_isin_range(self, base):
# GH#41151
values = RangeIndex(0, 1)
result = base.isin(values)
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
| 31.61194 | 86 | 0.578435 |
bf24b450b511b172b3f9ce347050cc207ccd7f8a | 1,856 | py | Python | pos_bahrain/api/item.py | hafeesk/pos_bahrain | aa00fb25bb2146fcfad9274aa154946d3174d8b0 | [
"MIT"
] | null | null | null | pos_bahrain/api/item.py | hafeesk/pos_bahrain | aa00fb25bb2146fcfad9274aa154946d3174d8b0 | [
"MIT"
] | null | null | null | pos_bahrain/api/item.py | hafeesk/pos_bahrain | aa00fb25bb2146fcfad9274aa154946d3174d8b0 | [
"MIT"
] | 1 | 2021-09-04T11:08:13.000Z | 2021-09-04T11:08:13.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
def _groupby(key, list_of_dicts):
from itertools import groupby
from operator import itemgetter
keywise = {}
for k, v in groupby(
sorted(list_of_dicts, key=itemgetter(key)),
itemgetter(key),
):
keywise[k] = list(v)
return keywise
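# Illustrative note (not part of the original module): _groupby buckets a flat
# list of dicts by one key. With made-up data,
#   _groupby('item', [{'item': 'A', 'batch': 'b1'}, {'item': 'A', 'batch': 'b2'}])
#   returns {'A': [{'item': 'A', 'batch': 'b1'}, {'item': 'A', 'batch': 'b2'}]}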
@frappe.whitelist()
def get_more_pos_data():
return {
'batch_no_details': get_batch_no_details(),
'uom_details': get_uom_details(),
'exchange_rates': get_exchange_rates(),
}
def get_batch_no_details():
batches = frappe.db.sql(
"""
SELECT name, item, expiry_date
FROM `tabBatch`
WHERE IFNULL(expiry_date, '4000-10-10') >= CURDATE()
ORDER BY expiry_date
""",
as_dict=1,
)
return _groupby('item', batches)
def get_uom_details():
uoms = frappe.db.sql(
"""
SELECT
parent AS item_code,
uom,
conversion_factor
FROM `tabUOM Conversion Detail`
""",
as_dict=1
)
return _groupby('item_code', uoms)
def _merge_dicts(x, y):
z = x.copy()
z.update(y)
return z
def get_exchange_rates():
from erpnext.setup.utils import get_exchange_rate
mops = frappe.db.sql(
"""
SELECT
name AS mode_of_payment,
alt_currency AS currency
FROM `tabMode of Payment`
WHERE in_alt_currency=1
""",
as_dict=1
)
return {mop.mode_of_payment: mop for mop in map(lambda x: _merge_dicts(
x,
{
'conversion_rate': get_exchange_rate(
x.currency,
frappe.defaults.get_user_default('currency'),
),
}
), mops)}
| 22.634146 | 75 | 0.554418 |
c15925f13593bc829c583440a06f445e29dfa2e5 | 3,017 | py | Python | lib/all_scripts/file_tools.py | jkaessens/gwas-assoc | 1053c94222701f108362e33c99155cfc148f4ca2 | [
"MIT"
] | null | null | null | lib/all_scripts/file_tools.py | jkaessens/gwas-assoc | 1053c94222701f108362e33c99155cfc148f4ca2 | [
"MIT"
] | null | null | null | lib/all_scripts/file_tools.py | jkaessens/gwas-assoc | 1053c94222701f108362e33c99155cfc148f4ca2 | [
"MIT"
] | null | null | null | import os
import os.path
from misc_tools import *
def ensure_dir_exists_for_file(path_to_file):
"""
checks if dir of a file (full path)
exists and if not creates it
"""
ap = os.path.dirname(os.path.abspath(os.path.expanduser(path_to_file)))
if os.path.exists(ap):
if not os.path.isdir(ap):
abort(ap + " exists but is not a directory.")
else:
os.makedirs(ap)
def ensure_dir_exists(path_to_dir):
"""
checks if a dir exists and if not creates it
"""
ap = os.path.abspath(os.path.expanduser(path_to_dir))
if os.path.exists(ap):
if not os.path.isdir(ap):
abort(ap + " exists but is not a directory.")
else:
os.makedirs(ap)
def read_sets_of_lines_from_file(file_name, line_ranges, converter = None, verbose=True):
"""
Opens and reads a file and extracts sets of lines specified by a list of start/stop pairs.
file_name: Name of the file to be read from
line_ranges: A list of pairs of start and stop lines. These are integers starting at 0 for the first line.
  converter: A function (e.g. 'int') which will be applied to each line before being put into the resulting list
  Returns a list containing lists of values. The order corresponds to the order of the line_ranges
  ASSUMPTION: The regions must not overlap
"""
#
# Create a local, sorted version of the line ranges, that
# preserves the information about the order
#
temp_line_ranges =[]
line_range_no = 0
for lr in line_ranges:
temp_line_ranges.append( [lr[0], lr[1], line_range_no])
line_range_no += 1
temp_line_ranges.sort()
#
# Now, go through the file
#
result_list = [None] * len(line_ranges)
line_range_iter = iter(temp_line_ranges)
act_line_range = line_range_iter.next()
act_start = act_line_range[0]
act_stop = act_line_range[1]
act_result_range = []
in_range = False
line_no = 0
file = open(file_name)
if verbose:
print
print "Reading", len(line_ranges), "region(s) from this file:"
print file_name
print
for line in file:
if in_range:
#
# Do the reading
      # (and stop once the last pos has been reached)
#
if converter:
act_result_range.append(converter(line))
#print converter(line)
else:
act_result_range.append(line)
if line_no ==act_stop:
#
        # Stopping the reading
#
if verbose:
print " Stop reading at ", line_no
print
in_range = False
result_list[act_line_range[2]] = act_result_range
# Prepare for the next range
try:
act_line_range = line_range_iter.next()
except StopIteration:
break
act_start = act_line_range[0]
act_stop = act_line_range[1]
act_result_range = []
else:
if line_no ==act_start:
#
# Starting the reading
#
if verbose:
print " Start reading at", line_no
in_range = True
if converter:
act_result_range.append(converter(line))
else:
act_result_range.append(line)
line_no+=1
#
# It's done
#
file.close()
return result_list
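# Usage sketch (illustrative, not part of the original module): read lines 0-2
# and 10-12 of a hypothetical "numbers.txt" as integers; the two result lists
# come back in the same order as the requested ranges.
#
#   values, more_values = read_sets_of_lines_from_file(
#       "numbers.txt", [[0, 2], [10, 12]], converter=int, verbose=False)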
| 21.246479 | 115 | 0.680146 |
97ae2695cef8f8355f7529c7e34b3ff4de575fe4 | 1,613 | py | Python | setup.py | rookielxy/nvidia-htop | dfbf78c28b440b247c5e8dd048ec950dd507c77d | [
"BSD-3-Clause"
] | null | null | null | setup.py | rookielxy/nvidia-htop | dfbf78c28b440b247c5e8dd048ec950dd507c77d | [
"BSD-3-Clause"
] | null | null | null | setup.py | rookielxy/nvidia-htop | dfbf78c28b440b247c5e8dd048ec950dd507c77d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup
import pathlib
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / 'README.md').read_text(encoding='utf-8')
setup(name='nvidia-htop',
version='1.0.3',
description='A tool for enriching the output of nvidia-smi',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/peci1/nvidia-htop',
author='Martin Pecka',
author_email='peci1@seznam.cz',
      entry_points={
'console_scripts': [
'nvidia-htop = nvidia-htop:main'
],
},
install_requires=[
"termcolor"
],
python_requires='>=3.5, <4',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: GPU',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Utilities',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
],
keywords='nvidia, nvidia-smi, GPU, htop, top',
project_urls={
'Bug Reports': 'https://github.com/peci1/nvidia-htop/issues',
'Source': 'https://github.com/peci1/nvidia-htop',
},
)
| 29.87037 | 69 | 0.608184 |
4e5b164a8051703f87ac87f991edb7e4fa5b73c0 | 22,775 | py | Python | Lib/objc/_CoreDuet.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 701 | 2018-10-22T11:54:09.000Z | 2022-03-31T14:39:30.000Z | Lib/objc/_CoreDuet.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 229 | 2018-10-24T09:15:31.000Z | 2021-12-24T16:51:37.000Z | Lib/objc/_CoreDuet.py | snazari/Pyto | bcea7bbef35cab21ce73087b1a0c00a07d07ec72 | [
"MIT"
] | 131 | 2018-11-25T18:33:03.000Z | 2022-03-24T03:18:07.000Z | """
Classes from the 'CoreDuet' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
_DKSyncCoordinatorLogging = _Class("_DKSyncCoordinatorLogging")
_DKSyncContextObjectFactory = _Class("_DKSyncContextObjectFactory")
_DKSyncContext = _Class("_DKSyncContext")
_DKSyncPeerObserver = _Class("_DKSyncPeerObserver")
_DKSyncPeerInfo = _Class("_DKSyncPeerInfo")
_CDDeviceInfo = _Class("_CDDeviceInfo")
_DKPlatform = _Class("_DKPlatform")
_CDSizeMetricFamily = _Class("_CDSizeMetricFamily")
_CDSizeMetric = _Class("_CDSizeMetric")
_CDMutableSizeMetric = _Class("_CDMutableSizeMetric")
_CDHomeControlsDataCollectionTask = _Class("_CDHomeControlsDataCollectionTask")
_CDHomeControlsDataCollectionSession = _Class("_CDHomeControlsDataCollectionSession")
_CDDataCollection = _Class("_CDDataCollection")
_CDDataCollectionSession = _Class("_CDDataCollectionSession")
_CDEventStreamsRegister = _Class("_CDEventStreamsRegister")
_DKProtobufMetadata = _Class("_DKProtobufMetadata")
_CDDatePrinter = _Class("_CDDatePrinter")
_CDStatsPrinter = _Class("_CDStatsPrinter")
_DKObjectType = _Class("_DKObjectType")
_DKIdentifierType = _Class("_DKIdentifierType")
_DKQuantityType = _Class("_DKQuantityType")
_DKCategoryType = _Class("_DKCategoryType")
_DKSyncRapportCommonStorage = _Class("_DKSyncRapportCommonStorage")
_DKSyncRapportStorage = _Class("_DKSyncRapportStorage")
_DKSyncRapportContextStorage = _Class("_DKSyncRapportContextStorage")
_DKSyncRapportKnowledgeStorage = _Class("_DKSyncRapportKnowledgeStorage")
_CDSpotlightItemRecorder = _Class("_CDSpotlightItemRecorder")
_CDSpotlightItemRecorderOperation = _Class("_CDSpotlightItemRecorderOperation")
_DKDataProtectionStateMonitor = _Class("_DKDataProtectionStateMonitor")
_DKDataProtectionMonitor = _Class("_DKDataProtectionMonitor")
_DKXPCKnowledgeStore = _Class("_DKXPCKnowledgeStore")
_CDDateRange = _Class("_CDDateRange")
_DKKnowledgeStorageLogging = _Class("_DKKnowledgeStorageLogging")
_CDPInteractionStoreDataHarvester = _Class("_CDPInteractionStoreDataHarvester")
_CDSpotlightQuerier = _Class("_CDSpotlightQuerier")
_CDMDSearchQueryDelegate = _Class("_CDMDSearchQueryDelegate")
_CDSleepPredictorMath = _Class("_CDSleepPredictorMath")
_CDErrorUtilities = _Class("_CDErrorUtilities")
CDBudget = _Class("CDBudget")
_CDNetworkBudgetCalculator = _Class("_CDNetworkBudgetCalculator")
CDAttribute = _Class("CDAttribute")
CDAttributeOccurrence = _Class("CDAttributeOccurrence")
_DKSyncUrgencyTracker = _Class("_DKSyncUrgencyTracker")
_CDSocialInteractionAdvisor = _Class("_CDSocialInteractionAdvisor")
_CDInteractionRecorder = _Class("_CDInteractionRecorder")
_DKStandingQueryExecutor = _Class("_DKStandingQueryExecutor")
_DKDeviceActivityStandingQuery = _Class("_DKDeviceActivityStandingQuery")
_CDSpotlightEventIndexer = _Class("_CDSpotlightEventIndexer")
_CDEventIndexerContext = _Class("_CDEventIndexerContext")
_CDEventIndexerBookmark = _Class("_CDEventIndexerBookmark")
_CDPSerializedDataHarvester = _Class("_CDPSerializedDataHarvester")
_DKSyncSerializer = _Class("_DKSyncSerializer")
_DKPredictor = _Class("_DKPredictor")
_CDInteractionFeedbackLogger = _Class("_CDInteractionFeedbackLogger")
CDSession = _Class("CDSession")
CDGlanceLingerEvent = _Class("CDGlanceLingerEvent")
_DKKnowledgeStorageSimpleKeyValueStore = _Class(
"_DKKnowledgeStorageSimpleKeyValueStore"
)
_CDCommunicatorHelper = _Class("_CDCommunicatorHelper")
_DKObjectMOConverter = _Class("_DKObjectMOConverter")
_DKObjectFromMOCache = _Class("_DKObjectFromMOCache")
_DKSyncPeer = _Class("_DKSyncPeer")
CDMonitorManager = _Class("CDMonitorManager")
_CDInteractionStoreNotificationReceiver = _Class(
"_CDInteractionStoreNotificationReceiver"
)
_CDInteractionStoreNotifier = _Class("_CDInteractionStoreNotifier")
_DKEventData = _Class("_DKEventData")
_CDInteractionPolicies = _Class("_CDInteractionPolicies")
_DKDiskSubsystemIdentifier = _Class("_DKDiskSubsystemIdentifier")
_DKNotificationEventIdentifier = _Class("_DKNotificationEventIdentifier")
_DKUUIDIdentifier = _Class("_DKUUIDIdentifier")
_DKTopicIdentifier = _Class("_DKTopicIdentifier")
_DKHomeKitHomeAppViewIdentifier = _Class("_DKHomeKitHomeAppViewIdentifier")
_DKHomeKitAccessoryControlIdentifier = _Class("_DKHomeKitAccessoryControlIdentifier")
_DKHomeKitSceneIdentifier = _Class("_DKHomeKitSceneIdentifier")
_DKResourceIdentifier = _Class("_DKResourceIdentifier")
_DKLocationIdentifier = _Class("_DKLocationIdentifier")
_DKBundleIdentifier = _Class("_DKBundleIdentifier")
_DKTitleIdentifier = _Class("_DKTitleIdentifier")
_DKAnyStringIdentifier = _Class("_DKAnyStringIdentifier")
_DKCustomIdentifier = _Class("_DKCustomIdentifier")
_CDLogging = _Class("_CDLogging")
_DKCoreDataStorage = _Class("_DKCoreDataStorage")
_DKIntentMetadataKey = _Class("_DKIntentMetadataKey")
_DKRoutineLocationOfInterestTypeCategory = _Class(
"_DKRoutineLocationOfInterestTypeCategory"
)
_DKSourceMOConverter = _Class("_DKSourceMOConverter")
_CDDataCollectionUtilities = _Class("_CDDataCollectionUtilities")
_DKRoutineEventStreams = _Class("_DKRoutineEventStreams")
_DKEventStatsUtilities = _Class("_DKEventStatsUtilities")
_DKEventTypeStatsTimerCounter = _Class("_DKEventTypeStatsTimerCounter")
_DKEventStatsTimerCounter = _Class("_DKEventStatsTimerCounter")
_DKEventTypeResultStatsCounter = _Class("_DKEventTypeResultStatsCounter")
_DKEventTypeStatsCounter = _Class("_DKEventTypeStatsCounter")
_DKEventResultStatsCounter = _Class("_DKEventResultStatsCounter")
_DKEventStatsCounter = _Class("_DKEventStatsCounter")
_DKEventStatsCollection = _Class("_DKEventStatsCollection")
_DKEventStatsCounterInternal = _Class("_DKEventStatsCounterInternal")
_DKSyncedFeatures = _Class("_DKSyncedFeatures")
_DKActivityThrottler = _Class("_DKActivityThrottler")
_DKThrottledActivity = _Class("_DKThrottledActivity")
_CDInteractionAdvisorSettings = _Class("_CDInteractionAdvisorSettings")
_CDPSimpleDataPoint = _Class("_CDPSimpleDataPoint")
_CDEntitlementsUtilities = _Class("_CDEntitlementsUtilities")
_DKSyncMetadataStorage = _Class("_DKSyncMetadataStorage")
_CDBatterySaver = _Class("_CDBatterySaver")
_DKSyncErrors = _Class("_DKSyncErrors")
_CDCloudFamilyDataCollectionTask = _Class("_CDCloudFamilyDataCollectionTask")
_CDCloudFamilyDataCollectionSession = _Class("_CDCloudFamilyDataCollectionSession")
_DKQueryDispatcher = _Class("_DKQueryDispatcher")
_DKAggregateEventQueryResult = _Class("_DKAggregateEventQueryResult")
_DKContentProviderQueries = _Class("_DKContentProviderQueries")
_DKEventCKConverter = _Class("_DKEventCKConverter")
_DKSyncPowerlog = _Class("_DKSyncPowerlog")
_CDEventStream = _Class("_CDEventStream")
_DKKnowledgeSyncStorageAssertion = _Class("_DKKnowledgeSyncStorageAssertion")
_DKSyncChanges = _Class("_DKSyncChanges")
_DKLocalChanges = _Class("_DKLocalChanges")
_DKKnowledgeStorage = _Class("_DKKnowledgeStorage")
_CDCloudFamilyPETDataCollectionUtilities = _Class(
"_CDCloudFamilyPETDataCollectionUtilities"
)
_DKAppMediaUsageMetadataKey = _Class("_DKAppMediaUsageMetadataKey")
_DKDigitalHealthMetadataKey = _Class("_DKDigitalHealthMetadataKey")
_CDPortraitStreams = _Class("_CDPortraitStreams")
_CDIntentSpotlightIndex = _Class("_CDIntentSpotlightIndex")
_DKHistogram = _Class("_DKHistogram")
_CDPeopleSuggesterSettings = _Class("_CDPeopleSuggesterSettings")
_CDPeopleSuggesterContext = _Class("_CDPeopleSuggesterContext")
_CDSuggestedPerson = _Class("_CDSuggestedPerson")
_CDCachedPeopleSuggestion = _Class("_CDCachedPeopleSuggestion")
_CDPeopleSuggester = _Class("_CDPeopleSuggester")
_CDPortraitMetadataKey = _Class("_CDPortraitMetadataKey")
_CDEntityMetadataKey = _Class("_CDEntityMetadataKey")
_CDTopicMetadataKey = _Class("_CDTopicMetadataKey")
_CDEventHandlerForDefaultPairedNearby = _Class("_CDEventHandlerForDefaultPairedNearby")
_CDEventHandlerForWatchNearby = _Class("_CDEventHandlerForWatchNearby")
_CDEventHandlerForAppInFocus = _Class("_CDEventHandlerForAppInFocus")
_CDEventHandlerForActivityLevel = _Class("_CDEventHandlerForActivityLevel")
_CDFileUtility = _Class("_CDFileUtility")
_CDDataCollectionAnonymizer = _Class("_CDDataCollectionAnonymizer")
_CDAppActionRecorder = _Class("_CDAppActionRecorder")
_CDUserActivity = _Class("_CDUserActivity")
_DKSyncLocalKnowledgeStorage = _Class("_DKSyncLocalKnowledgeStorage")
_CDTemporalInteractionAdvisor = _Class("_CDTemporalInteractionAdvisor")
_CDInteractionAdvisorUtils = _Class("_CDInteractionAdvisorUtils")
_CDGenericInteractionRanker = _Class("_CDGenericInteractionRanker")
_CDInteractionRank = _Class("_CDInteractionRank")
_CDInteractionAdviceEngine = _Class("_CDInteractionAdviceEngine")
_DKRoutineMetadataKeys = _Class("_DKRoutineMetadataKeys")
_CDMultiLevelRateLimiter = _Class("_CDMultiLevelRateLimiter")
_DKKnowledgeContentProvider = _Class("_DKKnowledgeContentProvider")
_DKKeyValueStore = _Class("_DKKeyValueStore")
_CDInteractionAdvisorSimple = _Class("_CDInteractionAdvisorSimple")
_DKRateLimitPolicyEnforcer = _Class("_DKRateLimitPolicyEnforcer")
_CDSiriLearningSettings = _Class("_CDSiriLearningSettings")
_DKEntityCategory = _Class("_DKEntityCategory")
_DKAlarmStateCategory = _Class("_DKAlarmStateCategory")
_DKAirPlayPredictionCategory = _Class("_DKAirPlayPredictionCategory")
_DKAudioRouteCategory = _Class("_DKAudioRouteCategory")
_DKRoutineLocationCategory = _Class("_DKRoutineLocationCategory")
_DKMotionCategory = _Class("_DKMotionCategory")
_DKUIOrientationCategory = _Class("_DKUIOrientationCategory")
_DKTristateCategory = _Class("_DKTristateCategory")
_DKBoolCategory = _Class("_DKBoolCategory")
_DKAnyIntegerCategory = _Class("_DKAnyIntegerCategory")
_DKCustomCategory = _Class("_DKCustomCategory")
_DKPredictionQueryFeedback = _Class("_DKPredictionQueryFeedback")
_CDInteractionDataCollectionTask = _Class("_CDInteractionDataCollectionTask")
_CDInteractionDataCollectionSession = _Class("_CDInteractionDataCollectionSession")
_DKSync3Policy = _Class("_DKSync3Policy")
_DKSync3FeaturePolicy = _Class("_DKSync3FeaturePolicy")
_DKSync3TransportPolicy = _Class("_DKSync3TransportPolicy")
_CDInteractionStore = _Class("_CDInteractionStore")
_DKSyncWindow = _Class("_DKSyncWindow")
_CDAttachment = _Class("_CDAttachment")
_CDComplications = _Class("_CDComplications")
_CDPaths = _Class("_CDPaths")
_DKBatteryPercentageQuantity = _Class("_DKBatteryPercentageQuantity")
_DKAnyDoubleQuantity = _Class("_DKAnyDoubleQuantity")
_DKCustomQuantity = _Class("_DKCustomQuantity")
_CDContactStatistics = _Class("_CDContactStatistics")
_CDContact = _Class("_CDContact")
CDTrendLogic = _Class("CDTrendLogic")
_DKPrivacyPolicyEnforcer = _Class("_DKPrivacyPolicyEnforcer")
_CDSpotlightItemUtils = _Class("_CDSpotlightItemUtils")
_CDInBedDetector = _Class("_CDInBedDetector")
_CDInBedDetection = _Class("_CDInBedDetection")
_CDPModelTuning = _Class("_CDPModelTuning")
_CDPModelTuningState = _Class("_CDPModelTuningState")
_DKPreferences = _Class("_DKPreferences")
_CDDiagnosticDataReporter = _Class("_CDDiagnosticDataReporter")
_DKCKError = _Class("_DKCKError")
_CDPSimpleModel = _Class("_CDPSimpleModel")
_DKEventIntersection = _Class("_DKEventIntersection")
_CDInteractionPolicy = _Class("_CDInteractionPolicy")
_CDServerRequest = _Class("_CDServerRequest")
_CDPeriodicScheduler = _Class("_CDPeriodicScheduler")
_CDPeriodicSchedulerJob = _Class("_CDPeriodicSchedulerJob")
_DKContentProviderCache = _Class("_DKContentProviderCache")
_DKMetadataHomeAppView = _Class("_DKMetadataHomeAppView")
_DKMetadataHomeKitAccessoryControl = _Class("_DKMetadataHomeKitAccessoryControl")
_DKMetadataHomeKitScene = _Class("_DKMetadataHomeKitScene")
_CDSpotlightEventIndexerDataSource = _Class("_CDSpotlightEventIndexerDataSource")
_CDSpotlightIntentIndexerDataSource = _Class("_CDSpotlightIntentIndexerDataSource")
_DKFamilyPredictionMetadataKey = _Class("_DKFamilyPredictionMetadataKey")
_DKBehavioralRuleFeaturesMetadataKey = _Class("_DKBehavioralRuleFeaturesMetadataKey")
_DKDeviceStandbyTimerMetadataKey = _Class("_DKDeviceStandbyTimerMetadataKey")
_DKDeviceActivityLevelFeedbackMetadataKey = _Class(
"_DKDeviceActivityLevelFeedbackMetadataKey"
)
_DKDeviceIsPluggedInMetadataKey = _Class("_DKDeviceIsPluggedInMetadataKey")
_DKDeviceBatteryPercentageMetadataKey = _Class("_DKDeviceBatteryPercentageMetadataKey")
_DKTombstoneMetadataKey = _Class("_DKTombstoneMetadataKey")
_DKNotificationUsageMetadataKey = _Class("_DKNotificationUsageMetadataKey")
_DKRelevantShortcutMetadataKey = _Class("_DKRelevantShortcutMetadataKey")
_DKSafariHistoryMetadataKey = _Class("_DKSafariHistoryMetadataKey")
_DKPeopleSuggesterOutputForSiriNLMetadataKey = _Class(
"_DKPeopleSuggesterOutputForSiriNLMetadataKey"
)
_DKMapsShareEtaFeedbackMetadataKey = _Class("_DKMapsShareEtaFeedbackMetadataKey")
_DKShareSheetSuggestLessFeedbackMetadataKey = _Class(
"_DKShareSheetSuggestLessFeedbackMetadataKey"
)
_DKShareSheetFeedbackMetadataKey = _Class("_DKShareSheetFeedbackMetadataKey")
_DKSearchFeedbackMetadataKey = _Class("_DKSearchFeedbackMetadataKey")
_DKDiscoverabilityUsageMetadataKey = _Class("_DKDiscoverabilityUsageMetadataKey")
_DKDiscoverabilitySignalsMetadataKey = _Class("_DKDiscoverabilitySignalsMetadataKey")
_DKSiriIntentEventMetadataKey = _Class("_DKSiriIntentEventMetadataKey")
_DKSiriServiceMetadataKey = _Class("_DKSiriServiceMetadataKey")
_DKDeviceIdMetadataKey = _Class("_DKDeviceIdMetadataKey")
_DKDebugMetadataKey = _Class("_DKDebugMetadataKey")
_DKPeriodMetadataKey = _Class("_DKPeriodMetadataKey")
_DKCallMetadataKey = _Class("_DKCallMetadataKey")
_DKBluetoothMetadataKey = _Class("_DKBluetoothMetadataKey")
_DKOrientationMetadataKey = _Class("_DKOrientationMetadataKey")
_DKBatterySaverMetadataKey = _Class("_DKBatterySaverMetadataKey")
_DKBacklightMetadataKey = _Class("_DKBacklightMetadataKey")
_DKBulletinBoardMetadataKey = _Class("_DKBulletinBoardMetadataKey")
_DKStarkMetadataKey = _Class("_DKStarkMetadataKey")
_DKMicroLocationMetadataKey = _Class("_DKMicroLocationMetadataKey")
_DKLocationMetadataKey = _Class("_DKLocationMetadataKey")
_DKMotionMetadataKey = _Class("_DKMotionMetadataKey")
_DKCalendarMetadataKey = _Class("_DKCalendarMetadataKey")
_DKTimerMetadataKey = _Class("_DKTimerMetadataKey")
_DKAlarmMetadataKey = _Class("_DKAlarmMetadataKey")
_DKAirPlayPredictionMetadataKey = _Class("_DKAirPlayPredictionMetadataKey")
_DKNowPlayingMetadataKey = _Class("_DKNowPlayingMetadataKey")
_DKAudioMetadataKey = _Class("_DKAudioMetadataKey")
_DKAppInstallMetadataKey = _Class("_DKAppInstallMetadataKey")
_DKLocationApplicationActivityMetadataKey = _Class(
"_DKLocationApplicationActivityMetadataKey"
)
_DKApplicationActivityMetadataKey = _Class("_DKApplicationActivityMetadataKey")
_DKAppClipUsageMetadataKey = _Class("_DKAppClipUsageMetadataKey")
_DKApplicationMetadataKey = _Class("_DKApplicationMetadataKey")
_DKSource = _Class("_DKSource")
_DKObserverEntry = _Class("_DKObserverEntry")
_CDObservationCenter = _Class("_CDObservationCenter")
CDDevice = _Class("CDDevice")
_CDQueryInteractionAdvisor = _Class("_CDQueryInteractionAdvisor")
_CDCloudFamilyPETDataCollectionTask = _Class("_CDCloudFamilyPETDataCollectionTask")
_CDCloudFamilyPETDataCollectionSession = _Class(
"_CDCloudFamilyPETDataCollectionSession"
)
_DKSyncType = _Class("_DKSyncType")
_DKEventStream = _Class("_DKEventStream")
_CDInteractionHarvester = _Class("_CDInteractionHarvester")
_DKPhotosMetadataKeys = _Class("_DKPhotosMetadataKeys")
_DKPhotosEventStreams = _Class("_DKPhotosEventStreams")
_CDRateLimiter = _Class("_CDRateLimiter")
_CDRateAndTotalLimiter = _Class("_CDRateAndTotalLimiter")
_CDStringTokenizer = _Class("_CDStringTokenizer")
_DKSyncHistory = _Class("_DKSyncHistory")
_CDPPredictionResult = _Class("_CDPPredictionResult")
_CDEventStreamProperties = _Class("_CDEventStreamProperties")
_CDInteraction = _Class("_CDInteraction")
_CDStats = _Class("_CDStats")
_CDHashUtilities = _Class("_CDHashUtilities")
_CDPSimpleModelParameterManager = _Class("_CDPSimpleModelParameterManager")
_CDPSimpleModelParameterManagerAccountState = _Class(
"_CDPSimpleModelParameterManagerAccountState"
)
_CDPSimpleModelParameterManagerTuningValue = _Class(
"_CDPSimpleModelParameterManagerTuningValue"
)
_DKTombstonePolicy = _Class("_DKTombstonePolicy")
_DKTombstoneRequirement = _Class("_DKTombstoneRequirement")
_DKAppUsageTombstoneRequirement = _Class("_DKAppUsageTombstoneRequirement")
_DKEligibleForPredictionTombstoneRequirement = _Class(
"_DKEligibleForPredictionTombstoneRequirement"
)
_DKAppActivityStreamTombstoneRequirement = _Class(
"_DKAppActivityStreamTombstoneRequirement"
)
_DKAppIntentsStreamTombstoneRequirement = _Class(
"_DKAppIntentsStreamTombstoneRequirement"
)
_DKSyncStreamsTombstoneRequirement = _Class("_DKSyncStreamsTombstoneRequirement")
_CDSleepPredictor = _Class("_CDSleepPredictor")
_DKSystemEventStreams = _Class("_DKSystemEventStreams")
_DKTombstoneNotifier = _Class("_DKTombstoneNotifier")
_CDSerializableKeyedData = _Class("_CDSerializableKeyedData")
_CDSharedMemoryKeyValueStore = _Class("_CDSharedMemoryKeyValueStore")
_DKSyncUpCloudKitKnowledgeStorage = _Class("_DKSyncUpCloudKitKnowledgeStorage")
_DKSyncDownCloudKitKnowledgeStorage = _Class("_DKSyncDownCloudKitKnowledgeStorage")
_DKSyncCloudKitKnowledgeStorage = _Class("_DKSyncCloudKitKnowledgeStorage")
_DKCloudUtilities = _Class("_DKCloudUtilities")
_CDContactResolver = _Class("_CDContactResolver")
_CDPrivacyPolicy = _Class("_CDPrivacyPolicy")
_DKKnowledgeStore = _Class("_DKKnowledgeStore")
_DKCompatibility = _Class("_DKCompatibility")
_DKSyncDeletedEventIDs = _Class("_DKSyncDeletedEventIDs")
_DKChangeSet = _Class("_DKChangeSet")
_DKLocationHistoryCache = _Class("_DKLocationHistoryCache")
_DKCachedLocationVisit = _Class("_DKCachedLocationVisit")
_DKObject = _Class("_DKObject")
_DKEvent = _Class("_DKEvent")
_DKRelation = _Class("_DKRelation")
_DKQuantity = _Class("_DKQuantity")
_DKCategory = _Class("_DKCategory")
_DKIdentifier = _Class("_DKIdentifier")
_DKSyncPolicy = _Class("_DKSyncPolicy")
_DKSyncToggle = _Class("_DKSyncToggle")
_DKEventUtils = _Class("_DKEventUtils")
_DKEventDateRef = _Class("_DKEventDateRef")
_CDAdvisedInteraction = _Class("_CDAdvisedInteraction")
_CDInteractionAdvisor = _Class("_CDInteractionAdvisor")
_CDEventStreams = _Class("_CDEventStreams")
_DKSync2State = _Class("_DKSync2State")
_DKSyncContextObject = _Class("_DKSyncContextObject")
_DKSyncPeerStatusTracker = _Class("_DKSyncPeerStatusTracker")
_DKSync2Coordinator = _Class("_DKSync2Coordinator")
_CDPerfMetricFamily = _Class("_CDPerfMetricFamily")
_CDPerfMetric = _Class("_CDPerfMetric")
_CDMutablePerfMetric = _Class("_CDMutablePerfMetric")
_DKSync2Policy = _Class("_DKSync2Policy")
_DKMetadata = _Class("_DKMetadata")
_CDSleepForAutoSu = _Class("_CDSleepForAutoSu")
_CDAutoSuCache = _Class("_CDAutoSuCache")
_CDAutoSuConfig = _Class("_CDAutoSuConfig")
_CDAirPlayDataCollectionTask = _Class("_CDAirPlayDataCollectionTask")
_CDAirPlayDataCollectionSession = _Class("_CDAirPlayDataCollectionSession")
_CDDateQuantizer = _Class("_CDDateQuantizer")
_DKMetadataPersistenceLookupTable = _Class("_DKMetadataPersistenceLookupTable")
_CDInteractionCache = _Class("_CDInteractionCache")
_DKPrivacyMaintainer = _Class("_DKPrivacyMaintainer")
HomeControlAnalysisPETCoreBehaviorAnalysisEvent = _Class(
"HomeControlAnalysisPETCoreBehaviorAnalysisEvent"
)
InteractionAnalysisPETInteractionEvents = _Class(
"InteractionAnalysisPETInteractionEvents"
)
_DKPRMetadataEntry = _Class("_DKPRMetadataEntry")
_DKPRChangeSet = _Class("_DKPRChangeSet")
_DKPRMetadata = _Class("_DKPRMetadata")
_DKPRSource = _Class("_DKPRSource")
_DKPREvent = _Class("_DKPREvent")
CloudFamilyAnalysisPETContactEvent = _Class("CloudFamilyAnalysisPETContactEvent")
_DKPRValueType = _Class("_DKPRValueType")
_DKPREventData = _Class("_DKPREventData")
CloudFamilyAnalysisPETContactEvents = _Class("CloudFamilyAnalysisPETContactEvents")
_DKPRCompressedChangeSet = _Class("_DKPRCompressedChangeSet")
InteractionAnalysisPETInteractionEvent = _Class(
"InteractionAnalysisPETInteractionEvent"
)
_DKPRValue = _Class("_DKPRValue")
_DKPRStream = _Class("_DKPRStream")
CloudFamilyAnalysisPETCloudFamilyAnalysisEvent = _Class(
"CloudFamilyAnalysisPETCloudFamilyAnalysisEvent"
)
_DKSyncPeerMO = _Class("_DKSyncPeerMO")
_DKStructuredMetadataMO = _Class("_DKStructuredMetadataMO")
_DKKeyValueMO = _Class("_DKKeyValueMO")
_CDInteractionRecord = _Class("_CDInteractionRecord")
_DKChangeSetMO = _Class("_DKChangeSetMO")
_CDContactRecord = _Class("_CDContactRecord")
_CDKeywordRecord = _Class("_CDKeywordRecord")
_DKRelationMO = _Class("_DKRelationMO")
_CDAttachmentRecord = _Class("_CDAttachmentRecord")
_CDVersionRecord = _Class("_CDVersionRecord")
_DKCustomMetadataMO = _Class("_DKCustomMetadataMO")
_DKObjectMO = _Class("_DKObjectMO")
_DKIdentifierMO = _Class("_DKIdentifierMO")
_DKCategoryMO = _Class("_DKCategoryMO")
_DKEventMO = _Class("_DKEventMO")
_DKQuantityMO = _Class("_DKQuantityMO")
_DKSourceMO = _Class("_DKSourceMO")
_DKHistogramValueMO = _Class("_DKHistogramValueMO")
_DKHistogramMO = _Class("_DKHistogramMO")
_DKPredictionTimeline = _Class("_DKPredictionTimeline")
_DKCategoryCache = _Class("_DKCategoryCache")
_DKEventStreamCache = _Class("_DKEventStreamCache")
_DKSyncOperation = _Class("_DKSyncOperation")
_DKSyncBlockOperation = _Class("_DKSyncBlockOperation")
_DKSyncCompositeOperation = _Class("_DKSyncCompositeOperation")
_DKPerformSyncUpHistoryOperation = _Class("_DKPerformSyncUpHistoryOperation")
_DKPerformSyncDownPeerAdditionsOperation = _Class(
"_DKPerformSyncDownPeerAdditionsOperation"
)
_DKPerformSyncUpHistoryDeletionsOperation = _Class(
"_DKPerformSyncUpHistoryDeletionsOperation"
)
_DKPerformSyncUpHistoryAdditionsOperation = _Class(
"_DKPerformSyncUpHistoryAdditionsOperation"
)
_DKPerformSyncDownPeerDeletionsOperation = _Class(
"_DKPerformSyncDownPeerDeletionsOperation"
)
_DKSyncBlockCompositeOperation = _Class("_DKSyncBlockCompositeOperation")
_DKPerformSyncUpChangeOperation = _Class("_DKPerformSyncUpChangeOperation")
_DKPerformSyncDownOperation = _Class("_DKPerformSyncDownOperation")
_DKPerformSyncDownPeerOperation = _Class("_DKPerformSyncDownPeerOperation")
_DKModifyRecordsOperation = _Class("_DKModifyRecordsOperation")
_DKFetchRecordZoneChangesOperation = _Class("_DKFetchRecordZoneChangesOperation")
_DKQueryOperation = _Class("_DKQueryOperation")
_DKFetchDatabaseChangesOperation = _Class("_DKFetchDatabaseChangesOperation")
_DKFetchRecordZonesOperation = _Class("_DKFetchRecordZonesOperation")
_DKModifyRecordZonesOperation = _Class("_DKModifyRecordZonesOperation")
_DKQuery = _Class("_DKQuery")
_DKHistogramQuery = _Class("_DKHistogramQuery")
_DKEventQuery = _Class("_DKEventQuery")
_DKPredictionQuery = _Class("_DKPredictionQuery")
| 51.065022 | 87 | 0.855851 |
bd79794a6debaafa31d8de1cd0dc02ca46919297 | 246 | py | Python | groupby_index.test.py | vi117/my-nsmc-study | f0658bae31fc560c3dbce5800dda8e9aff7bc79f | [
"MIT-0"
] | null | null | null | groupby_index.test.py | vi117/my-nsmc-study | f0658bae31fc560c3dbce5800dda8e9aff7bc79f | [
"MIT-0"
] | null | null | null | groupby_index.test.py | vi117/my-nsmc-study | f0658bae31fc560c3dbce5800dda8e9aff7bc79f | [
"MIT-0"
] | null | null | null | import unittest
from groupby_index import *
class Test(unittest.TestCase):
def test_padding_array(self):
self.assertEqual([*map(lambda x:[*x],groupby_index([1,2,3,4],2))],[[1,2],[3,4]])
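# Illustrative note (not part of the original test): groupby_index comes from a
# module not shown here; judging by the assertion above it chunks an iterable
# into groups of the given size, so groupby_index([1, 2, 3, 4], 2) yields groups
# that list() as [1, 2] and [3, 4]. A minimal sketch under that assumption:
#
#   def groupby_index(iterable, n):
#       args = [iter(iterable)] * n
#       return zip(*args)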
if __name__ == '__main__':
unittest.main() | 27.333333 | 88 | 0.670732 |
449c142c90b87c84999cd49a9ec13a6556f20651 | 21,892 | py | Python | Tests/test_SeqRecord.py | pragyabansal02/biopython | b2cf2be5dd7dd2869af0fe8a5e1f92e0f6b0b493 | [
"BSD-3-Clause"
] | 2 | 2019-10-25T18:20:34.000Z | 2019-10-28T15:26:40.000Z | Tests/test_SeqRecord.py | pragyabansal02/biopython | b2cf2be5dd7dd2869af0fe8a5e1f92e0f6b0b493 | [
"BSD-3-Clause"
] | null | null | null | Tests/test_SeqRecord.py | pragyabansal02/biopython | b2cf2be5dd7dd2869af0fe8a5e1f92e0f6b0b493 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2009-2017 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""SeqFeature related tests for SeqRecord objects from Bio.SeqIO.
Initially this takes matched tests of GenBank and FASTA files from the NCBI
and confirms they are consistent using our different parsers.
"""
import unittest
from Bio import SeqIO
from Bio.Alphabet import generic_dna, generic_protein
from Bio.Seq import Seq, MutableSeq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation, ExactPosition
from Bio.SeqFeature import WithinPosition, BeforePosition, AfterPosition, OneOfPosition
class SeqRecordCreation(unittest.TestCase):
"""Test basic creation of SeqRecords."""
def test_annotations(self):
"""Pass in annotations to SeqRecords."""
rec = SeqRecord(Seq("ACGT", generic_dna),
id="Test", name="Test", description="Test")
self.assertEqual(rec.annotations, {})
rec = SeqRecord(Seq("ACGT", generic_dna),
id="Test", name="Test", description="Test",
annotations={"test": ["a test"]})
self.assertEqual(rec.annotations["test"], ["a test"])
def test_letter_annotations(self):
"""Pass in letter annotations to SeqRecords."""
rec = SeqRecord(Seq("ACGT", generic_dna),
id="Test", name="Test", description="Test")
self.assertEqual(rec.annotations, {})
rec = SeqRecord(Seq("ACGT", generic_dna),
id="Test", name="Test", description="Test",
letter_annotations={"test": [1, 2, 3, 4]})
self.assertEqual(rec.letter_annotations["test"], [1, 2, 3, 4])
# Now try modifying it to a bad value...
try:
rec.letter_annotations["bad"] = "abc"
self.fail("Adding a bad letter_annotation should fail!")
except (TypeError, ValueError) as e:
pass
# Now try setting it afterwards to a bad value...
rec = SeqRecord(Seq("ACGT", generic_dna),
id="Test", name="Test", description="Test")
try:
rec.letter_annotations = {"test": [1, 2, 3]}
self.fail("Changing to bad letter_annotations should fail!")
except (TypeError, ValueError) as e:
pass
# Now try setting it at creation time to a bad value...
try:
rec = SeqRecord(Seq("ACGT", generic_dna),
id="Test", name="Test", description="Test",
letter_annotations={"test": [1, 2, 3]})
self.fail("Wrong length letter_annotations should fail!")
except (TypeError, ValueError) as e:
pass
def test_replacing_seq(self):
"""Replacing .seq if .letter_annotation present."""
rec = SeqRecord(Seq("ACGT", generic_dna),
id="Test", name="Test", description="Test",
letter_annotations={"example": [1, 2, 3, 4]})
try:
rec.seq = Seq("ACGTACGT", generic_dna)
self.fail("Changing .seq length with letter_annotations present should fail!")
except ValueError as e:
self.assertEqual(str(e), "You must empty the letter annotations first!")
# Check we can replace IF the length is the same
self.assertEqual(str(rec.seq), "ACGT")
self.assertEqual(rec.letter_annotations, {"example": [1, 2, 3, 4]})
rec.seq = Seq("NNNN", generic_dna)
self.assertEqual(str(rec.seq), "NNNN")
self.assertEqual(rec.letter_annotations, {"example": [1, 2, 3, 4]})
def test_valid_id(self):
with self.assertRaises(TypeError):
SeqRecord(Seq("ACGT", generic_dna), id={})
def test_valid_name(self):
with self.assertRaises(TypeError):
SeqRecord(Seq("ACGT", generic_dna), name={})
def test_valid_description(self):
with self.assertRaises(TypeError):
SeqRecord(Seq("ACGT", generic_dna), description={})
def test_valid_dbxrefs(self):
with self.assertRaises(TypeError):
SeqRecord(Seq("ACGT", generic_dna), dbxrefs={})
def test_valid_annotations(self):
with self.assertRaises(TypeError):
SeqRecord(Seq("ACGT", generic_dna), annotations=[])
def test_valid_features(self):
with self.assertRaises(TypeError):
SeqRecord(Seq("ACGT", generic_dna), features={})
class SeqRecordMethods(unittest.TestCase):
"""Test SeqRecord methods."""
def setUp(self):
f0 = SeqFeature(FeatureLocation(0, 26), type="source",
qualifiers={"mol_type": ["fake protein"]})
f1 = SeqFeature(FeatureLocation(0, ExactPosition(10)))
f2 = SeqFeature(FeatureLocation(WithinPosition(12, left=12, right=15), BeforePosition(22)))
f3 = SeqFeature(FeatureLocation(AfterPosition(16),
OneOfPosition(26, [ExactPosition(25), AfterPosition(26)])))
self.record = SeqRecord(Seq("ABCDEFGHIJKLMNOPQRSTUVWZYX", generic_protein),
id="TestID", name="TestName", description="TestDescr",
dbxrefs=["TestXRef"], annotations={"k": "v"},
letter_annotations={"fake": "X" * 26},
features=[f0, f1, f2, f3])
def test_iter(self):
for amino in self.record:
self.assertEqual("A", amino)
break
def test_contains(self):
self.assertIn(Seq("ABC", generic_protein), self.record)
def test_str(self):
expected = """
ID: TestID
Name: TestName
Description: TestDescr
Database cross-references: TestXRef
Number of features: 4
/k=v
Per letter annotation for: fake
Seq('ABCDEFGHIJKLMNOPQRSTUVWZYX', ProteinAlphabet())"""
self.assertEqual(expected.lstrip(), str(self.record))
def test_repr(self):
expected = "SeqRecord(seq=Seq('ABCDEFGHIJKLMNOPQRSTUVWZYX', ProteinAlphabet()), " \
"id='TestID', name='TestName', description='TestDescr', dbxrefs=['TestXRef'])"
self.assertEqual(expected, repr(self.record))
def test_format(self):
expected = ">TestID TestDescr\nABCDEFGHIJKLMNOPQRSTUVWZYX\n"
self.assertEqual(expected, self.record.format("fasta"))
def test_format_spaces(self):
rec = SeqRecord(Seq("ABCDEFGHIJKLMNOPQRSTUVWZYX", generic_protein),
id="TestID", name="TestName", description="TestDescr")
rec.description = "TestDescr with5spaces"
expected = ">TestID TestDescr with5spaces\nABCDEFGHIJKLMNOPQRSTUVWZYX\n"
self.assertEqual(expected, rec.format("fasta"))
def test_upper(self):
self.assertEqual("ABCDEFGHIJKLMNOPQRSTUVWZYX", str(self.record.lower().upper().seq))
def test_lower(self):
self.assertEqual("abcdefghijklmnopqrstuvwzyx", str(self.record.lower().seq))
def test_slicing(self):
self.assertEqual("B", self.record[1])
self.assertEqual("BC", self.record[1:3].seq)
with self.assertRaises(ValueError):
c = self.record["a"].seq
def test_slice_variants(self):
"""Simple slices using different start/end values."""
for start in list(range(-30, 30)) + [None]:
for end in list(range(-30, 30)) + [None]:
if start is None and end is None:
continue
rec = self.record[start:end]
seq = self.record.seq[start:end]
seq_str = str(self.record.seq)[start:end]
self.assertEqual(seq_str, str(seq))
self.assertEqual(seq_str, str(rec.seq))
self.assertEqual("X" * len(seq_str), rec.letter_annotations["fake"])
def test_slice_simple(self):
"""Simple slice."""
rec = self.record
self.assertEqual(len(rec), 26)
left = rec[:10]
self.assertEqual(str(left.seq), str(rec.seq[:10]))
right = rec[-10:]
self.assertEqual(str(right.seq), str(rec.seq[-10:]))
mid = rec[12:22]
self.assertEqual(str(mid.seq), str(rec.seq[12:22]))
for sub in [left, right, mid]:
self.assertEqual(len(sub), 10)
self.assertEqual(sub.id, "TestID")
self.assertEqual(sub.name, "TestName")
self.assertEqual(sub.description, "TestDescr")
self.assertEqual(sub.letter_annotations, {"fake": "X" * 10})
self.assertEqual(sub.dbxrefs, []) # May change this...
self.assertEqual(sub.annotations, {}) # May change this...
self.assertEqual(len(sub.features), 1)
# By construction, each feature matches the full sliced region:
self.assertEqual(str(sub.features[0].extract(sub.seq)), str(sub.seq))
self.assertEqual(sub.features[0].extract(str(sub.seq)), str(sub.seq))
def test_slice_zero(self):
"""Zero slice."""
rec = self.record
self.assertEqual(len(rec), 26)
self.assertEqual(len(rec[2:-2]), 22)
self.assertEqual(len(rec[5:2]), 0)
self.assertEqual(len(rec[5:2][2:-2]), 0)
def test_add_simple(self):
"""Simple addition."""
rec = self.record + self.record
self.assertEqual(len(rec), 52)
self.assertEqual(rec.id, "TestID")
self.assertEqual(rec.name, "TestName")
self.assertEqual(rec.description, "TestDescr")
self.assertEqual(rec.dbxrefs, ["TestXRef"])
self.assertEqual(rec.annotations, {"k": "v"})
self.assertEqual(rec.letter_annotations, {"fake": "X" * 52})
self.assertEqual(len(rec.features), 2 * len(self.record.features))
def test_add_seq(self):
"""Simple addition of Seq or string."""
for other in [Seq("BIO"), "BIO"]:
rec = self.record + other # will use SeqRecord's __add__ method
self.assertEqual(len(rec), 26 + 3)
self.assertEqual(str(rec.seq), str(self.record.seq) + "BIO")
self.assertEqual(rec.id, "TestID")
self.assertEqual(rec.name, "TestName")
self.assertEqual(rec.description, "TestDescr")
self.assertEqual(rec.dbxrefs, ["TestXRef"])
self.assertEqual(rec.annotations, {"k": "v"})
self.assertEqual(rec.letter_annotations, {})
self.assertEqual(len(rec.features), len(self.record.features))
self.assertEqual(rec.features[0].type, "source")
self.assertEqual(rec.features[0].location.nofuzzy_start, 0)
self.assertEqual(rec.features[0].location.nofuzzy_end, 26) # not +3
def test_add_seqrecord(self):
"""Simple left addition of SeqRecord from genbank file."""
other = SeqIO.read("GenBank/dbsource_wrap.gb", "gb")
other.dbxrefs = ["dummy"]
rec = self.record + other
self.assertEqual(len(rec), len(self.record) + len(other))
self.assertEqual(str(rec.seq), str(self.record.seq) + str(other.seq))
self.assertEqual(rec.id, "<unknown id>")
self.assertEqual(rec.name, "<unknown name>")
self.assertEqual(rec.description, "<unknown description>")
self.assertEqual(rec.dbxrefs, ["TestXRef", "dummy"])
self.assertEqual(len(rec.annotations), 0)
self.assertEqual(len(rec.letter_annotations), 0)
self.assertEqual(len(rec.features),
len(self.record.features) + len(other.features))
self.assertEqual(rec.features[0].type, "source")
self.assertEqual(rec.features[0].location.nofuzzy_start, 0)
self.assertEqual(rec.features[0].location.nofuzzy_end, len(self.record)) # not +3
i = len(self.record.features)
self.assertEqual(rec.features[i].type, "source")
self.assertEqual(rec.features[i].location.nofuzzy_start, len(self.record))
self.assertEqual(rec.features[i].location.nofuzzy_end, len(rec))
def test_add_seq_left(self):
"""Simple left addition of Seq or string."""
for other in [Seq("BIO"), "BIO"]:
rec = other + self.record # will use SeqRecord's __radd__ method
self.assertEqual(len(rec), 26 + 3)
self.assertEqual(str(rec.seq), "BIO" + str(self.record.seq))
self.assertEqual(rec.id, "TestID")
self.assertEqual(rec.name, "TestName")
self.assertEqual(rec.description, "TestDescr")
self.assertEqual(rec.dbxrefs, ["TestXRef"])
self.assertEqual(rec.annotations, {"k": "v"})
self.assertEqual(rec.letter_annotations, {})
self.assertEqual(len(rec.features), len(self.record.features))
self.assertEqual(rec.features[0].type, "source")
self.assertEqual(rec.features[0].location.nofuzzy_start, 3)
self.assertEqual(rec.features[0].location.nofuzzy_end, 26 + 3)
def test_slice_add_simple(self):
"""Simple slice and add."""
for cut in range(27):
rec = self.record[:cut] + self.record[cut:]
self.assertEqual(str(rec.seq), str(self.record.seq))
self.assertEqual(len(rec), 26)
self.assertEqual(rec.id, "TestID")
self.assertEqual(rec.name, "TestName")
self.assertEqual(rec.description, "TestDescr")
self.assertEqual(rec.dbxrefs, []) # May change this...
self.assertEqual(rec.annotations, {}) # May change this...
self.assertEqual(rec.letter_annotations, {"fake": "X" * 26})
self.assertTrue(len(rec.features) <= len(self.record.features))
def test_slice_add_shift(self):
"""Simple slice and add to shift."""
for cut in range(27):
rec = self.record[cut:] + self.record[:cut]
self.assertEqual(str(rec.seq), str(self.record.seq[cut:] + self.record.seq[:cut]))
self.assertEqual(len(rec), 26)
self.assertEqual(rec.id, "TestID")
self.assertEqual(rec.name, "TestName")
self.assertEqual(rec.description, "TestDescr")
self.assertEqual(rec.dbxrefs, []) # May change this...
self.assertEqual(rec.annotations, {}) # May change this...
self.assertEqual(rec.letter_annotations, {"fake": "X" * 26})
self.assertTrue(len(rec.features) <= len(self.record.features))
class SeqRecordMethodsMore(unittest.TestCase):
"""Test SeqRecord methods cont."""
# This class does not have a setUp defining self.record
def test_reverse_complement_seq(self):
s = SeqRecord(Seq("ACTG"), id="TestID", name="TestName",
description="TestDescription", dbxrefs=["TestDbxrefs"],
features=[SeqFeature(FeatureLocation(0, 3), type="Site")],
annotations={"organism": "bombyx"},
letter_annotations={"test": "abcd"})
rc = s.reverse_complement(id=True, name=True, description=True,
dbxrefs=True, features=True, annotations=True,
letter_annotations=True)
self.assertEqual("CAGT", str(rc.seq))
self.assertEqual("TestID", rc.id)
self.assertEqual("TestID", s.reverse_complement(id="TestID").id)
self.assertEqual("TestName", rc.name)
self.assertEqual("TestName", s.reverse_complement(name="TestName").name)
self.assertEqual("TestDescription", rc.description)
self.assertEqual("TestDescription",
s.reverse_complement(description="TestDescription").description)
self.assertEqual(["TestDbxrefs"], rc.dbxrefs)
self.assertEqual(["TestDbxrefs"],
s.reverse_complement(dbxrefs=["TestDbxrefs"]).dbxrefs)
self.assertEqual("[SeqFeature(FeatureLocation(ExactPosition(1), ExactPosition(4)), type='Site')]",
repr(rc.features))
rc2 = s.reverse_complement(features=[SeqFeature(FeatureLocation(1, 4), type="Site")])
self.assertEqual("[SeqFeature(FeatureLocation(ExactPosition(1), ExactPosition(4)), type='Site')]",
repr(rc2.features))
self.assertEqual({"organism": "bombyx"}, rc.annotations)
self.assertEqual({"organism": "bombyx"},
s.reverse_complement(annotations={"organism": "bombyx"}).annotations)
self.assertEqual({"test": "dcba"}, rc.letter_annotations)
self.assertEqual({"test": "abcd"},
s.reverse_complement(letter_annotations={"test": "abcd"}).letter_annotations)
def test_reverse_complement_mutable_seq(self):
s = SeqRecord(MutableSeq("ACTG"))
self.assertEqual("CAGT", str(s.reverse_complement().seq))
def test_translate(self):
s = SeqRecord(Seq("ATGGTGTAA"), id="TestID", name="TestName",
description="TestDescription", dbxrefs=["TestDbxrefs"],
features=[SeqFeature(FeatureLocation(0, 3), type="Site")],
annotations={"organism": "bombyx"},
letter_annotations={"test": "abcdefghi"})
t = s.translate()
self.assertEqual(t.seq, "MV*")
self.assertEqual(t.id, "<unknown id>")
self.assertEqual(t.name, "<unknown name>")
self.assertEqual(t.description, "<unknown description>")
self.assertFalse(t.dbxrefs)
self.assertFalse(t.features)
self.assertFalse(t.annotations)
self.assertFalse(t.letter_annotations)
t = s.translate(cds=True, id=True, name=True, description=True,
dbxrefs=True, annotations=True)
self.assertEqual(t.seq, "MV")
self.assertEqual(t.id, "TestID")
self.assertEqual(t.name, "TestName")
self.assertEqual(t.description, "TestDescription")
self.assertEqual(t.dbxrefs, ["TestDbxrefs"])
self.assertFalse(t.features)
self.assertEqual(t.annotations, {"organism": "bombyx"})
self.assertFalse(t.letter_annotations)
def test_lt_exception(self):
def lt():
SeqRecord(Seq("A")) < SeqRecord(Seq("A"))
self.assertRaises(NotImplementedError, lt)
def test_le_exception(self):
def le():
SeqRecord(Seq("A")) <= SeqRecord(Seq("A"))
self.assertRaises(NotImplementedError, le)
def test_eq_exception(self):
def equality():
SeqRecord(Seq("A")) == SeqRecord(Seq("A"))
self.assertRaises(NotImplementedError, equality)
def test_ne_exception(self):
def notequality():
SeqRecord(Seq("A")) != SeqRecord(Seq("A"))
self.assertRaises(NotImplementedError, notequality)
def test_gt_exception(self):
def gt():
SeqRecord(Seq("A")) > SeqRecord(Seq("A"))
self.assertRaises(NotImplementedError, gt)
def test_ge_exception(self):
def ge():
SeqRecord(Seq("A")) >= SeqRecord(Seq("A"))
self.assertRaises(NotImplementedError, ge)
def test_hash_exception(self):
def hash1():
hash(SeqRecord(Seq("A")))
self.assertRaises(TypeError, hash1)
def hash2():
SeqRecord(Seq("A")).__hash__()
self.assertRaises(TypeError, hash2)
class TestTranslation(unittest.TestCase):
def setUp(self):
self.s = SeqRecord(Seq("ATGGTGTAA"), id="TestID", name="TestName",
description="TestDescription", dbxrefs=["TestDbxrefs"],
features=[SeqFeature(FeatureLocation(0, 3), type="Site")],
annotations={"organism": "bombyx"},
letter_annotations={"test": "abcdefghi"})
def test_defaults(self):
t = self.s.translate()
self.assertEqual(t.seq, "MV*")
self.assertEqual(t.id, "<unknown id>")
self.assertEqual(t.name, "<unknown name>")
self.assertEqual(t.description, "<unknown description>")
self.assertFalse(t.dbxrefs)
self.assertFalse(t.features)
self.assertFalse(t.annotations)
self.assertFalse(t.letter_annotations)
def test_preserve(self):
t = self.s.translate(cds=True, id=True, name=True, description=True,
dbxrefs=True, annotations=True)
self.assertEqual(t.seq, "MV")
self.assertEqual(t.id, "TestID")
self.assertEqual(t.name, "TestName")
self.assertEqual(t.description, "TestDescription")
self.assertEqual(t.dbxrefs, ["TestDbxrefs"])
self.assertFalse(t.features)
self.assertEqual(t.annotations, {"organism": "bombyx"})
self.assertFalse(t.letter_annotations)
# Should not preserve these
self.assertRaises(TypeError, self.s.translate, features=True)
self.assertRaises(TypeError, self.s.translate, letter_annotations=True)
def test_new_annot(self):
t = self.s.translate(1, to_stop=True, gap="-",
id="Foo", name="Bar", description="Baz", dbxrefs=["Nope"],
features=[SeqFeature(FeatureLocation(0, 3), type="Site")],
annotations={"a": "team"},
letter_annotations={"aa": ["Met", "Val"]})
self.assertEqual(t.seq, "MV")
self.assertEqual(t.id, "Foo")
self.assertEqual(t.name, "Bar")
self.assertEqual(t.description, "Baz")
self.assertEqual(t.dbxrefs, ["Nope"])
self.assertEqual(len(t.features), 1)
self.assertEqual(t.annotations, {"a": "team"})
self.assertEqual(t.letter_annotations, {"aa": ["Met", "Val"]})
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 45.138144 | 106 | 0.60497 |
d40526058594481ae81398ae323b5c3603247bbc | 2,943 | py | Python | vespene/workers/ssh_agent.py | Conan-Kudo/vespene | 9e9977523f45586e1326ccd77d8cc0cb10591a07 | [
"Apache-2.0"
] | 680 | 2018-10-29T12:12:10.000Z | 2019-04-27T09:52:58.000Z | vespene/workers/ssh_agent.py | Conan-Kudo/vespene | 9e9977523f45586e1326ccd77d8cc0cb10591a07 | [
"Apache-2.0"
] | 110 | 2018-10-29T12:33:34.000Z | 2019-02-14T02:31:43.000Z | vespene/workers/ssh_agent.py | Conan-Kudo/vespene | 9e9977523f45586e1326ccd77d8cc0cb10591a07 | [
"Apache-2.0"
] | 92 | 2018-10-29T12:21:12.000Z | 2019-06-08T11:08:08.000Z | # Copyright 2018, Michael DeHaan LLC
# License: Apache License Version 2.0
# -------------------------------------------------------------------------
# ssh_agent.py - Vespene workers are run wrapped by 'ssh-agent' processes
# and the workers can use SSH keys configured per project to do SCM checkouts
# or use SSH-based automation. This is mostly handled right now
# through basic expect scripts and does support password-locked keys.
# --------------------------------------------------------------------------
import os
import tempfile
from vespene.common.logger import Logger
from vespene.workers import commands
LOG = Logger()
# =============================================================================
class SshAgentManager(object):
def __init__(self, builder, build):
self.builder = builder
self.build = build
self.project = self.build.project
self.tempfile_paths = []
def add_all_keys(self):
for access in self.project.ssh_keys.all():
self.add_key(access)
def add_key(self, access):
(_, keyfile) = tempfile.mkstemp()
answer_file = None
try:
fh = open(keyfile, "w")
private = access.get_private_key()
fh.write(private)
fh.close()
answer_file = None
if access.unlock_password:
LOG.debug("adding SSH key with passphrase!")
self.ssh_add_with_passphrase(keyfile, access.get_unlock_password())
else:
if ',ENCRYPTED' in private:
raise Exception("SSH key has a passphrase but an unlock password was not set. Aborting")
LOG.debug("adding SSH key without passphrase!")
self.ssh_add_without_passphrase(keyfile)
finally:
os.remove(keyfile)
if answer_file:
os.remove(answer_file)
def cleanup(self):
# remove SSH identities
LOG.debug("removing SSH identities")
commands.execute_command(self.build, "ssh-add -D", log_command=False, message_log=False, output_log=False)
def ssh_add_without_passphrase(self, keyfile):
LOG.debug(keyfile)
cmd = "ssh-add %s < /dev/null" % keyfile
commands.execute_command(self.build, cmd, env=None, log_command=False, message_log=False, output_log=False)
def ssh_add_with_passphrase(self, keyfile, passphrase):
(_, fname) = tempfile.mkstemp()
fh = open(fname, "w")
script = """
#!/usr/bin/expect -f
spawn ssh-add %s
expect "Enter passphrase*:"
send "%s\n";
expect "Identity added*"
interact
""" % (keyfile, passphrase)
fh.write(script)
fh.close()
commands.execute_command(self.build, "/usr/bin/expect -f %s" % fname, output_log=False, message_log=False)
os.remove(fname)
return fname
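# Illustrative usage sketch (added comment, not part of the original module);
# it only uses methods defined above, with `builder` and `build` standing in
# for the objects the worker already has:
#
#   agent_mgr = SshAgentManager(builder, build)
#   agent_mgr.add_all_keys()
#   try:
#       pass  # run the SCM checkout / automation under the loaded identities
#   finally:
#       agent_mgr.cleanup()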
| 34.623529 | 118 | 0.571526 |
60da9f19bc559026c8a2cb0e1ca0732442ddf6a5 | 8,991 | py | Python | RUN_data_analysis.py | Beat98/rl-ion-trap-tutorial | ea119d710fb01d7420e20c1d8947086f6ab7c770 | [
"MIT"
] | null | null | null | RUN_data_analysis.py | Beat98/rl-ion-trap-tutorial | ea119d710fb01d7420e20c1d8947086f6ab7c770 | [
"MIT"
] | null | null | null | RUN_data_analysis.py | Beat98/rl-ion-trap-tutorial | ea119d710fb01d7420e20c1d8947086f6ab7c770 | [
"MIT"
] | null | null | null | import time
import csv
import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
height = 1080
aspect = 4 / 3
dpi = 200
mpl.rcParams.update({
'text.usetex': False,
'font.family': 'serif',
'font.serif': 'cmr10',
'mathtext.fontset': 'cm',
'font.family': 'STIXGeneral',
'axes.unicode_minus': True,
'axes.labelsize': 12,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 10,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.major.top': 'on',
'xtick.major.bottom': 'on',
'ytick.major.left': 'on',
'ytick.major.right': 'on',
'xtick.top': True,
'ytick.right': True})
mpl.rcParams['figure.figsize'] = (height * aspect / dpi, height / dpi)
plt.rcParams['figure.dpi'] = dpi
from agent.Universal_Agent import UniversalAgent
from ecm.Universal_ECM import UniversalECM
#from ecm.HValuesToProbabilities import linear_probabilities, softmax
from env.IonTrap_env import IonTrapEnv
def run(adj_matrix, KWARGS, eta, beta, num_episodes, avg):
full_data_steps = np.zeros((avg, num_episodes))
# full_reward_list = np.zeros((avg, num_episodes))
laser_seq_list = []
time_list = []
for i in range(avg):
start_time = time.time()
# initialize Environment
env = IonTrapEnv(**KWARGS)
# linear_probabilities is a function that converts h-values to probabilities by simply normalizing them
# gamma=0 means no forgetting, eta=1.0 means no interaction between steps
ecm = UniversalECM(gamma_damping=0, eta_glow_damping=eta, beta=beta)
ag = UniversalAgent(ECM=ecm, actions=env.actions, adj_matrix=adj_matrix)
data_steps = np.array([])
reward_list = np.array([])
for n in range(num_episodes):
# initial observation from environment
observation = env.reset()
# bool: whether or not the environment has finished the episode
done = False
# int: the current time step in this episode
num_steps = 0
cum_reward = 0
action_seq = []
laser_seq = []
srv_seq = []
while not done:
# increment counter
num_steps += 1
# predict action
action = ag.step(observation)
#random:
#action = np.random.choice(len(env.actions))
if n == num_episodes - 1:
laser_seq = np.append(laser_seq, action)
# perform action on environment and receive observation and reward
observation, reward, done = env.step(action)
srv_seq.append(env.srv(observation))
cum_reward += reward
ag.learn(reward)
if done:
data_steps = np.append(data_steps, num_steps)
#print(srv_seq)
# reward_list = np.append(reward_list, cum_reward)
if n == num_episodes - 1:
laser_seq_list.append(laser_seq)
end_time = time.time()
time_list.append(end_time-start_time)
full_data_steps[i, :] = data_steps
# full_reward_list[i, :] = reward_list
avg_data_steps = np.mean(full_data_steps, axis=0)
std_data_steps = np.std(full_data_steps, axis=0)
return avg_data_steps, std_data_steps, np.asarray(laser_seq_list), time_list
def initialize_config(config, num_ions, KWARGS):
env = IonTrapEnv(**KWARGS)
num_actions = len(env.actions)
if config == 1:
adj_matrix = np.zeros((num_actions + 1, num_actions + 1))
adj_matrix[0][list(range(1, num_actions + 1))] = 1
elif config == 2:
adj_matrix = np.zeros((num_actions + 2, num_actions + 2))
adj_matrix[-1][list(range(2, num_actions + 1))] = 1
adj_matrix[0][[1, -1]] = 1
elif config == 3:
adj_matrix = np.zeros((num_actions + 1, num_actions + 1))
adj_matrix[0][1] = 1
else:
print("invalid configuration")
return adj_matrix
def rewarded_srv(num_ions, dim):
srv = [dim for n in range(num_ions)]
return srv
def store_data(data, config, eta, beta, path, data_name):
np.savetxt(
f"{path}{data_name}_config_{config}_dim_{dim}_ions_{num_ions}_eta_{eta}_beta_{beta}_episodes_{num_episodes}_avg_{avg}.txt",
data)
def store_seq(seq, config, eta, beta, path, data_name):
with open(f"{path}{data_name}_config_{config}_dim_{dim}_ions_{num_ions}_eta_{eta}_beta_{beta}_episodes_{num_episodes}_avg_{avg}.csv",'w', newline='') as file:
mywriter = csv.writer(file, delimiter=",")
mywriter.writerows(seq)
def get_data_for_comparison(config_s, eta_s, beta_s, path):
for eta in eta_s:
for beta in beta_s:
for config in config_s:
#if eta == beta:
adj_matrix = initialize_config(config, num_ions, KWARGS)
avg_data_steps, std_data_steps, laser_seq_list, time_list = run(adj_matrix, KWARGS, eta, beta, num_episodes, avg)
# store data
store_seq(laser_seq_list, config, eta, beta, path, "Iontrap_final_laser_seq")
store_data(avg_data_steps, config, eta, beta, path, "Iontrap_avg_steps")
store_data(std_data_steps, config, eta, beta, path, "Iontrap_avg_steps_std")
store_data(time_list, config, eta, beta, path, "Iontrap_time_per_agent")
return avg_data_steps, std_data_steps, laser_seq_list, time_list
def when_learned(data, limit):
for i in range(len(data)):
if i > limit:
m = 0
for n in range(i - limit, i):
if data[n] == data[i]:
m += 1
if m == limit:
return i - limit
else:
continue
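# Note on when_learned (comment added for clarity, not in the original): it
# scans for the first index i whose preceding `limit` entries all equal
# data[i] and returns i - limit (the start of that plateau); if no such
# plateau exists it falls through and returns None. For example, with
# limit=3, when_learned([5, 4, 2, 2, 2, 2], 3) returns 2.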
def plot_comparison(start, end, config_s, eta_s, beta_s, path, colors):
xs = np.arange(start, end)
table = np.zeros((32,4))
i = 0
n = 0
for eta in eta_s:
for beta in beta_s:
for config in config_s:
#if (eta != 0.15 and beta != 0.2):
data = np.loadtxt(
f"{path}Iontrap_avg_steps_config_{config}_dim_{dim}_ions_{num_ions}_eta_{eta}_beta_{beta}_episodes_{num_episodes}_avg_{avg}.txt")
err = np.loadtxt(
f"{path}Iontrap_avg_steps_std_config_{config}_dim_{dim}_ions_{num_ions}_eta_{eta}_beta_{beta}_episodes_{num_episodes}_avg_{avg}.txt")
learned = when_learned(data,50)
table[i,:] = [eta,beta,data[x_cut],learned]
i += 1
if data[x_cut] < y_cut:
color = colors[n]
n += 1
print(f"result: {data[x_cut]}")
print(f"error: {err[x_cut]}")
plt.plot(xs, data[start:end], color=color, linestyle='-',
label=rf"$\eta$ = {eta}, $\beta$ = {beta}")#, config: {config}", ms=2) #
# plt.plot(xs, data[start:end] + err[start:end], color=color, linestyle='--', alpha=0.2, ms=0.01,
# lw=0.1)
# plt.plot(xs, data[start:end] - err[start:end], color=color, linestyle='--', alpha=0.2, ms=0.01,
# lw=0.1)
# plt.fill_between(xs, data[start:end] + err[start:end], data[start:end] - err[start:end],
# color=color, alpha=0.1)
#plt.ylim([0, 50])
plt.legend()
plt.ylabel('average number of pulses')
plt.xlabel('episode')
df = pd.DataFrame(table, columns = ["eta","beta","result","learned"])
print(df.to_latex())
plt.tight_layout(pad=0.1)
#plt.savefig(f"figures/ion_trap_paramOpt_best_config_2")
plt.show()
num_episodes = 500
avg = 1
dim = 3
num_ions = 2
max_steps = 10
phases = {'pulse_angles': [np.pi / 2], 'pulse_phases': [np.pi / 2], 'ms_phases': [-np.pi / 2]}
KWARGS = {'num_ions': num_ions, 'dim': dim, 'goal': [rewarded_srv(num_ions, dim)], 'phases': phases,
'max_steps': max_steps}
eta_s = [0.1,0.15,0.2,0.25]
beta_s = [0.15,0.2,0.25,0.3]
eta_s = [0.25]
beta_s = [0.3]
config_s = [1]
path = "data/ion_trap_tree/"
x_cut = num_episodes-1
y_cut = 1000
colors = ["r","g","b","darkorange","y","k","grey", "olive","b","gold","lime","navy","brown","c","purple","hotpink","crimson",
"r","g","darkorange","k","y","grey", "olive","b","gold","lime","navy","brown","c","purple","hotpink","crimson"]
avg_data_steps, std_data_steps, laser_seq_list, time_list = get_data_for_comparison(config_s, eta_s, beta_s, path)
plot_comparison(0, x_cut, config_s, eta_s, beta_s, path, colors)
| 33.177122 | 162 | 0.574018 |
ed07df8b479bc8cae4152a85b925812ad88ea0b4 | 1,957 | py | Python | youtube_discussion_tree_api/_http.py | quimpm/youtube_discussion_tree | e7e457df673891f9c70495c28ca7d718f196bede | [
"MIT"
] | 13 | 2021-05-20T13:13:43.000Z | 2021-06-21T18:33:44.000Z | youtube_discussion_tree_api/_http.py | quimpm/youtube_discussion_tree | e7e457df673891f9c70495c28ca7d718f196bede | [
"MIT"
] | null | null | null | youtube_discussion_tree_api/_http.py | quimpm/youtube_discussion_tree | e7e457df673891f9c70495c28ca7d718f196bede | [
"MIT"
] | null | null | null | import requests
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import TextFormatter
from .utils import YoutubeDataApiOperations
from youtube_discussion_tree_api._errors import NoEnglishTranscription
def _get_video_transcription(video_id):
try:
transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=['en'])
except:
raise NoEnglishTranscription(video_id, "This video doesn't support a transcription to English")
formatter = TextFormatter()
return formatter.format_transcript(transcript)
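# Illustrative call (sketch, not from the original module); the argument is a
# placeholder video id:
#
#   transcript_text = _get_video_transcription("SOME_VIDEO_ID")
#   # -> plain-text English transcript, or NoEnglishTranscription if unavailable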
def _get_video_info(id_video, api_key, quota_manager):
quota_manager._actualize_current_quota(YoutubeDataApiOperations.LIST)
youtube_api_videos = "https://www.googleapis.com/youtube/v3/videos"
params = {
"key" : api_key,
"part" : ["snippet", "statistics"],
"id" : id_video
}
return requests.get(youtube_api_videos, params = params).json()
def _get_video_comments(id_video, api_key, quota_manager):
quota_manager._actualize_current_quota(YoutubeDataApiOperations.LIST)
youtube_api_comment_threads = "https://www.googleapis.com/youtube/v3/commentThreads"
params = {
"key" : api_key,
"part" : ["snippet", "replies"],
"order" : "relevance",
"videoId" : id_video,
"maxResults" : 100
}
return requests.get(youtube_api_comment_threads, params = params).json()
def _get_list_search_videos(query, search_results, api_key, quota_manager):
quota_manager._actualize_current_quota(YoutubeDataApiOperations.SEARCH)
youtube_api_search = "https://www.googleapis.com/youtube/v3/search"
params = {
"key" : api_key,
"part" : ["snippet"],
"q" : query,
"maxResults" : search_results,
"type" : ["video"],
"order" : "relevance",
"videoCaption" : "closedCaption"
}
return requests.get(youtube_api_search, params).json() | 39.938776 | 101 | 0.713337 |
229a76baf2f4f29bff41b0d4de3075492ff15996 | 396 | py | Python | dp-search/tests/base.py | flaxandteal/dp-search-app | eecdd61435d8665ea18c9f084bfa6a3c23b00221 | [
"MIT"
] | null | null | null | dp-search/tests/base.py | flaxandteal/dp-search-app | eecdd61435d8665ea18c9f084bfa6a3c23b00221 | [
"MIT"
] | null | null | null | dp-search/tests/base.py | flaxandteal/dp-search-app | eecdd61435d8665ea18c9f084bfa6a3c23b00221 | [
"MIT"
] | 3 | 2018-08-12T06:43:04.000Z | 2021-12-20T12:54:16.000Z | import unittest
import abc
# Nosetests will take care of sys.path for this import
from server.app import create_app
app = create_app()
class BaseTest(unittest.TestCase):
    __metaclass__ = abc.ABCMeta # Abstract class
    def setUp(self):
        self.app = app
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()
| 22 | 54 | 0.694444 |
4bca2224bb10067e06812c68d7a38e46ef03d72e | 2,956 | py | Python | engine/omega_engine/backends/opengl/opengl_core/components/entity.py | jadsonlucio/Opengl-CG-Project | 47b50bf93b8d3a1ccef1f41f22ed3327d9496b8c | [
"MIT"
] | null | null | null | engine/omega_engine/backends/opengl/opengl_core/components/entity.py | jadsonlucio/Opengl-CG-Project | 47b50bf93b8d3a1ccef1f41f22ed3327d9496b8c | [
"MIT"
] | 3 | 2021-06-08T20:54:18.000Z | 2022-03-12T00:13:46.000Z | engine/omega_engine/backends/opengl/opengl_core/components/entity.py | jadsonlucio/Opengl-CG-Project | 47b50bf93b8d3a1ccef1f41f22ed3327d9496b8c | [
"MIT"
] | null | null | null | import ctypes
import numpy as np
from OpenGL.GL import *
from omega_engine.core import load_obj_data
from omega_engine.core import MatrixModel
class Entity():
def __init__(self, vertices, vertex_format, indices, texture=None, model=None, draw_mode=GL_TRIANGLES):
self.draw_mode = draw_mode
self.vertices = vertices
self.vertex_format = np.array(vertex_format)
self.indices = indices
self.texture = texture
if model is None:
model = MatrixModel()
self.model = model
self.vao = glGenVertexArrays(1)
self.vbo = None
self.ibo = None
self.gen_vertex_buffer()
self.populate_vao()
def gen_vertex_buffer(self):
self.vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)
self.ibo = glGenBuffers(1)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.indices.nbytes, self.indices, GL_STATIC_DRAW)
def populate_vao(self):
glBindVertexArray(self.vao)
glBindBuffer(GL_ARRAY_BUFFER, self.vbo)
stride = self.vertex_format.transpose()[0].sum() * 4
pointer_v = 0
for size, dtype, position in self.vertex_format:
glEnableVertexAttribArray(position)
glVertexAttribPointer(position, size, dtype, False, stride, ctypes.c_void_p(int(pointer_v)))
pointer_v += size * 4
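# Worked example of the layout above (explanatory comment, not original code):
# a vertex_format of [[3, GL_FLOAT, 0], [3, GL_FLOAT, 1], [3, GL_FLOAT, 2]]
# (position, color, normal, as used in load_obj below) gives
# stride = (3 + 3 + 3) * 4 = 36 bytes per vertex, with attribute offsets
# of 0, 12 and 24 bytes respectively.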
def draw(self, program):
if self.texture:
self.texture.bind()
program.use()
program.set_uniform_matrix4f_by_name(self.model.matrix4, "model", 1)
glBindVertexArray(self.vao)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ibo)
glDrawElements(self.draw_mode, len(self.indices), GL_UNSIGNED_INT, None)
def __copy__(self):
entity = Entity.__new__(Entity)
entity.vertices = self.vertices
entity.indices = self.indices
entity.vertex_format = self.vertex_format
entity.texture = self.texture
entity.model = MatrixModel()
entity.vao = self.vao
entity.vbo = self.vbo
entity.ibo = self.ibo
return entity
@classmethod
def load_obj(cls, obj_file_path, mtl_file_path):
vertices, normals, colors, indices = load_obj_data(obj_file_path, mtl_file_path)
_vertices_data = []
print(np.array(vertices).shape)
print(np.array(normals).shape)
print(np.array(colors).shape)
for vertice, normal, color in zip(vertices, normals, colors):
_vertices_data += vertice + color + normal
vertex_format = [[3, GL_FLOAT, 0], [3, GL_FLOAT, 1], [3, GL_FLOAT, 2]]
_vertices_data = np.array(_vertices_data, dtype="float32")
indices = np.array(indices, dtype="uint32")
return _vertices_data, vertex_format, indices
| 33.977011 | 107 | 0.657984 |
cf9594c02c942cadb4c3a7862d9adffc5a3335a1 | 318 | py | Python | crawling_scraping/RSS_xml_scraping/scrape_rss_by_feedparser.py | litteletips/crawling_scraping-scrapy_tool | 6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0 | [
"MIT"
] | null | null | null | crawling_scraping/RSS_xml_scraping/scrape_rss_by_feedparser.py | litteletips/crawling_scraping-scrapy_tool | 6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0 | [
"MIT"
] | 16 | 2021-03-19T09:44:52.000Z | 2022-03-12T00:22:14.000Z | crawling_scraping/RSS_xml_scraping/scrape_rss_by_feedparser.py | litteletips/crawling_scraping | 6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0 | [
"MIT"
] | null | null | null | # RSS scraping using feedparser
# Scrapes the Hatena Bookmark RSS feed
# How to run:
# python scrape_rss_by_feedparser.py
import feedparser
# Load the RSS feed of Hatena Bookmark's popular entries (the "Technology" category).
d = feedparser.parse('http://b.hatena.ne.jp/hotentry/it.rss')
# Iterate over every entry.
for entry in d.entries:
    print(entry.link, entry.title)  # Print the URL and the title.
| 21.2 | 61 | 0.773585 |
29f18e4c5d42a39102bd534dcba21d9d79bc3aa7 | 1,523 | py | Python | src/frontend/templatehelpers/templatetags/custom_formatting.py | katago/katago-server | 04b1d565370fef8fec16f45a272b0894a2abc60d | [
"MIT"
] | 27 | 2020-05-03T11:01:27.000Z | 2022-03-17T05:33:10.000Z | src/frontend/templatehelpers/templatetags/custom_formatting.py | katago/katago-server | 04b1d565370fef8fec16f45a272b0894a2abc60d | [
"MIT"
] | 54 | 2020-05-09T01:18:41.000Z | 2022-01-22T10:31:15.000Z | src/frontend/templatehelpers/templatetags/custom_formatting.py | katago/katago-server | 04b1d565370fef8fec16f45a272b0894a2abc60d | [
"MIT"
] | 9 | 2020-09-29T11:31:32.000Z | 2022-03-09T01:37:50.000Z | from django import template
register = template.Library()
# Peformance hack for converting times to a fixed ISO-like format noticeably faster
# than strftime, which has to handle a lot of other cases.
@register.filter(expects_localtime=False)
def isotimestr(value):
return "{}-{:02d}-{:02d} {:02d}:{:02d}:{:02d} {}".format(
value.year, value.month, value.day, value.hour, value.minute, value.second, value.tzname()
)
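# Hypothetical template usage of the filter above (the load name is taken from
# this module's filename; the model field is an assumption, not from the
# original source):
#
#   {% load custom_formatting %}
#   {{ network.created_at|isotimestr }}   e.g. "2020-05-03 11:01:27 UTC"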
# Replace underscores with spaces - used to make it more natural to wordbreak a column
# and get better css flow
@register.filter()
def underscores_to_spaces(value):
return value.replace("_", " ")
@register.filter()
def chop_network_run_name(value, run_name):
if value.startswith(run_name + "-"):
return value[len(run_name) + 1 :]
return value
@register.filter()
def game_winner_class(game, network):
if game.winner == "W" and game.white_network.name == network.name:
return "winnerResultStyle"
if game.winner == "B" and game.black_network.name == network.name:
return "winnerResultStyle"
if game.winner == "B" and game.white_network.name == network.name:
return "loserResultStyle"
if game.winner == "W" and game.black_network.name == network.name:
return "loserResultStyle"
return "drawResultStyle"
@register.filter()
def network_row_style(network, strongest_confident_network):
if network.name == strongest_confident_network.name:
return "strongestNetworkStyle"
return "normalNetworkStyle"
| 33.108696 | 98 | 0.709783 |
2839bbf6a6953ca38575d54ccac8a20d775b8982 | 6,835 | py | Python | bindings/python/ensmallen_graph/datasets/string/caldicellulosiruptorhydrothermalis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/caldicellulosiruptorhydrothermalis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/caldicellulosiruptorhydrothermalis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Caldicellulosiruptor hydrothermalis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:21:31.486670
The undirected graph Caldicellulosiruptor hydrothermalis has 2535 nodes
and 152914 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.04761 and has 9 connected components, where the
component with most nodes has 2509 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 96, the mean node degree
is 120.64, and the node degree mode is 3. The top 5 most central nodes
are 632292.Calhy_1330 (degree 940), 632292.Calhy_1295 (degree 936), 632292.Calhy_2314
(degree 793), 632292.Calhy_1304 (degree 780) and 632292.Calhy_1113 (degree
744).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import CaldicellulosiruptorHydrothermalis
# Then load the graph
graph = CaldicellulosiruptorHydrothermalis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def CaldicellulosiruptorHydrothermalis(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Caldicellulosiruptor hydrothermalis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of Caldicellulosiruptor hydrothermalis graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:21:31.486670
The undirected graph Caldicellulosiruptor hydrothermalis has 2535 nodes
and 152914 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.04761 and has 9 connected components, where the
component with most nodes has 2509 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 96, the mean node degree
is 120.64, and the node degree mode is 3. The top 5 most central nodes
are 632292.Calhy_1330 (degree 940), 632292.Calhy_1295 (degree 936), 632292.Calhy_2314
(degree 793), 632292.Calhy_1304 (degree 780) and 632292.Calhy_1113 (degree
744).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import CaldicellulosiruptorHydrothermalis
# Then load the graph
graph = CaldicellulosiruptorHydrothermalis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="CaldicellulosiruptorHydrothermalis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.78534 | 223 | 0.710022 |
4b447ffb9badde1ed2daf40e686b67350be6a8e5 | 883 | py | Python | doc/figures/nodes-rect-cornerRadius.py | johnnovak/twyg | d125d651e2d737f0825a9be948b5c4d6a202d3da | [
"MIT"
] | 9 | 2016-01-07T04:17:38.000Z | 2021-07-30T09:48:38.000Z | doc/figures/nodes-rect-cornerRadius.py | johnnovak/twyg | d125d651e2d737f0825a9be948b5c4d6a202d3da | [
"MIT"
] | null | null | null | doc/figures/nodes-rect-cornerRadius.py | johnnovak/twyg | d125d651e2d737f0825a9be948b5c4d6a202d3da | [
"MIT"
] | 2 | 2017-05-25T13:34:54.000Z | 2017-09-21T21:34:39.000Z | import os, sys
from fig import *
config = r"""
[layout]
style layout
[node]
style rect
fontName $FONTNAME
fontSize $FONTSIZE
textBaselineCorrection $BASELINE_CORR
strokeWidth 3
roundingStyle arc
cornerRadius %s
textPadX 25
textPadY 8
[connection]
style curve
[color]
style cycle
colorscheme "mint-examples%s"
fontColorAuto no
fontColor #fff
"""
scale = 0.8
data1 = { 'constellate': [] }
data2 = { 'mattock': [] }
data3 = { 'umbraged': [] }
trees = [
create_tree(config % (5, 3), data1),
create_tree(config % (10, ''), data2),
create_tree(config % (1000, 2), data3)
]
write_all_trees(trees, scale)
| 19.195652 | 45 | 0.483579 |
761e5e0f66954f4a2ad2a734b0e41ab678fb001d | 2,831 | py | Python | examples/fit_rhodopsins.py | carbonscott/helix | e2ee6e1293cae4f0bd1220ed5a41268d20a095db | [
"MIT"
] | null | null | null | examples/fit_rhodopsins.py | carbonscott/helix | e2ee6e1293cae4f0bd1220ed5a41268d20a095db | [
"MIT"
] | null | null | null | examples/fit_rhodopsins.py | carbonscott/helix | e2ee6e1293cae4f0bd1220ed5a41268d20a095db | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.insert(0, "..")
## sys.path.insert(0, "/home/scott/Dropbox/codes/helix")
from loaddata import load_xlsx
import pyrotein as pr
import numpy as np
import helix
import os
import multiprocessing as mp
# Specify chains to process...
fl_chain = "chains.comp.xlsx"
lines = load_xlsx(fl_chain, sheet = "Sheet1")
drc_pdb = "pdb"
# Define helix...
TM_segments = {
"TM1_e" : [ 35, 50],
"TM1_m" : [ 51, 57],
"TM1_c" : [ 58, 64],
"TM2_c1" : [ 74, 81],
"TM2_c2" : [ 81, 88],
"TM2_pi" : [ 86, 90],
"TM2_e" : [ 92, 100],
"TM3_e" : [110, 123],
"TM3_c" : [123, 136],
"TM4_c" : [150, 167],
"TM4_m" : [166, 169],
"TM4_e" : [169, 172],
"TM5_e" : [202, 208],
"TM5_pi" : [207, 212],
"TM5_c" : [212, 225],
"TM6_c1" : [244, 247],
"TM6_c2" : [248, 264],
"TM6_c" : [244, 264],
"TM6_e" : [265, 276],
"TM7_e" : [289, 296],
"TM7_310" : [296, 300],
"TM7_c" : [301, 307],
}
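# (Added note) TM_segments maps each helix-segment label to its
# [first residue, last residue] range; each segment is fitted to a helix
# model independently in parallel() below.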
## for i_fl, line in enumerate(lines):
def parallel(line):
# Unpack parameters
_, pdb, chain, _ = line[:4]
# Read the PDB file...
fl_pdb = os.path.join(drc_pdb, f"{pdb}.pdb")
atom_list = pr.atom.read(fl_pdb)
atom_dict = pr.atom.create_lookup_table(atom_list)
# Collect results...
result_dict = {}
for seg, (nterm, cterm) in TM_segments.items():
# Obtain coordinates...
xyzs_dict = {}
peptides = ["N", "CA", "C", "O"]
for i in peptides:
xyzs_dict[i] = pr.atom.extract_xyz([i], atom_dict, chain, nterm, cterm)
# Fitting...
try: result = helix.parameterize.helix(xyzs_dict, lam = [0.0], report = False)
except ValueError: pass
print(f"Fitting {pdb}.{chain}.{seg}: {nterm}...{cterm}")
# Report...
params = result.params
parvals = helix.parameterize.unpack_params(params)
res = helix.parameterize.report_result(result)
result_dict[f"{seg}"] = res
drc_out = "helix"
fl_out = os.path.join(drc_out, f"{pdb}_{chain}.helixparam.dat")
with open(fl_out,'w') as fh:
for i, (seg, res) in enumerate(result_dict.items()):
fh.write(f"{i:02d}")
fh.write(" ")
fh.write(f"{seg:10s}")
fh.write(" ")
fh.write( " ".join(res) )
fh.write("\n")
num_job = 4
if __name__ == "__main__":
with mp.Pool(num_job) as proc:
proc.map( parallel, lines )
| 28.31 | 86 | 0.485341 |
cbdd7c8656d3c0a4d0baaa1dcf68204165cd90b3 | 1,489 | py | Python | setup.py | esloch/toki | 8f92d464137839f6883e13b30357978e17c5a46e | [
"Apache-2.0"
] | null | null | null | setup.py | esloch/toki | 8f92d464137839f6883e13b30357978e17c5a46e | [
"Apache-2.0"
] | null | null | null | setup.py | esloch/toki | 8f92d464137839f6883e13b30357978e17c5a46e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import find_packages, setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = []
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest',
]
setup(
author="Ivan Ogasawara",
author_email='ivan.ogasawara@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Toki: Database Expression API",
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='toki',
name='toki',
packages=find_packages(include=['toki']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/toki-project/toki',
version='0.0.1',
zip_safe=False,
)
| 27.072727 | 61 | 0.63734 |
552e9adbab30cd8e6fe2110b17595420612c7503 | 4,362 | py | Python | sources/lcd_i2c.py | fxmartin/fxmartin.github.io | cb5d7f579f8fa0c04ebc78657b3ad684ae3e65b8 | [
"MIT"
] | null | null | null | sources/lcd_i2c.py | fxmartin/fxmartin.github.io | cb5d7f579f8fa0c04ebc78657b3ad684ae3e65b8 | [
"MIT"
] | null | null | null | sources/lcd_i2c.py | fxmartin/fxmartin.github.io | cb5d7f579f8fa0c04ebc78657b3ad684ae3e65b8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#--------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# lcd_i2c.py
# LCD test script using I2C backpack.
# Supports 16x2 and 20x4 screens.
#
# Author : FX, mail@fxmartin.me
# Original Author : Matt Hawkins
# Date : 10/09/2016
#
# https://fxmartin.github.io/
#
# Copyright 2015 Matt Hawkins
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#--------------------------------------
import smbus
import time
import psutil
import socket
import fcntl
import struct
# Define some device parameters
I2C_ADDR = 0x3F # I2C device address
LCD_WIDTH = 20 # Maximum characters per line
# Define some device constants
LCD_CHR = 1 # Mode - Sending data
LCD_CMD = 0 # Mode - Sending command
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
LCD_LINE_3 = 0x94 # LCD RAM address for the 3rd line
LCD_LINE_4 = 0xD4 # LCD RAM address for the 4th line
LCD_BACKLIGHT_ON = 0x08 # On
LCD_BACKLIGHT_OFF = 0x00 # Off
ENABLE = 0b00000100 # Enable bit
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
#Open I2C interface
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
def lcd_init():
# Initialise display
lcd_byte(0x33,LCD_CMD) # 110011 Initialise
lcd_byte(0x32,LCD_CMD) # 110010 Initialise
lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
lcd_byte(0x01,LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def lcd_byte(bits, mode):
# Send byte to data pins
# bits = the data
# mode = 1 for data
# 0 for command
bits_high = mode | (bits & 0xF0) | LCD_BACKLIGHT_ON
bits_low = mode | ((bits<<4) & 0xF0) | LCD_BACKLIGHT_ON
# High bits
bus.write_byte(I2C_ADDR, bits_high)
lcd_toggle_enable(bits_high)
# Low bits
bus.write_byte(I2C_ADDR, bits_low)
lcd_toggle_enable(bits_low)
def lcd_toggle_enable(bits):
# Toggle enable
time.sleep(E_DELAY)
bus.write_byte(I2C_ADDR, (bits | ENABLE))
time.sleep(E_PULSE)
bus.write_byte(I2C_ADDR,(bits & ~ENABLE))
time.sleep(E_DELAY)
def lcd_string(message,line):
# Send string to display
message = message.ljust(LCD_WIDTH," ")
lcd_byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
lcd_byte(ord(message[i]),LCD_CHR)
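# Example call pattern (sketch only, not part of the original script); the
# display must be initialised first:
#
#   lcd_init()
#   lcd_string("Hello world!", LCD_LINE_1)
#   lcd_string("20x4 I2C LCD", LCD_LINE_2)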
# Get the IP address of the PI
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def main():
# Main program block
# Initialise display
lcd_init()
# Display system stats
temp = round(int(open('/sys/class/thermal/thermal_zone0/temp').read()) / 1e3,2)
lcd_string("CPU : "+str(psutil.cpu_percent()) + '% - ' + str(temp) + 'C',LCD_LINE_1)
# Display memory stats
memory = psutil.virtual_memory()
# Divide from Bytes -> KB -> MB
available = round(memory.available/1024.0/1024.0,1)
total = round(memory.total/1024.0/1024.0,1)
lcd_string("Mem : " + str(total) + 'MB/' + str(memory.percent) + '%',LCD_LINE_2)
# Display Disk stats
disk = psutil.disk_usage('/')
# Divide from Bytes -> KB -> MB -> GB
free = round(disk.free/1024.0/1024.0/1024.0,1)
total = round(disk.total/1024.0/1024.0/1024.0,1)
lcd_string("Disk : "+str(total) + 'GB/' + str(disk.percent) + '% ',LCD_LINE_3)
# Display Network info
#lcd_string("wlan : " + get_ip_address('eth0'),LCD_LINE_4)
lcd_string("wlan : " + get_ip_address('wlan0'),LCD_LINE_4)
if __name__ == '__main__':
main()
| 28.697368 | 87 | 0.676525 |
fb8d5f7c1e3d7dde49d5b660bd517f3dece22b9a | 3,173 | py | Python | src/python/grpcio/grpc/framework/foundation/callable_util.py | benjaminp/grpc | dfb1a0f20624417bff408a14b12a23713085b999 | [
"Apache-2.0"
] | 1 | 2020-07-05T06:10:21.000Z | 2020-07-05T06:10:21.000Z | src/python/grpcio/grpc/framework/foundation/callable_util.py | benjaminp/grpc | dfb1a0f20624417bff408a14b12a23713085b999 | [
"Apache-2.0"
] | 3 | 2020-03-23T18:01:51.000Z | 2021-03-19T23:15:15.000Z | src/python/grpcio/grpc/framework/foundation/callable_util.py | benjaminp/grpc | dfb1a0f20624417bff408a14b12a23713085b999 | [
"Apache-2.0"
] | 1 | 2021-11-26T05:29:58.000Z | 2021-11-26T05:29:58.000Z | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for working with callables."""
import abc
import collections
import enum
import functools
import logging
import six
logging.basicConfig()
_LOGGER = logging.getLogger(__name__)
class Outcome(six.with_metaclass(abc.ABCMeta)):
"""A sum type describing the outcome of some call.
Attributes:
kind: One of Kind.RETURNED or Kind.RAISED respectively indicating that the
call returned a value or raised an exception.
return_value: The value returned by the call. Must be present if kind is
Kind.RETURNED.
exception: The exception raised by the call. Must be present if kind is
Kind.RAISED.
"""
@enum.unique
class Kind(enum.Enum):
"""Identifies the general kind of the outcome of some call."""
RETURNED = object()
RAISED = object()
class _EasyOutcome(
collections.namedtuple('_EasyOutcome',
['kind', 'return_value', 'exception']), Outcome):
"""A trivial implementation of Outcome."""
def _call_logging_exceptions(behavior, message, *args, **kwargs):
try:
return _EasyOutcome(Outcome.Kind.RETURNED, behavior(*args, **kwargs),
None)
except Exception as e: # pylint: disable=broad-except
_LOGGER.exception(message)
return _EasyOutcome(Outcome.Kind.RAISED, None, e)
def with_exceptions_logged(behavior, message):
"""Wraps a callable in a try-except that logs any exceptions it raises.
Args:
behavior: Any callable.
message: A string to log if the behavior raises an exception.
Returns:
A callable that when executed invokes the given behavior. The returned
callable takes the same arguments as the given behavior but returns a
future.Outcome describing whether the given behavior returned a value or
raised an exception.
"""
@functools.wraps(behavior)
def wrapped_behavior(*args, **kwargs):
return _call_logging_exceptions(behavior, message, *args, **kwargs)
return wrapped_behavior
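# Illustrative sketch (not part of the original module) of how the wrapper
# behaves; the names below are examples only:
#
#   safe_int = with_exceptions_logged(int, 'Conversion failed')
#   ok = safe_int('42')     # ok.kind is Outcome.Kind.RETURNED, ok.return_value == 42
#   bad = safe_int('oops')  # bad.kind is Outcome.Kind.RAISED, bad.exception is a ValueError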
def call_logging_exceptions(behavior, message, *args, **kwargs):
"""Calls a behavior in a try-except that logs any exceptions it raises.
Args:
behavior: Any callable.
message: A string to log if the behavior raises an exception.
*args: Positional arguments to pass to the given behavior.
**kwargs: Keyword arguments to pass to the given behavior.
Returns:
An Outcome describing whether the given behavior returned a value or raised
an exception.
"""
return _call_logging_exceptions(behavior, message, *args, **kwargs)
| 32.377551 | 80 | 0.710369 |
8c34b50e93314c34214fc67a5e86534f34ce79c0 | 9,290 | py | Python | tests/regressiontests/queries/models.py | graingert/django | 784d0c261c76535dc760bc8d76793d92f35c1513 | [
"BSD-3-Clause"
] | 1 | 2020-08-08T01:55:00.000Z | 2020-08-08T01:55:00.000Z | tests/regressiontests/queries/models.py | graingert/django | 784d0c261c76535dc760bc8d76793d92f35c1513 | [
"BSD-3-Clause"
] | null | null | null | tests/regressiontests/queries/models.py | graingert/django | 784d0c261c76535dc760bc8d76793d92f35c1513 | [
"BSD-3-Clause"
] | null | null | null | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
category = models.ForeignKey(NamedCategory, null=True, default=None)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __unicode__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __unicode__(self):
return self.name
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
class Meta:
ordering = ['info']
def __unicode__(self):
return self.info
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True, null=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __unicode__(self):
return self.name
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __unicode__(self):
return self.name
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __unicode__(self):
return '%d: %s' % (self.rank, self.author.name)
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __unicode__(self):
return self.title
class Number(models.Model):
num = models.IntegerField()
def __unicode__(self):
return unicode(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_query_set(self):
qs = super(CustomManager, self).get_query_set()
return qs.filter(public=True, tag__name='t1')
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __unicode__(self):
return self.data
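# Added note: with the two managers above, the default manager filters rows while the second does not,
# e.g. (illustrative shell/test usage, not meant to run at import time in this models module):
#   ManagedModel.objects.all()         # only rows with public=True and tag__name='t1'
#   ManagedModel.normal_manager.all()  # every row, unfiltered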
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_query_set(self):
return super(MemberManager, self).get_query_set().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
def __unicode__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __unicode__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __unicode__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, null=True)
class Meta:
ordering = ['single']
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, null=True)
class Meta:
ordering = ['others']
def __unicode__(self):
return self.name
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name")
meal = models.CharField(max_length=20)
def __unicode__(self):
return "%s at %s" % (self.food, self.meal)
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", to_field="num", null=True)
def __unicode__(self):
return "%s" % self.num
# Bug #12252
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
num = models.PositiveSmallIntegerField()
def __unicode__(self):
return self.name
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
objectb = models.ForeignKey(ObjectB)
def __unicode__(self):
return self.name
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __unicode__(self):
return self.name
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __unicode__(self):
return self.name + " " + self.special_name
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory)
def __unicode__(self):
return "category item: " + str(self.category)
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory)
def __unicode__(self):
return "one2one " + self.new_name
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
| 25.805556 | 83 | 0.689774 |
90720cdd3a4091cd42ed2ffb57c714ad70c2f31b | 1,880 | py | Python | tests/ip_messaging/test_channels.py | quippp/twilio-python | 22b84cdfd19a6b1bde84350053870a7c507af410 | [
"MIT"
] | 1 | 2020-01-29T23:39:15.000Z | 2020-01-29T23:39:15.000Z | tests/ip_messaging/test_channels.py | quippp/twilio-python | 22b84cdfd19a6b1bde84350053870a7c507af410 | [
"MIT"
] | 1 | 2016-05-26T21:39:12.000Z | 2016-05-26T21:39:14.000Z | v/lib/python2.7/site-packages/tests/ip_messaging/test_channels.py | josh6beasttt/HangWithFriends | 0c5113bf1203190364d4922754c21eb5d87a5c25 | [
"Apache-2.0"
] | 2 | 2019-05-19T06:02:26.000Z | 2020-12-23T11:27:20.000Z | import unittest
from mock import patch, Mock
from twilio.rest.resources.ip_messaging import Channels, Channel
from tests.tools import create_mock_json
BASE_URI = "https://ip-messaging.twilio.com/v1/Services/ISxxx"
ACCOUNT_SID = "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
AUTH = (ACCOUNT_SID, "token")
CHANNEL_SID = "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
list_resource = Channels(BASE_URI, AUTH)
class ChannelTest(unittest.TestCase):
@patch("twilio.rest.resources.base.make_twilio_request")
def test_create_channel(self, mock):
resp = create_mock_json("tests/resources/ip_messaging/channel_instance.json")
resp.status_code = 201
mock.return_value = resp
uri = "%s/Channels" % (BASE_URI)
list_resource.create(friendly_name='TestChannel', unique_name='Unique')
exp_params = {
'FriendlyName': "TestChannel",
'UniqueName': 'Unique'
}
mock.assert_called_with("POST", uri, data=exp_params, auth=AUTH,
use_json_extension=False)
@patch("twilio.rest.resources.base.make_twilio_request")
def test_get(self, mock):
resp = create_mock_json("tests/resources/ip_messaging/channel_instance.json")
mock.return_value = resp
uri = "%s/Channels/%s" % (BASE_URI, CHANNEL_SID)
list_resource.get(CHANNEL_SID)
mock.assert_called_with("GET", uri, auth=AUTH,
use_json_extension=False)
@patch("twilio.rest.resources.base.Resource.request")
def test_delete(self, req):
""" Deleting a call should work """
resp = Mock()
resp.content = ""
resp.status_code = 204
req.return_value = resp, {}
app = Channel(list_resource, "CH123")
app.delete()
uri = "%s/Channels/CH123" % (BASE_URI)
req.assert_called_with("DELETE", uri)
| 34.181818 | 85 | 0.660106 |
f617e67960172809461f2eeda539f1ce0c903b53 | 5,654 | py | Python | tests/test_psi.py | benranderson/uhb | a0169cfae587384e96c628b82e02667537ad00b6 | [
"MIT"
] | null | null | null | tests/test_psi.py | benranderson/uhb | a0169cfae587384e96c628b82e02667537ad00b6 | [
"MIT"
] | null | null | null | tests/test_psi.py | benranderson/uhb | a0169cfae587384e96c628b82e02667537ad00b6 | [
"MIT"
] | null | null | null | """Tests for pipe-soil interaction module."""
import pytest
from uhb import psi
@pytest.fixture(params=[
{
"D_o": 0.508,
"gamma": 8000,
"c": 0,
"h": 4.725,
"psi_s": 35,
"soil_type": "dense sand",
"f": 0.67,
"H": 4.979,
"Nqv": 7.796,
"Nch": 0,
"Nqh": 18.23,
"Nc": 46.128,
"Nq": 33.296,
"Ngamma": 44.701,
"Tu": 19667,
"delta_p": 0.0508,
"Pu": 368876,
"delta_qu": 0.04979,
"Qu": 157757,
"Qd": 778158,
},
{
"D_o": 0.1731,
"gamma": 18000,
"c": 0,
"h": 1,
"psi_s": 32,
"soil_type": "dense sand",
"f": 0.6,
"H": 1.087,
"Nqv": 4.565,
"Nch": 0,
"Nqh": 11.979,
"Nc": 35.493,
"Nq": 23.177,
"Ngamma": 26.05,
"Tu": 2722,
"delta_p": 0.01731,
"Pu": 40553,
"delta_qu": 0.01087,
"Qu": 15455,
"Qd": 89413,
},
])
def gen_test_data(request):
""" Generate test data. """
return request.param
def test_calculate_soil_weight():
assert pytest.approx(psi.calculate_soil_weight(100, 100, 1)) == 10000
def test_depth_to_centre(gen_test_data):
assert pytest.approx(
psi.depth_to_centre(gen_test_data["D_o"], gen_test_data["h"]),
0.001) == gen_test_data["H"]
def test_Nch(gen_test_data):
H = psi.depth_to_centre(gen_test_data["D_o"], gen_test_data["h"])
assert pytest.approx(
psi.Nch(0, H, gen_test_data["D_o"]),
0.001) == gen_test_data["Nch"]
def test_Nqh(gen_test_data):
H = psi.depth_to_centre(gen_test_data["D_o"], gen_test_data["h"])
assert pytest.approx(
psi.Nqh(gen_test_data["psi_s"], H, gen_test_data["D_o"]),
0.001) == gen_test_data["Nqh"]
@pytest.mark.parametrize(
"psi_s, expected", [
(0, 0),
(10, 2.8090),
(60, 21.0084),
]
)
def test_Nqh_edge_cases(psi_s, expected):
assert pytest.approx(psi.Nqh(psi_s, 1, 1), 0.001) == expected
@pytest.mark.parametrize(
"c, H, D, expected", [
(0, 1, 1, 0),
(1, 2, 1, 4),
(1, 50, 1, 10),
]
)
def test_Ncv(c, H, D, expected):
assert pytest.approx(psi.Ncv(c, H, D)) == expected
def test_Nqv(gen_test_data):
assert pytest.approx(
psi.Nqv(gen_test_data["psi_s"], gen_test_data["H"], gen_test_data["D_o"]),
0.001) == gen_test_data["Nqv"]
def test_Nqv_zero():
assert pytest.approx(psi.Nqv(0, 1, 1)) == 0
def test_Nc(gen_test_data):
assert pytest.approx(
psi.Nc(gen_test_data["psi_s"], gen_test_data["H"], gen_test_data["D_o"]),
0.001) == gen_test_data["Nc"]
def test_Nq(gen_test_data):
assert pytest.approx(psi.Nq(gen_test_data["psi_s"]), 0.001) == gen_test_data["Nq"]
def test_Ngamma(gen_test_data):
assert pytest.approx(psi.Ngamma(gen_test_data["psi_s"]),
0.001) == gen_test_data["Ngamma"]
@pytest.mark.parametrize(
"soil_type, expected", [("dense sand", 0.003), ("loose sand", 0.005)]
)
def test_delta_t(soil_type, expected):
assert pytest.approx(psi.delta_t(soil_type)) == expected
def test_Tu(gen_test_data):
assert pytest.approx(
psi.Tu(gen_test_data["D_o"], gen_test_data["H"], gen_test_data["c"], gen_test_data["f"],
gen_test_data["psi_s"], gen_test_data["gamma"]),
0.001) == gen_test_data["Tu"]
def test_delta_p(gen_test_data):
assert pytest.approx(
psi.delta_p(gen_test_data["H"], gen_test_data["D_o"])
) == gen_test_data["delta_p"]
def test_Pu(gen_test_data):
assert pytest.approx(
psi.Pu(gen_test_data["c"], gen_test_data["H"], gen_test_data["D_o"],
gen_test_data["psi_s"], gen_test_data["gamma"]),
0.001) == gen_test_data["Pu"]
def test_delta_qu(gen_test_data):
assert pytest.approx(psi.delta_qu(
gen_test_data["soil_type"], gen_test_data["H"], gen_test_data["D_o"]),
0.001) == gen_test_data["delta_qu"]
@pytest.mark.parametrize(
"soil_type, H, D, expected", [
("sand", 1, 1, 0.01),
("clay", 1, 1, 0.1),
]
)
def test_delta_qu_others(soil_type, H, D, expected):
assert pytest.approx(psi.delta_qu(soil_type, H, D)) == expected
def test_delta_qu_unknown_soil():
with pytest.raises(ValueError):
psi.delta_qu("unknown", 1, 1)
def test_Qu(gen_test_data):
assert pytest.approx(psi.Qu(
gen_test_data["psi_s"], gen_test_data["c"], gen_test_data["D_o"],
gen_test_data["gamma"], gen_test_data["H"]),
0.001) == gen_test_data["Qu"]
@pytest.mark.parametrize(
"soil_type, D, expected", [
("dense sand", 0.508, 0.0508),
("stiff clay", 0.2, 0.04)
]
)
def test_delta_qd(soil_type, D, expected):
assert pytest.approx(psi.delta_qd(soil_type, D)) == expected
def test_delta_qd_unknown_soil():
with pytest.raises(ValueError):
psi.delta_qd("unknown", 100)
def test_Qd(gen_test_data):
assert pytest.approx(
psi.Qd(gen_test_data["psi_s"], gen_test_data["c"], gen_test_data["D_o"],
gen_test_data["gamma"], gen_test_data["H"], 1025),
0.001) == gen_test_data["Qd"]
# @pytest.mark.parametrize(
# "inputs, expected", [
# (test_inputs[0], (0.003, 19667)),
# (test_inputs[1], (0.003, 2722)),
# ]
# )
# def test_gen_axial_spring(inputs, expected):
# assert pytest.approx(psi.gen_axial_spring(inputs, 1), 0.001) == expected
# def test_gen_axial_spring_unknown_soil():
# with pytest.raises(ValueError):
# psi.gen_axial_spring(test_inputs[0], "none")
| 25.7 | 96 | 0.586664 |
ca5f4f802e63b1f083687d008c8eab8e48750ad6 | 1,656 | py | Python | test/helpers/raw_package_helpers.py | RobertWilbrandt/bm257s | 379ced4830a122557d928accf66cc47f343768ea | [
"BSD-3-Clause"
] | 2 | 2021-02-21T22:10:53.000Z | 2022-01-04T15:41:37.000Z | test/helpers/raw_package_helpers.py | RobertWilbrandt/bm257s | 379ced4830a122557d928accf66cc47f343768ea | [
"BSD-3-Clause"
] | null | null | null | test/helpers/raw_package_helpers.py | RobertWilbrandt/bm257s | 379ced4830a122557d928accf66cc47f343768ea | [
"BSD-3-Clause"
] | null | null | null | """Helper methods for creating and checking raw data packages"""
from bm257s.package_reader import Symbol
# Example from "spec" that should read "AC 513.6V"
EXAMPLE_RAW_PKG = b"\x02\x1A\x20\x3C\x47\x50\x6A\x78\x8F\x9F\xA7\xB0\xC0\xD0\xE5"
EXAMPLE_RAW_PKG_SYMBOLS = {Symbol.AUTO, Symbol.AC, Symbol.VOLT, Symbol.SCALE}
EXAMPLE_RAW_PKG_STRING = "513.6"
EXAMPLE_RAW_PKG_VALUE = 513.6
def check_example_pkg(test_case, pkg):
"""Check and assert correctness of parsed example package
:param test_case: Test case to assert from
:type test_case: unittest.TestCase
:param pkg: Package to check
:type pkg: bm257s.package_reader.Package
"""
test_case.assertSetEqual(
pkg.symbols,
EXAMPLE_RAW_PKG_SYMBOLS,
msg="Read correct symbols from example package",
)
test_case.assertEqual(
pkg.segment_string(),
EXAMPLE_RAW_PKG_STRING,
msg="Read correct string from example package",
)
test_case.assertAlmostEqual(
pkg.segment_float(),
EXAMPLE_RAW_PKG_VALUE,
msg="Read correct float number from example package",
)
def change_byte_index(data, pos, index):
"""Changes the byte index at a specific position in a package to a different value
:param data: Raw data package
:type data: bytes
:param pos: Index of byte to change
:type pos: int
:param index: New index
:type index: int
:return: Raw data package with changed byte
:rtype: bytes
"""
data_part_mask = (1 << 5) - 1
new_byte = bytes([(index << 4) | (data[pos] & data_part_mask)])
return data[0:pos] + new_byte + data[pos + 1 :] # noqa: E203
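# Illustrative worked example (added comment; the byte values are arbitrary):
# change_byte_index(b'\x02\x1a', 0, 3) keeps the low five data bits of byte 0 (0x02 & 0x1f == 0x02)
# and ORs in the new index shifted left by four (3 << 4 == 0x30), returning b'\x32\x1a'.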
| 30.666667 | 86 | 0.683575 |
7f78ad152c275ecdbf83b15017a1cb14ccbb5c65 | 3,410 | py | Python | phantomcli/tests/test_image.py | the16thpythonist/phantom-cli | 921588dda66bf84bf79569493f4e4312b59cd56d | [
"MIT"
] | 1 | 2021-11-24T01:50:36.000Z | 2021-11-24T01:50:36.000Z | phantomcli/tests/test_image.py | the16thpythonist/phantom-cli | 921588dda66bf84bf79569493f4e4312b59cd56d | [
"MIT"
] | 1 | 2021-11-15T17:48:43.000Z | 2021-11-15T17:48:43.000Z | phantomcli/tests/test_image.py | the16thpythonist/phantom-cli | 921588dda66bf84bf79569493f4e4312b59cd56d | [
"MIT"
] | 1 | 2020-01-13T17:26:14.000Z | 2020-01-13T17:26:14.000Z | # Standard library import
import os
from unittest import TestCase
# third party imports
import imageio
import numpy as np
# Package import
from phantomcli.image import PhantomImage
class TestPhantomImage(TestCase):
FOLDER_PATH = os.path.dirname(os.path.abspath(__file__))
IMAGE_PATH = os.path.join(FOLDER_PATH, 'sample.jpg')
def test_sample_image_correctly_read_from_jpeg(self):
expected_image = imageio.imread(self.IMAGE_PATH, pilmode='L')
phantom_image = PhantomImage.from_jpeg(self.IMAGE_PATH)
self.assertTrue(np.alltrue(expected_image == phantom_image.array))
def test_image_conversion_to_p16_working(self):
image_array = np.array([8, 7])
expected_bytes = b'\x08\x00\x07\x00'
phantom_image = PhantomImage(image_array)
actual_bytes = phantom_image.p16()
self.assertEqual(expected_bytes, actual_bytes)
def test_image_creation_from_p16_working(self):
expected_array = np.array([[8, 7], [2, 4]])
# 18.03.2019
# This should be the image represented as little endian 16 bit values per pixel in the image
image_bytes = b'\x08\x00\x07\x00\x02\x00\x04\x00'
phantom_image = PhantomImage.from_p16(image_bytes, (2, 2))
self.assertTrue(np.alltrue(expected_array == phantom_image.array))
def test_sample_image_conversion_between_p16_and_jpeg(self):
expected_array = imageio.imread(self.IMAGE_PATH, pilmode='L')
# First we create a phantom image from the file path, then convert it to p16 convert it back and see if it
# still is the same image
phantom_image = PhantomImage.from_jpeg(self.IMAGE_PATH)
phantom_image = PhantomImage.from_p16(phantom_image.p16(), phantom_image.resolution)
self.assertTrue(np.alltrue(expected_array == phantom_image.array))
def test_sample_image_conversion_between_p8_and_jpeg(self):
expected_array = imageio.imread(self.IMAGE_PATH, pilmode='L')
# First we create a phantom image from the file path, then convert it to p16 convert it back and see if it
# still is the same image
phantom_image = PhantomImage.from_jpeg(self.IMAGE_PATH)
phantom_image = PhantomImage.from_p8(phantom_image.p8(), phantom_image.resolution)
self.assertTrue(np.alltrue(expected_array == phantom_image.array))
def test_sample_image_conversion_between_p10_and_jpeg(self):
expected_array = imageio.imread(self.IMAGE_PATH, pilmode='L')
# First we create a phantom image from the file path, then convert it to p16 convert it back and see if it
# still is the same image
phantom_image = PhantomImage.from_jpeg(self.IMAGE_PATH)
phantom_image = PhantomImage.from_p10(phantom_image.p10(), phantom_image.resolution)
self.assertTrue(np.alltrue(expected_array == phantom_image.array))
# 12.07.2019
def test_sample_image_conversion_between_p12l_and_jpeg(self):
expected_array = imageio.imread(self.IMAGE_PATH, pilmode='L')
# First we create a phantom image from the file path, then convert it to p16 convert it back and see if it
# still is the same image
phantom_image = PhantomImage.from_jpeg(self.IMAGE_PATH)
phantom_image = PhantomImage.from_p12l(phantom_image.p12l(), phantom_image.resolution)
self.assertTrue(np.alltrue(expected_array == phantom_image.array))
| 44.285714 | 114 | 0.726979 |
49a92c3d0d46571d345f81e1679852ab05bbb304 | 13,087 | py | Python | src/data/decoy_efficacy.py | sidhikabalachandar/lig_clash_score | 449bac16a7c2b9779e7cd51ff17eb5e41be6ff99 | [
"FTL"
] | null | null | null | src/data/decoy_efficacy.py | sidhikabalachandar/lig_clash_score | 449bac16a7c2b9779e7cd51ff17eb5e41be6ff99 | [
"FTL"
] | null | null | null | src/data/decoy_efficacy.py | sidhikabalachandar/lig_clash_score | 449bac16a7c2b9779e7cd51ff17eb5e41be6ff99 | [
"FTL"
] | null | null | null | """
The purpose of this code is to first create the raw directory folder and include the following files
starting protein receptor
starting ligand
target ligand
glide pose viewer file
Then the top glide poses are added
Then the decoys are created
It can be run on sherlock using
$ $SCHRODINGER/run python3 decoy_efficacy.py indv /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/refined_random.txt /home/users/sidhikab/lig_clash_score/src/data/run /oak/stanford/groups/rondror/projects/combind/flexibility/atom3d/raw /home/users/sidhikab/lig_clash_score/src/data/decoys_all --protein Q16539 --target 2bak --start 3d7z --index 43
"""
import argparse
import os
import schrodinger.structure as structure
import schrodinger.structutils.transform as transform
from schrodinger.structutils.transform import get_centroid
import schrodinger.structutils.interactions.steric_clash as steric_clash
import numpy as np
import schrodinger.structutils.rmsd as rmsd
import random
import subprocess
from tqdm import tqdm
import math
def get_jobs_in_queue(code):
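    """Counts this user's queued/running SLURM jobs whose squeue job name starts with the given 3-character code."""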
cmd = ['squeue', '-u', os.environ['USER']]
slurm = subprocess.run(cmd, stdout=subprocess.PIPE, encoding='utf-8')
count = 0
for ln in slurm.stdout.split('\n'):
terms = ln.split()
if len(terms) > 2 and terms[2].strip()[:3] == code:
count += 1
return count
def create_conformer_decoys(save_path, run_path, conformers, grid, num_jobs_submitted, start_lig_center, target_lig,
prot, min_angle, max_angle, rmsd_cutoff, protein, target, start, index):
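    """
    Randomly samples (conformer, grid translation, rotation) poses around the starting ligand centre,
    keeps only poses whose steric clash volume with the protein is below 200, and stops once a pose
    falls within rmsd_cutoff of the true ligand pose (or once the submitted jobs drain from the queue).
    Grid points, conformers and rotations that repeatedly fail to give acceptable poses are pruned.
    """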
conformer_ls = [[c, 0] for c in conformers]
rot_ls = []
for rot_x in range(int(math.degrees(min_angle)), int(math.degrees(max_angle)) + 1):
for rot_y in range(int(math.degrees(min_angle)), int(math.degrees(max_angle)) + 1):
for rot_z in range(int(math.degrees(min_angle)), int(math.degrees(max_angle)) + 1):
rot_ls.append([[math.radians(rot_x), math.radians(rot_y), math.radians(rot_z)], 0])
output_file = os.path.join(run_path, '{}_{}_{}_{}.txt'.format(protein, target, start, index))
num_iter_without_pose = 0
num_valid_poses = 0
num_total_poses = 0
while True:
num_iter_without_pose += 1
num_total_poses += 1
if num_total_poses % 1000 == 0:
num_jobs_in_queue = get_jobs_in_queue('{}{}{}'.format(protein[0], target[0], start[0]))
f = open(output_file, "a")
f.write("num_total_poses: {}, len(grid): {}, len(conformer_ls): {}, len(rot_ls): {}, num_jobs_in_queue: "
"{}\n".format(num_total_poses, len(grid), len(conformer_ls), len(rot_ls), num_jobs_in_queue))
f.close()
if num_jobs_in_queue != num_jobs_submitted:
break
conformer_index = random.randint(0, len(conformer_ls) - 1)
conformer = conformer_ls[conformer_index][0]
conformer_center = list(get_centroid(conformer))
# translation
index = random.randint(0, len(grid) - 1)
grid_loc = grid[index][0]
transform.translate_structure(conformer, start_lig_center[0] - conformer_center[0] + grid_loc[0],
start_lig_center[1] - conformer_center[1] + grid_loc[1],
start_lig_center[2] - conformer_center[2] + grid_loc[2])
conformer_center = list(get_centroid(conformer))
# rotation
if len(grid) > 1:
x_angle = np.random.uniform(min_angle, max_angle)
y_angle = np.random.uniform(min_angle, max_angle)
z_angle = np.random.uniform(min_angle, max_angle)
else:
rot_index = random.randint(0, len(rot_ls) - 1)
x_angle, y_angle, z_angle = rot_ls[rot_index][0]
transform.rotate_structure(conformer, x_angle, y_angle, z_angle, conformer_center)
if steric_clash.clash_volume(prot, struc2=conformer) < 200:
num_valid_poses += 1
if rmsd.calculate_in_place_rmsd(conformer, conformer.getAtomIndices(), target_lig,
target_lig.getAtomIndices()) < rmsd_cutoff:
save_file = os.path.join(save_path, '{}_{}_{}.txt'.format(protein, target, start))
f = open(output_file, "a")
f.write("Num poses searched = {}\n".format(num_total_poses))
f.write("Num acceptable clash poses searched = {}\n".format(num_valid_poses))
f.close()
if not os.path.exists(save_file):
with open(save_file, 'w') as f:
f.write("Num poses searched = {}\n".format(num_total_poses))
f.write("Num acceptable clash poses searched = {}\n".format(num_valid_poses))
break
grid[index][1] = 0
num_iter_without_pose = 0
elif num_iter_without_pose == 5 and len(grid) > 1:
max_val = max(grid, key=lambda x: x[1])
grid.remove(max_val)
num_iter_without_pose = 0
elif num_iter_without_pose == 5 and len(grid) == 1:
if len(conformer_ls) == 1 and len(rot_ls) == 1:
save_file = os.path.join(save_path, '{}_{}_{}.txt'.format(protein, target, start))
f = open(output_file, "a")
f.write("Num poses searched = {}\n".format(num_total_poses))
f.write("Num acceptable clash poses searched = {}\n".format(num_valid_poses))
f.write("No correct poses found\n")
f.close()
if not os.path.exists(save_file):
with open(save_file, 'w') as f:
f.write("Num poses searched = {}\n".format(num_total_poses))
f.write("Num acceptable clash poses searched = {}\n".format(num_valid_poses))
f.write("No correct poses found\n")
break
elif len(conformer_ls) > 1 and (len(rot_ls) == 1 or (len(conformer_ls) + len(rot_ls)) % 2 == 0):
max_val = max(conformer_ls, key=lambda x: x[1])
conformer_ls.remove(max_val)
else:
max_val = max(rot_ls, key=lambda x: x[1])
rot_ls.remove(max_val)
num_iter_without_pose = 0
else:
grid[index][1] += 1
conformer_ls[conformer_index][1] += 1
if len(grid) == 1:
rot_ls[rot_index][1] += 1
def run_group(protein, target, start, raw_root, save_path, run_path, min_angle, max_angle, index, rmsd_cutoff, grid,
num_jobs_submitted):
"""
    creates conformer decoys and searches for a pose within the rmsd cutoff for one protein, target, start group
    :param protein: (string) protein name
    :param target: (string) target ligand name
    :param start: (string) starting ligand name
    :param raw_root: (string) directory where raw data is stored
    :param save_path: (string) directory where results will be saved
    :param run_path: (string) directory where script and output files will be written
    :param min_angle: (float) minimum angle decoys are rotated
    :param max_angle: (float) maximum angle decoys are rotated
    :param index: (int) grid point group index
    :param rmsd_cutoff: (float) rmsd accuracy cutoff between predicted ligand pose and true ligand pose
    :param grid: (list) grid points (with counters) searched by this job
    :param num_jobs_submitted: (int) total number of grid point groups submitted as jobs
:return:
"""
pair = '{}-to-{}'.format(target, start)
protein_path = os.path.join(raw_root, protein)
pair_path = os.path.join(protein_path, pair)
start_lig_file = os.path.join(pair_path, '{}_lig.mae'.format(start))
start_lig = list(structure.StructureReader(start_lig_file))[0]
target_lig_file = os.path.join(pair_path, 'ligand_poses', '{}_lig0.mae'.format(target))
target_lig = list(structure.StructureReader(target_lig_file))[0]
start_lig_center = list(get_centroid(start_lig))
prot_file = os.path.join(pair_path, '{}_prot.mae'.format(start))
prot = list(structure.StructureReader(prot_file))[0]
aligned_file = os.path.join(pair_path, "aligned_conformers.mae")
conformers = list(structure.StructureReader(aligned_file))
create_conformer_decoys(save_path, run_path, conformers, grid, num_jobs_submitted, start_lig_center, target_lig, prot,
min_angle, max_angle, rmsd_cutoff, protein, target, start, index)
def get_prots(docked_prot_file):
"""
gets list of all protein, target ligands, and starting ligands in the index file
:param docked_prot_file: (string) file listing proteins to process
:return: process (list) list of all protein, target ligands, and starting ligands to process
"""
process = []
with open(docked_prot_file) as fp:
for line in tqdm(fp, desc='index file'):
if line[0] == '#': continue
protein, target, start = line.strip().split()
process.append((protein, target, start))
return process
def get_grid_groups(grid_size, n):
grid = []
for dx in range(-grid_size, grid_size):
for dy in range(-grid_size, grid_size):
for dz in range(-grid_size, grid_size):
grid.append([[dx, dy, dz], 0])
grouped_files = []
for i in range(0, len(grid), n):
grouped_files += [grid[i: i + n]]
return grouped_files
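# Illustrative sizing note (added comment): with the script defaults grid_size=6 and n=20, each axis
# covers range(-6, 6) (12 values), so the grid holds 12**3 == 1728 points and get_grid_groups returns
# 87 groups (86 of 20 points plus one of 8); the 'all' task submits one sbatch job per group.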
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('task', type=str, help='either all (submit one job per grid point group) or '
                                               'indv (search the grid point group given by --index)')
parser.add_argument('docked_prot_file', type=str, help='file listing proteins to process')
parser.add_argument('run_path', type=str, help='directory where script and output files will be written')
parser.add_argument('raw_root', type=str, help='directory where raw data will be placed')
parser.add_argument('save_path', type=str, help='directory where results will be saved')
parser.add_argument('--min_angle', type=float, default=- np.pi / 6, help='minimum angle decoys are rotated')
parser.add_argument('--max_angle', type=float, default=np.pi / 6, help='maximum angle decoys are rotated')
parser.add_argument('--grid_size', type=int, default=6, help='grid size in positive and negative x, y, z '
'directions')
parser.add_argument('--index', type=int, default=-1, help='grid point group index')
parser.add_argument('--rmsd_cutoff', type=int, default=2, help='rmsd accuracy cutoff between predicted ligand pose '
'and true ligand pose')
parser.add_argument('--protein', type=str, default='', help='protein name')
parser.add_argument('--target', type=str, default='', help='target ligand name')
parser.add_argument('--start', type=str, default='', help='start ligand name')
parser.add_argument('--n', type=int, default=20, help='number of grid points processed in indv task')
args = parser.parse_args()
if not os.path.exists(args.run_path):
os.mkdir(args.run_path)
if not os.path.exists(args.save_path):
os.mkdir(args.save_path)
random.seed(0)
if args.task == 'all':
grouped_files = get_grid_groups(args.grid_size, args.n)
process = get_prots(args.docked_prot_file)
random.shuffle(process)
submitted_codes = []
i = 0
while len(submitted_codes) < 1:
protein, target, start = process[i]
i += 1
code = '{}{}{}'.format(protein[0], target[0], start[0])
# if code not in submitted_codes:
if code == 'Q23':
submitted_codes.append(code)
if not os.path.exists(os.path.join(args.save_path, '{}_{}_{}.txt'.format(protein, target, start))):
for j in range(len(grouped_files)):
z_code = '{}{}'.format(code, j)
os.system('sbatch -p owners -t 1:00:00 -o {} -J {} --wrap="$SCHRODINGER/run python3 '
'decoy_efficacy.py indv {} {} {} {} --index {} --protein {} --target {} --start '
'{}"'.format(os.path.join(args.run_path, '{}.out'.format(z_code)), z_code,
args.docked_prot_file, args.run_path, args.raw_root, args.save_path,
j, protein, target, start))
print(i)
print(submitted_codes)
elif args.task == 'indv':
grouped_files = get_grid_groups(args.grid_size, args.n)
run_group(args.protein, args.target, args.start, args.raw_root, args.save_path, args.run_path, args.min_angle,
args.max_angle, args.index, args.rmsd_cutoff, grouped_files[args.index], len(grouped_files))
if __name__=="__main__":
main() | 50.334615 | 364 | 0.622373 |