Dataset column schema (one record per source file):

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
43f18bbb8086893bf4a2fd000390118ef12a005b | 6,807 | py | Python | blpapi/datetime.py | buckie/blpapi-py | 0ff500b547679e3310414d1427215d620b9f67d9 | [
"Unlicense"
] | 5 | 2019-03-20T05:43:50.000Z | 2021-12-17T06:03:33.000Z | blpapi/datetime.py | buckie/blpapi-py | 0ff500b547679e3310414d1427215d620b9f67d9 | [
"Unlicense"
] | null | null | null | blpapi/datetime.py | buckie/blpapi-py | 0ff500b547679e3310414d1427215d620b9f67d9 | [
"Unlicense"
] | 2 | 2019-03-20T05:39:22.000Z | 2019-07-02T14:00:01.000Z | # datetime.py
"""Utilities that deal with blpapi.Datetime data type"""
from . import internals
from . import utils
import datetime as _dt
class FixedOffset(_dt.tzinfo, metaclass=utils.MetaClassForClassesWithEnums):
"""Time zone information.
Represents time zone information to be used with Python standard library
datetime classes.
FixedOffset(offsetInMinutes) creates an object that implements
datetime.tzinfo interface and represents a timezone with the specified
'offsetInMinutes' from UTC.
This class is intended to be used as 'tzinfo' for Python standard library
datetime.datetime and datetime.time classes. These classes are accepted by
the blpapi package to set DATE, TIME or DATETIME elements. For example, the
DATETIME element of a request could be set as:
value = datetime.datetime(1941, 6, 22, 4, 0, tzinfo=FixedOffset(4*60))
request.getElement("last_trade").setValue(value)
The TIME element could be set in a similar way:
value = datetime.time(9, 0, 1, tzinfo=FixedOffset(-5*60))
request.getElement("session_open").setValue(value)
Note that you could use any other implementations of datetime.tzinfo with
BLPAPI-Py, for example the widely used 'pytz' package
(http://pypi.python.org/pypi/pytz/).
For more details see datetime module documentation at
http://docs.python.org/library/datetime.html
"""
def __init__(self, offsetInMinutes=0):
_dt.tzinfo.__init__(self)
self.__offset = _dt.timedelta(minutes=offsetInMinutes)
def utcoffset(self, unused):
return self.__offset
def dst(self, unused):
        return _dt.timedelta(0)
def getOffsetInMinutes(self):
        return self.__offset.days * 24 * 60 + self.__offset.seconds // 60
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
return self.getOffsetInMinutes()
    def __eq__(self, other):
        """Let the comparison operations work based on the time delta."""
        return self.getOffsetInMinutes() == other.getOffsetInMinutes()
    def __lt__(self, other):
        return self.getOffsetInMinutes() < other.getOffsetInMinutes()
# Protect enumeration constant(s) defined in this class and in classes
# derived from this class from changes:
class _DatetimeUtil(object):
"""Utility methods that deal with BLPAPI dates and times."""
@staticmethod
def convertToNative(blpapiDatetime):
"""Convert BLPAPI Datetime object to a suitable Python object."""
parts = blpapiDatetime.parts
hasDate = parts & internals.DATETIME_DATE_PART == \
internals.DATETIME_DATE_PART
hasTime = parts & internals.DATETIME_TIME_PART == \
internals.DATETIME_TIME_PART
mlsecs = blpapiDatetime.milliSeconds * 1000 if parts & \
internals.DATETIME_MILLISECONDS_PART else 0
tzinfo = FixedOffset(blpapiDatetime.offset) if parts & \
internals.DATETIME_OFFSET_PART else None
if hasDate:
if hasTime:
return _dt.datetime(blpapiDatetime.year,
blpapiDatetime.month,
blpapiDatetime.day,
blpapiDatetime.hours,
blpapiDatetime.minutes,
blpapiDatetime.seconds,
mlsecs,
tzinfo)
else:
# Skip an offset, because it's not informative in case of
# there is a date without the time
return _dt.date(blpapiDatetime.year,
blpapiDatetime.month,
blpapiDatetime.day)
else:
if not hasTime:
raise ValueError("Datetime object misses both time and date \
parts", blpapiDatetime)
return _dt.time(blpapiDatetime.hours,
blpapiDatetime.minutes,
blpapiDatetime.seconds,
mlsecs,
tzinfo)
@staticmethod
def isDatetime(dtime):
"""Return True if the parameter is one of Python date/time objects."""
return isinstance(dtime, (_dt.datetime, _dt.date, _dt.time))
@staticmethod
def convertToBlpapi(dtime):
"Convert a Python date/time object to a BLPAPI Datetime object."""
res = internals.blpapi_Datetime_tag()
offset = None
if isinstance(dtime, _dt.datetime):
offset = dtime.utcoffset()
res.year = dtime.year
res.month = dtime.month
res.day = dtime.day
res.hours = dtime.hour
res.minutes = dtime.minute
res.seconds = dtime.second
            res.milliSeconds = dtime.microsecond // 1000
res.parts = internals.DATETIME_DATE_PART | \
internals.DATETIME_TIMEMILLI_PART
elif isinstance(dtime, _dt.date):
res.year = dtime.year
res.month = dtime.month
res.day = dtime.day
res.parts = internals.DATETIME_DATE_PART
elif isinstance(dtime, _dt.time):
offset = dtime.utcoffset()
res.hours = dtime.hour
res.minutes = dtime.minute
res.seconds = dtime.second
            res.milliSeconds = dtime.microsecond // 1000
res.parts = internals.DATETIME_TIMEMILLI_PART
else:
raise TypeError("Datetime could be created only from \
datetime.datetime, datetime.date or datetime.time")
if offset is not None:
            res.offset = int(offset.total_seconds()) // 60
res.parts |= internals.DATETIME_OFFSET_PART
return res
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| 39.807018 | 79 | 0.642574 |
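For orientation, a minimal usage sketch of the FixedOffset class defined above. The import path is taken from the file header and any other datetime.tzinfo implementation would work the same way; this is not part of the original module.

```python
import datetime as dt
from blpapi.datetime import FixedOffset  # import path assumed from the file header above

tz = FixedOffset(4 * 60)                            # UTC+4, as in the docstring example
value = dt.datetime(1941, 6, 22, 4, 0, tzinfo=tz)
print(value.utcoffset())                            # 4:00:00
print(tz.getOffsetInMinutes())                      # 240
```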
9fda3afa961246c85abb8e5389071c99a7413adf | 4,299 | py | Python | dvc/scm/base.py | DiPaolo/dvc | 644e9ede924b6416b4ff714fdc0c3463190ab6f2 | [
"Apache-2.0"
] | null | null | null | dvc/scm/base.py | DiPaolo/dvc | 644e9ede924b6416b4ff714fdc0c3463190ab6f2 | [
"Apache-2.0"
] | 21 | 2021-10-12T05:34:57.000Z | 2022-01-07T12:22:45.000Z | dvc/scm/base.py | DiPaolo/dvc | 644e9ede924b6416b4ff714fdc0c3463190ab6f2 | [
"Apache-2.0"
] | null | null | null | """Manages source control systems (e.g. Git) in DVC."""
import os
from dvc.exceptions import DvcException
class SCMError(DvcException):
"""Base class for source control management errors."""
class FileNotInRepoError(SCMError):
"""Thrown when trying to find .gitignore for a file that is not in a scm
repository.
"""
class CloneError(SCMError):
def __init__(self, url, path):
super().__init__(f"Failed to clone repo '{url}' to '{path}'")
class RevError(SCMError):
pass
class NoSCMError(SCMError):
def __init__(self):
msg = (
"Only supported for Git repositories. If you're "
"seeing this error in a Git repo, try updating the DVC "
"configuration with `dvc config core.no_scm false`."
)
super().__init__(msg)
class MergeConflictError(SCMError):
pass
class InvalidRemoteSCMRepo(SCMError):
def __init__(self, url: str):
msg = f"'{url}' is not a valid Git remote or URL"
super().__init__(msg)
class GitAuthError(SCMError):
def __init__(self, url: str):
super().__init__(
f"HTTP Git authentication is not supported: '{url}'"
"\nSee https://dvc.org/doc//user-guide/"
"troubleshooting#git-auth"
)
class Base:
"""Base class for source control management driver implementations."""
def __init__(self, root_dir=os.curdir):
self._root_dir = os.path.realpath(root_dir)
@property
def root_dir(self) -> str:
return self._root_dir
def __repr__(self):
return "{class_name}: '{directory}'".format(
class_name=type(self).__name__, directory=self.dir
)
@property
def dir(self):
"""Path to a directory with SCM specific information."""
return None
@staticmethod
def is_repo(root_dir): # pylint: disable=unused-argument
"""Returns whether or not root_dir is a valid SCM repository."""
return True
@staticmethod
def is_submodule(root_dir): # pylint: disable=unused-argument
"""Returns whether or not root_dir is a valid SCM repository
submodule.
"""
return True
def is_ignored(self, path): # pylint: disable=unused-argument
"""Returns whether or not path is ignored by SCM."""
return False
def ignore(self, path): # pylint: disable=unused-argument
"""Makes SCM ignore a specified path."""
def ignore_remove(self, path): # pylint: disable=unused-argument
"""Makes SCM stop ignoring a specified path."""
@property
def ignore_file(self):
"""Filename for a file that contains ignored paths for this SCM."""
def add(self, paths):
"""Makes SCM track every path from a specified list of paths."""
def commit(self, msg):
"""Makes SCM create a commit."""
def checkout(self, branch, create_new=False):
"""Makes SCM checkout a branch."""
def branch(self, branch):
"""Makes SCM create a branch with a specified name."""
def tag(self, tag):
"""Makes SCM create a tag with a specified name."""
def untracked_files(self): # pylint: disable=no-self-use
"""Returns a list of untracked files."""
return []
def is_tracked(self, path): # pylint: disable=no-self-use, unused-argument
"""Returns whether or not a specified path is tracked."""
return False
def is_dirty(self):
"""Return whether the SCM contains uncommitted changes."""
return False
def active_branch(self): # pylint: disable=no-self-use
"""Returns current branch in the repo."""
return ""
def list_branches(self): # pylint: disable=no-self-use
"""Returns a list of available branches in the repo."""
return []
def list_tags(self): # pylint: disable=no-self-use
"""Returns a list of available tags in the repo."""
return []
def list_all_commits(self): # pylint: disable=no-self-use
"""Returns a list of commits in the repo."""
return []
def belongs_to_scm(self, path):
"""Return boolean whether file belongs to scm"""
def close(self):
"""Method to close the files"""
def _reset(self) -> None:
pass
| 28.098039 | 79 | 0.624564 |
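A hedged sketch of how a concrete driver builds on the Base class above; the toy subclass is illustrative only and is not dvc's actual Git implementation.

```python
import os
from dvc.scm.base import Base  # import path taken from the file header above

class DummySCM(Base):
    """Toy driver: every path is considered tracked."""

    @property
    def dir(self):
        # SCM metadata would live in <root>/.dummy
        return os.path.join(self.root_dir, ".dummy")

    def is_tracked(self, path):
        return True

scm = DummySCM("/tmp/project")
print(scm)             # "DummySCM: '<realpath>/.dummy'" via Base.__repr__
print(scm.is_dirty())  # False, inherited default
```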
713e19c3097aa1785009f0bbdab91f8d4a52282e | 1,060 | py | Python | Code-Collection/Larmor-Frequency/code/larmor.py | basavyr/physics-code-collection | 6ce50ec184ff2de081d0ca29e679e54dbb21f592 | [
"MIT"
] | 1 | 2021-04-20T04:49:59.000Z | 2021-04-20T04:49:59.000Z | Code-Collection/Larmor-Frequency/code/larmor.py | basavyr/physics-code-collection | 6ce50ec184ff2de081d0ca29e679e54dbb21f592 | [
"MIT"
] | 43 | 2021-01-19T05:02:48.000Z | 2022-03-12T01:07:32.000Z | Code-Collection/Larmor-Frequency/code/larmor.py | basavyr/physics-code-collection | 6ce50ec184ff2de081d0ca29e679e54dbb21f592 | [
"MIT"
] | null | null | null | #! /Users/robertpoenaru/.pyenv/shims/python
# Sources
# Larmor Frequency
# http://hyperphysics.phy-astr.gsu.edu/hbase/Nuclear/larmor.html
# Larmor Precession
# http://hyperphysics.phy-astr.gsu.edu/hbase/magnetic/larmor.html#c1
# Nuclear Magnetic Resonance
# http://hyperphysics.phy-astr.gsu.edu/hbase/Nuclear/nmr.html#c1
# Nuclear Spin Polarization
# http://hyperphysics.phy-astr.gsu.edu/hbase/Nuclear/spinpol.html#c1
# Energy Calculation for Rigid Rotor Molecules
# http://hyperphysics.phy-astr.gsu.edu/hbase/molecule/rotqm.html#c1
import numpy as np
from numpy import random as rd
# The gyromagnetic ratio
g = 2.0
M_PROTON = 1.0
M_ELECTRON = float((1 / 1836) * M_PROTON)
B_FIELD = 2.5 # TESLA
MU_PROTON = 12
MU_ELECTRON = 24
def Larmor(b_field, mass, gyromagnetic):
CHARGE = 1.6
W_1 = CHARGE * gyromagnetic
W_2 = 2.0 * mass
omega_L = (W_1 / W_2) * b_field
print(
f'The frequency of precession of the given spin state around the magnetic field is f_L= {omega_L}')
return omega_L
Larmor(B_FIELD, M_PROTON, g)
| 22.083333 | 107 | 0.723585 |
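As a cross-check in SI units (not part of the script above, which works in arbitrary units): the Larmor relation f_L = gamma * B / (2 * pi) for a proton, with gamma_p about 2.675e8 rad/s/T, gives roughly 106 MHz in the same 2.5 T field.

```python
import math

gamma_p = 2.675e8                 # proton gyromagnetic ratio, rad s^-1 T^-1
B = 2.5                           # tesla, matching B_FIELD above
f_L = gamma_p * B / (2 * math.pi)
print(f"{f_L / 1e6:.1f} MHz")     # ~106.4 MHz
```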
c6de70d42cbb647c0b865efc53cdf8cd9f0371f3 | 15,993 | py | Python | assemblyline/al_ui/apiv3/file.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 46 | 2017-05-15T11:15:08.000Z | 2018-07-02T03:32:52.000Z | assemblyline/al_ui/apiv3/file.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | null | null | null | assemblyline/al_ui/apiv3/file.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 24 | 2017-05-17T03:26:17.000Z | 2018-07-09T07:00:50.000Z | from flask import request
from os.path import basename
import re
from assemblyline.common.charset import safe_str
from assemblyline.common.concurrency import execute_concurrently
from assemblyline.common.hexdump import hexdump
from assemblyline.al.common import forge
from al_ui.apiv3 import core
from al_ui.api_base import api_login, make_api_response, make_file_response
from al_ui.config import STORAGE, ALLOW_RAW_DOWNLOADS
from al_ui.helper.result import format_result
from al_ui.helper.user import load_user_settings
SUB_API = 'file'
Classification = forge.get_classification()
config = forge.get_config()
context = forge.get_ui_context()
encode_file = context.encode_file
file_api = core.make_subapi_blueprint(SUB_API)
file_api._doc = "Perform operations on files"
FILTER_RAW = ''.join([(len(repr(chr(x))) == 3) and chr(x) or chr(x) == '\\' and chr(x) or chr(x) == "\x09" and chr(x)
or chr(x) == "\x0d" and chr(x) or chr(x) == "\x0a" and chr(x) or '.' for x in range(256)])
@file_api.route("/download/<srl>/", methods=["GET"])
@api_login(required_priv=['R'])
def download_file(srl, **kwargs):
"""
Download the file using the default encoding method. This api
will force the browser in download mode.
Variables:
srl => A resource locator for the file (sha256)
Arguments:
name => Name of the file to download
format => Format to encode the file in
password => Password of the password protected zip
Data Block:
None
API call example:
/api/v3/file/download/123456...654321/
Result example:
<THE FILE BINARY ENCODED IN SPECIFIED FORMAT>
"""
user = kwargs['user']
file_obj = STORAGE.get_file(srl)
if not file_obj:
return make_api_response({}, "The file was not found in the system.", 404)
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
params = load_user_settings(user)
name = request.args.get('name', srl)
if name == "":
name = srl
else:
name = basename(name)
name = safe_str(name)
file_format = request.args.get('format', params['download_encoding'])
if file_format == "raw" and not ALLOW_RAW_DOWNLOADS:
return make_api_response({}, "RAW file download has been disabled by administrators.", 403)
password = request.args.get('password', None)
with forge.get_filestore() as f_transport:
data = f_transport.get(srl)
if not data:
return make_api_response({}, "The file was not found in the system.", 404)
data, error, already_encoded = encode_file(data, file_format, name, password)
if error:
return make_api_response({}, error['text'], error['code'])
if file_format != "raw" and not already_encoded:
name = "%s.%s" % (name, file_format)
return make_file_response(data, name, len(data))
else:
return make_api_response({}, "You are not allowed to download this file.", 403)
@file_api.route("/hex/<srl>/", methods=["GET"])
@api_login()
def get_file_hex(srl, **kwargs):
"""
Returns the file hex representation
Variables:
srl => A resource locator for the file (sha256)
Arguments:
None
Data Block:
None
API call example:
/api/v3/file/hex/123456...654321/
Result example:
<THE FILE HEX REPRESENTATION>
"""
user = kwargs['user']
file_obj = STORAGE.get_file(srl)
if not file_obj:
return make_api_response({}, "The file was not found in the system.", 404)
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
with forge.get_filestore() as f_transport:
data = f_transport.get(srl)
if not data:
return make_api_response({}, "This file was not found in the system.", 404)
return make_api_response(hexdump(data))
else:
return make_api_response({}, "You are not allowed to view this file.", 403)
@file_api.route("/strings/<srl>/", methods=["GET"])
@api_login()
def get_file_strings(srl, **kwargs):
"""
Return all strings in a given file
Variables:
srl => A resource locator for the file (sha256)
Arguments:
len => Minimum length for a string
Data Block:
None
Result example:
<THE LIST OF STRINGS>
"""
user = kwargs['user']
hlen = request.args.get('len', "6")
file_obj = STORAGE.get_file(srl)
if not file_obj:
return make_api_response({}, "The file was not found in the system.", 404)
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
with forge.get_filestore() as f_transport:
data = f_transport.get(srl)
if not data:
return make_api_response({}, "This file was not found in the system.", 404)
# Ascii strings
pattern = "[\x1f-\x7e]{%s,}" % hlen
string_list = re.findall(pattern, data)
# UTF-16 strings
try:
string_list += re.findall(pattern, data.decode("utf-16", errors="ignore"))
except UnicodeDecodeError:
pass
return make_api_response("\n".join(string_list))
else:
return make_api_response({}, "You are not allowed to view this file.", 403)
@file_api.route("/raw/<srl>/", methods=["GET"])
@api_login()
def get_file_raw(srl, **kwargs):
"""
Return the raw values for a file where non-utf8 chars are replaced by DOTs.
Variables:
srl => A resource locator for the file (sha256)
Arguments:
None
Data Block:
None
Result example:
<THE RAW FILE>
"""
user = kwargs['user']
file_obj = STORAGE.get_file(srl)
if not file_obj:
return make_api_response({}, "The file was not found in the system.", 404)
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
with forge.get_filestore() as f_transport:
data = f_transport.get(srl)
if not data:
return make_api_response({}, "This file was not found in the system.", 404)
return make_api_response(data.translate(FILTER_RAW))
else:
return make_api_response({}, "You are not allowed to view this file.", 403)
@file_api.route("/children/<srl>/", methods=["GET"])
@api_login(required_priv=['R'])
def get_file_children(srl, **kwargs):
"""
Get the list of children files for a given file
Variables:
srl => A resource locator for the file (sha256)
Arguments:
None
Data Block:
None
API call example:
/api/v3/file/children/123456...654321/
Result example:
[ # List of children
{"name": "NAME OF FILE", # Name of the children
"srl": "123..DEF"}, # SRL of the children (SHA256)
]
"""
user = kwargs['user']
file_obj = STORAGE.get_file(srl)
if file_obj:
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
return make_api_response(STORAGE.list_file_childrens(srl, access_control=user["access_control"]))
else:
return make_api_response({}, "You are not allowed to view this file.", 403)
else:
return make_api_response({}, "This file does not exists.", 404)
@file_api.route("/info/<srl>/", methods=["GET"])
@api_login(required_priv=['R'])
def get_file_information(srl, **kwargs):
"""
Get information about the file like:
Hashes, size, frequency count, etc...
Variables:
srl => A resource locator for the file (sha256)
Arguments:
None
Data Block:
None
API call example:
/api/v3/file/info/123456...654321/
Result example:
{ # File information block
"ascii": "PK..", # First 64 bytes as ASCII
"classification": "UNRESTRICTED", # Access control for the file
"entropy": 7.99, # File's entropy
"hex": "504b...c0b2", # First 64 bytes as hex
"magic": "Zip archive data", # File's identification description (from magic)
"md5": "8f31...a048", # File's MD5 hash
"mime": "application/zip", # Mimetype of the file (from magic)
"seen_count": 7, # Number of time we've seen this file
"seen_first": "2015-03-04T21:59:13.204861Z", # Time at which we first seen this file
"seen_last": "2015-03-10T19:42:04.587233Z", # Last time we've seen the file
"sha256": "e021...4de2", # File's sha256 hash
"sha1": "354f...fdab", # File's sha1 hash
"size": 3417, # Size of the file
"ssdeep": "4:Smm...OHY+", # File's SSDEEP hash
"tag": "archive/zip" # Type of file that we identified
}
"""
user = kwargs['user']
file_obj = STORAGE.get_file(srl)
if file_obj:
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
return make_api_response(file_obj)
else:
return make_api_response({}, "You are not allowed to view this file.", 403)
else:
return make_api_response({}, "This file does not exists.", 404)
@file_api.route("/result/<srl>/", methods=["GET"])
@api_login(required_priv=['R'])
def get_file_results(srl, **kwargs):
"""
Get the all the file results of a specific file.
Variables:
srl => A resource locator for the file (SHA256)
Arguments:
None
Data Block:
None
API call example:
/api/v3/file/result/123456...654321/
Result example:
{"file_info": {}, # File info Block
"results": {}, # Full result list
"errors": {}, # Full error list
"parents": {}, # List of possible parents
"childrens": {}, # List of children files
"tags": {}, # List tags generated
"metadata": {}, # Metadata facets results
"file_viewer_only": True } # UI switch to disable features
"""
user = kwargs['user']
file_obj = STORAGE.get_file(srl)
if not file_obj:
return make_api_response({}, "This file does not exists", 404)
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
output = {"file_info": {}, "results": [], "tags": []}
plan = [
(STORAGE.list_file_active_keys, (srl, user["access_control"]), "results"),
(STORAGE.list_file_parents, (srl, user["access_control"]), "parents"),
(STORAGE.list_file_childrens, (srl, user["access_control"]), "children"),
(STORAGE.get_file_submission_meta, (srl, user["access_control"]), "meta"),
]
temp = execute_concurrently(plan)
active_keys, alternates = temp['results']
output['parents'] = temp['parents']
output['childrens'] = temp['children']
output['metadata'] = temp['meta']
output['file_info'] = file_obj
output['results'] = []
output['alternates'] = {}
res = STORAGE.get_results(active_keys)
for r in res:
res = format_result(user['classification'], r, file_obj['classification'])
if res:
output['results'].append(res)
for i in alternates:
if i['response']['service_name'] not in output["alternates"]:
output["alternates"][i['response']['service_name']] = []
i['response']['service_version'] = i['_yz_rk'].split(".", 3)[2].replace("_", ".")
output["alternates"][i['response']['service_name']].append(i)
output['errors'] = []
output['file_viewer_only'] = True
for res in output['results']:
# noinspection PyBroadException
try:
if "result" in res:
if 'tags' in res['result']:
output['tags'].extend(res['result']['tags'])
except:
pass
return make_api_response(output)
else:
return make_api_response({}, "You are not allowed to view this file", 403)
@file_api.route("/result/<srl>/<service>/", methods=["GET"])
@api_login(required_priv=['R'])
def get_file_results_for_service(srl, service, **kwargs):
"""
Get the all the file results of a specific file and a specific query.
Variables:
srl => A resource locator for the file (SHA256)
Arguments:
all => if all argument is present, it will return all versions
NOTE: Max to 100 results...
Data Block:
None
API call example:
/api/v3/file/result/123456...654321/service_name/
Result example:
{"file_info": {}, # File info Block
"results": {}} # Full result list for the service
"""
user = kwargs['user']
file_obj = STORAGE.get_file(srl)
args = [("fl", "_yz_rk"),
("sort", "created desc")]
if "all" in request.args:
args.append(("rows", "100"))
else:
args.append(("rows", "1"))
if not file_obj:
return make_api_response([], "This file does not exists", 404)
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
res = STORAGE.direct_search("result", "_yz_rk:%s.%s*" % (srl, service), args,
__access_control__=user["access_control"])['response']['docs']
keys = [k["_yz_rk"] for k in res]
results = []
for r in STORAGE.get_results(keys):
result = format_result(user['classification'], r, file_obj['classification'])
if result:
results.append(result)
return make_api_response({"file_info": file_obj, "results": results})
else:
return make_api_response([], "You are not allowed to view this file", 403)
@file_api.route("/score/<srl>/", methods=["GET"])
@api_login(required_priv=['R'])
def get_file_score(srl, **kwargs):
"""
Get the score of the latest service run for a given file.
Variables:
srl => A resource locator for the file (SHA256)
Arguments:
None
Data Block:
None
API call example:
/api/v3/file/score/123456...654321/
Result example:
{"file_info": {}, # File info Block
"result_keys": [<keys>] # List of keys used to compute the score
"score": 0} # Latest score for the file
"""
user = kwargs['user']
file_obj = STORAGE.get_file(srl)
if not file_obj:
return make_api_response([], "This file does not exists", 404)
args = [
("group", "on"),
("group.field", "response.service_name"),
("group.format", "simple"),
("fl", "result.score,_yz_rk"),
("sort", "created desc"),
("rows", "100")
]
if user and Classification.is_accessible(user['classification'], file_obj['classification']):
score = 0
keys = []
res = STORAGE.direct_search("result", "_yz_rk:%s*" % srl, args,
__access_control__=user["access_control"])
docs = res['grouped']['response.service_name']['doclist']['docs']
for d in docs:
score += d['result.score']
keys.append(d["_yz_rk"])
return make_api_response({"file_info": file_obj, "score": score, "result_keys": keys})
else:
return make_api_response([], "You are not allowed to view this file", 403)
| 32.975258 | 117 | 0.591259 |
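For orientation, a hedged client-side sketch of calling one of the routes defined above. The hostname, session handling and response envelope are assumptions about a deployment, not part of this module.

```python
import requests

host = "https://assemblyline.example.com"   # hypothetical deployment URL
sha256 = "123456...654321"                  # placeholder SRL, as in the docstrings above
resp = requests.get(f"{host}/api/v3/file/hex/{sha256}/",
                    cookies={"session": "..."})  # api_login expects an authenticated session
print(resp.status_code, resp.json())
```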
99f51bdc692b0710b5b9923eae4fac1a31a1fb4b | 1,729 | py | Python | app/user/serializers.py | venky-web/recipe-app-api | 0cef9aae704fe6e3412c6aa0790631411840420b | [
"MIT"
] | null | null | null | app/user/serializers.py | venky-web/recipe-app-api | 0cef9aae704fe6e3412c6aa0790631411840420b | [
"MIT"
] | null | null | null | app/user/serializers.py | venky-web/recipe-app-api | 0cef9aae704fe6e3412c6aa0790631411840420b | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False,
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password,
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
| 32.018519 | 74 | 0.646038 |
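A short usage sketch for the serializers above; it runs only inside a configured Django project whose user model exposes create_user, and the field values here are made up.

```python
from user.serializers import UserSerializer  # hypothetical import path based on the file location

serializer = UserSerializer(data={
    'email': 'alice@example.com',
    'password': 'correct-horse',
    'name': 'Alice',
})
if serializer.is_valid():
    user = serializer.save()                     # create() -> create_user(), password gets hashed
    print(user.check_password('correct-horse'))  # True: the raw password is never stored
```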
e69f527604513382e02007574f8acd649f4bc859 | 648 | py | Python | src/app/migrations/0001_initial.py | mp5maker/djangoblog | bc23ea0bc6975673d0fc374f10bfd61cae7255fb | [
"MIT"
] | null | null | null | src/app/migrations/0001_initial.py | mp5maker/djangoblog | bc23ea0bc6975673d0fc374f10bfd61cae7255fb | [
"MIT"
] | null | null | null | src/app/migrations/0001_initial.py | mp5maker/djangoblog | bc23ea0bc6975673d0fc374f10bfd61cae7255fb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-06 18:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='App',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
],
options={
'verbose_name_plural': 'List of Landing Pages',
},
),
]
| 24 | 114 | 0.566358 |
6a06c125fbd297ef587c675b38fe907829c058c7 | 3,316 | py | Python | tessagon/types/square_tri2_tessagon.py | virtualritz/tessagon | af1b1b12a6e92b226f76bfd616bde90d5e15b07c | [
"Apache-2.0"
] | 199 | 2017-10-27T12:13:08.000Z | 2022-03-24T09:54:48.000Z | tessagon/types/square_tri2_tessagon.py | virtualritz/tessagon | af1b1b12a6e92b226f76bfd616bde90d5e15b07c | [
"Apache-2.0"
] | 4 | 2018-03-23T03:15:37.000Z | 2019-11-06T15:58:24.000Z | tessagon/types/square_tri2_tessagon.py | virtualritz/tessagon | af1b1b12a6e92b226f76bfd616bde90d5e15b07c | [
"Apache-2.0"
] | 20 | 2017-10-27T14:41:08.000Z | 2021-12-28T10:12:59.000Z | from math import sqrt
from tessagon.core.tile import Tile
from tessagon.core.tessagon import Tessagon
from tessagon.core.tessagon_metadata import TessagonMetadata
metadata = TessagonMetadata(name='Other Squares and Triangles',
classification='archimedean',
shapes=['squares', 'triangles'],
sides=[4, 3])
class SquareTri2Tile(Tile):
# 6 verts, 11 faces (3 internal, 8 on boundary)
#
# ^ ..|..
# | --O--
# | ./.\.
# | O---O
# |...|
# V O---O
# .\./.
# --O--
# ..|..
#
# U ----->
def __init__(self, tessagon, **kwargs):
super().__init__(tessagon, **kwargs)
self.u_symmetric = True
self.v_symmetric = True
def init_verts(self):
return {'left': {'top': {'u_boundary': None},
'bottom': {'u_boundary': None}},
'right': {'top': {'u_boundary': None},
'bottom': {'u_boundary': None}},
'center': {'top': None,
'bottom': None}}
def init_faces(self):
return {'left': {'top': {'corner': None,
'u_boundary': None},
'bottom': {'corner': None,
'u_boundary': None}},
'right': {'top': {'corner': None,
'u_boundary': None},
'bottom': {'corner': None,
'u_boundary': None}},
'center': {'top': None,
'middle': None,
'bottom': None}}
def calculate_verts(self):
v_unit = 1.0 / (2 + sqrt(3))
v1 = v_unit * 0.5
v2 = 0.5 - v1
# Other verts defined through symmetry
self.add_vert(['center', 'bottom'], 0.5, v1)
self.add_vert(['left', 'bottom', 'u_boundary'], 0, v2, u_boundary=True)
def calculate_faces(self):
self.add_face(['left', 'bottom', 'corner'],
[['center', 'bottom'],
[['left'], ['center', 'bottom']],
[['left', 'bottom'], ['center', 'top']],
[['bottom'], ['center', 'top']]],
face_type='square', corner=True)
self.add_face(['left', 'bottom', 'u_boundary'],
[['center', 'bottom'],
['left', 'bottom', 'u_boundary'],
[['left'], ['center', 'bottom']]],
face_type='triangle', u_boundary=True)
self.add_face(['center', 'bottom'],
[['left', 'bottom', 'u_boundary'],
['center', 'bottom'],
['right', 'bottom', 'u_boundary']],
face_type='triangle')
self.add_face(['center', 'middle'],
[['left', 'bottom', 'u_boundary'],
['right', 'bottom', 'u_boundary'],
['right', 'top', 'u_boundary'],
['left', 'top', 'u_boundary']],
face_type='square')
class SquareTri2Tessagon(Tessagon):
tile_class = SquareTri2Tile
metadata = metadata
| 35.655914 | 79 | 0.422799 |
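For context, a sketch of how a Tessagon subclass like the one above is typically driven. The adaptor import and keyword names follow the tessagon project's documented usage and are assumptions here, not taken from this file.

```python
from tessagon.adaptors.list_adaptor import ListAdaptor  # assumed adaptor class

def plane(u, v):
    return [u, v, 0.0]   # map the (u, v) parameter square onto a flat sheet

options = {
    'function': plane,
    'u_range': [0.0, 1.0],
    'v_range': [0.0, 1.0],
    'u_num': 8,
    'v_num': 8,
    'adaptor_class': ListAdaptor,
}
mesh = SquareTri2Tessagon(**options).create_mesh()  # vertex/face lists from the adaptor
```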
ffc27aae32fc33e9b86cde57edd1d0d92d3f0266 | 3,015 | py | Python | MindwaveDataPointReader.py | RoyFlo360/mind-wave-mobile-drone | a8837187c39688246796d95dedc7ab18ed026172 | [
"MIT"
] | null | null | null | MindwaveDataPointReader.py | RoyFlo360/mind-wave-mobile-drone | a8837187c39688246796d95dedc7ab18ed026172 | [
"MIT"
] | null | null | null | MindwaveDataPointReader.py | RoyFlo360/mind-wave-mobile-drone | a8837187c39688246796d95dedc7ab18ed026172 | [
"MIT"
] | null | null | null | from MindwaveMobileRawReader import MindwaveMobileRawReader
import struct
import collections
from MindwavePacketPayloadParser import MindwavePacketPayloadParser
class MindwaveDataPointReader:
def __init__(self):
self._mindwaveMobileRawReader = MindwaveMobileRawReader()
self._dataPointQueue = collections.deque()
def start(self):
self._mindwaveMobileRawReader.connectToMindWaveMobile()
def readNextDataPoint(self):
if (not self._moreDataPointsInQueue()):
self._putNextDataPointsInQueue()
return self._getDataPointFromQueue()
def _moreDataPointsInQueue(self):
return len(self._dataPointQueue) > 0
def _getDataPointFromQueue(self):
return self._dataPointQueue.pop();
def _putNextDataPointsInQueue(self):
dataPoints = self._readDataPointsFromOnePacket()
self._dataPointQueue.extend(dataPoints)
def _readDataPointsFromOnePacket(self):
self._goToStartOfNextPacket()
payloadBytes, checkSum = self._readOnePacket()
if (not self._checkSumIsOk(payloadBytes, checkSum)):
print ("checksum of packet was not correct, discarding packet...")
return self._readDataPointsFromOnePacket();
else:
dataPoints = self._readDataPointsFromPayload(payloadBytes)
self._mindwaveMobileRawReader.clearAlreadyReadBuffer()
return dataPoints;
def _goToStartOfNextPacket(self):
while(True):
byte = self._mindwaveMobileRawReader.getByte()
if (byte == MindwaveMobileRawReader.START_OF_PACKET_BYTE): # need two of these bytes at the start..
byte = self._mindwaveMobileRawReader.getByte()
if (byte == MindwaveMobileRawReader.START_OF_PACKET_BYTE):
# now at the start of the packet..
return;
def _readOnePacket(self):
payloadLength = self._readPayloadLength();
payloadBytes, checkSum = self._readPacket(payloadLength);
return payloadBytes, checkSum
def _readPayloadLength(self):
payloadLength = self._mindwaveMobileRawReader.getByte()
return payloadLength
def _readPacket(self, payloadLength):
payloadBytes = self._mindwaveMobileRawReader.getBytes(payloadLength)
checkSum = self._mindwaveMobileRawReader.getByte()
return payloadBytes, checkSum
def _checkSumIsOk(self, payloadBytes, checkSum):
sumOfPayload = sum(payloadBytes)
lastEightBits = sumOfPayload % 256
invertedLastEightBits = self._computeOnesComplement(lastEightBits) #1's complement!
return invertedLastEightBits == checkSum;
def _computeOnesComplement(self, lastEightBits):
return ~lastEightBits + 256
def _readDataPointsFromPayload(self, payloadBytes):
payloadParser = MindwavePacketPayloadParser(payloadBytes)
return payloadParser.parseDataPoints();
| 38.164557 | 112 | 0.691542 |
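A tiny worked example of the checksum convention used in _checkSumIsOk above (pure arithmetic, no headset required): the packet checksum is the ones' complement of the low byte of the payload sum.

```python
payload = [0x02, 0x00, 0x04, 0x80]     # example payload bytes
low_byte = sum(payload) % 256          # 134
checksum = ~low_byte + 256             # same arithmetic as _computeOnesComplement -> 121
assert checksum == (~low_byte) & 0xFF  # equivalent bit-mask formulation
print(checksum)                        # a packet carrying 121 here would pass _checkSumIsOk
```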
2b796f8fce06e2970bda67e5fc62724cdd25ce3a | 2,586 | py | Python | venv/Lib/site-packages/pyrogram/raw/types/channel_admin_log_event_action_change_photo.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/raw/types/channel_admin_log_event_action_change_photo.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyrogram/raw/types/channel_admin_log_event_action_change_photo.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | [
"MIT"
] | null | null | null | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class ChannelAdminLogEventActionChangePhoto(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.ChannelAdminLogEventAction`.
Details:
- Layer: ``126``
- ID: ``0x434bd2af``
Parameters:
prev_photo: :obj:`Photo <pyrogram.raw.base.Photo>`
new_photo: :obj:`Photo <pyrogram.raw.base.Photo>`
"""
__slots__: List[str] = ["prev_photo", "new_photo"]
ID = 0x434bd2af
QUALNAME = "types.ChannelAdminLogEventActionChangePhoto"
def __init__(self, *, prev_photo: "raw.base.Photo", new_photo: "raw.base.Photo") -> None:
self.prev_photo = prev_photo # Photo
self.new_photo = new_photo # Photo
@staticmethod
def read(data: BytesIO, *args: Any) -> "ChannelAdminLogEventActionChangePhoto":
# No flags
prev_photo = TLObject.read(data)
new_photo = TLObject.read(data)
return ChannelAdminLogEventActionChangePhoto(prev_photo=prev_photo, new_photo=new_photo)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(self.prev_photo.write())
data.write(self.new_photo.write())
return data.getvalue()
| 34.48 | 106 | 0.644625 |
ee2b12cdda71eaa80cfaf05a0a5db58fb5786ab3 | 8,669 | py | Python | python/surf/protocols/clink/_ClinkChannel.py | qarlosalberto/surf | 69df91296d77efc9e812da051841545e320ebf69 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-05-13T19:56:51.000Z | 2021-05-21T13:33:02.000Z | python/surf/protocols/clink/_ClinkChannel.py | qarlosalberto/surf | 69df91296d77efc9e812da051841545e320ebf69 | [
"BSD-3-Clause-LBNL"
] | null | null | null | python/surf/protocols/clink/_ClinkChannel.py | qarlosalberto/surf | 69df91296d77efc9e812da051841545e320ebf69 | [
"BSD-3-Clause-LBNL"
] | null | null | null | #-----------------------------------------------------------------------------
# Title : PyRogue CameraLink Channel
#-----------------------------------------------------------------------------
# Description:
# PyRogue CameraLink module
#-----------------------------------------------------------------------------
# This file is part of the 'SLAC Firmware Standard Library'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'SLAC Firmware Standard Library', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.protocols.clink
class ClinkChannel(pr.Device):
def __init__(
self,
serial = None,
camType = None,
**kwargs):
super().__init__(**kwargs)
##############################
# Variables
##############################
self.add(pr.RemoteVariable(
name = "LinkMode",
offset = 0x00,
bitSize = 3,
bitOffset = 0,
mode = "RW",
enum = { 0 : 'Disable' , 1 : 'Base', 2 : 'Medium', 3 : 'Full', 4 : 'Deca'},
description = """
Link mode control for camera link lanes:
Disable: Nothing connected
Base: Port Supported [A,B,C], # of chips = 1, # of connectors = 1
Medium: Port Supported [A,B,C,D,E,F], # of chips = 2, # of connectors = 2
Full: Port Supported [A,B,C,D,E,F,G,H], # of chips = 3, # of connectors = 3
Deca: Refer to section /"2.2.3 Camera Link 80 bit/" CameraLink spec V2.0, page 16
""",
))
self.add(pr.RemoteVariable(
name = "DataMode",
description = "Data mode",
offset = 0x04,
bitSize = 4,
bitOffset = 0,
mode = "RW",
enum = { 0 : 'None', 1 : '8Bit', 2 : '10Bit', 3 : '12Bit', 4 : '14Bit',
5 : '16Bit', 6 : '24Bit', 7 : '30Bit', 8 : '36Bit'},
))
self.add(pr.RemoteVariable(
name = "FrameMode",
offset = 0x08,
bitSize = 2,
bitOffset = 0,
mode = "RW",
enum = { 0 : 'None', 1 : 'Line', 2 : 'Frame'},
description = """
None: Disables output
Line: 1D camera
Frame: 2D pixel array
""",
))
self.add(pr.RemoteVariable(
name = "TapCount",
description = "# of video output taps on the Camera Link Interface (# of individual data value channels)",
offset = 0x0C,
bitSize = 4,
bitOffset = 0,
minimum = 0,
maximum = 10,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "DataEn",
description = "Data enable. When 0x0 causes reset on ClinkData\'s FSM module",
offset = 0x10,
bitSize = 1,
bitOffset = 0,
base = pr.Bool,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "Blowoff",
description = "Blows off the outbound AXIS stream (for debugging)",
offset = 0x10,
bitSize = 1,
bitOffset = 1,
base = pr.Bool,
mode = "RW",
))
self.add(pr.RemoteCommand(
name = "CntRst",
description = "",
offset = 0x10,
bitSize = 1,
bitOffset = 2,
function = pr.BaseCommand.toggle,
))
self.add(pr.RemoteVariable(
name = "SerThrottle",
description = "Throttles the UART Serial TX byte rate. Used when the camera cannot accept new bytes until the previous command processed",
offset = 0x10,
bitSize = 16,
bitOffset = 16,
disp = '{}',
mode = "RW",
units = "microsec",
value = 30000, # 30ms/byte
))
self.add(pr.RemoteVariable(
name = "BaudRate",
description = "Baud rate",
offset = 0x14,
bitSize = 24,
bitOffset = 0,
disp = '{}',
mode = "RW",
units = "bps",
value = 9600,
))
self.add(pr.RemoteVariable(
name = "SwControlValue",
description = "Software camera control bit values",
offset = 0x18,
bitSize = 4,
bitOffset = 0,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "SwControlEn",
description = "Software camera control bit enable mask for lane A",
offset = 0x1C,
bitSize = 4,
bitOffset = 0,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = "Running",
description = "Camera link lane running status",
offset = 0x20,
bitSize = 1,
bitOffset = 0,
base = pr.Bool,
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "FrameCount",
description = "Frame counter",
offset = 0x24,
bitSize = 32,
bitOffset = 0,
disp = '{}',
mode = "RO",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "DropCount",
description = "Drop counter",
offset = 0x28,
bitSize = 32,
bitOffset = 0,
disp = '{}',
mode = "RO",
pollInterval = 1,
))
##############################################################################
self._rx = None
self._tx = None
# Check if serial interface defined
if serial is not None:
# Check for OPA1000 camera
if (camType=='Opal1000'):
# Override defaults
self.BaudRate._default = 57600
# Add the device
self.add(surf.protocols.clink.UartOpal1000(
name = 'UartOpal1000',
serial = serial,
expand = False,
))
# Check for Piranha4 camera
elif (camType=='Piranha4'):
# Add the device
self.add(surf.protocols.clink.UartPiranha4(
name = 'UartPiranha4',
serial = serial,
expand = False,
))
# Check for Uniq UP-900CL-12B camera
elif (camType=='Up900cl12b'):
# Override defaults
self.SerThrottle._default = 30000
# Add the device
self.add(surf.protocols.clink.UartUp900cl12b(
name = 'UartUp900cl12b',
serial = serial,
expand = False,
))
# Else generic interface to serial stream
elif camType is None:
# Add the device
self.add(surf.protocols.clink.UartGeneric(
name = 'UartGeneric',
serial = serial,
expand = False,
))
else:
raise ValueError('Invalid camType (%s)' % (camType) )
##############################################################################
def hardReset(self):
self.CntRst()
def initialize(self):
self.CntRst()
def countReset(self):
self.CntRst()
| 33.996078 | 151 | 0.399354 |
b93cb3178d1dd502d521c720b107608349ba4963 | 164 | py | Python | vizdoomaze/envs/vizdoomselfmaze2.py | fanyuzeng/Vizdoomaze | 5b444f2d861c908c4d96ae374bcce660d364f22e | [
"MIT"
] | 3 | 2020-09-25T16:00:49.000Z | 2020-10-29T10:32:30.000Z | vizdoomaze/envs/vizdoomselfmaze2.py | fanyuzeng/Vizdoomaze | 5b444f2d861c908c4d96ae374bcce660d364f22e | [
"MIT"
] | null | null | null | vizdoomaze/envs/vizdoomselfmaze2.py | fanyuzeng/Vizdoomaze | 5b444f2d861c908c4d96ae374bcce660d364f22e | [
"MIT"
] | 1 | 2021-12-17T07:50:47.000Z | 2021-12-17T07:50:47.000Z | from vizdoomaze.envs.vizdoomenv import VizdoomEnv
class VizdoomSelfMaze2(VizdoomEnv):
def __init__(self):
super(VizdoomSelfMaze2, self).__init__(12)
| 20.5 | 50 | 0.762195 |
9b9c69339bdf4cd4d5a617d7761c142b2906ac26 | 3,132 | py | Python | setup.py | snehasi/simple_grid_yaml_compiler | 7dea4d239980770a9bdd5f7478b959bebbf6648a | [
"Apache-2.0"
] | null | null | null | setup.py | snehasi/simple_grid_yaml_compiler | 7dea4d239980770a9bdd5f7478b959bebbf6648a | [
"Apache-2.0"
] | null | null | null | setup.py | snehasi/simple_grid_yaml_compiler | 7dea4d239980770a9bdd5f7478b959bebbf6648a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import setup, find_packages, Command
# Package Info
NAME = 'simple_grid_yaml_compiler'
MODULE_SRC_DIR = ''
DESCRIPTION = 'The YAML compiler for the SIMPLE Grid Framework'
URL = 'https://github.com/WLCG-Lightweight-Sites/simple_grid_yaml_compiler'
EMAIL = 'mayank.sharma@cern.ch'
AUTHOR = 'Mayank Sharma'
REQUIRED = [
'PyYaml',
'ruamel.ordereddict',
'ruamel.yaml',
'argparse'
]
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, MODULE_SRC_DIR ,'__version__.py')) as f:
exec(f.read(), about)
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
sys.exit()
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(exclude=('tests',)),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
include_package_data=True,
license='Apache Software License',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
#'Programming Language :: Python :: Implementation :: CPython',
#'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
) | 29.271028 | 86 | 0.640805 |
e2b2ee29b8b097c91a5dbdcf75c0c38936e137c9 | 352 | py | Python | Python/Tests/TestData/VirtualEnv/env/Scripts/pip-2.7-script.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | [
"Apache-2.0"
] | null | null | null | Python/Tests/TestData/VirtualEnv/env/Scripts/pip-2.7-script.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | [
"Apache-2.0"
] | null | null | null | Python/Tests/TestData/VirtualEnv/env/Scripts/pip-2.7-script.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | [
"Apache-2.0"
] | null | null | null | #!C:\PTVS\Main\Common\Tests\TestData\VirtualEnv\env\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==1.3.1','console_scripts','pip-2.7'
__requires__ = 'pip==1.3.1'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('pip==1.3.1', 'console_scripts', 'pip-2.7')()
)
| 32 | 71 | 0.676136 |
7d242c494e2e4bcdfee69ca3f53b53ac5892fac7 | 397 | py | Python | chapter4/solutions/series_summation.py | hexu1985/Doing.Math.With.Python | b6a02805cd450325e794a49f55d2d511f9db15a5 | [
"MIT"
] | 109 | 2015-08-28T10:23:24.000Z | 2022-02-15T01:39:51.000Z | chapter4/solutions/series_summation.py | hexu1985/Doing.Math.With.Python | b6a02805cd450325e794a49f55d2d511f9db15a5 | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | chapter4/solutions/series_summation.py | hexu1985/Doing.Math.With.Python | b6a02805cd450325e794a49f55d2d511f9db15a5 | [
"MIT"
] | 74 | 2015-10-15T18:09:15.000Z | 2022-01-30T05:06:21.000Z | '''
series_summation.py
Sum an arbitrary series
'''
from sympy import summation, sympify, Symbol, pprint
def find_sum(n_term, num_terms):
n = Symbol('n')
s = summation(n_term, (n, 1, num_terms))
pprint(s)
if __name__ == '__main__':
n_term = sympify(input('Enter the nth term: '))
num_terms = int(input('Enter the number of terms: '))
find_sum(n_term, num_terms)
| 20.894737 | 57 | 0.657431 |
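A quick sanity check of find_sum, assuming sympy is installed and the module above is importable; the sum of the first five squares is 55.

```python
from sympy import Symbol
from series_summation import find_sum  # hypothetical import of the module above

n = Symbol('n')
find_sum(n**2, 5)   # pprint prints 55 (1 + 4 + 9 + 16 + 25)
find_sum(1/n, 3)    # pprint prints 11/6
```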
3141df96b6d3d2f2f3c2dcaea6a127fa50b68cff | 904 | py | Python | 189. Rotate Array.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
] | 1 | 2020-12-04T07:38:16.000Z | 2020-12-04T07:38:16.000Z | 189. Rotate Array.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
] | null | null | null | 189. Rotate Array.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
] | null | null | null | class Solution:
def rotate(self, nums, k: int):
"""
Do not return anything, modify nums in-place instead.
"""
k = k % len(nums)
count = 0
start = 0
while count < len(nums):
pre = nums[start]
current = start
while True:
next = (current + k) % len(nums)
temp = nums[next]
nums[next] = pre
pre = temp
count += 1
current = next
if start == current:
break
start += 1
s = Solution()
print(s.rotate([1,2,3,4,5], 9))
print(s.rotate([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53],82))
print(s.rotate([1,2,3,4,5,6,7], 1))
print(s.rotate([1,2,3], 4)) | 28.25 | 171 | 0.456858 |
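Note that rotate() above works in place and returns None, so the print calls show None rather than the rotated lists; printing the list after the call shows the result. For comparison, a hedged slicing version (O(n) extra space instead of the in-place cyclic replacement):

```python
def rotate_by_slicing(nums, k):
    k %= len(nums)
    nums[:] = nums[-k:] + nums[:-k]   # right-rotate by k with one slice assignment

nums = [1, 2, 3, 4, 5]
rotate_by_slicing(nums, 9)            # 9 % 5 == 4
print(nums)                           # [2, 3, 4, 5, 1]
```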
4cf27c2120eabc2ccfdf96efff4820d67d8f64af | 7,464 | py | Python | CNN/CIfNN.py | marioviti/NN | fdd23cbb4333b9124ad3d87320f039a0496a1e57 | [
"MIT"
] | null | null | null | CNN/CIfNN.py | marioviti/NN | fdd23cbb4333b9124ad3d87320f039a0496a1e57 | [
"MIT"
] | null | null | null | CNN/CIfNN.py | marioviti/NN | fdd23cbb4333b9124ad3d87320f039a0496a1e57 | [
"MIT"
] | null | null | null | import argparse
import os
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
datasets.CIFAR10.url = "http://webia.lip6.fr/~robert/cours/rdfia/cifar-10-python.tar.gz"
from preprocessing import ZCA_withening_matrix, ZCA_whitening
from tme7 import *
PRINT_INTERVAL = 50
CUDA = False
class ConvNet(nn.Module):
"""
Contains the NN architecture
"""
def __init__(self):
super(ConvNet, self).__init__()
        # Convolutional feature extraction layers
self.features = nn.Sequential(
nn.Conv2d(3, 32, (5, 5), stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d((2, 2), stride=2, padding=0),
nn.Conv2d(32, 64, (5, 5), stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d((2, 2), stride=2, padding=0),
nn.Conv2d(64, 64, (5, 5), stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d((2, 2), stride=2, padding=0)
)
# Classifier
self.classifier = nn.Sequential(
nn.Linear(1024, 1000),
nn.ReLU(),
nn.Linear(1000, 10)
            # NOTE: the softmax is included in the loss (CrossEntropyLoss)
)
    # method called when the network is applied to a batch of inputs
    def forward(self, input):
        bsize = input.size(0)  # batch size
        output = self.features(input)  # compute the convolutional features
        output = output.view(bsize, -1)  # flatten the 2D feature maps into a
        # 1D vector for each input
        output = self.classifier(output)  # compute the fully-connected output
return output
def preprocess(path):
train_dataset = datasets.CIFAR10(path, train=True, download=True)
X = train_dataset.train_data/255.
ZCA_mat = ZCA_withening_matrix(X)
transforms_to_data = [transforms.ToTensor(), ZCA_whitening(ZCA_mat)]
print('ok',transforms_to_data)
return transforms_to_data
def get_dataset(batch_size, path):
transforms_to_data = preprocess(path)
train_dataset = datasets.CIFAR10(path, train=True, download=True,
transform=transforms.Compose(transforms_to_data))
val_dataset = datasets.CIFAR10(path, train=False, download=True,
transform=transforms.Compose(transforms_to_data))
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size, shuffle=True, pin_memory=CUDA, num_workers=2)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=batch_size, shuffle=False, pin_memory=CUDA, num_workers=2)
return train_loader, val_loader
def epoch(data, model, criterion, optimizer=None):
"""
    Runs one pass (an "epoch") over the data in `data` with the model `model`,
    evaluating `criterion` as the loss.
    If `optimizer` is provided, performs a training epoch with the given
    optimizer; otherwise performs an evaluation epoch (no backward pass) of
    the model.
"""
    # switch the model to eval or train mode (some layers behave
    # differently during training and evaluation)
    model.eval() if optimizer is None else model.train()
    # objects that accumulate running averages of the metrics
avg_loss = AverageMeter()
avg_top1_acc = AverageMeter()
avg_top5_acc = AverageMeter()
avg_batch_time = AverageMeter()
global loss_plot
    # iterate over the batches of the dataset
tic = time.time()
for i, (input, target) in enumerate(data):
        if CUDA: # when running on GPU, move tensors to CUDA
input = input.cuda()
target = target.cuda()
# forward
output = model(Variable(input))
loss = criterion(output, Variable(target))
        # backward pass only when in "train" mode
if optimizer:
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # compute the metrics
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
batch_time = time.time() - tic
tic = time.time()
        # update the running averages
avg_loss.update(loss.data[0])
avg_top1_acc.update(prec1[0])
avg_top5_acc.update(prec5[0])
avg_batch_time.update(batch_time)
if optimizer:
loss_plot.update(avg_loss.val)
        # print progress information
if i % PRINT_INTERVAL == 0:
print('[{0:s} Batch {1:03d}/{2:03d}]\t'
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:5.1f} ({top1.avg:5.1f})\t'
'Prec@5 {top5.val:5.1f} ({top5.avg:5.1f})'.format(
"EVAL" if optimizer is None else "TRAIN", i, len(data), batch_time=avg_batch_time, loss=avg_loss,
top1=avg_top1_acc, top5=avg_top5_acc))
if optimizer:
loss_plot.plot()
    # print a summary of the epoch
print('\n===============> Total time {batch_time:d}s\t'
'Avg loss {loss.avg:.4f}\t'
'Avg Prec@1 {top1.avg:5.2f} %\t'
'Avg Prec@5 {top5.avg:5.2f} %\n'.format(
batch_time=int(avg_batch_time.sum), loss=avg_loss,
top1=avg_top1_acc, top5=avg_top5_acc))
return avg_top1_acc, avg_top5_acc, avg_loss
def main(params):
    # example params:
# {"batch_size": 128, "epochs": 5, "lr": 0.1, "path": '/tmp/datasets/mnist'}
# define model, loss, optim
model = ConvNet()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), params.lr)
    if CUDA: # when running on GPU, move the model and loss to CUDA
model = model.cuda()
criterion = criterion.cuda()
    # fetch the data
train, test = get_dataset(params.batch_size, params.path)
# init plots
plot = AccLossPlot()
global loss_plot
loss_plot = TrainLossPlot()
    # iterate over the epochs
for i in range(params.epochs):
print("=================\n=== EPOCH "+str(i+1)+" =====\n=================\n")
        # training phase
        top1_acc, top5_acc, loss = epoch(train, model, criterion, optimizer)
        # evaluation phase
top1_acc_test, top5_acc_test, loss_test = epoch(test, model, criterion)
# plot
plot.update(loss.avg, loss_test.avg, top1_acc.avg, top1_acc_test.avg)
if __name__ == '__main__':
    # command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument('--path', default="../Datasets/", type=str, metavar='DIR', help='path to dataset')
parser.add_argument('--epochs', default=5, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--batch-size', default=128, type=int, metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', default=0.1, type=float, metavar='LR', help='learning rate')
parser.add_argument('--cuda', dest='cuda', action='store_true', help='activate GPU acceleration')
parser.add_argument('--zca', dest='zca', action='store_true', help='use zca whitening')
print(parser)
args = parser.parse_args()
if args.cuda:
CUDA = True
cudnn.benchmark = True
main(args)
input("done")
| 35.042254 | 116 | 0.625 |
fc32924066b2c78270b86c6a1e80f25647c9e6c0 | 2,900 | py | Python | packages/videodownloader/youtube.py | darvin/leon | e4a3975eb14e4f72c2dd1fa612fab2fe35149eb6 | [
"MIT"
] | 4 | 2019-04-16T12:04:34.000Z | 2019-07-03T17:28:57.000Z | packages/videodownloader/youtube.py | sadiqmmm/leon | 360d1020c4bd8bf1df3764645c635c64e0364702 | [
"MIT"
] | 378 | 2019-02-26T05:34:51.000Z | 2020-06-17T07:03:23.000Z | packages/videodownloader/youtube.py | sadiqmmm/leon | 360d1020c4bd8bf1df3764645c635c64e0364702 | [
"MIT"
] | 1 | 2019-02-22T07:04:05.000Z | 2019-02-22T07:04:05.000Z |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import requests
import os
import utils
from time import time
from pytube import YouTube
def youtube(string):
"""Download new videos from a YouTube playlist"""
db = utils.db()['db']
query = utils.db()['query']
operations = utils.db()['operations']
apikey = utils.config('api_key')
playlistid = utils.config('playlist_id')
# https://developers.google.com/youtube/v3/docs/playlistItems/list
url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=' + playlistid + '&key=' + apikey
utils.output('inter', 'reaching_playlist', utils.translate('reaching_playlist'))
# Get videos from the playlist
try:
r = utils.http('GET', url)
# In case there is a problem like wrong settings
if 'error' in r.json():
error = r.json()['error']['errors'][0]
return utils.output('settings_error', 'settings_error', utils.translate('settings_errors', {
'reason': error['reason'],
'message': error['message']
}))
items = r.json()['items']
videoids = []
videos = []
for item in items:
resource = item['snippet']['resourceId']
if resource['kind'] == 'youtube#video':
videoids.append(resource['videoId'])
videos.append({
'id': resource['videoId'],
'title': item['snippet']['title']
})
except requests.exceptions.RequestException as e:
return utils.output('request_error', 'request_error', utils.translate('request_errors'))
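
    # Illustrative note (added): shape of the playlistItems response consumed above;
    # values are placeholders, see the API reference linked earlier.
    #   {"items": [{"snippet": {"title": "Some video",
    #                           "resourceId": {"kind": "youtube#video", "videoId": "abc123"}}}]}
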
Entry = query()
# First initialization
if db.count(Entry.platform == 'youtube') == 0:
db.insert({
'platform': 'youtube',
'checked_at': int(time()),
'downloaded_videos': []
})
else:
db.update({ 'checked_at': int(time()) }, Entry.platform == 'youtube')
# Get videos already downloaded
downloadedvideos = db.get(Entry.platform == 'youtube')['downloaded_videos']
todownload = []
for video in videos:
if video['id'] not in downloadedvideos:
todownload.append(video)
nbrtodownload = len(todownload)
if nbrtodownload == 0:
return utils.output('nothing_to_download', 'nothing_to_download', utils.translate('nothing_to_download'))
utils.output('inter', 'nb_to_download', utils.translate('nb_to_download', {
'nb': nbrtodownload
}))
# Create the module downloads directory
moduledldir = utils.createdldir()
for i, video in enumerate(todownload):
utils.output('inter', 'downloading', utils.translate('downloading', {
'video_title': video['title']
}))
# Download the video
yt = YouTube('https://youtube.com/watch?v=' + video['id'])
yt.streams.first().download(moduledldir)
# Add the new downloaded video to the DB
downloadedvideos.append(video['id'])
db.update({ 'downloaded_videos': downloadedvideos }, Entry.platform == 'youtube')
# Will synchronize the content (because "end" type) if synchronization enabled
return utils.output('end', 'success', utils.translate('success'))
| 29.591837 | 131 | 0.692069 |
471c97288deb1b49a9da4c882dd09b06b28c29b6 | 2,478 | py | Python | landmark_generate/train.py | yourmean/Moving-Emoji-Generation_Sol1 | 5113b91ba2f30e7aa196dd089af341ecb97a1895 | [
"MIT"
] | 4 | 2021-01-21T04:33:33.000Z | 2022-03-04T17:02:38.000Z | landmark_generate/train.py | shkim960520/Moving-Emoji-Generation_Sol1 | 52751d9ff0e648b08650162ea2e40a232e6abafd | [
"MIT"
] | null | null | null | landmark_generate/train.py | shkim960520/Moving-Emoji-Generation_Sol1 | 52751d9ff0e648b08650162ea2e40a232e6abafd | [
"MIT"
] | 3 | 2021-01-21T04:36:26.000Z | 2021-01-21T05:59:02.000Z |
import numpy as np
import PIL
import functools
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import models
from trainers import Trainer
import data
def video_transform(video, image_transform):
vid = []
for im in video:
vid.append(image_transform(im))
vid = torch.stack(vid).permute(1,0,2,3)
return vid
if __name__ == "__main__":
    n_channels = 1  # landmark images have a single channel
image_transforms = transforms.Compose([
PIL.Image.fromarray,
transforms.Scale(64),
        transforms.Grayscale(1),  # convert to grayscale because of the MoCoGAN data
transforms.ToTensor(),
lambda x: x[:n_channels, ::],
transforms.Normalize((0.5,), (0.5,)),
])
video_transforms = functools.partial(video_transform, image_transform = image_transforms)
    # The following hyperparameters still need tuning during optimization!
    # In particular z_content and z_motion need adjusting; set z_category to fit our task
video_length = 16
image_batch = 32
video_batch = 32
dim_z_content = 30
dim_z_motion = 10
dim_z_category = 4
data_path = '../data/actions'
log_path = '../logs'
dataset = data.VideoFolderDataset(data_path)
image_dataset = data.ImageDataset(dataset, image_transforms)
image_loader = DataLoader(image_dataset, batch_size=image_batch, drop_last=True, num_workers=2, shuffle=True)
video_dataset = data.VideoDataset(dataset, 16, 2, video_transforms)
video_loader = DataLoader(video_dataset, batch_size=video_batch, drop_last=True, num_workers=2, shuffle=True)
generator = models.VideoGenerator(n_channels, dim_z_content, dim_z_category, dim_z_motion, video_length)
image_discriminator = models.PatchImageDiscriminator(n_channels=n_channels, use_noise=True, noise_sigma=0.1)
video_discriminator = models.CategoricalVideoDiscriminator(dim_categorical=dim_z_category, n_channels=n_channels, use_noise=True, noise_sigma=0.2)
if torch.cuda.is_available():
generator.cuda()
image_discriminator.cuda()
video_discriminator.cuda()
    # train_batches: adjust this to set the effective number of training epochs
trainer = Trainer(image_loader, video_loader,
log_interval=10,
train_batches=30,
log_folder=log_path,
use_cuda=torch.cuda.is_available(),
use_infogan=True,
use_categories=True)
trainer.train(generator, image_discriminator, video_discriminator)
| 30.219512 | 150 | 0.69088 |
58b317a352f30dcdb1f7558721a8998c631b7674 | 5,439 | py | Python | tests/orders/test_galaxy.py | cacrespo/pythonium | 74cc5d4333212adfb6eedade8fcd8dfe86d221d5 | [
"MIT"
] | null | null | null | tests/orders/test_galaxy.py | cacrespo/pythonium | 74cc5d4333212adfb6eedade8fcd8dfe86d221d5 | [
"MIT"
] | null | null | null | tests/orders/test_galaxy.py | cacrespo/pythonium | 74cc5d4333212adfb6eedade8fcd8dfe86d221d5 | [
"MIT"
] | null | null | null |
import random
import pytest
from pythonium.orders.galaxy import (
ProduceResources,
ResolvePlanetsConflicts,
ResolveShipsConflicts,
)
class TestProduceResources:
@pytest.fixture()
def player_planets_count(self, galaxy, faker):
return len(list(galaxy.get_ocuped_planets()))
@pytest.fixture()
def order(self, galaxy):
return ProduceResources(galaxy=galaxy)
def test_produce_in_occuped_planets(
self, order, mocker, galaxy, player_planets_count
):
spy = mocker.spy(order, "_produce_resources")
order.execute()
assert spy.call_count == player_planets_count
def test_produce_happypoints(
self, order, colonized_planet, happypoints_tolerance
):
dhappypoints = colonized_planet.dhappypoints
happypoints = colonized_planet.happypoints
order._produce_resources(colonized_planet)
assert colonized_planet.happypoints == happypoints + dhappypoints
assert colonized_planet.happypoints > happypoints_tolerance
def test_produce_megacredits(
self, order, colonized_planet, happypoints_tolerance
):
dmegacredits = colonized_planet.dmegacredits
megacredits = colonized_planet.megacredits
order._produce_resources(colonized_planet)
assert colonized_planet.megacredits == megacredits + dmegacredits
assert colonized_planet.happypoints > happypoints_tolerance
def test_produce_pythonium(
self, order, colonized_planet, happypoints_tolerance
):
dpythonium = colonized_planet.dpythonium
pythonium = colonized_planet.pythonium
order._produce_resources(colonized_planet)
assert colonized_planet.pythonium == pythonium + dpythonium
assert colonized_planet.happypoints > happypoints_tolerance
def test_produce_clans(
self, order, colonized_planet, happypoints_tolerance
):
dclans = colonized_planet.dclans
clans = colonized_planet.clans
order._produce_resources(colonized_planet)
assert colonized_planet.clans == clans + dclans
assert colonized_planet.happypoints > happypoints_tolerance
class TestResolveShipsConflicts:
@pytest.fixture()
def winner_ships(self, ships_in_conflict, winner):
return [s for s in ships_in_conflict if s.player == winner]
@pytest.fixture()
def spy_remove_destroyed_ships(self, mocker, ships_in_conflict_galaxy):
return mocker.spy(ships_in_conflict_galaxy, "remove_destroyed_ships")
@pytest.fixture
def expected_destroyed_ships(self, winner, ships_in_conflict):
return [s for s in ships_in_conflict if s.player != winner]
@pytest.fixture(autouse=True)
def execute_order(
self,
spy_remove_destroyed_ships,
ships_in_conflict_galaxy,
tenacity,
winner,
):
order = ResolveShipsConflicts(ships_in_conflict_galaxy, tenacity)
assert not ships_in_conflict_galaxy.explosions
order.execute()
return order
@pytest.fixture
def winner(self, ships_in_conflict_galaxy, mocker):
winner = random.choice(list(ships_in_conflict_galaxy.known_races))
mocker.patch(
"pythonium.orders.galaxy.ResolveShipsConflicts._compute_winner",
return_value=winner,
)
return winner
def test_destroyed_ships_for_loosers(
self, ships_in_conflict_galaxy, expected_destroyed_ships
):
exploded_ships = [e.ship for e in ships_in_conflict_galaxy.explosions]
assert exploded_ships == expected_destroyed_ships
def test_remove_destroyed_ships(self, spy_remove_destroyed_ships):
assert spy_remove_destroyed_ships.call_count == 1
def test_winner_ships_still_exist(
self, winner_ships, ships_in_conflict_galaxy
):
assert all(s in ships_in_conflict_galaxy.ships for s in winner_ships)
class TestResolvePlanetsConflicts:
@pytest.fixture()
def winner(self, planet_conflict_galaxy):
planet, ships = next(planet_conflict_galaxy.get_planets_conflicts())
return ships[0].player
@pytest.fixture()
def conquered_planet_id(self, planet_conflict_galaxy):
planet, ships = next(planet_conflict_galaxy.get_planets_conflicts())
return planet.id
def test_ship_without_attack_do_nothing(
self, mocker, planet_pasive_conflict_galaxy
):
order = ResolvePlanetsConflicts(planet_pasive_conflict_galaxy)
spy_resolve_conflict = mocker.spy(order, "_resolve_planets_conflicts")
order.execute()
assert spy_resolve_conflict.call_count == 0
def test_enemy_ships_conquer_planet(
self, conquered_planet_id, winner, planet_conflict_galaxy
):
order = ResolvePlanetsConflicts(planet_conflict_galaxy)
order.execute()
conquered_planet = planet_conflict_galaxy.search_planet(
conquered_planet_id
)
assert conquered_planet.player == winner
def test_conquered_planet_state(
self, planet_conflict_galaxy, conquered_planet_id
):
order = ResolvePlanetsConflicts(planet_conflict_galaxy)
order.execute()
conquered_planet = planet_conflict_galaxy.search_planet(
conquered_planet_id
)
assert conquered_planet.clans == 1
assert conquered_planet.mines == 0
assert conquered_planet.taxes == 0
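
# Running these tests (added note): assuming pytest and the pythonium package with its
# shared fixtures (galaxy, ships_in_conflict, planet_conflict_galaxy, ...) are installed:
#   pytest tests/orders/test_galaxy.py -q
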
| 35.090323 | 78 | 0.718882 |
1df85b9a41275cb70dc59fecbfd8bddf6b4483b9 | 6,875 | py | Python | src/beanmachine/ppl/compiler/tests/fix_matrix_scale_test.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | 177 | 2021-12-12T14:19:05.000Z | 2022-03-24T05:48:10.000Z | src/beanmachine/ppl/compiler/tests/fix_matrix_scale_test.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | 171 | 2021-12-11T06:12:05.000Z | 2022-03-31T20:26:29.000Z | src/beanmachine/ppl/compiler/tests/fix_matrix_scale_test.py | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 | [
"MIT"
] | 31 | 2021-12-11T06:27:19.000Z | 2022-03-25T13:31:56.000Z |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
import torch
from beanmachine.ppl.inference import BMGInference
from torch.distributions import Normal
@bm.random_variable
def scalar():
return Normal(0.0, 1.0)
matrix = torch.tensor([20, 40])
@bm.functional
def scaled():
return scalar() * matrix
@bm.functional
def scaled_sym():
return matrix * scalar()
@bm.functional
def scaled2():
return scalar() * torch.tensor([scalar(), scalar()])
@bm.functional
def scaled2_sym():
return (torch.tensor([scalar(), scalar()])) * scalar()
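
# Note (added for clarity): the tests below check that the BMG compiler rewrites an
# elementwise product of a scalar sample and a matrix (constant or stochastic) into a
# single MatrixScale node, as shown in the expected "after_transform" graphs.
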
class FixMatrixScaleTest(unittest.TestCase):
def test_fix_matrix_scale_1(self) -> None:
self.maxDiff = None
observations = {}
queries = [scaled()]
num_samples = 1000
num_chains = 1
# Sanity check to make sure the model is valid
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label="[20,40]"];
N5[label="*"];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label="[20,40]"];
N5[label=MatrixScale];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}"""
self.assertEqual(expected.strip(), observed.strip())
# The model runs on Bean Machine Graph
_ = BMGInference().infer(queries, observations, num_samples=num_samples)
def test_fix_matrix_scale_1_sym(self) -> None:
self.maxDiff = None
observations = {}
queries = [scaled_sym()]
num_samples = 1000
num_chains = 1
# Sanity check to make sure the model is valid
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N0[label="[20,40]"];
N1[label=0.0];
N2[label=1.0];
N3[label=Normal];
N4[label=Sample];
N5[label="*"];
N6[label=Query];
N0 -> N5;
N1 -> N3;
N2 -> N3;
N3 -> N4;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label="[20,40]"];
N5[label=MatrixScale];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}"""
self.assertEqual(expected.strip(), observed.strip())
# The model runs on Bean Machine Graph
_ = BMGInference().infer(queries, observations, num_samples=num_samples)
def test_fix_matrix_scale_2(self) -> None:
self.maxDiff = None
observations = {}
queries = [scaled2()]
num_samples = 1000
num_chains = 1
# Sanity check to make sure the model is valid
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Tensor];
N5[label="*"];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N3 -> N4;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=2];
N5[label=1];
N6[label=ToMatrix];
N7[label=MatrixScale];
N8[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N6;
N3 -> N6;
N3 -> N7;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# The model runs on Bean Machine Graph
_ = BMGInference().infer(queries, observations, num_samples=num_samples)
def test_fix_matrix_scale_2_sym(self) -> None:
self.maxDiff = None
observations = {}
queries = [scaled2_sym()]
num_samples = 1000
num_chains = 1
# Sanity check to make sure the model is valid
nmc = bm.SingleSiteNewtonianMonteCarlo()
_ = nmc.infer(
queries=queries,
observations=observations,
num_samples=num_samples,
num_chains=num_chains,
)
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before optimization
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=Tensor];
N5[label="*"];
N6[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N4;
N3 -> N4;
N3 -> N5;
N4 -> N5;
N5 -> N6;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After optimization:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N0[label=0.0];
N1[label=1.0];
N2[label=Normal];
N3[label=Sample];
N4[label=2];
N5[label=1];
N6[label=ToMatrix];
N7[label=MatrixScale];
N8[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N6;
N3 -> N6;
N3 -> N7;
N4 -> N6;
N5 -> N6;
N6 -> N7;
N7 -> N8;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# The model runs on Bean Machine Graph
_ = BMGInference().infer(queries, observations, num_samples=num_samples)
| 21.894904 | 86 | 0.592291 |
2ad5977c73577530628939c99fe7885856c7f0f3 | 3,349 | py | Python | AL_scripts/dataloader_withxray.py | The-very-most-awesome-team-of-cool-kids/02463_Active_Learning | abc35a31996de1c2e3275cf946b6a44f62abb781 | [
"MIT"
] | null | null | null | AL_scripts/dataloader_withxray.py | The-very-most-awesome-team-of-cool-kids/02463_Active_Learning | abc35a31996de1c2e3275cf946b6a44f62abb781 | [
"MIT"
] | null | null | null | AL_scripts/dataloader_withxray.py | The-very-most-awesome-team-of-cool-kids/02463_Active_Learning | abc35a31996de1c2e3275cf946b6a44f62abb781 | [
"MIT"
] | null | null | null |
import numpy as np
import torch
from torchvision import datasets, transforms
from torch.utils.data import Dataset
from PIL import Image
from LOAD_XRAY import concat_, zeropad, Dataload
def get_dataset(name):
if name.upper() == 'CIFAR10':
return get_CIFAR10()
elif name.upper() == 'XRAY':
return get_Xray()
def get_CIFAR10():
"""
Gets data set:
--------------------------------------
Parameters:
name: The name of the wanted data set, options are: "CIFAR10"
--------------------------------------
Outputs:
X_tr: The training data
Y_tr: The training labels
X_te: The test data
Y_te: The test labels
"""
#if name.upper() == "CIFAR10":
data_tr = datasets.CIFAR10('./CIFAR10', train=True, download=True)
data_te = datasets.CIFAR10('./CIFAR10', train=False, download=True)
X_tr = data_tr.train_data
Y_tr = torch.from_numpy(np.array(data_tr.train_labels))
X_te = data_te.test_data
Y_te = torch.from_numpy(np.array(data_te.test_labels))
return X_tr, Y_tr, X_te, Y_te
def get_Xray():
path_train ="/Users/mat05/OneDrive - Danmarks Tekniske Universitet/02463_Active_Learning/AL_scripts/Egne_filer/Train/chest_xray/train/"
path_test = "/Users/mat05/OneDrive - Danmarks Tekniske Universitet/02463_Active_Learning/AL_scripts/Egne_filer/Test/chest_xray/test/"
X0_tr, y0_tr = Dataload(path_train, "NORMAL", 125)
X1_tr, y1_tr = Dataload(path_train, "PNEUMONIA", 125)
X_tr = np.concatenate((X0_tr,X1_tr),axis=0)
Y_tr = np.concatenate((y0_tr,y1_tr))
X0_te, y0_te = Dataload(path_test, "NORMAL", 125)
X1_te, y1_te = Dataload(path_test, "PNEUMONIA", 125)
X_te = np.concatenate((X0_te,X1_te),axis=0)
Y_te = np.concatenate((y0_te,y1_te))
Y_tr = torch.from_numpy(Y_tr)
Y_te = torch.from_numpy(Y_te)
return X_tr, Y_tr, X_te, Y_te
def get_handler(name):
if name.upper() == "CIFAR10":
return handler1
if name.upper() == "Xray":
return handler1
def get_args(name):
if name.upper() == "CIFAR10":
return {'n_epoch': 1,
'transform': transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
'loader_tr_args':{'batch_size': 4, 'num_workers': 1},
'loader_te_args':{'batch_size': 1000, 'num_workers': 1},
'optimizer_args':{'lr': 0.0009}}
if name.upper() == "Xray":
return {'n_epoch': 1,
'transform': transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
'loader_tr_args':{'batch_size': 4, 'num_workers': 1},
'loader_te_args':{'batch_size': 1000, 'num_workers': 1},
'optimizer_args':{'lr': 0.0009}}
class handler1(Dataset):
def __init__(self, X, Y, transform = None):
self.X = X
self.Y = Y
self.transform = transform
def __getitem__(self, index):
x, y = self.X[index], self.Y[index]
if self.transform is not None:
x = Image.fromarray(x)
x = self.transform(x)
return x, y, index
def __len__(self):
return len(self.X)
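
# Hedged usage sketch (added; not part of the original module). Shows how the helpers
# above fit together; "CIFAR10" is used here to avoid the hard-coded X-ray paths, and
# the shuffle flag is an arbitrary choice.
def _example_pipeline():
    from torch.utils.data import DataLoader
    X_tr, Y_tr, X_te, Y_te = get_dataset("CIFAR10")
    handler = get_handler("CIFAR10")
    args = get_args("CIFAR10")
    train_data = handler(X_tr, Y_tr, transform=args["transform"])
    # each item yielded by the loader is an (x, y, index) triple (see handler1.__getitem__)
    loader = DataLoader(train_data, shuffle=True, **args["loader_tr_args"])
    return loader
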
| 32.833333 | 139 | 0.601672 |
9590c853093e14326bdbca1f8b4588c5bbeb7d32 | 32,252 | py | Python | vnpy/gateway/hsoption/hsoption_gateway.py | xiumingxu/vnpy-xx | 8b2d9ecdabcb7931d46fd92fad2d3701b7e66975 | [
"MIT"
] | null | null | null | vnpy/gateway/hsoption/hsoption_gateway.py | xiumingxu/vnpy-xx | 8b2d9ecdabcb7931d46fd92fad2d3701b7e66975 | [
"MIT"
] | null | null | null | vnpy/gateway/hsoption/hsoption_gateway.py | xiumingxu/vnpy-xx | 8b2d9ecdabcb7931d46fd92fad2d3701b7e66975 | [
"MIT"
] | null | null | null |
import wmi
import socket
from typing import Dict, List
import requests
from datetime import datetime
from time import sleep
import traceback
import pytz
from vnpy.api.t2sdk import py_t2sdk
from vnpy.api.sopt import MdApi
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.utility import get_folder_path
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.constant import (
Direction,
Offset,
Exchange,
OrderType,
Product,
Status,
OptionType
)
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
OrderRequest,
CancelRequest,
SubscribeRequest,
)
EXCHANGE_HSOPTION2VT: Dict[str, Exchange] = {
"1": Exchange.SSE,
"2": Exchange.SZSE
}
EXCHANGE_VT2HSOPTION = {
Exchange.SSE: "1",
Exchange.SZSE: "2"
}
DIRECTION_VT2HSOPTION: Dict[Direction, str] = {
Direction.LONG: "1",
Direction.SHORT: "2",
}
DIRECTION_HSOPTION2VT = {v: k for k, v in DIRECTION_VT2HSOPTION.items()}
POS_DIRECTION_VT2HSOPTION: Dict[Direction, str] = {
Direction.LONG: "0",
Direction.SHORT: "1",
}
POS_DIRECTION_HSOPTION2VT: Dict[str, Direction] = {
"0": Direction.LONG,
"1": Direction.SHORT,
"2": Direction.SHORT
}
DIRECTION_HSOPTION2VT = {v: k for k, v in DIRECTION_VT2HSOPTION.items()}
OFFSET_HSOPTION2VT: Dict[str, Offset] = {
"O": Offset.OPEN,
"C": Offset.CLOSE
}
OFFSET_VT2HSOPTION = {v: k for k, v in OFFSET_HSOPTION2VT.items()}
STATUS_HSOPTION2VT: Dict[str, Status] = {
"0": Status.SUBMITTING,
"1": Status.SUBMITTING,
"2": Status.NOTTRADED,
"3": Status.SUBMITTING,
"4": Status.PARTTRADED,
"5": Status.CANCELLED,
"6": Status.CANCELLED,
"7": Status.PARTTRADED,
"8": Status.ALLTRADED,
"9": Status.REJECTED
}
STATUS_VT2HSOPTION = {v: k for k, v in STATUS_HSOPTION2VT.items()}
ORDERTYPE_VT2HSOPTION = {
OrderType.LIMIT: "0",
OrderType.MARKET: "OPB"
}
ORDERTYPE_HSOPTION2VT = {v: k for k, v in ORDERTYPE_VT2HSOPTION.items()}
OPTIONTYPE_HSOPTION2VT = {
"C": OptionType.CALL,
"P": OptionType.PUT
}
symbol_exchange_map = {}
symbol_name_map = {}
symbol_size_map = {}
FUNCTION_USER_LOGIN = 331100
FUNCTION_QUERY_POSITION = 338023
FUNCTION_QUERY_ACCOUNT = 338022
FUNCTION_QUERY_ORDER = 338020
FUNCTION_QUERY_TRADE = 338021
FUNCTION_QUERY_CONTRACT = 338000
FUNCTION_SEND_ORDER = 338011
FUNCTION_CANCEL_ORDER = 338012
ISSUE_ORDER = "33012"
ISSUE_TRADE = "33011"
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class HsoptionGateway(BaseGateway):
"""
VN Trader Gateway for Hundsun Option.
"""
default_setting = {
"交易用户名": "",
"交易密码": "",
"行情用户名": "",
"行情密码": "",
"行情经纪商代码": "",
"行情服务器": "",
}
exchanges = [Exchange.SSE, Exchange.SZSE]
def __init__(self, event_engine):
"""Constructor"""
super().__init__(event_engine, "HSOPTION")
self.td_api = TdApi(self)
self.md_api = SoptMdApi(self)
def connect(self, setting: dict) -> None:
""""""
td_userid = setting["交易用户名"]
td_password = setting["交易密码"]
md_userid = setting["行情用户名"]
md_password = setting["行情密码"]
md_brokerid = setting["行情经纪商代码"]
md_address = setting["行情服务器"]
if not md_address.startswith("tcp://"):
md_address = "tcp://" + md_address
self.md_api.connect(md_address, md_userid, md_password, md_brokerid)
self.td_api.connect(td_userid, td_password)
self.init_query()
def subscribe(self, req: SubscribeRequest) -> None:
""""""
self.md_api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
""""""
return self.td_api.send_order(req)
def cancel_order(self, req: CancelRequest) -> None:
""""""
self.td_api.cancel_order(req)
def query_account(self) -> None:
""""""
self.td_api.query_account()
def query_position(self) -> None:
""""""
self.td_api.query_position()
def close(self) -> None:
""""""
self.md_api.close()
def write_error(self, msg: str, error: dict) -> None:
""""""
error_id = error["ErrorID"]
error_msg = error["ErrorMsg"]
msg = f"{msg},代码:{error_id},信息:{error_msg}"
self.write_log(msg)
def process_timer_event(self, event) -> None:
""""""
self.count += 1
if self.count < 2:
return
self.count = 0
func = self.query_functions.pop(0)
func()
self.query_functions.append(func)
def init_query(self) -> None:
""""""
self.count = 0
self.query_functions = [self.query_account, self.query_position]
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
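
# Hedged usage sketch (added; not part of the original file): constructing and connecting
# the gateway from an EventEngine. Credentials, broker code and server address below are
# placeholders; the setting keys must match HsoptionGateway.default_setting exactly.
#
#   from vnpy.event import EventEngine
#   event_engine = EventEngine()
#   gateway = HsoptionGateway(event_engine)
#   gateway.connect({
#       "交易用户名": "td_user", "交易密码": "td_pass",
#       "行情用户名": "md_user", "行情密码": "md_pass",
#       "行情经纪商代码": "9999", "行情服务器": "180.168.146.187:10131",
#   })
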
class SoptMdApi(MdApi):
""""""
def __init__(self, gateway):
"""Constructor"""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.reqid = 0
self.connect_status = False
self.login_status = False
self.subscribed = set()
self.userid = ""
self.password = ""
self.brokerid = ""
def onFrontConnected(self) -> None:
"""
Callback when front server is connected.
"""
self.gateway.write_log("行情服务器连接成功")
self.login()
def onFrontDisconnected(self, reason: int) -> None:
"""
Callback when front server is disconnected.
"""
self.login_status = False
self.gateway.write_log(f"行情服务器连接断开,原因{reason}")
def onRspUserLogin(self, data: dict, error: dict, reqid: int, last: bool) -> None:
"""
Callback when user is logged in.
"""
if not error["ErrorID"]:
self.login_status = True
self.gateway.write_log("行情服务器登录成功")
for symbol in self.subscribed:
self.subscribeMarketData(symbol)
else:
self.gateway.write_error("行情服务器登录失败", error)
def onRspError(self, error: dict, reqid: int, last: bool) -> None:
"""
Callback when error occured.
"""
self.gateway.write_error("行情接口报错", error)
def onRspSubMarketData(
self,
data: dict,
error: dict,
reqid: int,
last: bool
) -> None:
""""""
if not error or not error["ErrorID"]:
return
self.gateway.write_error("行情订阅失败", error)
def onRtnDepthMarketData(self, data: dict) -> None:
"""
Callback of tick data update.
"""
symbol = data["InstrumentID"]
exchange = symbol_exchange_map.get(symbol, "")
if not exchange:
return
timestamp = f"{data['TradingDay']} {data['UpdateTime']}.{int(data['UpdateMillisec']/100)}"
tick = TickData(
symbol=symbol,
exchange=exchange,
datetime=datetime.strptime(timestamp, "%Y%m%d %H:%M:%S.%f"),
name=symbol_name_map.get(symbol, ""),
volume=data["Volume"],
open_interest=data["OpenInterest"],
last_price=data["LastPrice"],
limit_up=data["UpperLimitPrice"],
limit_down=data["LowerLimitPrice"],
open_price=data["OpenPrice"],
high_price=data["HighestPrice"],
low_price=data["LowestPrice"],
pre_close=data["PreClosePrice"],
bid_price_1=data["BidPrice1"],
ask_price_1=data["AskPrice1"],
bid_volume_1=data["BidVolume1"],
ask_volume_1=data["AskVolume1"],
gateway_name=self.gateway_name
)
tick.bid_price_2 = data["BidPrice2"]
tick.bid_price_3 = data["BidPrice3"]
tick.bid_price_4 = data["BidPrice4"]
tick.bid_price_5 = data["BidPrice5"]
tick.ask_price_2 = data["AskPrice2"]
tick.ask_price_3 = data["AskPrice3"]
tick.ask_price_4 = data["AskPrice4"]
tick.ask_price_5 = data["AskPrice5"]
tick.bid_volume_2 = data["BidVolume2"]
tick.bid_volume_3 = data["BidVolume3"]
tick.bid_volume_4 = data["BidVolume4"]
tick.bid_volume_5 = data["BidVolume5"]
tick.ask_volume_2 = data["AskVolume2"]
tick.ask_volume_3 = data["AskVolume3"]
tick.ask_volume_4 = data["AskVolume4"]
tick.ask_volume_5 = data["AskVolume5"]
self.gateway.on_tick(tick)
def connect(
self,
address: str,
userid: str,
password: str,
brokerid: int
) -> None:
"""
Start connection to server.
"""
self.userid = userid
self.password = password
self.brokerid = brokerid
# If not connected, then start connection first.
if not self.connect_status:
path = get_folder_path(self.gateway_name.lower())
self.createFtdcMdApi(str(path) + "\\Md")
self.registerFront(address)
self.init()
self.connect_status = True
# Sleep 1 second and check trigger callback manually
# (temp fix of the bug of Huaxi futures SOPT system)
sleep(1)
if not self.login_status:
self.onFrontConnected()
# If already connected, then login immediately.
elif not self.login_status:
self.login()
def login(self) -> None:
"""
Login onto server.
"""
req = {
"UserID": self.userid,
"Password": self.password,
"BrokerID": self.brokerid
}
self.reqid += 1
self.reqUserLogin(req, self.reqid)
sleep(3)
self.gateway.write_log("行情服务器登录成功")
self.login_status = True
def subscribe(self, req: SubscribeRequest) -> None:
"""
Subscribe to tick data update.
"""
if self.login_status:
self.subscribeMarketData(req.symbol)
self.subscribed.add(req.symbol)
def close(self) -> None:
"""
Close the connection.
"""
if self.connect_status:
self.exit()
class TdApi:
""""""
def __init__(self, gateway: HsoptionGateway):
""""""
global td_api
if not td_api:
td_api = self
self.userid: str = ""
self.password: str = ""
self.op_station: str = ""
self.connect_status: bool = False
self.login_status: bool = False
self.batch_no: int = 1000000
self.batch_entrust_id: Dict[str, str] = {}
self.entrust_batch_id: Dict[str, str] = {}
self.orders: Dict[str, OrderData] = {}
self.cancels: Dict[str, CancelRequest] = {}
self.gateway: HsoptionGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.config: py_t2sdk.pyCConfigInterface = None
self.connection: py_t2sdk.pyConnectionInterface = None
self.callbacks = {
FUNCTION_USER_LOGIN: self.on_login,
FUNCTION_QUERY_POSITION: self.on_query_position,
FUNCTION_QUERY_ACCOUNT: self.on_query_account,
FUNCTION_QUERY_ORDER: self.on_query_order,
FUNCTION_QUERY_TRADE: self.on_query_trade,
FUNCTION_QUERY_CONTRACT: self.on_query_contract,
FUNCTION_SEND_ORDER: self.on_send_order,
FUNCTION_CANCEL_ORDER: self.on_cancel_order,
ISSUE_ORDER: self.on_return_order,
ISSUE_TRADE: self.on_return_trade,
}
def connect(
self,
userid: str,
password: str,
) -> None:
""""""
self.userid = userid
self.password = password
self.op_station = self.generate_op_station()
# If not connected, then start connection first.
if not self.connect_status:
self.config = py_t2sdk.pyCConfigInterface()
self.config.Load("citic.ini")
self.connection = py_t2sdk.pyConnectionInterface(self.config)
async_callback = py_t2sdk.pyCallbackInterface(
"vnpy-xx.gateway.hsoption.hsoption_gateway",
"TdAsyncCallback"
)
async_callback.InitInstance()
ret = self.connection.Create2BizMsg(async_callback)
if ret:
msg = str(self.connection.GetErrorMsg(ret))
self.gateway.write_log(f"初始化失败,错误码:{ret},信息:{msg}")
ret = self.connection.Connect(1000)
if ret:
msg = str(self.connection.GetErrorMsg(ret))
self.gateway.write_log(f"连接失败,错误码:{ret},信息:{msg}")
self.connect_status = True
self.gateway.write_log("交易服务器连接成功")
# If already connected, then login immediately.
if not self.login_status:
self.login()
def on_login(self, data: List[Dict[str, str]]) -> None:
""""""
if not data:
self.gateway.write_log("交易服务器登录失败")
return
else:
self.gateway.write_log("交易服务器登录成功")
self.login_status = True
for d in data:
self.user_token = d["user_token"]
self.branch_no = d["branch_no"]
self.client_id = d["client_id"]
self.asset_prop = d["asset_prop"]
self.sysnode_id = d["sysnode_id"]
# Generate ETF contract data
contract_1 = ContractData(
symbol="510050",
exchange=Exchange.SSE,
name="50ETF",
size=1,
pricetick=0.001,
product=Product.ETF,
gateway_name=self.gateway_name
)
self.gateway.on_contract(contract_1)
contract_2 = ContractData(
symbol="510300",
exchange=Exchange.SSE,
name="300ETF",
size=1,
pricetick=0.001,
product=Product.ETF,
gateway_name=self.gateway_name
)
self.gateway.on_contract(contract_2)
contract_3 = ContractData(
symbol="159919",
exchange=Exchange.SZSE,
name="300ETF",
size=1,
pricetick=0.001,
product=Product.ETF,
gateway_name=self.gateway_name
)
self.gateway.on_contract(contract_3)
self.query_contract()
def on_query_position(self, data: List[Dict[str, str]]) -> None:
""""""
if not data:
return
for d in data:
position = PositionData(
symbol=d["option_code"],
exchange=EXCHANGE_HSOPTION2VT[d["exchange_type"]],
direction=POS_DIRECTION_HSOPTION2VT[d["opthold_type"]],
volume=int(float(d["hold_amount"])),
pnl=float(d["income_balance"]),
price=float(d["opt_cost_price"]),
frozen=int((float(d["hold_amount"]) - float(d["enable_amount"]))),
gateway_name=self.gateway_name
)
self.gateway.on_position(position)
def on_query_account(self, data: List[Dict[str, str]]) -> None:
""""""
if not data:
return
for d in data:
account = AccountData(
accountid=self.userid,
balance=float(d["total_asset"]),
frozen=float(d["total_asset"]) - float(d["enable_balance"]),
gateway_name=self.gateway_name
)
self.gateway.on_account(account)
self.query_position()
def on_query_order(self, data: List[Dict[str, str]]) -> None:
""""""
if not data:
self.gateway.write_log("委托信息查询成功")
self.query_trade()
return
for d in data:
batch_no = d["batch_no"]
self.batch_no = max(self.batch_no, int(batch_no))
self.batch_entrust_id[batch_no] = d["entrust_no"]
self.entrust_batch_id[d["entrust_no"]] = batch_no
order = OrderData(
symbol=d["option_code"],
exchange=EXCHANGE_HSOPTION2VT[d["exchange_type"]],
direction=DIRECTION_HSOPTION2VT[d["entrust_bs"]],
status=STATUS_HSOPTION2VT.get(d["entrust_status"], Status.SUBMITTING),
orderid=batch_no,
offset=OFFSET_HSOPTION2VT[d["entrust_oc"]],
volume=int(float(d["entrust_amount"])),
traded=int(float(d["business_amount"])),
price=float(d["opt_entrust_price"]),
datetime=generate_datetime(d["entrust_time"]),
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
self.orders[batch_no] = order
self.gateway.write_log("委托信息查询成功")
self.query_trade()
def on_query_trade(self, data: List[Dict[str, str]]) -> None:
""""""
if not data:
self.gateway.write_log("成交信息查询成功")
return
for d in data:
price = float(d["opt_business_price"])
if price == 0:
continue
batch_no = self.entrust_batch_id[d["entrust_no"]]
trade = TradeData(
orderid=batch_no,
tradeid=d["business_id"],
symbol=d["option_code"],
exchange=EXCHANGE_HSOPTION2VT[d["exchange_type"]],
direction=DIRECTION_HSOPTION2VT[d["entrust_bs"]],
offset=OFFSET_HSOPTION2VT[d["entrust_oc"]],
price=price,
volume=int(float(d["business_amount"])),
datetime=generate_datetime(d["business_time"]),
gateway_name=self.gateway_name
)
self.gateway.on_trade(trade)
self.gateway.write_log("成交信息查询成功")
self.subcribe_order()
self.subcribe_trade()
def on_query_contract(self, data: List[Dict[str, str]]) -> None:
""""""
if not data:
self.gateway.write_log("合约信息查询失败")
return
# Process option contract
for d in data:
contract = ContractData(
symbol=d["option_code"],
exchange=EXCHANGE_HSOPTION2VT[d["exchange_type"]],
name=d["option_name"],
size=int(float(d["amount_per_hand"])),
pricetick=float(d["opt_price_step"]),
product=Product.OPTION,
gateway_name=self.gateway_name
)
contract.option_portfolio = d["stock_code"] + "_O"
contract.option_underlying = (
d["stock_code"]
+ "-"
+ str(d["end_date"])
)
contract.option_type = OPTIONTYPE_HSOPTION2VT[d["option_type"]]
contract.option_strike = float(d["exercise_price"])
contract.option_expiry = datetime.strptime(d["end_date"], "%Y%m%d")
contract.option_index = get_option_index(
contract.option_strike, d["optcontract_id"]
)
self.gateway.on_contract(contract)
symbol_exchange_map[contract.symbol] = contract.exchange
symbol_name_map[contract.symbol] = contract.name
if len(data) == 1000:
position_str = d["position_str"]
self.query_contract(position_str)
else:
self.gateway.write_log("合约信息查询成功")
self.query_order()
def on_send_order(self, data: List[Dict[str, str]]) -> None:
""""""
if not data:
self.gateway.write_log("委托失败")
return
for d in data:
batch_no = d["batch_no"]
self.batch_entrust_id[batch_no] = d["entrust_no"]
self.entrust_batch_id[d["entrust_no"]] = batch_no
if batch_no in self.cancels:
cancel_req = self.cancels[batch_no]
self.cancel_order(cancel_req)
order = self.orders[batch_no]
            order.datetime = generate_datetime(d["entrust_time"])
self.gateway.on_order(order)
def on_cancel_order(self, data: List[Dict[str, str]]) -> None:
""""""
if not data:
self.gateway.write_log("撤单失败")
return
def on_return_order(self, data: List[Dict[str, str]]) -> None:
""""""
for d in data:
entrust_no = d["entrust_no"]
if entrust_no not in self.entrust_batch_id:
return
batch_no = self.entrust_batch_id[entrust_no]
order = self.orders[batch_no]
order.status = STATUS_HSOPTION2VT.get(d["entrust_status"], Status.SUBMITTING)
self.gateway.on_order(order)
def on_return_trade(self, data: List[Dict[str, str]]) -> None:
""""""
for d in data:
traded = int(float(d["business_amount"]))
entrust_no = d["entrust_no"]
batch_no = self.entrust_batch_id[entrust_no]
if traded > 0:
trade = TradeData(
symbol=d["option_code"],
exchange=EXCHANGE_HSOPTION2VT[d["exchange_type"]],
orderid=batch_no,
tradeid=d["business_id"],
direction=DIRECTION_HSOPTION2VT[d["entrust_bs"]],
offset=OFFSET_HSOPTION2VT[d["entrust_oc"]],
price=float(d["opt_business_price"]),
volume=traded,
datetime=generate_datetime(d["business_time"]),
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
order = self.orders[batch_no]
if traded > 0:
order.traded += traded
order.status = STATUS_HSOPTION2VT[d["entrust_status"]]
self.gateway.on_order(order)
def on_error(self, error: dict) -> None:
""""""
self.gateway.write_log(f"触发错误:{str(error)}")
def on_callback(self, function: int, data: dict) -> None:
""""""
try:
func = self.callbacks[function]
func(data)
except Exception:
traceback.print_exc()
def send_req(self, function: int, req: dict) -> int:
""""""
packer = py_t2sdk.pyIF2Packer()
packer.BeginPack()
for Field in req.keys():
packer.AddField(Field)
for value in req.values():
packer.AddStr(str(value))
packer.EndPack()
msg = py_t2sdk.pyIBizMessage()
msg.SetFunction(function)
msg.SetPacketType(0)
msg.SetContent(packer.GetPackBuf(), packer.GetPackLen())
n = self.connection.SendBizMsg(msg, 1)
packer.FreeMem()
packer.Release()
msg.Release()
return n
def generate_req(self) -> Dict[str, str]:
""""""
req = {
"op_branch_no": self.branch_no,
"op_entrust_way": "3",
"op_station": self.op_station,
"fund_account": self.userid,
"branch_no": self.branch_no,
"client_id": self.client_id,
"password": self.password,
"password_type": "2",
"asset_prop": self.asset_prop,
"sysnode_Id": self.sysnode_id
}
return req
def generate_op_station(self):
""""""
company = "SHWN"
f = requests.request("GET", "http://myip.dnsomatic.com")
iip = f.text
c = wmi.WMI()
try:
for interface in c.Win32_NetworkAdapterConfiguration(IPEnabled=1):
mac = interface.MACAddress
lip = interface.IPAddress[0]
for processor in c.Win32_Processor():
cpu = processor.Processorid.strip()
for disk in c.Win32_DiskDrive():
hd = disk.SerialNumber.strip()
for disk in c.Win32_LogicalDisk(DriveType=3):
pi = ",".join([disk.Caption, disk.Size])
pcn = socket.gethostname()
op_station = f"TYJR-{company}-IIP.{iip}-LIP.{lip}-MAC.{mac}-HD.{hd}-PCN.{pcn}-CPU.{cpu}-PI.{pi}"
except Exception:
op_station = ""
return op_station
def login(self) -> int:
""""""
req = {
"password_type": "2",
"input_content": "1",
"op_entrust_way": "3",
"password": self.password,
"account_content": self.userid
}
self.send_req(FUNCTION_USER_LOGIN, req)
def send_order(self, req: OrderRequest) -> int:
"""
Send new order.
"""
td_req = self.generate_req()
td_req["exchange_type"] = EXCHANGE_VT2HSOPTION[req.exchange]
td_req["option_code"] = req.symbol
td_req["entrust_amount"] = str(req.volume)
td_req["opt_entrust_price"] = str(req.price)
td_req["entrust_bs"] = DIRECTION_VT2HSOPTION[req.direction]
td_req["entrust_oc"] = OFFSET_VT2HSOPTION[req.offset]
td_req["covered_flag"] = ""
td_req["entrust_prop"] = ORDERTYPE_VT2HSOPTION[req.type]
self.batch_no += 1
batch_no = str(self.batch_no)
td_req["batch_no"] = batch_no
self.send_req(FUNCTION_SEND_ORDER, td_req)
order = req.create_order_data(batch_no, self.gateway_name)
self.gateway.on_order(order)
self.orders[batch_no] = order
return order.vt_orderid
def subcribe_topic(self, biz_name: str, topic_name: str) -> int:
""""""
# Create subscrbe callback
sub_callback = py_t2sdk.pySubCallBack(
"vnpy-xx.gateway.hsoption.hsoption_gateway",
"TdSubCallback"
)
sub_callback.initInstance()
# Create subscriber
ret, subscriber = self.connection.NewSubscriber(
sub_callback,
biz_name,
5000
)
if ret != 0:
error_msg = str(self.connection.GetMCLastError(), encoding="gbk")
msg = f"订阅推送失败:{error_msg}"
self.gateway.write_log(msg)
return
# Set subscribe parameters
sub_param = py_t2sdk.pySubscribeParamInterface()
sub_param.SetTopicName(topic_name)
sub_param.SetFilter("branch_no", str(self.branch_no))
sub_param.SetFilter("fund_account", str(self.userid))
# Pack data
packer = py_t2sdk.pyIF2Packer()
packer.BeginPack()
packer.AddField("fund_account")
packer.AddField("password")
packer.AddStr(self.userid)
packer.AddStr(self.password)
packer.EndPack()
# Start subcribe
unpacker = py_t2sdk.pyIF2UnPacker()
result = subscriber.SubscribeTopic(sub_param, 5000, unpacker, packer)
packer.FreeMem()
packer.Release()
unpacker.Release()
sub_param.Release()
return result
def cancel_order(self, req: CancelRequest) -> None:
"""
Cancel existing order.
"""
batch_no = req.orderid
if batch_no not in self.batch_entrust_id:
self.cancels[batch_no] = req
return
td_req = self.generate_req()
entrust_no = self.batch_entrust_id[batch_no]
td_req["entrust_no"] = entrust_no
self.send_req(FUNCTION_CANCEL_ORDER, td_req)
if batch_no in self.cancels:
del self.cancels[batch_no]
def query_account(self) -> int:
""""""
if not self.login_status:
return
req = self.generate_req()
self.send_req(FUNCTION_QUERY_ACCOUNT, req)
def query_position(self) -> int:
""""""
if not self.login_status:
return
req = self.generate_req()
self.send_req(FUNCTION_QUERY_POSITION, req)
def query_trade(self) -> int:
""""""
req = self.generate_req()
req["request_num"] = "10000"
self.send_req(FUNCTION_QUERY_TRADE, req)
def query_order(self) -> int:
""""""
req = self.generate_req()
req["request_num"] = "10000"
self.send_req(FUNCTION_QUERY_ORDER, req)
def query_contract(self, position_str: str = None) -> int:
""""""
req = self.generate_req()
req["request_num"] = "10000"
if position_str:
req["position_str"] = position_str
self.send_req(FUNCTION_QUERY_CONTRACT, req)
def subcribe_order(self) -> None:
""""""
n = self.subcribe_topic("order_subcriber", ISSUE_ORDER)
if n <= 0:
msg = f"委托订阅失败,原因{self.connection.GetErrorMsg(n)}"
self.gateway.write_log(msg)
else:
self.gateway.write_log("委托回报订阅成功")
def subcribe_trade(self) -> None:
""""""
n = self.subcribe_topic("trade_subcriber", ISSUE_TRADE)
if n <= 0:
msg = f"成交订阅失败,原因{self.connection.GetErrorMsg(n)}"
self.gateway.write_log(msg)
else:
self.gateway.write_log("成交回报订阅成功")
class TdAsyncCallback:
""""""
def __init__(self):
""""""
global td_api
self.td_api: TdApi = td_api
def OnRegister(self) -> None:
""""""
pass
def OnClose(self) -> None:
""""""
pass
def OnReceivedBizMsg(self, hSend, sBuff, iLenght) -> None:
""""""
biz_msg = py_t2sdk.pyIBizMessage()
biz_msg.SetBuff(sBuff, iLenght)
ret = biz_msg.GetReturnCode()
if not ret:
function = biz_msg.GetFunction()
buf, len = biz_msg.GetContent()
unpacker = py_t2sdk.pyIF2UnPacker()
unpacker.Open(buf, len)
data = unpack_data(unpacker)
self.td_api.on_callback(function, data)
unpacker.Release()
else:
buf, len = biz_msg.GetContent()
unpacker = py_t2sdk.pyIF2UnPacker()
unpacker.Open(buf, len)
if unpacker:
data = unpack_data(unpacker)
self.td_api.on_error(data)
unpacker.Release()
else:
error_msg = str(biz_msg.GetErrorInfo(), encoding="gbk")
msg = f"请求失败,信息:{error_msg}"
self.td_api.gateway.write_log(msg)
biz_msg.Release()
class TdSubCallback:
""""""
def __init__(self):
""""""
global td_api
self.td_api: TdApi = td_api
def OnReceived(self, topic, sBuff, iLen) -> None:
""""""
biz_msg = py_t2sdk.pyIBizMessage()
biz_msg.SetBuff(sBuff, iLen)
buf, len = biz_msg.GetContent()
unpacker = py_t2sdk.pyIF2UnPacker()
unpacker.Open(buf, len)
data = unpack_data(unpacker)
self.td_api.on_callback(topic, data)
unpacker.Release()
biz_msg.Release()
def unpack_data(unpacker: py_t2sdk.pyIF2UnPacker) -> List[Dict[str, str]]:
""""""
row_count = unpacker.GetRowCount()
col_count = unpacker.GetColCount()
data = []
for row_index in range(row_count):
d = {}
for col_index in range(col_count):
name = unpacker.GetColName(col_index)
value = unpacker.GetStrByIndex(col_index)
d[name] = value
unpacker.Next()
data.append(d)
return data
def get_option_index(strike_price: float, exchange_instrument_id: str) -> str:
""""""
exchange_instrument_id = exchange_instrument_id.replace(" ", "")
if "M" in exchange_instrument_id:
n = exchange_instrument_id.index("M")
elif "A" in exchange_instrument_id:
n = exchange_instrument_id.index("A")
elif "B" in exchange_instrument_id:
n = exchange_instrument_id.index("B")
else:
return str(strike_price)
index = exchange_instrument_id[n:]
option_index = f"{strike_price:.3f}-{index}"
return option_index
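
# Illustrative examples (added): the option index combines the strike price with the
# adjustment suffix parsed from the exchange contract id (ids below are made up):
#   get_option_index(2.5, "510050C2006M02500")  ->  "2.500-M02500"
#   get_option_index(2.5, "510050C2006A02500")  ->  "2.500-A02500"
# If no "M"/"A"/"B" marker is found, the plain strike string is returned (e.g. "2.5").
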
td_api = None
def generate_datetime(time: str) -> datetime:
""""""
time = time.rjust(6, "0")
today = datetime.now().strftime("%Y%m%d")
timestamp = f"{today} {time}"
dt = datetime.strptime(timestamp, "%Y%m%d %H:%M:%S")
dt = CHINA_TZ.localize(dt)
return dt
| 29.18733 | 108 | 0.568368 |
00397c80684981b544d95634cde82475e3c3a805 | 10,911 | py | Python | ecg_balancing/migrations/0014_auto__add_unique_companybalance_company_year.py | sinnwerkstatt/ecg-balancing | f7553ce22dda54f00d090b1ec0cd11364e2aebb5 | [
"MIT"
] | null | null | null | ecg_balancing/migrations/0014_auto__add_unique_companybalance_company_year.py | sinnwerkstatt/ecg-balancing | f7553ce22dda54f00d090b1ec0cd11364e2aebb5 | [
"MIT"
] | 5 | 2015-04-22T15:56:19.000Z | 2015-10-16T14:28:10.000Z | ecg_balancing/migrations/0014_auto__add_unique_companybalance_company_year.py | sinnwerkstatt/ecg-balancing | f7553ce22dda54f00d090b1ec0cd11364e2aebb5 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'CompanyBalance', fields ['company', 'year']
db.create_unique(u'ecg_balancing_companybalance', ['company_id', 'year'])
def backwards(self, orm):
# Removing unique constraint on 'CompanyBalance', fields ['company', 'year']
db.delete_unique(u'ecg_balancing_companybalance', ['company_id', 'year'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ecg_balancing.company': {
'Meta': {'object_name': 'Company'},
'activities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'employees_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'foundation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'managing_directors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'model_creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owners': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'revenue': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zipcode': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'ecg_balancing.companybalance': {
'Meta': {'unique_together': "((u'company', u'year'),)", 'object_name': 'CompanyBalance'},
'auditor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_good': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'balance'", 'to': u"orm['ecg_balancing.Company']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balances'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'peer_companies': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '255', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'process_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'prospect': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'year': ('django.db.models.fields.SmallIntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
u'ecg_balancing.companybalanceindicator': {
'Meta': {'object_name': 'CompanyBalanceIndicator'},
'company_balance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.CompanyBalance']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'evaluation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.Indicator']"})
},
u'ecg_balancing.ecgmatrix': {
'Meta': {'object_name': 'ECGMatrix'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "u'4.1'", 'max_length': '6'})
},
u'ecg_balancing.indicator': {
'Meta': {'object_name': 'Indicator'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'ecg_value': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'indicators'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'max_evaluation': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'parent_indicator'", 'null': 'True', 'to': u"orm['ecg_balancing.Indicator']"}),
'stakeholder': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'subindicator_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'ecg_balancing.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'companies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'ecg_balancing.userrole': {
'Meta': {'object_name': 'UserRole'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ecg_balancing.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
    complete_apps = ['ecg_balancing']
| 79.642336 | 205 | 0.569517 |
ecefbacdbd5ed20bf65a24b40ac6267d51423f85 | 19,271 | py | Python | tensorflow/python/debug/lib/check_numerics_callback_test.py | mengwangk/tensorflow | 7aad97c27b803a0d3524a715ed08f4db7a71de63 | ["Apache-2.0"] | 1 | 2019-09-18T06:29:47.000Z | 2019-09-18T06:29:47.000Z | tensorflow/python/debug/lib/check_numerics_callback_test.py | mengwangk/tensorflow | 7aad97c27b803a0d3524a715ed08f4db7a71de63 | ["Apache-2.0"] | null | null | null | tensorflow/python/debug/lib/check_numerics_callback_test.py | mengwangk/tensorflow | 7aad97c27b803a0d3524a715ed08f4db7a71de63 | ["Apache-2.0"] | 1 | 2021-03-29T16:42:47.000Z | 2021-03-29T16:42:47.000Z |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.debug.lib import check_numerics_callback
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizer_v2
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class LimitStringLengthTest(test_util.TensorFlowTestCase):
def testLimitStringLengthWithExplicitLimit(self):
self.assertEqual(
check_numerics_callback.limit_string_length("", max_len=2), "")
self.assertEqual(
check_numerics_callback.limit_string_length("e", max_len=2), "e")
self.assertEqual(
check_numerics_callback.limit_string_length("de", max_len=2), "de")
self.assertEqual(
check_numerics_callback.limit_string_length("abcde", max_len=2),
"...de")
def testLimitStringLengthWithNoLimit(self):
self.assertEqual(check_numerics_callback.limit_string_length(
"A" * 100 + "B", max_len=None), "A" * 100 + "B")
self.assertEqual(
check_numerics_callback.limit_string_length("", max_len=None), "")
def testLimitStringLengthWithDefaultLimit(self):
self.assertEqual(
check_numerics_callback.limit_string_length("A" * 50 + "B"),
"..." + "A" * 49 + "B")
class CheckNumericsCallbackTest(test_util.TensorFlowTestCase):
def tearDown(self):
check_numerics_callback.disable_check_numerics()
super(CheckNumericsCallbackTest, self).tearDown()
def _assertRaisesInvalidArgumentErrorAndGetMessage(self, func):
caught = None
try:
func()
except errors.InvalidArgumentError as error:
caught = error
self.assertTrue(caught, "Failed to catch expected InvalidArgumentError")
return caught.message
def testCatchEagerOpFloat32Inf(self):
"""Test catching Infinity in eager op execution: float32."""
check_numerics_callback.enable_check_numerics()
x = constant_op.constant([2.0, 3.0])
y = constant_op.constant([1.0, 0.0])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: x / y)
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: (2,)\n", message)
self.assertIn("# of +Inf elements: 1\n", message)
self.assertIn("0: %s" % x, message)
self.assertIn("1: %s" % y, message)
def testEnableCheckNumericsIsIdempotent(self):
"""Two calls to enable_check_numerics() have same effect as one call."""
check_numerics_callback.enable_check_numerics()
check_numerics_callback.enable_check_numerics()
x = constant_op.constant([2.0, 3.0])
y = constant_op.constant([1.0, 0.0])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: x / y)
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: (2,)\n", message)
self.assertIn("# of +Inf elements: 1\n", message)
self.assertIn("0: %s" % x, message)
self.assertIn("1: %s" % y, message)
def testCallingDisableCheckNumericsWithoutEnablingFirstIsTolerated(self):
check_numerics_callback.disable_check_numerics()
def testCatchEagerOpFloat16NaN(self):
"""Test catching Infinity in eager op execution: float16."""
check_numerics_callback.enable_check_numerics()
def log1p(x):
y = 1.0 + x
return math_ops.log(y)
x = constant_op.constant([[-1.0]], dtype=dtypes.float16)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: log1p(x))
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*float16", message))
self.assertIn("shape: (1, 1)\n", message)
self.assertIn("# of -Inf elements: 1\n", message)
self.assertTrue(re.search(r"Input tensor.*0\.", message))
def testNoCatchEagerOpExecution(self):
"""Test running multiple steps of eager execution without Inf/NaN."""
check_numerics_callback.enable_check_numerics()
x = constant_op.constant([2.0, 3.0])
y = constant_op.constant([1.0, 0.0])
self.assertAllClose((x + y) * (x - y), [3.0, 9.0])
@test_util.run_in_graph_and_eager_modes
def testCatchFunctionOpInfFloat64(self):
"""Test catching infinites generated in a FuncGraph."""
check_numerics_callback.enable_check_numerics()
@def_function.function
def divide_sum_with_diff(x, y):
w1 = x + y
w2 = x - y
u = w1 / w2
return u * 2.0
x = constant_op.constant(2.0, dtype=dtypes.float64)
y = constant_op.constant(2.0, dtype=dtypes.float64)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(divide_sum_with_diff(x, y)))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float64", message))
self.assertIn("shape: ()\n", message)
self.assertIn("Input tensors (2):", message)
# Check that the correct input ops are printed.
self.assertTrue(re.search(r"0:.*Tensor.*add:0", message))
self.assertTrue(re.search(r"1:.*Tensor.*sub:0", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("u = w1 / w2", message)
@test_util.run_in_graph_and_eager_modes
def testControlFlowGraphWithNaNBFloat16(self):
"""Test catching bfloat16 NaNs in a control-flow-v2 FuncGraph."""
check_numerics_callback.enable_check_numerics()
@def_function.function
def my_conditional(x):
if math_ops.less(math_ops.reduce_sum(x), 0.0):
return math_ops.log(x)
else:
return math_ops.log(-x)
x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.bfloat16)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(my_conditional(x)))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*bfloat16", message))
self.assertIn("shape: (3,)\n", message)
# Check that the correct input op is printed.
self.assertTrue(re.search(r"Input tensor.*Tensor.*Neg", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("return math_ops.log(-x)", message)
if context.executing_eagerly():
# The code path for raising error is slightly different under graph mode.
self.assertTrue(message.endswith("\n"))
@test_util.run_in_graph_and_eager_modes
def testOverflowInTfFunction(self):
"""Test catching Infinity caused by overflow in a tf.function with while."""
check_numerics_callback.enable_check_numerics()
@def_function.function
def accumulation_function(counter, lim, accum):
while math_ops.less(counter, lim):
accum.assign(accum * 2.0)
counter.assign_add(1)
counter = variables.Variable(0, dtype=dtypes.int32)
# Repeated `* 2.0` overflows a float32 tensor in 128 steps. So the
# 1000-step limit is sufficient.
lim = constant_op.constant(1000, dtype=dtypes.int32)
accum = variables.Variable(1.0)
if not context.executing_eagerly():
self.evaluate([counter.initializer, accum.initializer])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(accumulation_function(counter, lim, accum)))
self.assertAllClose(self.evaluate(counter), 128)
# Check the content of the error message.
# The overflow to +Infinity happens during the `* 2.0` operation.
self.assertTrue(re.search(r"graph op.*\"Mul\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: ()\n", message)
# Check that the correct input op is printed.
self.assertIn("Input tensors (2):", message)
# Check that the correct input ops are printed.
self.assertTrue(re.search(r"0:.*Tensor.*ReadVariableOp:0", message))
self.assertTrue(re.search(r"1:.*Tensor.*mul/y:0", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("accum.assign(accum * 2.0)", message)
@test_util.run_in_graph_and_eager_modes
def testKerasModelHealthyPredictAndFitCalls(self):
"""Test a simple healthy keras model runs fine under the callback."""
check_numerics_callback.enable_check_numerics()
model = models.Sequential()
model.add(layers.Dense(
units=100,
input_shape=(5,),
use_bias=False,
activation="relu",
kernel_initializer="ones"))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(
units=1,
activation="linear",
kernel_initializer="ones"))
model.compile(
loss="mse", optimizer=optimizer_v2.gradient_descent.SGD(1e-3))
batch_size = 16
xs = np.zeros([batch_size, 5])
ys = np.ones([batch_size, 1])
outputs = model.predict(xs)
self.assertEqual(outputs.shape, (batch_size, 1))
epochs = 100
history = model.fit(xs, ys, epochs=epochs, verbose=0)
self.assertEqual(len(history.history["loss"]), epochs)
@test_util.run_in_graph_and_eager_modes
def testKerasModelUnhealthyPredictAndFitCallsWithLargeLearningRate(self):
"""Test keras model training crashes with Infinity is caught by callback."""
check_numerics_callback.enable_check_numerics()
model = models.Sequential()
# Use weight initializers for deterministic behavior during test.
model.add(layers.Dense(
units=100,
input_shape=(5,),
activation="relu",
kernel_initializer="ones"))
model.add(layers.Dense(
units=1,
activation="linear",
kernel_initializer="ones"))
lr = 1e3 # Intentionally huge learning rate.
model.compile(loss="mse", optimizer=optimizer_v2.gradient_descent.SGD(lr))
batch_size = 16
xs = np.zeros([batch_size, 5])
ys = np.ones([batch_size, 1])
outputs = model.predict(xs)
self.assertEqual(outputs.shape, (batch_size, 1))
epochs = 100
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: model.fit(xs, ys, epochs=epochs, verbose=0))
# Check the content of the error message.
    # Let's not hardcode the op name, to keep this future-proof.
self.assertTrue(re.search(r"graph op.*\".*\"", message))
self.assertTrue(re.search(r"dtype:.*float32", message))
self.assertTrue(re.search(r"shape:.*\(.*\)", message))
# Check that the correct input op is printed.
self.assertTrue(re.search(r"Input tensor.*", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
# The stacks are different between when eager execution is enabled and
# when it's not (i.e., v1 graph). TODO(cais): Investigate if we can improve
# this.
if context.executing_eagerly():
self.assertIn("lambda: model.fit(xs, ys,", message)
else:
self.assertIn("model.compile(", message)
@test_util.run_in_graph_and_eager_modes
def testInfInCustomKerasLayerWithTfFunctionPredictCall(self):
"""Test catching Infinity in a custom layer, w/ tf.function."""
check_numerics_callback.enable_check_numerics()
class DivByXLayer(layers.Layer):
@def_function.function
def call(self, x):
"""The computation performed by the for-test custom layer.
Generates Infinity by intention.
Args:
x: Input tensor of scalar shape.
Returns:
A scalar tensor.
"""
one_over_x = 1.0 / x
return one_over_x
model = models.Sequential()
model.add(DivByXLayer(input_shape=[5]))
# TODO(b/140245224): Currently the model must be compiled prior to
    # predict() being called, or Keras will fall back to V1 behavior.
# Remove this after the bug is fixed.
model.compile(loss="mse", optimizer="sgd")
xs = np.ones([1, 5])
# Calling the model with non-zero inputs should be fine.
self.assertAllClose(model.predict(xs), [[1.0, 1.0, 1.0, 1.0, 1.0]])
xs = np.zeros([1, 5])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: model.predict(xs))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertTrue(re.search(r"shape: \(.*, 5\)", message))
    # Check that the correct input op is printed.
self.assertIn("Input tensors (2):", message)
    # Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("one_over_x = 1.0 / x", message)
@test_util.run_in_graph_and_eager_modes
def testInfInCustomKerasLayerWithoutTfFuntionPredictCall(self):
"""Test catching Infinity in a custom layer, w/o tf.function."""
check_numerics_callback.enable_check_numerics()
class DivByXLayer(layers.Layer):
# Not using the tf.function decorator here.
def call(self, x):
"""The computation performed by the for-test custom layer.
Generates Infinity by intention.
Args:
x: Input tensor of scalar shape.
Returns:
A scalar tensor.
"""
one_over_x = 1.0 / x
return one_over_x
model = models.Sequential()
model.add(DivByXLayer(input_shape=[5]))
# TODO(b/140245224): Currently the model must be compiled prior to
    # predict() being called, or Keras will fall back to V1 behavior.
# Remove this after the bug is fixed.
model.compile(loss="mse", optimizer="sgd")
xs = np.ones([1, 5])
# Calling the model with non-zero inputs should be fine.
self.assertAllClose(model.predict(xs), [[1.0, 1.0, 1.0, 1.0, 1.0]])
xs = np.zeros([1, 5])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: model.predict(xs))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertTrue(re.search(r"shape: \(.*, 5\)", message))
# Check that the correct input op is printed.
self.assertIn("Input tensors (2):", message)
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("one_over_x = 1.0 / x", message)
@test_util.run_in_graph_and_eager_modes
def testDatasetMapHealthyResults(self):
check_numerics_callback.enable_check_numerics()
tensor = constant_op.constant(
[0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
def map_fn(x):
return math_ops.log(math_ops.square(x) + 1)
dataset = dataset_ops.Dataset.from_tensor_slices(tensor).batch(2).map(
map_fn)
iterator = dataset_ops.make_one_shot_iterator(dataset)
self.assertAllClose(self.evaluate(iterator.get_next()), np.log([1.25, 2]))
self.assertAllClose(self.evaluate(iterator.get_next()), np.log([3.25, 5]))
@test_util.run_in_graph_and_eager_modes
def testCatchInfinityInDatasetMapFunction(self):
"""Test that callback catches NaN in a tf.dataset map function."""
check_numerics_callback.enable_check_numerics()
def generate_nan(x):
"""Intetionally generates NaNs by taking log of negative number."""
casted_x = math_ops.cast(x, dtypes.float32)
return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x
dataset = dataset_ops.Dataset.range(10).map(generate_nan)
iterator = dataset_ops.make_one_shot_iterator(dataset)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(iterator.get_next()))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: (2, 2)\n", message)
self.assertTrue(re.search(r"Input tensor.*Tensor.*Log/x:0", message))
self.assertIn(
"-> | return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x",
message)
@test_util.run_in_graph_and_eager_modes
  def testCustomGradientWithNaNWithTfFunction(self):
"""Test that callback catches NaN in a gradient function during backprop."""
check_numerics_callback.enable_check_numerics()
@custom_gradient.custom_gradient
def func_with_bad_grad(x):
output = math_ops.sin(x)
@def_function.function
def grad(dy):
# `dy` will come in as 1.0. Taking log of -1.0 leads to NaN.
return math_ops.log(-dy)
return output, grad
x = constant_op.constant(-2.0, dtype=dtypes.float16)
def f(x):
return func_with_bad_grad(x)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: gradient_checker_v2.compute_gradient(f, [x]))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*float16", message))
if context.executing_eagerly():
self.assertIn("shape: ()\n", message)
self.assertTrue(re.search(r"Input tensor.*Tensor.*Neg:0", message))
self.assertIn("-> | return math_ops.log(-dy)", message)
# TODO(cais): Tests for Infs and NaNs during distributed execution.
# TODO(cais): Benchmark the slowdown due to callbacks and inserted nodes.
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
| 39.010121 | 80 | 0.698407 |
334cb10ac3fe8a68348bd1f018d5dd8c042cf8a8 | 1,227 | py | Python | migrations/versions/27b70c1be3f0_.py | ENCODE-DCC/genomic-data-service | 954017a5bcc5f448fbe2867768186df5e066c67c | ["MIT"] | 3 | 2020-10-26T02:15:55.000Z | 2022-01-26T18:39:09.000Z | migrations/versions/27b70c1be3f0_.py | ENCODE-DCC/genomic-data-service | 954017a5bcc5f448fbe2867768186df5e066c67c | ["MIT"] | 3 | 2021-08-17T02:01:54.000Z | 2022-03-30T17:14:02.000Z | migrations/versions/27b70c1be3f0_.py | ENCODE-DCC/genomic-data-service | 954017a5bcc5f448fbe2867768186df5e066c67c | ["MIT"] | 1 | 2022-03-24T21:15:34.000Z | 2022-03-24T21:15:34.000Z |
"""empty message
Revision ID: 27b70c1be3f0
Revises: e7cf6ce136f4
Create Date: 2021-04-01 10:18:11.615734
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '27b70c1be3f0'
down_revision = 'e7cf6ce136f4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('files', sa.Column('biosample_classification', sa.String(), nullable=True))
op.add_column('files', sa.Column('biosample_organ', sa.String(), nullable=True))
op.add_column('files', sa.Column('biosample_summary', sa.String(), nullable=True))
op.add_column('files', sa.Column('biosample_system', sa.String(), nullable=True))
op.add_column('files', sa.Column('biosample_term_name', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('files', 'biosample_term_name')
op.drop_column('files', 'biosample_system')
op.drop_column('files', 'biosample_summary')
op.drop_column('files', 'biosample_organ')
op.drop_column('files', 'biosample_classification')
# ### end Alembic commands ###
| 33.162162 | 93 | 0.708231 |
6a59b4abde93fc8c0287389517c0087b514ff7f7 | 9,573 | py | Python | release/stubs.min/System/__init___parts/BitConverter.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | ["MIT"] | null | null | null | release/stubs.min/System/__init___parts/BitConverter.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | ["MIT"] | null | null | null | release/stubs.min/System/__init___parts/BitConverter.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | ["MIT"] | null | null | null |
class BitConverter(object):
""" Converts base data types to an array of bytes,and an array of bytes to base data types. """
@staticmethod
def DoubleToInt64Bits(value):
"""
DoubleToInt64Bits(value: float) -> Int64
Converts the specified double-precision floating point number to a 64-bit signed integer.
value: The number to convert.
Returns: A 64-bit signed integer whose value is equivalent to value.
"""
pass
@staticmethod
def GetBytes(value):
"""
GetBytes(value: UInt32) -> Array[Byte]
Returns the specified 32-bit unsigned integer value as an array of bytes.
value: The number to convert.
Returns: An array of bytes with length 4.
GetBytes(value: UInt16) -> Array[Byte]
Returns the specified 16-bit unsigned integer value as an array of bytes.
value: The number to convert.
Returns: An array of bytes with length 2.
GetBytes(value: UInt64) -> Array[Byte]
Returns the specified 64-bit unsigned integer value as an array of bytes.
value: The number to convert.
Returns: An array of bytes with length 8.
GetBytes(value: float) -> Array[Byte]
Returns the specified double-precision floating point value as an array of bytes.
value: The number to convert.
Returns: An array of bytes with length 8.
GetBytes(value: Single) -> Array[Byte]
Returns the specified single-precision floating point value as an array of bytes.
value: The number to convert.
Returns: An array of bytes with length 4.
GetBytes(value: Char) -> Array[Byte]
Returns the specified Unicode character value as an array of bytes.
value: A character to convert.
Returns: An array of bytes with length 2.
GetBytes(value: bool) -> Array[Byte]
Returns the specified Boolean value as an array of bytes.
value: A Boolean value.
Returns: An array of bytes with length 1.
GetBytes(value: Int16) -> Array[Byte]
Returns the specified 16-bit signed integer value as an array of bytes.
value: The number to convert.
Returns: An array of bytes with length 2.
GetBytes(value: Int64) -> Array[Byte]
Returns the specified 64-bit signed integer value as an array of bytes.
value: The number to convert.
Returns: An array of bytes with length 8.
GetBytes(value: int) -> Array[Byte]
Returns the specified 32-bit signed integer value as an array of bytes.
value: The number to convert.
Returns: An array of bytes with length 4.
"""
pass
@staticmethod
def Int64BitsToDouble(value):
"""
Int64BitsToDouble(value: Int64) -> float
Converts the specified 64-bit signed integer to a double-precision floating point number.
value: The number to convert.
Returns: A double-precision floating point number whose value is equivalent to value.
"""
pass
@staticmethod
def ToBoolean(value, startIndex):
"""
ToBoolean(value: Array[Byte],startIndex: int) -> bool
Returns a Boolean value converted from one byte at a specified position in a byte array.
value: An array of bytes.
startIndex: The starting position within value.
Returns: true if the byte at startIndex in value is nonzero; otherwise,false.
"""
pass
@staticmethod
def ToChar(value, startIndex):
"""
ToChar(value: Array[Byte],startIndex: int) -> Char
Returns a Unicode character converted from two bytes at a specified position in a byte array.
value: An array.
startIndex: The starting position within value.
Returns: A character formed by two bytes beginning at startIndex.
"""
pass
@staticmethod
def ToDouble(value, startIndex):
"""
ToDouble(value: Array[Byte],startIndex: int) -> float
Returns a double-precision floating point number converted from eight bytes at a specified
position in a byte array.
value: An array of bytes.
startIndex: The starting position within value.
Returns: A double precision floating point number formed by eight bytes beginning at startIndex.
"""
pass
@staticmethod
def ToInt16(value, startIndex):
"""
ToInt16(value: Array[Byte],startIndex: int) -> Int16
Returns a 16-bit signed integer converted from two bytes at a specified position in a byte array.
value: An array of bytes.
startIndex: The starting position within value.
Returns: A 16-bit signed integer formed by two bytes beginning at startIndex.
"""
pass
@staticmethod
def ToInt32(value, startIndex):
"""
ToInt32(value: Array[Byte],startIndex: int) -> int
Returns a 32-bit signed integer converted from four bytes at a specified position in a byte
array.
value: An array of bytes.
startIndex: The starting position within value.
Returns: A 32-bit signed integer formed by four bytes beginning at startIndex.
"""
pass
@staticmethod
def ToInt64(value, startIndex):
"""
ToInt64(value: Array[Byte],startIndex: int) -> Int64
Returns a 64-bit signed integer converted from eight bytes at a specified position in a byte
array.
value: An array of bytes.
startIndex: The starting position within value.
Returns: A 64-bit signed integer formed by eight bytes beginning at startIndex.
"""
pass
@staticmethod
def ToSingle(value, startIndex):
"""
ToSingle(value: Array[Byte],startIndex: int) -> Single
Returns a single-precision floating point number converted from four bytes at a specified
position in a byte array.
value: An array of bytes.
startIndex: The starting position within value.
Returns: A single-precision floating point number formed by four bytes beginning at startIndex.
"""
pass
@staticmethod
def ToString(value=None, startIndex=None, length=None):
"""
ToString(value: Array[Byte],startIndex: int) -> str
Converts the numeric value of each element of a specified subarray of bytes to its equivalent
hexadecimal string representation.
value: An array of bytes.
startIndex: The starting position within value.
Returns: A string of hexadecimal pairs separated by hyphens,where each pair represents the corresponding
element in a subarray of value; for example,"7F-2C-4A-00".
ToString(value: Array[Byte]) -> str
Converts the numeric value of each element of a specified array of bytes to its equivalent
hexadecimal string representation.
value: An array of bytes.
Returns: A string of hexadecimal pairs separated by hyphens,where each pair represents the corresponding
element in value; for example,"7F-2C-4A-00".
ToString(value: Array[Byte],startIndex: int,length: int) -> str
Converts the numeric value of each element of a specified subarray of bytes to its equivalent
hexadecimal string representation.
value: An array of bytes.
startIndex: The starting position within value.
length: The number of array elements in value to convert.
Returns: A string of hexadecimal pairs separated by hyphens,where each pair represents the corresponding
element in a subarray of value; for example,"7F-2C-4A-00".
"""
pass
@staticmethod
def ToUInt16(value, startIndex):
"""
ToUInt16(value: Array[Byte],startIndex: int) -> UInt16
Returns a 16-bit unsigned integer converted from two bytes at a specified position in a byte
array.
value: The array of bytes.
startIndex: The starting position within value.
Returns: A 16-bit unsigned integer formed by two bytes beginning at startIndex.
"""
pass
@staticmethod
def ToUInt32(value, startIndex):
"""
ToUInt32(value: Array[Byte],startIndex: int) -> UInt32
Returns a 32-bit unsigned integer converted from four bytes at a specified position in a byte
array.
value: An array of bytes.
startIndex: The starting position within value.
Returns: A 32-bit unsigned integer formed by four bytes beginning at startIndex.
"""
pass
@staticmethod
def ToUInt64(value, startIndex):
"""
ToUInt64(value: Array[Byte],startIndex: int) -> UInt64
Returns a 64-bit unsigned integer converted from eight bytes at a specified position in a byte
array.
value: An array of bytes.
startIndex: The starting position within value.
Returns: A 64-bit unsigned integer formed by the eight bytes beginning at startIndex.
"""
pass
IsLittleEndian = True
__all__ = [
"DoubleToInt64Bits",
"GetBytes",
"Int64BitsToDouble",
"IsLittleEndian",
"ToBoolean",
"ToChar",
"ToDouble",
"ToInt16",
"ToInt32",
"ToInt64",
"ToSingle",
"ToString",
"ToUInt16",
"ToUInt32",
"ToUInt64",
]
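# Usage sketch for the API documented above (a minimal, illustrative example only:
# it assumes the real .NET System assembly is importable under IronPython at runtime,
# which this stub merely describes):
#
#   from System import BitConverter
#   data = BitConverter.GetBytes(1234)      # Array[Byte] of length 4
#   value = BitConverter.ToInt32(data, 0)   # -> 1234
#   text = BitConverter.ToString(data)      # -> "D2-04-00-00" on a little-endian host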
| 20.455128 | 109 | 0.639507 |
dd57ecb3519d9a5ccda368256eda98c0d14afa2b | 2,050 | py | Python | ingestion/src/metadata/generated/schema/entity/policies/policy.py | avignd/OpenMetadata | f81fb3d5e8ec391928afba57868cf0f9d7dc0f74 | ["Apache-2.0"] | null | null | null | ingestion/src/metadata/generated/schema/entity/policies/policy.py | avignd/OpenMetadata | f81fb3d5e8ec391928afba57868cf0f9d7dc0f74 | ["Apache-2.0"] | null | null | null | ingestion/src/metadata/generated/schema/entity/policies/policy.py | avignd/OpenMetadata | f81fb3d5e8ec391928afba57868cf0f9d7dc0f74 | ["Apache-2.0"] | null | null | null |
# generated by datamodel-codegen:
# filename: schema/entity/policies/policy.json
# timestamp: 2021-11-11T08:56:06+00:00
from __future__ import annotations
from enum import Enum
from typing import Optional
from pydantic import AnyUrl, BaseModel, Field, constr
from ...type import basic, entityHistory, entityReference
class PolicyType(Enum):
AccessControl = 'AccessControl'
Lifecycle = 'Lifecycle'
class Policy(BaseModel):
id: basic.Uuid = Field(
..., description='Unique identifier that identifies this Policy.'
)
name: constr(min_length=1, max_length=64) = Field(
..., description='Name that identifies this Policy.'
)
fullyQualifiedName: Optional[constr(min_length=1, max_length=128)] = Field(
None, description='Name that uniquely identifies a Policy.'
)
displayName: Optional[str] = Field(None, description='Title for this Policy.')
description: Optional[str] = Field(
None,
description='A short description of the Policy, comprehensible to regular users.',
)
owner: entityReference.EntityReference = Field(
..., description='Owner of this Policy.'
)
policyUrl: Optional[AnyUrl] = Field(
None, description='Link to a well documented definition of this Policy.'
)
href: Optional[basic.Href] = Field(
None, description='Link to the resource corresponding to this entity.'
)
policyType: PolicyType
enabled: Optional[bool] = Field(True, description='Is the policy enabled.')
version: Optional[entityHistory.EntityVersion] = Field(
None, description='Metadata version of the Policy.'
)
updatedAt: Optional[basic.DateTime] = Field(
None,
description='Last update time corresponding to the new version of the Policy.',
)
updatedBy: Optional[str] = Field(None, description='User who made the update.')
changeDescription: Optional[entityHistory.ChangeDescription] = Field(
None, description='Change that led to this version of the entity.'
)
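# Rough construction sketch for the generated model above (field values are
# placeholders; it assumes pydantic coerces plain strings/dicts into the
# basic.Uuid and entityReference.EntityReference helper types, as generated
# models typically allow):
#
#   policy = Policy(
#       id='0f5d1b2c-8a9e-4c1d-9d3e-2b7f6a1c4e5d',
#       name='dataLifecycle',
#       owner={'id': '1e2d3c4b-5a69-4788-9611-0a0b0c0d0e0f', 'type': 'team'},
#       policyType=PolicyType.Lifecycle,
#   )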
| 35.964912 | 90 | 0.696098 |
fdc1390a792b2643494e181bdb7df1015a4b6a9a | 541 | py | Python | traces/tests/factories.py | bitlabstudio/django-traces | f08c0f46caf5af9da49b8882bd98f354cd9c174f | ["MIT"] | 1 | 2019-07-15T19:24:03.000Z | 2019-07-15T19:24:03.000Z | traces/tests/factories.py | bitlabstudio/django-traces | f08c0f46caf5af9da49b8882bd98f354cd9c174f | ["MIT"] | null | null | null | traces/tests/factories.py | bitlabstudio/django-traces | f08c0f46caf5af9da49b8882bd98f354cd9c174f | ["MIT"] | null | null | null |
"""Factories for the ``traces`` app."""
from factory import DjangoModelFactory, Sequence
from .. import models
class TraceFactory(DjangoModelFactory):
FACTORY_FOR = models.Trace
ip = Sequence(lambda x: '66.66.66.{}'.format(x))
class BlacklistIPFactory(DjangoModelFactory):
FACTORY_FOR = models.BlacklistIP
ip = Sequence(lambda x: '77.77.77.{}'.format(x))
class BlacklistUserAgentFactory(DjangoModelFactory):
FACTORY_FOR = models.BlacklistUserAgent
user_agent = Sequence(lambda x: 'user_agent_{}'.format(x))
| 23.521739 | 62 | 0.730129 |
2cddb6566d10a322fbcd3493c5afad6f35c0a5ad | 35 | py | Python | Python/Tests/TestData/RemoveImport/FromImportParensTrailingComma1.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | ["Apache-2.0"] | null | null | null | Python/Tests/TestData/RemoveImport/FromImportParensTrailingComma1.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | ["Apache-2.0"] | null | null | null | Python/Tests/TestData/RemoveImport/FromImportParensTrailingComma1.py | nanshuiyu/pytools | 9f9271fe8cf564b4f94e9456d400f4306ea77c23 | ["Apache-2.0"] | null | null | null |
from sys import (oar, baz, )
baz
| 11.666667 | 29 | 0.628571 |
da8ef81fd82788d12b13d31141821ceb9411945f | 14,441 | py | Python | lwganrt/models/holoportator_rt.py | darkAlert/impersonator-rt | 8a2b879cf60f2094944a0104592d460fee3bda6a | ["MIT"] | 6 | 2020-04-17T08:47:58.000Z | 2021-07-02T10:58:52.000Z | lwganrt/models/holoportator_rt.py | darkAlert/impersonator-rt | 8a2b879cf60f2094944a0104592d460fee3bda6a | ["MIT"] | null | null | null | lwganrt/models/holoportator_rt.py | darkAlert/impersonator-rt | 8a2b879cf60f2094944a0104592d460fee3bda6a | ["MIT"] | 1 | 2020-05-24T23:46:54.000Z | 2020-05-24T23:46:54.000Z |
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from .models import BaseModel
from lwganrt.networks.networks import NetworksFactory, HumanModelRecovery
from lwganrt.utils.detectors import PersonMaskRCNNDetector
from lwganrt.utils.nmr import SMPLRenderer
import lwganrt.utils.cv_utils as cv_utils
import lwganrt.utils.util as util
import lwganrt.models.adaptive_personalization_rt
class HoloportatorRT(BaseModel):
def __init__(self, opt, device):
super(HoloportatorRT, self).__init__(opt)
self._name = 'HoloportatorRT'
self.device = device
self._create_networks()
# prefetch variables
self.src_info = None
self.tsf_info = None
def _create_networks(self):
# 1. create generator
self.generator = self._create_generator()
# 2. create hmr
self.hmr = self._create_hmr()
# 3. create render
self.render = SMPLRenderer(face_path=self._opt.smpl_faces,
uv_map_path=self._opt.uv_mapping,
image_size=self._opt.image_size,
tex_size=self._opt.tex_size,
has_front=self._opt.front_warp,
fill_back=False,
part_info=self._opt.part_info,
front_info=self._opt.front_info,
head_info=self._opt.head_info,
device = self.device)
# 4. pre-processor
if self._opt.has_detector:
self.detector = PersonMaskRCNNDetector(ks=self._opt.bg_ks, threshold=0.5, device=self.device)
else:
self.detector = None
def _create_generator(self):
net = NetworksFactory.get_by_name(self._opt.gen_name, src_dim=3+self._G_cond_nc,
tsf_dim=3+self._G_cond_nc, repeat_num=self._opt.repeat_num,
device=self.device)
if self._opt.load_path:
self._load_params(net, self._opt.load_path, remove_bg_model=True)
elif self._opt.load_epoch > 0:
self._load_network(net, 'G', self._opt.load_epoch)
else:
raise ValueError('load_path {} is empty and load_epoch {} is 0'.format(
self._opt.load_path, self._opt.load_epoch))
net.eval()
return net
def _create_hmr(self):
hmr = HumanModelRecovery(self._opt.smpl_model, device=self.device)
saved_data = torch.load(self._opt.hmr_model)
hmr.load_state_dict(saved_data)
hmr.eval()
return hmr
@torch.no_grad()
def personalize(self, src_img, src_smpl, use_mask=False):
# source process, {'theta', 'cam', 'pose', 'shape', 'verts', 'j2d', 'j3d'}
src_info = self.hmr.get_details(src_smpl)
src_f2verts, src_fim, src_wim = self.render.render_fim_wim(src_info['cam'], src_info['verts'])
src_info['fim'] = src_fim
src_info['wim'] = src_wim
src_info['cond'], _ = self.render.encode_fim(src_info['cam'], src_info['verts'], fim=src_fim, transpose=True)
src_info['f2verts'] = src_f2verts
src_info['p2verts'] = src_f2verts[:, :, :, 0:2]
src_info['p2verts'][:, :, :, 1] *= -1
# add image to source info
src_info['img'] = src_img
if use_mask:
# get front mask:
if self.detector is not None:
bbox, ft_mask = self.detector.inference(src_img[0])
else:
ft_mask = 1 - util.morph(src_info['cond'][:, -1:, :, :], ks=self._opt.ft_ks, mode='erode')
src_inputs = torch.cat([src_img * ft_mask, src_info['cond']], dim=1)
else:
src_inputs = torch.cat([src_img, src_info['cond']], dim=1)
src_info['feats'] = self.generator.encode_src(src_inputs)
self.src_info = src_info
@torch.no_grad()
def view(self, rt, t, output_dir=None):
# get source info
src_info = self.src_info
src_mesh = self.src_info['verts']
tsf_mesh = self.rotate_trans(rt, t, src_mesh)
tsf_f2verts, tsf_fim, tsf_wim = self.render.render_fim_wim(src_info['cam'], tsf_mesh)
tsf_cond, _ = self.render.encode_fim(src_info['cam'], tsf_mesh, fim=tsf_fim, transpose=True)
T = self.render.cal_bc_transform(src_info['p2verts'], tsf_fim, tsf_wim)
tsf_img = F.grid_sample(src_info['img'], T)
tsf_inputs = torch.cat([tsf_img, tsf_cond], dim=1)
preds, tsf_mask = self.forward(tsf_inputs, T)
preds = preds.permute(0, 2, 3, 1)
preds = preds.cpu().detach().numpy()
if output_dir is not None:
cv_utils.save_cv2_img(preds, output_dir, normalize=True)
return preds
@torch.no_grad()
def inference(self, tgt_smpl, cam_strategy='smooth', output_dir=None, view=None):
# get target info
self.src_info['cam'] = tgt_smpl[:, 0:3].contiguous()
tsf_inputs = self.transfer_params_by_smpl(tgt_smpl, cam_strategy, view=view)
preds,_ = self.forward(tsf_inputs, self.tsf_info['T'])
preds = preds.permute(0, 2, 3, 1)
preds = preds.cpu().detach().numpy()
if output_dir is not None:
cv_utils.save_cv2_img(preds, output_dir, normalize=True)
return preds
def forward(self, tsf_inputs, T, use_mask=False):
src_encoder_outs, src_resnet_outs = self.src_info['feats']
tsf_color, tsf_mask = self.generator.inference(src_encoder_outs, src_resnet_outs, tsf_inputs, T)
if use_mask:
pred_imgs = (1 - tsf_mask) * tsf_color
else:
pred_imgs = tsf_color
return pred_imgs, tsf_mask
def rotate_trans(self, rt, t, X):
R = cv_utils.euler2matrix(rt) # (3 x 3)
R = torch.FloatTensor(R)[None, :, :].to(self.device)
t = torch.FloatTensor(t)[None, None, :].to(self.device)
# (bs, Nv, 3) + (bs, 1, 3)
return torch.bmm(X, R) + t
def transfer_params_by_smpl(self, tgt_smpl, cam_strategy='smooth', t=0, view=None):
# get source info
src_info = self.src_info
if t == 0 and cam_strategy == 'smooth':
self.first_cam = tgt_smpl[:, 0:3].clone()
# get transfer smpl
tsf_smpl = self.swap_smpl(src_info['cam'], src_info['shape'], tgt_smpl, cam_strategy=cam_strategy)
# transfer process, {'theta', 'cam', 'pose', 'shape', 'verts', 'j2d', 'j3d'}
tsf_info = self.hmr.get_details(tsf_smpl)
# New view for tsf_smpl:
if view is not None:
tsf_mesh = tsf_info['verts']
tsf_info['verts'] = self.rotate_trans(view['R'], view['t'], tsf_mesh)
tsf_f2verts, tsf_fim, tsf_wim = self.render.render_fim_wim(tsf_info['cam'], tsf_info['verts'])
# src_f2pts = src_f2verts[:, :, :, 0:2]
tsf_info['fim'] = tsf_fim
tsf_info['wim'] = tsf_wim
tsf_info['cond'], _ = self.render.encode_fim(tsf_info['cam'], tsf_info['verts'], fim=tsf_fim, transpose=True)
# tsf_info['sil'] = util.morph((tsf_fim != -1).float(), ks=self._opt.ft_ks, mode='dilate')
T = self.render.cal_bc_transform(src_info['p2verts'], tsf_fim, tsf_wim)
tsf_img = F.grid_sample(src_info['img'], T)
tsf_inputs = torch.cat([tsf_img, tsf_info['cond']], dim=1)
# add target image to tsf info
tsf_info['tsf_img'] = tsf_img
tsf_info['T'] = T
self.tsf_info = tsf_info
return tsf_inputs
def swap_smpl(self, src_cam, src_shape, tgt_smpl, cam_strategy='smooth'):
tgt_cam = tgt_smpl[:, 0:3].contiguous()
pose = tgt_smpl[:, 3:75].contiguous()
# TODO, need more tricky ways
if cam_strategy == 'smooth':
cam = src_cam.clone()
delta_xy = tgt_cam[:, 1:] - self.first_cam[:, 1:]
cam[:, 1:] += delta_xy
elif cam_strategy == 'source':
cam = src_cam
else:
cam = tgt_cam
tsf_smpl = torch.cat([cam, pose, src_shape], dim=1)
return tsf_smpl
def post_personalize(self, data_loader, verbose=True):
from lwganrt.networks.networks import FaceLoss
@torch.no_grad()
def set_gen_inputs(sample):
j2ds = sample['j2d'].cuda() # (N, 4)
T = sample['T'].cuda() # (N, h, w, 2)
T_cycle = sample['T_cycle'].cuda() # (N, h, w, 2)
src_inputs = sample['src_inputs'].cuda() # (N, 6, h, w)
tsf_inputs = sample['tsf_inputs'].cuda() # (N, 6, h, w)
src_fim = sample['src_fim'].cuda()
tsf_fim = sample['tsf_fim'].cuda()
init_preds = sample['preds'].cuda()
images = sample['images']
images = torch.cat([images[:, 0, ...], images[:, 1, ...]], dim=0).cuda() # (2N, 3, h, w)
pseudo_masks = sample['pseudo_masks']
pseudo_masks = torch.cat([pseudo_masks[:, 0, ...], pseudo_masks[:, 1, ...]],
dim=0).cuda() # (2N, 1, h, w)
return src_fim, tsf_fim, j2ds, T, T_cycle, \
src_inputs, tsf_inputs, images, init_preds, pseudo_masks
def set_cycle_inputs(fake_tsf_imgs, src_inputs, tsf_inputs, T_cycle):
# set cycle src inputs
cycle_src_inputs = torch.cat([fake_tsf_imgs * tsf_inputs[:, -1:, ...], tsf_inputs[:, 3:]], dim=1)
# set cycle tsf inputs
cycle_tsf_img = F.grid_sample(fake_tsf_imgs, T_cycle)
cycle_tsf_inputs = torch.cat([cycle_tsf_img, src_inputs[:, 3:]], dim=1)
return cycle_src_inputs, cycle_tsf_inputs
def inference(src_inputs, tsf_inputs, T, T_cycle):
fake_src_color, fake_src_mask, fake_tsf_color, fake_tsf_mask = \
self.generator.infer_front(src_inputs, tsf_inputs, T=T)
fake_src_imgs = (1 - fake_src_mask) * fake_src_color
fake_tsf_imgs = (1 - fake_tsf_mask) * fake_tsf_color
cycle_src_inputs, cycle_tsf_inputs = set_cycle_inputs(
fake_tsf_imgs, src_inputs, tsf_inputs, T_cycle)
cycle_src_color, cycle_src_mask, cycle_tsf_color, cycle_tsf_mask = \
self.generator.infer_front(cycle_src_inputs, cycle_tsf_inputs, T=T_cycle)
cycle_src_imgs = (1 - cycle_src_mask) * cycle_src_color
cycle_tsf_imgs = (1 - cycle_tsf_mask) * cycle_tsf_color
return fake_src_imgs, fake_tsf_imgs, cycle_src_imgs, cycle_tsf_imgs, fake_src_mask, fake_tsf_mask
def create_criterion():
face_criterion = FaceLoss(pretrained_path=self._opt.face_model).cuda()
idt_criterion = torch.nn.L1Loss()
mask_criterion = torch.nn.BCELoss()
return face_criterion, idt_criterion, mask_criterion
init_lr = 0.0002
nodecay_epochs = 5
optimizer = torch.optim.Adam(self.generator.parameters(), lr=init_lr, betas=(0.5, 0.999))
face_cri, idt_cri, msk_cri = create_criterion()
step = 0
logger = tqdm(range(nodecay_epochs))
for epoch in logger:
for i, sample in enumerate(data_loader):
src_fim, tsf_fim, j2ds, T, T_cycle, src_inputs, tsf_inputs, \
images, init_preds, pseudo_masks = set_gen_inputs(sample)
# print(bg_inputs.shape, src_inputs.shape, tsf_inputs.shape)
bs = tsf_inputs.shape[0]
src_imgs = images[0:bs]
fake_src_imgs, fake_tsf_imgs, cycle_src_imgs, cycle_tsf_imgs, fake_src_mask, fake_tsf_mask = inference(
src_inputs, tsf_inputs, T, T_cycle)
# cycle reconstruction loss
cycle_loss = idt_cri(src_imgs, fake_src_imgs) + idt_cri(src_imgs, cycle_tsf_imgs)
# structure loss
bg_mask = src_inputs[:, -1:]
body_mask = 1 - bg_mask
str_src_imgs = src_imgs * body_mask
cycle_warp_imgs = F.grid_sample(fake_tsf_imgs, T_cycle)
back_head_mask = 1 - self.render.encode_front_fim(tsf_fim, transpose=True, front_fn=False)
struct_loss = idt_cri(init_preds, fake_tsf_imgs) + \
2 * idt_cri(str_src_imgs * back_head_mask, cycle_warp_imgs * back_head_mask)
fid_loss = face_cri(src_imgs, cycle_tsf_imgs, kps1=j2ds[:, 0], kps2=j2ds[:, 0]) + \
face_cri(init_preds, fake_tsf_imgs, kps1=j2ds[:, 1], kps2=j2ds[:, 1])
# mask loss
# mask_loss = msk_cri(fake_tsf_mask, tsf_inputs[:, -1:]) + msk_cri(fake_src_mask, src_inputs[:, -1:])
# mask_loss = msk_cri(torch.cat([fake_src_mask, fake_tsf_mask], dim=0), pseudo_masks)
# loss = 10 * cycle_loss + 10 * struct_loss + fid_loss + 5 * mask_loss
loss = 10 * cycle_loss + 10 * struct_loss + fid_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if verbose:
logger.set_description(
(
f'epoch: {epoch + 1}; step: {step}; '
f'total: {loss.item():.6f}; cyc: {cycle_loss.item():.6f}; '
f'str: {struct_loss.item():.6f}; fid: {fid_loss.item():.6f}; '
# f'msk: {mask_loss.item():.6f}'
)
)
step += 1
self.generator.eval()
def prepare_input(img, smpl, image_size=256, device=None):
# resize image and convert the color space from [0, 255] to [-1, 1]
if isinstance(img, np.ndarray):
prep_img = cv_utils.transform_img(img, image_size, transpose=True) * 2 - 1.0
prep_img = torch.tensor(prep_img, dtype=torch.float32).unsqueeze(0)
else:
raise NotImplementedError
if isinstance(smpl, np.ndarray):
if smpl.ndim == 1:
prep_smpl = torch.tensor(smpl, dtype=torch.float32).unsqueeze(0)
else:
prep_smpl = torch.tensor(smpl, dtype=torch.float32)
else:
raise NotImplementedError
if device is not None:
prep_img = prep_img.to(device)
prep_smpl = prep_smpl.to(device)
return prep_img, prep_smpl
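# Minimal end-to-end sketch of how the pieces above fit together (illustrative only:
# `opt` is assumed to be a parsed options object pointing at trained checkpoints,
# `src_img` an HxWx3 uint8 image and `src_smpl` an 85-dim cam+pose+shape vector):
#
#   device = torch.device('cuda:0')
#   holoport = HoloportatorRT(opt, device=device)
#   img_t, smpl_t = prepare_input(src_img, src_smpl, image_size=opt.image_size, device=device)
#   holoport.personalize(img_t, smpl_t)
#   novel_view = holoport.view(rt=[0.0, 1.57, 0.0], t=[0.0, 0.0, 0.0])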
| 39.241848 | 119 | 0.585486 |
dddb8c7e56d862f43a320ea78812accb2041a635 | 3,152 | py | Python | AppServer/lib/django-1.4/tests/regressiontests/forms/tests/util.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | ["Apache-2.0"] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.4/tests/regressiontests/forms/tests/util.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | ["Apache-2.0"] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.4/tests/regressiontests/forms/tests/util.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | ["Apache-2.0"] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z |
# -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.forms.util import flatatt, ErrorDict, ErrorList
from django.test import TestCase
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy
class FormsUtilTestCase(TestCase):
# Tests for forms/util.py module.
def test_flatatt(self):
###########
# flatatt #
###########
self.assertEqual(flatatt({'id': "header"}), u' id="header"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this"}), u' class="news" title="Read this"')
self.assertEqual(flatatt({}), u'')
def test_validation_error(self):
###################
# ValidationError #
###################
# Can take a string.
self.assertHTMLEqual(str(ErrorList(ValidationError("There was an error.").messages)),
'<ul class="errorlist"><li>There was an error.</li></ul>')
# Can take a unicode string.
self.assertHTMLEqual(unicode(ErrorList(ValidationError(u"Not \u03C0.").messages)),
u'<ul class="errorlist"><li>Not π.</li></ul>')
# Can take a lazy string.
self.assertHTMLEqual(str(ErrorList(ValidationError(ugettext_lazy("Error.")).messages)),
'<ul class="errorlist"><li>Error.</li></ul>')
# Can take a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["Error one.", "Error two."]).messages)),
'<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>')
# Can take a mixture in a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["First error.", u"Not \u03C0.", ugettext_lazy("Error.")]).messages)),
'<ul class="errorlist"><li>First error.</li><li>Not π.</li><li>Error.</li></ul>')
class VeryBadError:
def __unicode__(self): return u"A very bad error."
# Can take a non-string.
self.assertHTMLEqual(str(ErrorList(ValidationError(VeryBadError()).messages)),
'<ul class="errorlist"><li>A very bad error.</li></ul>')
# Escapes non-safe input but not input marked safe.
example = 'Example of link: <a href="http://www.example.com/">example</a>'
self.assertHTMLEqual(str(ErrorList([example])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorList([mark_safe(example)])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': example})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': mark_safe(example)})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
| 50.031746 | 152 | 0.586929 |
e692bb270a92639e3f1ccd81e9fc72c740395a19 | 4,369 | py | Python | bin/CreateHydroShareResource.py | selimnairb/EcohydroLib | 38aa4020c88a57c9d2f1fb66acd393b6e989e897 | ["Unlicense"] | 12 | 2015-03-03T05:08:55.000Z | 2021-01-27T12:38:33.000Z | bin/CreateHydroShareResource.py | selimnairb/EcohydroLib | 38aa4020c88a57c9d2f1fb66acd393b6e989e897 | ["Unlicense"] | 3 | 2016-01-04T15:05:43.000Z | 2019-02-01T02:19:45.000Z | bin/CreateHydroShareResource.py | selimnairb/EcohydroLib | 38aa4020c88a57c9d2f1fb66acd393b6e989e897 | ["Unlicense"] | 5 | 2015-02-15T18:20:38.000Z | 2017-05-21T13:14:32.000Z |
#!/usr/bin/env python
"""@package GetUSGSDEMForBoundingbox
@brief Create a new HydroShare resource by uploading the contents of an
EcohydroLib project.
This software is provided free of charge under the New BSD License. Please see
the following license information:
Copyright (c) 2015, University of North Carolina at Chapel Hill
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of North Carolina at Chapel Hill nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF NORTH CAROLINA AT CHAPEL HILL
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@author Brian Miles <brian_miles@unc.edu>
"""
import sys
import os
import argparse
import traceback
import getpass
from ecohydrolib.hydroshare import create_console_callback
from ecohydrolib.hydroshare import get_password_authentication
from ecohydrolib.command.exceptions import *
from ecohydrolib.command.hydroshare import HydroShareCreateResource
if __name__ == "__main__":
# Handle command line options
parser = argparse.ArgumentParser(description='Create a new HydroShare resource by uploading the contents of an EcohydroLib project.')
parser.add_argument('-i', '--configfile', dest='configfile', required=False,
help='The configuration file.')
parser.add_argument('-p', '--projectDir', dest='projectDir', required=True,
help='The directory to which metadata, intermediate, and final files should be saved')
parser.add_argument('--title', dest='title', required=True,
help='The title of the HydroShare resource to create.')
parser.add_argument('--abstract', dest='abstract',
help='The abstract of the new resource.')
parser.add_argument('--keywords', dest='keywords', nargs='+',
help='Key works to associate with the new resource.')
parser.add_argument('--overwrite', dest='overwrite', action='store_true', required=False,
help='Overwrite existing data in project directory. If not specified, program will halt if a dataset already exists.')
args = parser.parse_args()
configFile = None
if args.configfile:
configFile = args.configfile
command = HydroShareCreateResource(args.projectDir, configFile)
exitCode = os.EX_OK
try:
sys.stdout.write('HydroShare username (this will not be stored on disk): ')
username = sys.stdin.readline().strip()
password = getpass.getpass('HydroShare password (this will not be stored on disk): ')
auth = get_password_authentication(username, password)
command.run(auth=auth,
title=args.title,
abstract=args.abstract,
keywords=args.keywords,
create_callback=create_console_callback,
verbose=True,
overwrite=args.overwrite)
except CommandException as e:
traceback.print_exc(file=sys.stderr)
exitCode = os.EX_DATAERR
    sys.exit(exitCode)
| 47.48913 | 143 | 0.715496 |
16e6ae004646b3c56461beab4978a93d10723c64 | 49,645 | py | Python | malaya/topic/location.py | leowmjw/Malaya | 33f39835eca08c238d2dd68aeca3b09c5d0a45ab | ["MIT"] | 2 | 2019-06-23T20:19:22.000Z | 2020-04-16T13:02:32.000Z | malaya/topic/location.py | Jeansding/Malaya | fdf1af178ecc5ec4575298612101362ccc4a94fb | ["MIT"] | null | null | null | malaya/topic/location.py | Jeansding/Malaya | fdf1af178ecc5ec4575298612101362ccc4a94fb | ["MIT"] | null | null | null |
location = {
0: {'negeri': 'JOHOR', 'parlimen': 'SEGAMAT', 'dun': 'BULOH KASAP'},
1: {'negeri': 'JOHOR', 'parlimen': 'SEGAMAT', 'dun': 'JEMENTAH'},
2: {'negeri': 'JOHOR', 'parlimen': 'SEKIJANG', 'dun': 'PEMANIS'},
3: {'negeri': 'JOHOR', 'parlimen': 'SEKIJANG', 'dun': 'KEMELAH'},
4: {'negeri': 'JOHOR', 'parlimen': 'LABIS', 'dun': 'TENANG'},
5: {'negeri': 'JOHOR', 'parlimen': 'LABIS', 'dun': 'BEKOK'},
6: {'negeri': 'JOHOR', 'parlimen': 'PAGOH', 'dun': 'BUKIT SERAMPANG'},
7: {'negeri': 'JOHOR', 'parlimen': 'PAGOH', 'dun': 'JORAK'},
8: {'negeri': 'JOHOR', 'parlimen': 'LEDANG', 'dun': 'GAMBIR'},
9: {'negeri': 'JOHOR', 'parlimen': 'LEDANG', 'dun': 'TANGKAK'},
10: {'negeri': 'JOHOR', 'parlimen': 'LEDANG', 'dun': 'SEROM'},
11: {'negeri': 'JOHOR', 'parlimen': 'BAKRI', 'dun': 'BENTAYAN'},
12: {'negeri': 'JOHOR', 'parlimen': 'BAKRI', 'dun': 'SUNGAI ABONG'},
13: {'negeri': 'JOHOR', 'parlimen': 'BAKRI', 'dun': 'BUKIT NANING'},
14: {'negeri': 'JOHOR', 'parlimen': 'MUAR', 'dun': 'MAHARANI'},
15: {'negeri': 'JOHOR', 'parlimen': 'MUAR', 'dun': 'SUNGAI BALANG'},
16: {'negeri': 'JOHOR', 'parlimen': 'PARIT SULONG', 'dun': 'SEMERAH'},
17: {'negeri': 'JOHOR', 'parlimen': 'PARIT SULONG', 'dun': 'SRI MEDAN'},
18: {'negeri': 'JOHOR', 'parlimen': 'AYER HITAM', 'dun': 'YONG PENG'},
19: {'negeri': 'JOHOR', 'parlimen': 'AYER HITAM', 'dun': 'SEMARANG'},
20: {'negeri': 'JOHOR', 'parlimen': 'SRI GADING', 'dun': 'PARIT YAANI'},
21: {'negeri': 'JOHOR', 'parlimen': 'SRI GADING', 'dun': 'PARIT RAJA'},
22: {'negeri': 'JOHOR', 'parlimen': 'BATU PAHAT', 'dun': 'PENGGARAM'},
23: {'negeri': 'JOHOR', 'parlimen': 'BATU PAHAT', 'dun': 'SENGGARANG'},
24: {'negeri': 'JOHOR', 'parlimen': 'BATU PAHAT', 'dun': 'RENGIT'},
25: {'negeri': 'JOHOR', 'parlimen': 'SIMPANG RENGGAM', 'dun': 'MACHAP'},
26: {
'negeri': 'JOHOR',
'parlimen': 'SIMPANG RENGGAM',
'dun': 'LAYANG-LAYANG',
},
27: {'negeri': 'JOHOR', 'parlimen': 'KLUANG', 'dun': 'MENGKIBOL'},
28: {'negeri': 'JOHOR', 'parlimen': 'KLUANG', 'dun': 'MAHKOTA'},
29: {'negeri': 'JOHOR', 'parlimen': 'SEMBRONG', 'dun': 'PALOH'},
30: {'negeri': 'JOHOR', 'parlimen': 'SEMBRONG', 'dun': 'KAHANG'},
31: {'negeri': 'JOHOR', 'parlimen': 'MERSING', 'dun': 'ENDAU'},
32: {'negeri': 'JOHOR', 'parlimen': 'MERSING', 'dun': 'TENGGAROH'},
33: {'negeri': 'JOHOR', 'parlimen': 'TENGGARA', 'dun': 'PANTI'},
34: {'negeri': 'JOHOR', 'parlimen': 'TENGGARA', 'dun': 'PASIR RAJA'},
35: {'negeri': 'JOHOR', 'parlimen': 'KOTA TINGGI', 'dun': 'SEDILI'},
36: {'negeri': 'JOHOR', 'parlimen': 'KOTA TINGGI', 'dun': 'JOHOR LAMA'},
37: {'negeri': 'JOHOR', 'parlimen': 'PENGERANG', 'dun': 'PENAWAR'},
38: {'negeri': 'JOHOR', 'parlimen': 'PENGERANG', 'dun': 'TANJONG SURAT'},
39: {'negeri': 'JOHOR', 'parlimen': 'TEBRAU', 'dun': 'TIRAM'},
40: {'negeri': 'JOHOR', 'parlimen': 'TEBRAU', 'dun': 'PUTERI WANGSA'},
41: {'negeri': 'JOHOR', 'parlimen': 'PASIR GUDANG', 'dun': 'JOHOR JAYA'},
42: {'negeri': 'JOHOR', 'parlimen': 'PASIR GUDANG', 'dun': 'PERMAS'},
43: {'negeri': 'JOHOR', 'parlimen': 'JOHOR BAHRU', 'dun': 'TANJONG PUTERI'},
44: {'negeri': 'JOHOR', 'parlimen': 'JOHOR BAHRU', 'dun': 'STULANG'},
45: {'negeri': 'JOHOR', 'parlimen': 'PULAI', 'dun': 'PENGKALAN RINTING'},
46: {'negeri': 'JOHOR', 'parlimen': 'PULAI', 'dun': 'KEMPAS'},
47: {'negeri': 'JOHOR', 'parlimen': 'GELANG PATAH', 'dun': 'SKUDAI'},
48: {'negeri': 'JOHOR', 'parlimen': 'GELANG PATAH', 'dun': 'NUSA JAYA'},
49: {'negeri': 'JOHOR', 'parlimen': 'KULAI', 'dun': 'BUKIT PERMAI'},
50: {'negeri': 'JOHOR', 'parlimen': 'KULAI', 'dun': 'BUKIT BATU'},
51: {'negeri': 'JOHOR', 'parlimen': 'KULAI', 'dun': 'SENAI'},
52: {'negeri': 'JOHOR', 'parlimen': 'PONTIAN', 'dun': 'BENUT'},
53: {'negeri': 'JOHOR', 'parlimen': 'PONTIAN', 'dun': 'PULAI SEBATANG'},
54: {'negeri': 'JOHOR', 'parlimen': 'TANJONG PIAI', 'dun': 'PEKAN NENAS'},
55: {'negeri': 'JOHOR', 'parlimen': 'TANJONG PIAI', 'dun': 'KUKUP'},
56: {'negeri': 'KEDAH', 'parlimen': 'LANGKAWI', 'dun': 'AYER HANGAT'},
57: {'negeri': 'KEDAH', 'parlimen': 'LANGKAWI', 'dun': 'KUAH'},
58: {'negeri': 'KEDAH', 'parlimen': 'JERLUN', 'dun': 'KOTA SIPUTEH'},
59: {'negeri': 'KEDAH', 'parlimen': 'JERLUN', 'dun': 'AYER HITAM'},
60: {
'negeri': 'KEDAH',
'parlimen': 'KUBANG PASU',
'dun': 'BUKIT KAYU HITAM',
},
61: {'negeri': 'KEDAH', 'parlimen': 'KUBANG PASU', 'dun': 'JITRA'},
62: {'negeri': 'KEDAH', 'parlimen': 'PADANG TERAP', 'dun': 'KUALA NERANG'},
63: {'negeri': 'KEDAH', 'parlimen': 'PADANG TERAP', 'dun': 'PEDU'},
64: {'negeri': 'KEDAH', 'parlimen': 'POKOK SENA', 'dun': 'BUKIT LADA'},
65: {'negeri': 'KEDAH', 'parlimen': 'POKOK SENA', 'dun': 'BUKIT PINANG'},
66: {'negeri': 'KEDAH', 'parlimen': 'POKOK SENA', 'dun': 'DERGA'},
67: {'negeri': 'KEDAH', 'parlimen': 'ALOR STAR', 'dun': 'BAKAR BATA'},
68: {'negeri': 'KEDAH', 'parlimen': 'ALOR STAR', 'dun': 'KOTA DARUL AMAN'},
69: {'negeri': 'KEDAH', 'parlimen': 'ALOR STAR', 'dun': 'ALOR MENGKUDU'},
70: {'negeri': 'KEDAH', 'parlimen': 'KUALA KEDAH', 'dun': 'ANAK BUKIT'},
71: {'negeri': 'KEDAH', 'parlimen': 'KUALA KEDAH', 'dun': 'KUBANG ROTAN'},
72: {
'negeri': 'KEDAH',
'parlimen': 'KUALA KEDAH',
'dun': 'PENGKALAN KUNDOR',
},
73: {'negeri': 'KEDAH', 'parlimen': 'PENDANG', 'dun': 'TOKAI'},
74: {'negeri': 'KEDAH', 'parlimen': 'PENDANG', 'dun': 'SUNGAI TIANG'},
75: {'negeri': 'KEDAH', 'parlimen': 'JERAI', 'dun': 'SUNGAI LIMAU'},
76: {'negeri': 'KEDAH', 'parlimen': 'JERAI', 'dun': 'GUAR CHEMPEDAK'},
77: {'negeri': 'KEDAH', 'parlimen': 'JERAI', 'dun': 'GURUN'},
78: {'negeri': 'KEDAH', 'parlimen': 'SIK', 'dun': 'BELANTEK'},
79: {'negeri': 'KEDAH', 'parlimen': 'SIK', 'dun': 'JENERI'},
80: {'negeri': 'KEDAH', 'parlimen': 'MERBOK', 'dun': 'BUKIT SELAMBAU'},
81: {'negeri': 'KEDAH', 'parlimen': 'MERBOK', 'dun': 'TANJONG DAWAI'},
82: {
'negeri': 'KEDAH',
'parlimen': 'SUNGAI PETANI',
'dun': 'PANTAI MERDEKA',
},
83: {'negeri': 'KEDAH', 'parlimen': 'SUNGAI PETANI', 'dun': 'BAKAR ARANG'},
84: {'negeri': 'KEDAH', 'parlimen': 'SUNGAI PETANI', 'dun': 'SIDAM'},
85: {'negeri': 'KEDAH', 'parlimen': 'BALING', 'dun': 'BAYU'},
86: {'negeri': 'KEDAH', 'parlimen': 'BALING', 'dun': 'KUPANG'},
87: {'negeri': 'KEDAH', 'parlimen': 'BALING', 'dun': 'KUALA KETIL'},
88: {'negeri': 'KEDAH', 'parlimen': 'PADANG SERAI', 'dun': 'MERBAU PULAS'},
89: {'negeri': 'KEDAH', 'parlimen': 'PADANG SERAI', 'dun': 'LUNAS'},
90: {'negeri': 'KEDAH', 'parlimen': 'KULIM-BANDAR BAHARU', 'dun': 'KULIM'},
91: {
'negeri': 'KEDAH',
'parlimen': 'KULIM-BANDAR BAHARU',
'dun': 'BANDAR BAHARU',
},
92: {'negeri': 'KELANTAN', 'parlimen': 'TUMPAT', 'dun': 'PENGKALAN KUBOR'},
93: {'negeri': 'KELANTAN', 'parlimen': 'TUMPAT', 'dun': 'KELABORAN'},
94: {'negeri': 'KELANTAN', 'parlimen': 'TUMPAT', 'dun': 'PASIR PEKAN'},
95: {'negeri': 'KELANTAN', 'parlimen': 'TUMPAT', 'dun': 'WAKAF BHARU'},
96: {'negeri': 'KELANTAN', 'parlimen': 'PENGKALAN CHEPA', 'dun': 'KIJANG'},
97: {
'negeri': 'KELANTAN',
'parlimen': 'PENGKALAN CHEPA',
'dun': 'CHEMPAKA',
},
98: {'negeri': 'KELANTAN', 'parlimen': 'PENGKALAN CHEPA', 'dun': 'PANCHOR'},
99: {'negeri': 'KELANTAN', 'parlimen': 'KOTA BHARU', 'dun': 'TANJONG MAS'},
100: {'negeri': 'KELANTAN', 'parlimen': 'KOTA BHARU', 'dun': 'KOTA LAMA'},
101: {
'negeri': 'KELANTAN',
'parlimen': 'KOTA BHARU',
'dun': 'BUNUT PAYONG',
},
102: {'negeri': 'KELANTAN', 'parlimen': 'PASIR MAS', 'dun': 'TENDONG'},
103: {
'negeri': 'KELANTAN',
'parlimen': 'PASIR MAS',
'dun': 'PENGKALAN PASIR',
},
104: {'negeri': 'KELANTAN', 'parlimen': 'PASIR MAS', 'dun': 'CHETOK'},
105: {'negeri': 'KELANTAN', 'parlimen': 'RANTAU PANJANG', 'dun': 'MERANTI'},
106: {
'negeri': 'KELANTAN',
'parlimen': 'RANTAU PANJANG',
'dun': 'GUAL PERIOK',
},
107: {
'negeri': 'KELANTAN',
'parlimen': 'RANTAU PANJANG',
'dun': 'BUKIT TUKU',
},
108: {'negeri': 'KELANTAN', 'parlimen': 'KUBANG KERIAN', 'dun': 'SALOR'},
109: {
'negeri': 'KELANTAN',
'parlimen': 'KUBANG KERIAN',
'dun': 'PASIR TUMBOH',
},
110: {'negeri': 'KELANTAN', 'parlimen': 'KUBANG KERIAN', 'dun': 'DEMIT'},
111: {'negeri': 'KELANTAN', 'parlimen': 'BACHOK', 'dun': 'TAWANG'},
112: {'negeri': 'KELANTAN', 'parlimen': 'BACHOK', 'dun': 'PERUPOK'},
113: {'negeri': 'KELANTAN', 'parlimen': 'BACHOK', 'dun': 'JELAWAT'},
114: {'negeri': 'KELANTAN', 'parlimen': 'KETEREH', 'dun': 'MELOR'},
115: {'negeri': 'KELANTAN', 'parlimen': 'KETEREH', 'dun': 'KADOK'},
116: {'negeri': 'KELANTAN', 'parlimen': 'KETEREH', 'dun': 'KOK LANAS'},
117: {
'negeri': 'KELANTAN',
'parlimen': 'TANAH MERAH',
'dun': 'BUKIT PANAU',
},
118: {'negeri': 'KELANTAN', 'parlimen': 'TANAH MERAH', 'dun': 'GUAL IPOH'},
119: {'negeri': 'KELANTAN', 'parlimen': 'TANAH MERAH', 'dun': 'KEMAHANG'},
120: {'negeri': 'KELANTAN', 'parlimen': 'PASIR PUTEH', 'dun': 'SELISING'},
121: {'negeri': 'KELANTAN', 'parlimen': 'PASIR PUTEH', 'dun': 'LIMBONGAN'},
122: {'negeri': 'KELANTAN', 'parlimen': 'PASIR PUTEH', 'dun': 'SEMERAK'},
123: {'negeri': 'KELANTAN', 'parlimen': 'PASIR PUTEH', 'dun': 'GAAL'},
124: {'negeri': 'KELANTAN', 'parlimen': 'MACHANG', 'dun': 'PULAI CHONDONG'},
125: {'negeri': 'KELANTAN', 'parlimen': 'MACHANG', 'dun': 'TEMANGAN'},
126: {'negeri': 'KELANTAN', 'parlimen': 'MACHANG', 'dun': 'KEMUNING'},
127: {'negeri': 'KELANTAN', 'parlimen': 'JELI', 'dun': 'BUKIT BUNGA'},
128: {'negeri': 'KELANTAN', 'parlimen': 'JELI', 'dun': 'AIR LANAS'},
129: {'negeri': 'KELANTAN', 'parlimen': 'JELI', 'dun': 'KUALA BALAH'},
130: {'negeri': 'KELANTAN', 'parlimen': 'KUALA KRAI', 'dun': 'MENGKEBANG'},
131: {'negeri': 'KELANTAN', 'parlimen': 'KUALA KRAI', 'dun': 'GUCHIL'},
132: {'negeri': 'KELANTAN', 'parlimen': 'KUALA KRAI', 'dun': 'MANEK URAI'},
133: {'negeri': 'KELANTAN', 'parlimen': 'KUALA KRAI', 'dun': 'DABONG'},
134: {'negeri': 'KELANTAN', 'parlimen': 'GUA MUSANG', 'dun': 'NENGGIRI'},
135: {'negeri': 'KELANTAN', 'parlimen': 'GUA MUSANG', 'dun': 'PALOH'},
136: {'negeri': 'KELANTAN', 'parlimen': 'GUA MUSANG', 'dun': 'GALAS'},
137: {
'negeri': 'MELAKA',
'parlimen': 'MASJID TANAH',
'dun': 'KUALA LINGGI',
},
138: {
'negeri': 'MELAKA',
'parlimen': 'MASJID TANAH',
'dun': 'TANJUNG BIDARA',
},
139: {'negeri': 'MELAKA', 'parlimen': 'MASJID TANAH', 'dun': 'AYER LIMAU'},
140: {'negeri': 'MELAKA', 'parlimen': 'MASJID TANAH', 'dun': 'LENDU'},
141: {
'negeri': 'MELAKA',
'parlimen': 'MASJID TANAH',
'dun': 'TABOH NANING',
},
142: {'negeri': 'MELAKA', 'parlimen': 'ALOR GAJAH', 'dun': 'REMBIA'},
143: {'negeri': 'MELAKA', 'parlimen': 'ALOR GAJAH', 'dun': 'GADEK'},
144: {'negeri': 'MELAKA', 'parlimen': 'ALOR GAJAH', 'dun': 'MACHAP'},
145: {
'negeri': 'MELAKA',
'parlimen': 'ALOR GAJAH',
'dun': 'DURIAN TUNGGAL',
},
146: {'negeri': 'MELAKA', 'parlimen': 'ALOR GAJAH', 'dun': 'ASAHAN'},
147: {'negeri': 'MELAKA', 'parlimen': 'TANGGA BATU', 'dun': 'SUNGAI UDANG'},
148: {
'negeri': 'MELAKA',
'parlimen': 'TANGGA BATU',
'dun': 'PANTAI KUNDOR',
},
149: {'negeri': 'MELAKA', 'parlimen': 'TANGGA BATU', 'dun': 'PAYA RUMPUT'},
150: {'negeri': 'MELAKA', 'parlimen': 'TANGGA BATU', 'dun': 'KELEBANG'},
151: {'negeri': 'MELAKA', 'parlimen': 'BUKIT KATIL', 'dun': 'BACHANG'},
152: {'negeri': 'MELAKA', 'parlimen': 'BUKIT KATIL', 'dun': 'AYER KEROH'},
153: {'negeri': 'MELAKA', 'parlimen': 'BUKIT KATIL', 'dun': 'BUKIT BARU'},
154: {'negeri': 'MELAKA', 'parlimen': 'BUKIT KATIL', 'dun': 'AYER MOLEK'},
155: {'negeri': 'MELAKA', 'parlimen': 'KOTA MELAKA', 'dun': 'KESIDANG'},
156: {
'negeri': 'MELAKA',
'parlimen': 'KOTA MELAKA',
'dun': 'KOTA LAKSAMANA',
},
157: {'negeri': 'MELAKA', 'parlimen': 'KOTA MELAKA', 'dun': 'DUYONG'},
158: {'negeri': 'MELAKA', 'parlimen': 'KOTA MELAKA', 'dun': 'BANDAR HILIR'},
159: {'negeri': 'MELAKA', 'parlimen': 'KOTA MELAKA', 'dun': 'TELOK MAS'},
160: {'negeri': 'MELAKA', 'parlimen': 'JASIN', 'dun': 'BEMBAN'},
161: {'negeri': 'MELAKA', 'parlimen': 'JASIN', 'dun': 'RIM'},
162: {'negeri': 'MELAKA', 'parlimen': 'JASIN', 'dun': 'SERKAM'},
163: {'negeri': 'MELAKA', 'parlimen': 'JASIN', 'dun': 'MERLIMAU'},
164: {'negeri': 'MELAKA', 'parlimen': 'JASIN', 'dun': 'SUNGAI RAMBAI'},
165: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'JELEBU', 'dun': 'CHENNAH'},
166: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'JELEBU', 'dun': 'PERTANG'},
167: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'JELEBU',
'dun': 'SUNGAI LUI',
},
168: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'JELEBU', 'dun': 'KLAWANG'},
169: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'JEMPOL', 'dun': 'SERTING'},
170: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'JEMPOL', 'dun': 'PALONG'},
171: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'JEMPOL',
'dun': 'JERAM PADANG',
},
172: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'JEMPOL', 'dun': 'BAHAU'},
173: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'SEREMBAN',
'dun': 'LENGGENG',
},
174: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'SEREMBAN', 'dun': 'NILAI'},
175: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'SEREMBAN', 'dun': 'LOBAK'},
176: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'SEREMBAN',
'dun': 'TEMIANG',
},
177: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'SEREMBAN',
'dun': 'SIKAMAT',
},
178: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'SEREMBAN',
'dun': 'AMPANGAN',
},
179: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'KUALA PILAH',
'dun': 'JUASSEH',
},
180: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'KUALA PILAH',
'dun': 'SERI MENANTI',
},
181: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'KUALA PILAH',
'dun': 'SENALING',
},
182: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'KUALA PILAH',
'dun': 'PILAH',
},
183: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'KUALA PILAH',
'dun': 'JOHOL',
},
184: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'RASAH', 'dun': 'LABU'},
185: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'RASAH',
'dun': 'BUKIT KEPAYANG',
},
186: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'RASAH', 'dun': 'RAHANG'},
187: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'RASAH', 'dun': 'MAMBAU'},
188: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'RASAH', 'dun': 'SENAWANG'},
189: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'REMBAU', 'dun': 'PAROI'},
190: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'REMBAU', 'dun': 'CHEMBONG'},
191: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'REMBAU', 'dun': 'RANTAU'},
192: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'REMBAU', 'dun': 'KOTA'},
193: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'TELOK KEMANG',
'dun': 'CHUAH',
},
194: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'TELOK KEMANG',
'dun': 'LUKUT',
},
195: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'TELOK KEMANG',
'dun': 'BAGAN PINANG',
},
196: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'TELOK KEMANG',
'dun': 'LINGGI',
},
197: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'TELOK KEMANG',
'dun': 'PORT DICKSON',
},
198: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'TAMPIN', 'dun': 'GEMAS'},
199: {
'negeri': 'NEGERI SEMBILAN',
'parlimen': 'TAMPIN',
'dun': 'GEMENCHEH',
},
200: {'negeri': 'NEGERI SEMBILAN', 'parlimen': 'TAMPIN', 'dun': 'REPAH'},
201: {
'negeri': 'PAHANG',
'parlimen': 'CAMERON HIGHLANDS',
'dun': 'TANAH RATA',
},
202: {'negeri': 'PAHANG', 'parlimen': 'CAMERON HIGHLANDS', 'dun': 'JELAI'},
203: {'negeri': 'PAHANG', 'parlimen': 'LIPIS', 'dun': 'PADANG TENGKU'},
204: {'negeri': 'PAHANG', 'parlimen': 'LIPIS', 'dun': 'CHEKA'},
205: {'negeri': 'PAHANG', 'parlimen': 'LIPIS', 'dun': 'BENTA'},
206: {'negeri': 'PAHANG', 'parlimen': 'RAUB', 'dun': 'BATU TALAM'},
207: {'negeri': 'PAHANG', 'parlimen': 'RAUB', 'dun': 'TRAS'},
208: {'negeri': 'PAHANG', 'parlimen': 'RAUB', 'dun': 'DONG'},
209: {'negeri': 'PAHANG', 'parlimen': 'JERANTUT', 'dun': 'TAHAN'},
210: {'negeri': 'PAHANG', 'parlimen': 'JERANTUT', 'dun': 'DAMAK'},
211: {'negeri': 'PAHANG', 'parlimen': 'JERANTUT', 'dun': 'PULAU TAWAR'},
212: {'negeri': 'PAHANG', 'parlimen': 'INDERA MAHKOTA', 'dun': 'BESERAH'},
213: {'negeri': 'PAHANG', 'parlimen': 'INDERA MAHKOTA', 'dun': 'SEMAMBU'},
214: {'negeri': 'PAHANG', 'parlimen': 'KUANTAN', 'dun': 'TERUNTUM'},
215: {'negeri': 'PAHANG', 'parlimen': 'KUANTAN', 'dun': 'TANJUNG LUMPUR'},
216: {'negeri': 'PAHANG', 'parlimen': 'KUANTAN', 'dun': 'INDERAPURA'},
217: {
'negeri': 'PAHANG',
'parlimen': 'PAYA BESAR',
'dun': 'SUNGAI LEMBING',
},
218: {'negeri': 'PAHANG', 'parlimen': 'PAYA BESAR', 'dun': 'LEPAR'},
219: {'negeri': 'PAHANG', 'parlimen': 'PAYA BESAR', 'dun': 'PANCHING'},
220: {'negeri': 'PAHANG', 'parlimen': 'PEKAN', 'dun': 'PULAU MANIS'},
221: {'negeri': 'PAHANG', 'parlimen': 'PEKAN', 'dun': 'PERAMU JAYA'},
222: {'negeri': 'PAHANG', 'parlimen': 'PEKAN', 'dun': 'BEBAR'},
223: {'negeri': 'PAHANG', 'parlimen': 'PEKAN', 'dun': 'CHINI'},
224: {'negeri': 'PAHANG', 'parlimen': 'MARAN', 'dun': 'LUIT'},
225: {'negeri': 'PAHANG', 'parlimen': 'MARAN', 'dun': 'KUALA SENTUL'},
226: {'negeri': 'PAHANG', 'parlimen': 'MARAN', 'dun': 'CHENOR'},
227: {'negeri': 'PAHANG', 'parlimen': 'KUALA KRAU', 'dun': 'JENDERAK'},
228: {'negeri': 'PAHANG', 'parlimen': 'KUALA KRAU', 'dun': 'KERDAU'},
229: {'negeri': 'PAHANG', 'parlimen': 'KUALA KRAU', 'dun': 'JENGKA'},
230: {'negeri': 'PAHANG', 'parlimen': 'TEMERLOH', 'dun': 'MENTAKAB'},
231: {'negeri': 'PAHANG', 'parlimen': 'TEMERLOH', 'dun': 'LANCHANG'},
232: {'negeri': 'PAHANG', 'parlimen': 'TEMERLOH', 'dun': 'KUALA SEMANTAN'},
233: {'negeri': 'PAHANG', 'parlimen': 'BENTONG', 'dun': 'BILUT'},
234: {'negeri': 'PAHANG', 'parlimen': 'BENTONG', 'dun': 'KETARI'},
235: {'negeri': 'PAHANG', 'parlimen': 'BENTONG', 'dun': 'SABAI'},
236: {'negeri': 'PAHANG', 'parlimen': 'BENTONG', 'dun': 'PELANGAI'},
237: {'negeri': 'PAHANG', 'parlimen': 'BERA', 'dun': 'GUAI'},
238: {'negeri': 'PAHANG', 'parlimen': 'BERA', 'dun': 'TRIANG'},
239: {'negeri': 'PAHANG', 'parlimen': 'BERA', 'dun': 'KEMAYAN'},
240: {'negeri': 'PAHANG', 'parlimen': 'ROMPIN', 'dun': 'BUKIT IBAM'},
241: {'negeri': 'PAHANG', 'parlimen': 'ROMPIN', 'dun': 'MUADZAM SHAH'},
242: {'negeri': 'PAHANG', 'parlimen': 'ROMPIN', 'dun': 'TIOMAN'},
243: {'negeri': 'PERAK', 'parlimen': 'GERIK', 'dun': 'PENGKALAN HULU'},
244: {'negeri': 'PERAK', 'parlimen': 'GERIK', 'dun': 'TEMENGOR'},
245: {'negeri': 'PERAK', 'parlimen': 'LENGGONG', 'dun': 'KENERING'},
246: {'negeri': 'PERAK', 'parlimen': 'LENGGONG', 'dun': 'KOTA TAMPAN'},
247: {'negeri': 'PERAK', 'parlimen': 'LARUT', 'dun': 'SELAMA'},
248: {'negeri': 'PERAK', 'parlimen': 'LARUT', 'dun': 'KUBU GAJAH'},
249: {'negeri': 'PERAK', 'parlimen': 'LARUT', 'dun': 'BATU KURAU'},
250: {'negeri': 'PERAK', 'parlimen': 'PARIT BUNTAR', 'dun': 'TITI SERONG'},
251: {'negeri': 'PERAK', 'parlimen': 'PARIT BUNTAR', 'dun': 'KUALA KURAU'},
252: {'negeri': 'PERAK', 'parlimen': 'BAGAN SERAI', 'dun': 'ALOR PONGSU'},
253: {
'negeri': 'PERAK',
'parlimen': 'BAGAN SERAI',
'dun': 'GUNONG SEMANGGOL',
},
254: {'negeri': 'PERAK', 'parlimen': 'BAGAN SERAI', 'dun': 'SELINSING'},
255: {
'negeri': 'PERAK',
'parlimen': 'BUKIT GANTANG',
'dun': 'KUALA SAPETANG',
},
256: {
'negeri': 'PERAK',
'parlimen': 'BUKIT GANTANG',
'dun': 'CHANGKAT JERING',
},
257: {'negeri': 'PERAK', 'parlimen': 'BUKIT GANTANG', 'dun': 'TRONG'},
258: {'negeri': 'PERAK', 'parlimen': 'TAIPING', 'dun': 'KAMUNTING'},
259: {'negeri': 'PERAK', 'parlimen': 'TAIPING', 'dun': 'POKOK ASSAM'},
260: {'negeri': 'PERAK', 'parlimen': 'TAIPING', 'dun': 'AULONG'},
261: {'negeri': 'PERAK', 'parlimen': 'PADANG RENGAS', 'dun': 'CHENDEROH'},
262: {
'negeri': 'PERAK',
'parlimen': 'PADANG RENGAS',
'dun': 'LUBOK MERBAU',
},
263: {'negeri': 'PERAK', 'parlimen': 'SUNGAI SIPUT', 'dun': 'LINTANG'},
264: {'negeri': 'PERAK', 'parlimen': 'SUNGAI SIPUT', 'dun': 'JALONG'},
265: {'negeri': 'PERAK', 'parlimen': 'TAMBUN', 'dun': 'MANJOI'},
266: {'negeri': 'PERAK', 'parlimen': 'TAMBUN', 'dun': 'HULU KINTA'},
267: {'negeri': 'PERAK', 'parlimen': 'IPOH TIMOR', 'dun': 'CANNING'},
268: {'negeri': 'PERAK', 'parlimen': 'IPOH TIMOR', 'dun': 'TEBING TINGGI'},
269: {'negeri': 'PERAK', 'parlimen': 'IPOH TIMOR', 'dun': 'PASIR PINJI'},
270: {'negeri': 'PERAK', 'parlimen': 'IPOH BARAT', 'dun': 'BERCHAM'},
271: {'negeri': 'PERAK', 'parlimen': 'IPOH BARAT', 'dun': 'KEPAYANG'},
272: {'negeri': 'PERAK', 'parlimen': 'IPOH BARAT', 'dun': 'BUNTONG'},
273: {'negeri': 'PERAK', 'parlimen': 'BATU GAJAH', 'dun': 'JELAPANG'},
274: {'negeri': 'PERAK', 'parlimen': 'BATU GAJAH', 'dun': 'MENGLEMBU'},
275: {'negeri': 'PERAK', 'parlimen': 'BATU GAJAH', 'dun': 'TRONOH'},
276: {
'negeri': 'PERAK',
'parlimen': 'KUALA KANGSAR',
'dun': 'BUKIT CHANDAN',
},
277: {'negeri': 'PERAK', 'parlimen': 'KUALA KANGSAR', 'dun': 'MANONG'},
278: {'negeri': 'PERAK', 'parlimen': 'BERUAS', 'dun': 'PENGKALAN BAHARU'},
279: {'negeri': 'PERAK', 'parlimen': 'BERUAS', 'dun': 'PANTAI REMIS'},
280: {'negeri': 'PERAK', 'parlimen': 'PARIT', 'dun': 'BELANJA'},
281: {'negeri': 'PERAK', 'parlimen': 'PARIT', 'dun': 'BOTA'},
282: {'negeri': 'PERAK', 'parlimen': 'KAMPAR', 'dun': 'MALIM NAWAR'},
283: {'negeri': 'PERAK', 'parlimen': 'KAMPAR', 'dun': 'KERANJI'},
284: {'negeri': 'PERAK', 'parlimen': 'KAMPAR', 'dun': 'TUALANG SEKAH'},
285: {'negeri': 'PERAK', 'parlimen': 'GOPENG', 'dun': 'SUNGAI RAPAT'},
286: {'negeri': 'PERAK', 'parlimen': 'GOPENG', 'dun': 'SIMPANG PULAI'},
287: {'negeri': 'PERAK', 'parlimen': 'GOPENG', 'dun': 'TEJA'},
288: {'negeri': 'PERAK', 'parlimen': 'TAPAH', 'dun': 'CHENDERIANG'},
289: {'negeri': 'PERAK', 'parlimen': 'TAPAH', 'dun': 'AYER KUNING'},
290: {'negeri': 'PERAK', 'parlimen': 'PASIR SALAK', 'dun': 'SUNGAI MANIK'},
291: {'negeri': 'PERAK', 'parlimen': 'PASIR SALAK', 'dun': 'KAMPONG GAJAH'},
292: {'negeri': 'PERAK', 'parlimen': 'LUMUT', 'dun': 'SITIAWAN'},
293: {'negeri': 'PERAK', 'parlimen': 'LUMUT', 'dun': 'PASIR PANJANG'},
294: {'negeri': 'PERAK', 'parlimen': 'LUMUT', 'dun': 'PANGKOR'},
295: {'negeri': 'PERAK', 'parlimen': 'BAGAN DATOK', 'dun': 'RUNGKUP'},
296: {
'negeri': 'PERAK',
'parlimen': 'BAGAN DATOK',
'dun': 'HUTAN MELINTANG',
},
297: {'negeri': 'PERAK', 'parlimen': 'TELOK INTAN', 'dun': 'PASIR BEDAMAR'},
298: {'negeri': 'PERAK', 'parlimen': 'TELOK INTAN', 'dun': 'CHANGKAT JONG'},
299: {'negeri': 'PERAK', 'parlimen': 'TANJONG MALIM', 'dun': 'SUNGKAI'},
300: {'negeri': 'PERAK', 'parlimen': 'TANJONG MALIM', 'dun': 'SLIM'},
301: {'negeri': 'PERAK', 'parlimen': 'TANJONG MALIM', 'dun': 'BEHRANG'},
302: {'negeri': 'PERLIS', 'parlimen': 'PADANG BESAR', 'dun': 'TITI TINGGI'},
303: {'negeri': 'PERLIS', 'parlimen': 'PADANG BESAR', 'dun': 'BESERI'},
304: {'negeri': 'PERLIS', 'parlimen': 'PADANG BESAR', 'dun': 'CHUPING'},
305: {'negeri': 'PERLIS', 'parlimen': 'PADANG BESAR', 'dun': 'MATA AYER'},
306: {'negeri': 'PERLIS', 'parlimen': 'PADANG BESAR', 'dun': 'SANTAN'},
307: {'negeri': 'PERLIS', 'parlimen': 'KANGAR', 'dun': 'BINTONG'},
308: {'negeri': 'PERLIS', 'parlimen': 'KANGAR', 'dun': 'SENA'},
309: {'negeri': 'PERLIS', 'parlimen': 'KANGAR', 'dun': 'INDERA KAYANGAN'},
310: {'negeri': 'PERLIS', 'parlimen': 'KANGAR', 'dun': 'KUALA PERLIS'},
311: {'negeri': 'PERLIS', 'parlimen': 'KANGAR', 'dun': 'KAYANG'},
312: {'negeri': 'PERLIS', 'parlimen': 'ARAU', 'dun': 'PAUH'},
313: {'negeri': 'PERLIS', 'parlimen': 'ARAU', 'dun': 'TAMBUN TULANG'},
314: {'negeri': 'PERLIS', 'parlimen': 'ARAU', 'dun': 'GUAR SANJI'},
315: {'negeri': 'PERLIS', 'parlimen': 'ARAU', 'dun': 'SIMPANG EMPAT'},
316: {'negeri': 'PERLIS', 'parlimen': 'ARAU', 'dun': 'SANGLANG'},
317: {
'negeri': 'PULAU PINANG',
'parlimen': 'KEPALA BATAS',
'dun': 'PENAGA',
},
318: {
'negeri': 'PULAU PINANG',
'parlimen': 'KEPALA BATAS',
'dun': 'BERTAM',
},
319: {
'negeri': 'PULAU PINANG',
'parlimen': 'KEPALA BATAS',
'dun': 'PINANG TUNGGAL',
},
320: {
'negeri': 'PULAU PINANG',
'parlimen': 'TASEK GELUGOR',
'dun': 'PERMATANG BERANGAN',
},
321: {
'negeri': 'PULAU PINANG',
'parlimen': 'TASEK GELUGOR',
'dun': 'SUNGAI DUA',
},
322: {
'negeri': 'PULAU PINANG',
'parlimen': 'TASEK GELUGOR',
'dun': 'TELOK AYER TAWAR',
},
323: {'negeri': 'PULAU PINANG', 'parlimen': 'BAGAN', 'dun': 'SUNGAI PUYU'},
324: {'negeri': 'PULAU PINANG', 'parlimen': 'BAGAN', 'dun': 'BAGAN JERMAL'},
325: {'negeri': 'PULAU PINANG', 'parlimen': 'BAGAN', 'dun': 'BAGAN DALAM'},
326: {
'negeri': 'PULAU PINANG',
'parlimen': 'PERMATANG PAUH',
'dun': 'SEBERANG JAYA',
},
327: {
'negeri': 'PULAU PINANG',
'parlimen': 'PERMATANG PAUH',
'dun': 'PERMATANG PASIR',
},
328: {
'negeri': 'PULAU PINANG',
'parlimen': 'PERMATANG PAUH',
'dun': 'PENANTI',
},
329: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT MERTAJAM',
'dun': 'BERAPIT',
},
330: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT MERTAJAM',
'dun': 'MACHANG BUBUK',
},
331: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT MERTAJAM',
'dun': 'PADANG LALANG',
},
332: {'negeri': 'PULAU PINANG', 'parlimen': 'BATU KAWAN', 'dun': 'PERAI'},
333: {
'negeri': 'PULAU PINANG',
'parlimen': 'BATU KAWAN',
'dun': 'BUKIT TENGAH',
},
334: {
'negeri': 'PULAU PINANG',
'parlimen': 'BATU KAWAN',
'dun': 'BUKIT TAMBUN',
},
335: {'negeri': 'PULAU PINANG', 'parlimen': 'NIBONG TEBAL', 'dun': 'JAWI'},
336: {
'negeri': 'PULAU PINANG',
'parlimen': 'NIBONG TEBAL',
'dun': 'SUNGAI BAKAP',
},
337: {
'negeri': 'PULAU PINANG',
'parlimen': 'NIBONG TEBAL',
'dun': 'SUNGAI ACHEH',
},
338: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT BENDERA',
'dun': 'TANJONG BUNGA',
},
339: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT BENDERA',
'dun': 'AIR PUTIH',
},
340: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT BENDERA',
'dun': 'KEBUN BUNGA',
},
341: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT BENDERA',
'dun': 'PULAU TIKUS',
},
342: {
'negeri': 'PULAU PINANG',
'parlimen': 'TANJONG',
'dun': 'PADANG KOTA',
},
343: {
'negeri': 'PULAU PINANG',
'parlimen': 'TANJONG',
'dun': 'PENGKALAN KOTA',
},
344: {'negeri': 'PULAU PINANG', 'parlimen': 'TANJONG', 'dun': 'KOMTAR'},
345: {
'negeri': 'PULAU PINANG',
'parlimen': 'JELUTONG',
'dun': 'DATOK KERAMAT',
},
346: {
'negeri': 'PULAU PINANG',
'parlimen': 'JELUTONG',
'dun': 'SUNGAI PINANG',
},
347: {
'negeri': 'PULAU PINANG',
'parlimen': 'JELUTONG',
'dun': 'BATU LANCANG',
},
348: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT GELUGOR',
'dun': 'SERI DELIMA',
},
349: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT GELUGOR',
'dun': 'AIR ITAM',
},
350: {
'negeri': 'PULAU PINANG',
'parlimen': 'BUKIT GELUGOR',
'dun': 'PAYA TERUBONG',
},
351: {
'negeri': 'PULAU PINANG',
'parlimen': 'BAYAN BARU',
'dun': 'BATU UBAN',
},
352: {
'negeri': 'PULAU PINANG',
'parlimen': 'BAYAN BARU',
'dun': 'PANTAI JEREJAK',
},
353: {
'negeri': 'PULAU PINANG',
'parlimen': 'BAYAN BARU',
'dun': 'BATU MAUNG',
},
354: {
'negeri': 'PULAU PINANG',
'parlimen': 'BALIK PULAU',
'dun': 'BAYAN LEPAS',
},
355: {
'negeri': 'PULAU PINANG',
'parlimen': 'BALIK PULAU',
'dun': 'PULAU BETONG',
},
356: {
'negeri': 'PULAU PINANG',
'parlimen': 'BALIK PULAU',
'dun': 'TELOK BAHANG',
},
357: {'negeri': 'SABAH', 'parlimen': 'KUDAT', 'dun': 'BANGGI'},
358: {'negeri': 'SABAH', 'parlimen': 'KUDAT', 'dun': 'TANJONG KAPOR'},
359: {'negeri': 'SABAH', 'parlimen': 'KUDAT', 'dun': 'PITAS'},
360: {'negeri': 'SABAH', 'parlimen': 'KOTA MARUDU', 'dun': 'MATUNGGONG'},
361: {'negeri': 'SABAH', 'parlimen': 'KOTA MARUDU', 'dun': 'TANDEK'},
362: {'negeri': 'SABAH', 'parlimen': 'KOTA BELUD', 'dun': 'TEMPASUK'},
363: {'negeri': 'SABAH', 'parlimen': 'KOTA BELUD', 'dun': 'KADAMAIAN'},
364: {'negeri': 'SABAH', 'parlimen': 'KOTA BELUD', 'dun': 'USUKAN'},
365: {'negeri': 'SABAH', 'parlimen': 'TUARAN', 'dun': 'TAMPARULI'},
366: {'negeri': 'SABAH', 'parlimen': 'TUARAN', 'dun': 'SULAMAN'},
367: {'negeri': 'SABAH', 'parlimen': 'TUARAN', 'dun': 'KIULU'},
368: {'negeri': 'SABAH', 'parlimen': 'SEPANGGAR', 'dun': 'KARAMBUNAI'},
369: {'negeri': 'SABAH', 'parlimen': 'SEPANGGAR', 'dun': 'INANAM'},
370: {'negeri': 'SABAH', 'parlimen': 'KOTA KINABALU', 'dun': 'LIKAS'},
371: {'negeri': 'SABAH', 'parlimen': 'KOTA KINABALU', 'dun': 'API-API'},
372: {'negeri': 'SABAH', 'parlimen': 'KOTA KINABALU', 'dun': 'LUYANG'},
373: {'negeri': 'SABAH', 'parlimen': 'PUTATAN', 'dun': 'TANJONG ARU'},
374: {'negeri': 'SABAH', 'parlimen': 'PUTATAN', 'dun': 'PETAGAS'},
375: {'negeri': 'SABAH', 'parlimen': 'PENAMPANG', 'dun': 'KAPAYAN'},
376: {'negeri': 'SABAH', 'parlimen': 'PENAMPANG', 'dun': 'MOYOG'},
377: {'negeri': 'SABAH', 'parlimen': 'PAPAR', 'dun': 'KAWANG'},
378: {'negeri': 'SABAH', 'parlimen': 'PAPAR', 'dun': 'PANTAI MANIS'},
379: {'negeri': 'SABAH', 'parlimen': 'KIMANIS', 'dun': 'BONGAWAN'},
380: {'negeri': 'SABAH', 'parlimen': 'KIMANIS', 'dun': 'MEMBAKUT'},
381: {'negeri': 'SABAH', 'parlimen': 'BEAUFORT', 'dun': 'KLIAS'},
382: {'negeri': 'SABAH', 'parlimen': 'BEAUFORT', 'dun': 'KUALA PENYU'},
383: {'negeri': 'SABAH', 'parlimen': 'SIPITANG', 'dun': 'LUMADAN'},
384: {'negeri': 'SABAH', 'parlimen': 'SIPITANG', 'dun': 'SINDUMIN'},
385: {'negeri': 'SABAH', 'parlimen': 'RANAU', 'dun': 'KUNDASANG'},
386: {'negeri': 'SABAH', 'parlimen': 'RANAU', 'dun': 'KARANAAN'},
387: {'negeri': 'SABAH', 'parlimen': 'RANAU', 'dun': 'PAGINATAN'},
388: {'negeri': 'SABAH', 'parlimen': 'KENINGAU', 'dun': 'TAMBUNAN'},
389: {'negeri': 'SABAH', 'parlimen': 'KENINGAU', 'dun': 'BINGKOR'},
390: {'negeri': 'SABAH', 'parlimen': 'KENINGAU', 'dun': 'LIAWAN'},
391: {'negeri': 'SABAH', 'parlimen': 'TENOM', 'dun': 'MELALAP'},
392: {'negeri': 'SABAH', 'parlimen': 'TENOM', 'dun': 'KEMABONG'},
393: {'negeri': 'SABAH', 'parlimen': 'PENSIANGAN', 'dun': 'SOOK'},
394: {'negeri': 'SABAH', 'parlimen': 'PENSIANGAN', 'dun': 'NABAWAN'},
395: {'negeri': 'SABAH', 'parlimen': 'BELURAN', 'dun': 'SUGUT'},
396: {'negeri': 'SABAH', 'parlimen': 'BELURAN', 'dun': 'LABUK'},
397: {'negeri': 'SABAH', 'parlimen': 'LIBARAN', 'dun': 'GUM-GUM'},
398: {'negeri': 'SABAH', 'parlimen': 'LIBARAN', 'dun': 'SUNGAI SIBUGA'},
399: {'negeri': 'SABAH', 'parlimen': 'BATU SAPI', 'dun': 'SEKONG'},
400: {'negeri': 'SABAH', 'parlimen': 'BATU SAPI', 'dun': 'KARAMUNTING'},
401: {'negeri': 'SABAH', 'parlimen': 'SANDAKAN', 'dun': 'ELOPURA'},
402: {'negeri': 'SABAH', 'parlimen': 'SANDAKAN', 'dun': 'TANJONG PAPAT'},
403: {'negeri': 'SABAH', 'parlimen': 'KINABATANGAN', 'dun': 'KUAMUT'},
404: {'negeri': 'SABAH', 'parlimen': 'KINABATANGAN', 'dun': 'SUKAU'},
405: {'negeri': 'SABAH', 'parlimen': 'SILAM', 'dun': 'TUNGKU'},
406: {'negeri': 'SABAH', 'parlimen': 'SILAM', 'dun': 'LAHAD DATU'},
407: {'negeri': 'SABAH', 'parlimen': 'SILAM', 'dun': 'KUNAK'},
408: {'negeri': 'SABAH', 'parlimen': 'SEMPORNA', 'dun': 'SULABAYAN'},
409: {'negeri': 'SABAH', 'parlimen': 'SEMPORNA', 'dun': 'SENALLANG'},
410: {'negeri': 'SABAH', 'parlimen': 'SEMPORNA', 'dun': 'BUGAYA'},
411: {'negeri': 'SABAH', 'parlimen': 'TAWAU', 'dun': 'BALUNG'},
412: {'negeri': 'SABAH', 'parlimen': 'TAWAU', 'dun': 'APAS'},
413: {'negeri': 'SABAH', 'parlimen': 'TAWAU', 'dun': 'SRI TANJONG'},
414: {'negeri': 'SABAH', 'parlimen': 'KALABAKAN', 'dun': 'MEROTAI'},
415: {'negeri': 'SABAH', 'parlimen': 'KALABAKAN', 'dun': 'TANJONG BATU'},
416: {'negeri': 'SABAH', 'parlimen': 'KALABAKAN', 'dun': 'SEBATIK'},
417: {'negeri': 'SARAWAK', 'parlimen': 'MAS GADING', 'dun': 'OPAR'},
418: {'negeri': 'SARAWAK', 'parlimen': 'MAS GADING', 'dun': 'TASIK BIRU'},
419: {'negeri': 'SARAWAK', 'parlimen': 'SANTUBONG', 'dun': 'TANJONG DATU'},
420: {'negeri': 'SARAWAK', 'parlimen': 'SANTUBONG', 'dun': 'PANTAI DAMAI'},
421: {'negeri': 'SARAWAK', 'parlimen': 'SANTUBONG', 'dun': 'DEMAK LAUT'},
422: {'negeri': 'SARAWAK', 'parlimen': 'PETRA JAYA', 'dun': 'TUPONG'},
423: {'negeri': 'SARAWAK', 'parlimen': 'PETRA JAYA', 'dun': 'SAMARIANG'},
424: {'negeri': 'SARAWAK', 'parlimen': 'PETRA JAYA', 'dun': 'SATOK'},
425: {'negeri': 'SARAWAK', 'parlimen': 'BANDAR KUCHING', 'dun': 'PADUNGAN'},
426: {'negeri': 'SARAWAK', 'parlimen': 'BANDAR KUCHING', 'dun': 'PENDING'},
427: {
'negeri': 'SARAWAK',
'parlimen': 'BANDAR KUCHING',
'dun': 'BATU LINTANG',
},
428: {'negeri': 'SARAWAK', 'parlimen': 'STAMPIN', 'dun': 'KOTA SENTOSA'},
429: {'negeri': 'SARAWAK', 'parlimen': 'STAMPIN', 'dun': 'BATU KITANG'},
430: {'negeri': 'SARAWAK', 'parlimen': 'STAMPIN', 'dun': 'BATU KAWAH'},
431: {'negeri': 'SARAWAK', 'parlimen': 'KOTA SAMARAHAN', 'dun': 'ASAJAYA'},
432: {
'negeri': 'SARAWAK',
'parlimen': 'KOTA SAMARAHAN',
'dun': 'MUARA TUANG',
},
433: {'negeri': 'SARAWAK', 'parlimen': 'KOTA SAMARAHAN', 'dun': 'STAKAN'},
434: {'negeri': 'SARAWAK', 'parlimen': 'MAMBONG', 'dun': 'SEREMBU'},
435: {'negeri': 'SARAWAK', 'parlimen': 'MAMBONG', 'dun': 'MAMBONG'},
436: {'negeri': 'SARAWAK', 'parlimen': 'MAMBONG', 'dun': 'TARAT'},
437: {'negeri': 'SARAWAK', 'parlimen': 'SERIAN', 'dun': 'TEBEDU'},
438: {'negeri': 'SARAWAK', 'parlimen': 'SERIAN', 'dun': 'KEDUP'},
439: {'negeri': 'SARAWAK', 'parlimen': 'SERIAN', 'dun': 'BUKIT SEMUJA'},
440: {
'negeri': 'SARAWAK',
'parlimen': 'BATANG SADONG',
'dun': 'SADONG JAYA',
},
441: {'negeri': 'SARAWAK', 'parlimen': 'BATANG SADONG', 'dun': 'SIMUNJAN'},
442: {'negeri': 'SARAWAK', 'parlimen': 'BATANG SADONG', 'dun': 'GEDONG'},
443: {'negeri': 'SARAWAK', 'parlimen': 'BATANG LUPAR', 'dun': 'SEBUYAU'},
444: {'negeri': 'SARAWAK', 'parlimen': 'BATANG LUPAR', 'dun': 'LINGGA'},
445: {
'negeri': 'SARAWAK',
'parlimen': 'BATANG LUPAR',
'dun': 'BETING MARO',
},
446: {'negeri': 'SARAWAK', 'parlimen': 'SRI AMAN', 'dun': 'BALAI RINGIN'},
447: {'negeri': 'SARAWAK', 'parlimen': 'SRI AMAN', 'dun': 'BUKIT BEGUNAN'},
448: {'negeri': 'SARAWAK', 'parlimen': 'SRI AMAN', 'dun': 'SIMANGGANG'},
449: {'negeri': 'SARAWAK', 'parlimen': 'LUBOK ANTU', 'dun': 'ENGKILILI'},
450: {'negeri': 'SARAWAK', 'parlimen': 'LUBOK ANTU', 'dun': 'BATANG AI'},
451: {'negeri': 'SARAWAK', 'parlimen': 'BETONG', 'dun': 'SARIBAS'},
452: {'negeri': 'SARAWAK', 'parlimen': 'BETONG', 'dun': 'LAYAR'},
453: {'negeri': 'SARAWAK', 'parlimen': 'BETONG', 'dun': 'BUKIT SABAN'},
454: {'negeri': 'SARAWAK', 'parlimen': 'SARATOK', 'dun': 'KALAKA'},
455: {'negeri': 'SARAWAK', 'parlimen': 'SARATOK', 'dun': 'KRIAN'},
456: {'negeri': 'SARAWAK', 'parlimen': 'SARATOK', 'dun': 'KABONG'},
457: {
'negeri': 'SARAWAK',
'parlimen': 'TANJONG MANIS',
'dun': 'KUALA RAJANG',
},
458: {'negeri': 'SARAWAK', 'parlimen': 'TANJONG MANIS', 'dun': 'SEMOP'},
459: {'negeri': 'SARAWAK', 'parlimen': 'IGAN', 'dun': 'DARO'},
460: {'negeri': 'SARAWAK', 'parlimen': 'IGAN', 'dun': 'JEMORENG'},
461: {'negeri': 'SARAWAK', 'parlimen': 'SARIKEI', 'dun': 'REPOK'},
462: {'negeri': 'SARAWAK', 'parlimen': 'SARIKEI', 'dun': 'MERADONG'},
463: {'negeri': 'SARAWAK', 'parlimen': 'JULAU', 'dun': 'PAKAN'},
464: {'negeri': 'SARAWAK', 'parlimen': 'JULAU', 'dun': 'MELUAN'},
465: {'negeri': 'SARAWAK', 'parlimen': 'KANOWIT', 'dun': 'NGEMAH'},
466: {'negeri': 'SARAWAK', 'parlimen': 'KANOWIT', 'dun': 'MACHAN'},
467: {'negeri': 'SARAWAK', 'parlimen': 'LANANG', 'dun': 'BUKIT ASSEK'},
468: {'negeri': 'SARAWAK', 'parlimen': 'LANANG', 'dun': 'DUDONG'},
469: {'negeri': 'SARAWAK', 'parlimen': 'SIBU', 'dun': 'BAWANG ASSAN'},
470: {'negeri': 'SARAWAK', 'parlimen': 'SIBU', 'dun': 'PELAWAN'},
471: {'negeri': 'SARAWAK', 'parlimen': 'SIBU', 'dun': 'NANGKA'},
472: {'negeri': 'SARAWAK', 'parlimen': 'MUKAH', 'dun': 'DALAT'},
473: {'negeri': 'SARAWAK', 'parlimen': 'MUKAH', 'dun': 'TELLIAN'},
474: {'negeri': 'SARAWAK', 'parlimen': 'MUKAH', 'dun': 'BALINGIAN'},
475: {'negeri': 'SARAWAK', 'parlimen': 'SELANGAU', 'dun': 'TAMIN'},
476: {'negeri': 'SARAWAK', 'parlimen': 'SELANGAU', 'dun': 'KAKUS'},
477: {'negeri': 'SARAWAK', 'parlimen': 'KAPIT', 'dun': 'PELAGUS'},
478: {'negeri': 'SARAWAK', 'parlimen': 'KAPIT', 'dun': 'KATIBAS'},
479: {'negeri': 'SARAWAK', 'parlimen': 'KAPIT', 'dun': 'BUKIT GORAM'},
480: {'negeri': 'SARAWAK', 'parlimen': 'HULU RAJANG', 'dun': 'BALEH'},
481: {'negeri': 'SARAWAK', 'parlimen': 'HULU RAJANG', 'dun': 'BELAGA'},
482: {'negeri': 'SARAWAK', 'parlimen': 'HULU RAJANG', 'dun': 'MURUM'},
483: {'negeri': 'SARAWAK', 'parlimen': 'BINTULU', 'dun': 'JEPAK'},
484: {'negeri': 'SARAWAK', 'parlimen': 'BINTULU', 'dun': 'TANJONG BATU'},
485: {'negeri': 'SARAWAK', 'parlimen': 'BINTULU', 'dun': 'KEMENA'},
486: {'negeri': 'SARAWAK', 'parlimen': 'BINTULU', 'dun': 'SAMALAJU'},
487: {'negeri': 'SARAWAK', 'parlimen': 'SIBUTI', 'dun': 'BEKENU'},
488: {'negeri': 'SARAWAK', 'parlimen': 'SIBUTI', 'dun': 'LAMBIR'},
489: {'negeri': 'SARAWAK', 'parlimen': 'MIRI', 'dun': 'PIASAU'},
490: {'negeri': 'SARAWAK', 'parlimen': 'MIRI', 'dun': 'PUJUT'},
491: {'negeri': 'SARAWAK', 'parlimen': 'MIRI', 'dun': 'SENADIN'},
492: {'negeri': 'SARAWAK', 'parlimen': 'BARAM', 'dun': 'MARUDI'},
493: {'negeri': 'SARAWAK', 'parlimen': 'BARAM', 'dun': 'TELANG USAN'},
494: {'negeri': 'SARAWAK', 'parlimen': 'BARAM', 'dun': 'MULU'},
495: {'negeri': 'SARAWAK', 'parlimen': 'LIMBANG', 'dun': 'BUKIT KOTA'},
496: {'negeri': 'SARAWAK', 'parlimen': 'LIMBANG', 'dun': 'BATU DANAU'},
497: {'negeri': 'SARAWAK', 'parlimen': 'LAWAS', 'dun': 'BA`KELALAN'},
498: {'negeri': 'SARAWAK', 'parlimen': 'LAWAS', 'dun': 'BUKIT SARI'},
499: {
'negeri': 'SELANGOR',
'parlimen': 'SABAK BERNAM',
'dun': 'SUNGAI AIR TAWAR',
},
500: {'negeri': 'SELANGOR', 'parlimen': 'SABAK BERNAM', 'dun': 'SABAK'},
501: {
'negeri': 'SELANGOR',
'parlimen': 'SUNGAI BESAR',
'dun': 'SUNGAI PANJANG',
},
502: {'negeri': 'SELANGOR', 'parlimen': 'SUNGAI BESAR', 'dun': 'SEKINCHAN'},
503: {
'negeri': 'SELANGOR',
'parlimen': 'HULU SELANGOR',
'dun': 'HULU BERNAM',
},
504: {
'negeri': 'SELANGOR',
'parlimen': 'HULU SELANGOR',
'dun': 'KUALA KUBU BAHARU',
},
505: {
'negeri': 'SELANGOR',
'parlimen': 'HULU SELANGOR',
'dun': 'BATANG KALI',
},
506: {
'negeri': 'SELANGOR',
'parlimen': 'TANJONG KARANG',
'dun': 'SUNGAI BURONG',
},
507: {
'negeri': 'SELANGOR',
'parlimen': 'TANJONG KARANG',
'dun': 'PERMATANG',
},
508: {
'negeri': 'SELANGOR',
'parlimen': 'KUALA SELANGOR',
'dun': 'BUKIT MELAWATI',
},
509: {'negeri': 'SELANGOR', 'parlimen': 'KUALA SELANGOR', 'dun': 'IJOK'},
510: {'negeri': 'SELANGOR', 'parlimen': 'KUALA SELANGOR', 'dun': 'JERAM'},
511: {'negeri': 'SELANGOR', 'parlimen': 'SELAYANG', 'dun': 'KUANG'},
512: {'negeri': 'SELANGOR', 'parlimen': 'SELAYANG', 'dun': 'RAWANG'},
513: {'negeri': 'SELANGOR', 'parlimen': 'SELAYANG', 'dun': 'TAMAN TEMPLER'},
514: {'negeri': 'SELANGOR', 'parlimen': 'GOMBAK', 'dun': 'BATU CAVES'},
515: {'negeri': 'SELANGOR', 'parlimen': 'GOMBAK', 'dun': 'GOMBAK SETIA'},
516: {'negeri': 'SELANGOR', 'parlimen': 'GOMBAK', 'dun': 'HULU KELANG'},
517: {
'negeri': 'SELANGOR',
'parlimen': 'AMPANG',
'dun': 'BUKIT ANTARABANGSA',
},
518: {'negeri': 'SELANGOR', 'parlimen': 'AMPANG', 'dun': 'LEMBAH JAYA'},
519: {'negeri': 'SELANGOR', 'parlimen': 'PANDAN', 'dun': 'CHEMPAKA'},
520: {'negeri': 'SELANGOR', 'parlimen': 'PANDAN', 'dun': 'TERATAI'},
521: {'negeri': 'SELANGOR', 'parlimen': 'HULU LANGAT', 'dun': 'DUSUN TUA'},
522: {'negeri': 'SELANGOR', 'parlimen': 'HULU LANGAT', 'dun': 'SEMENYIH'},
523: {'negeri': 'SELANGOR', 'parlimen': 'HULU LANGAT', 'dun': 'KAJANG'},
524: {'negeri': 'SELANGOR', 'parlimen': 'SERDANG', 'dun': 'BANGI'},
525: {'negeri': 'SELANGOR', 'parlimen': 'SERDANG', 'dun': 'BALAKONG'},
526: {'negeri': 'SELANGOR', 'parlimen': 'SERDANG', 'dun': 'SERI KEMBANGAN'},
527: {'negeri': 'SELANGOR', 'parlimen': 'PUCHONG', 'dun': 'SERI SERDANG'},
528: {'negeri': 'SELANGOR', 'parlimen': 'PUCHONG', 'dun': 'KINRARA'},
529: {
'negeri': 'SELANGOR',
'parlimen': 'KELANA JAYA',
'dun': 'SUBANG JAYA',
},
530: {'negeri': 'SELANGOR', 'parlimen': 'KELANA JAYA', 'dun': 'SERI SETIA'},
531: {
'negeri': 'SELANGOR',
'parlimen': 'PETALING JAYA SELATAN',
'dun': 'TAMAN MEDAN',
},
532: {
'negeri': 'SELANGOR',
'parlimen': 'PETALING JAYA SELATAN',
'dun': 'BUKIT GASING',
},
533: {
'negeri': 'SELANGOR',
'parlimen': 'PETALING JAYA UTARA',
'dun': 'KAMPUNG TUNKU',
},
534: {
'negeri': 'SELANGOR',
'parlimen': 'PETALING JAYA UTARA',
'dun': 'DAMANSARA UTAMA',
},
535: {'negeri': 'SELANGOR', 'parlimen': 'SUBANG', 'dun': 'BUKIT LANJAN'},
536: {'negeri': 'SELANGOR', 'parlimen': 'SUBANG', 'dun': 'PAYA JARAS'},
537: {'negeri': 'SELANGOR', 'parlimen': 'SUBANG', 'dun': 'KOTA DAMANSARA'},
538: {
'negeri': 'SELANGOR',
'parlimen': 'SHAH ALAM',
'dun': 'KOTA ANGGERIK',
},
539: {'negeri': 'SELANGOR', 'parlimen': 'SHAH ALAM', 'dun': 'BATU TIGA'},
540: {'negeri': 'SELANGOR', 'parlimen': 'KAPAR', 'dun': 'MERU'},
541: {'negeri': 'SELANGOR', 'parlimen': 'KAPAR', 'dun': 'SEMENTA'},
542: {'negeri': 'SELANGOR', 'parlimen': 'KAPAR', 'dun': 'SUNGAI PINANG'},
543: {'negeri': 'SELANGOR', 'parlimen': 'KAPAR', 'dun': 'SELAT KLANG'},
544: {'negeri': 'SELANGOR', 'parlimen': 'KLANG', 'dun': 'PELABUHAN KLANG'},
545: {'negeri': 'SELANGOR', 'parlimen': 'KLANG', 'dun': 'PANDAMARAN'},
546: {'negeri': 'SELANGOR', 'parlimen': 'KLANG', 'dun': 'KOTA ALAM SHAH'},
547: {'negeri': 'SELANGOR', 'parlimen': 'KOTA RAJA', 'dun': 'SERI ANDALAS'},
548: {'negeri': 'SELANGOR', 'parlimen': 'KOTA RAJA', 'dun': 'SRI MUDA'},
549: {
'negeri': 'SELANGOR',
'parlimen': 'KUALA LANGAT',
'dun': 'SIJANGKANG',
},
550: {
'negeri': 'SELANGOR',
'parlimen': 'KUALA LANGAT',
'dun': 'TELUK DATUK',
},
551: {'negeri': 'SELANGOR', 'parlimen': 'KUALA LANGAT', 'dun': 'MORIB'},
552: {'negeri': 'SELANGOR', 'parlimen': 'SEPANG', 'dun': 'TANJONG SEPAT'},
553: {'negeri': 'SELANGOR', 'parlimen': 'SEPANG', 'dun': 'DENGKIL'},
554: {'negeri': 'SELANGOR', 'parlimen': 'SEPANG', 'dun': 'SUNGAI PELEK'},
555: {'negeri': 'TERENGGANU', 'parlimen': 'BESUT', 'dun': 'KUALA BESUT'},
556: {'negeri': 'TERENGGANU', 'parlimen': 'BESUT', 'dun': 'KOTA PUTERA'},
557: {'negeri': 'TERENGGANU', 'parlimen': 'BESUT', 'dun': 'JERTIH'},
558: {'negeri': 'TERENGGANU', 'parlimen': 'BESUT', 'dun': 'HULU BESUT'},
559: {'negeri': 'TERENGGANU', 'parlimen': 'SETIU', 'dun': 'JABI'},
560: {'negeri': 'TERENGGANU', 'parlimen': 'SETIU', 'dun': 'PERMAISURI'},
561: {'negeri': 'TERENGGANU', 'parlimen': 'SETIU', 'dun': 'LANGKAP'},
562: {'negeri': 'TERENGGANU', 'parlimen': 'SETIU', 'dun': 'BATU RAKIT'},
563: {'negeri': 'TERENGGANU', 'parlimen': 'KUALA NERUS', 'dun': 'TEPUH'},
564: {
'negeri': 'TERENGGANU',
'parlimen': 'KUALA NERUS',
'dun': 'TELUK PASU',
},
565: {
'negeri': 'TERENGGANU',
'parlimen': 'KUALA NERUS',
'dun': 'SEBERANG TAKIR',
},
566: {
'negeri': 'TERENGGANU',
'parlimen': 'KUALA NERUS',
'dun': 'BUKIT TUNGGAL',
},
567: {
'negeri': 'TERENGGANU',
'parlimen': 'KUALA TERENGGANU',
'dun': 'WAKAF MEMPELAM',
},
568: {
'negeri': 'TERENGGANU',
'parlimen': 'KUALA TERENGGANU',
'dun': 'BANDAR',
},
569: {
'negeri': 'TERENGGANU',
'parlimen': 'KUALA TERENGGANU',
'dun': 'LADANG',
},
570: {
'negeri': 'TERENGGANU',
'parlimen': 'KUALA TERENGGANU',
'dun': 'BATU BURUK',
},
571: {'negeri': 'TERENGGANU', 'parlimen': 'MARANG', 'dun': 'ALUR LIMBAT'},
572: {'negeri': 'TERENGGANU', 'parlimen': 'MARANG', 'dun': 'BUKIT PAYUNG'},
573: {'negeri': 'TERENGGANU', 'parlimen': 'MARANG', 'dun': 'RU RENDANG'},
574: {
'negeri': 'TERENGGANU',
'parlimen': 'MARANG',
'dun': 'PENGKALAN BERANGAN',
},
575: {
'negeri': 'TERENGGANU',
'parlimen': 'HULU TERENGGANU',
'dun': 'TELEMUNG',
},
576: {
'negeri': 'TERENGGANU',
'parlimen': 'HULU TERENGGANU',
'dun': 'MANIR',
},
577: {
'negeri': 'TERENGGANU',
'parlimen': 'HULU TERENGGANU',
'dun': 'KUALA BERANG',
},
578: {'negeri': 'TERENGGANU', 'parlimen': 'HULU TERENGGANU', 'dun': 'AJIL'},
579: {'negeri': 'TERENGGANU', 'parlimen': 'DUNGUN', 'dun': 'BUKIT BESI'},
580: {'negeri': 'TERENGGANU', 'parlimen': 'DUNGUN', 'dun': 'RANTAU ABANG'},
581: {'negeri': 'TERENGGANU', 'parlimen': 'DUNGUN', 'dun': 'SURA'},
582: {'negeri': 'TERENGGANU', 'parlimen': 'DUNGUN', 'dun': 'PAKA'},
583: {'negeri': 'TERENGGANU', 'parlimen': 'KEMAMAN', 'dun': 'KEMASIK'},
584: {'negeri': 'TERENGGANU', 'parlimen': 'KEMAMAN', 'dun': 'KIJAL'},
585: {'negeri': 'TERENGGANU', 'parlimen': 'KEMAMAN', 'dun': 'CUKAI'},
586: {'negeri': 'TERENGGANU', 'parlimen': 'KEMAMAN', 'dun': 'AIR PUTIH'},
587: {'negeri': 'WILAYAH PERSEKUTUAN', 'parlimen': 'KEPONG', 'dun': 'NAN'},
588: {
'negeri': 'WILAYAH PERSEKUTUAN',
'parlimen': 'WANGSA MAJU',
'dun': 'NAN',
},
589: {
'negeri': 'WILAYAH PERSEKUTUAN',
'parlimen': 'SEGAMBUT',
'dun': 'NAN',
},
590: {
'negeri': 'WILAYAH PERSEKUTUAN',
'parlimen': 'SETIAWANGSA',
'dun': 'NAN',
},
591: {
'negeri': 'WILAYAH PERSEKUTUAN',
'parlimen': 'TITIWANGSA',
'dun': 'NAN',
},
592: {
'negeri': 'WILAYAH PERSEKUTUAN',
'parlimen': 'BUKIT BINTANG',
'dun': 'NAN',
},
593: {
'negeri': 'WILAYAH PERSEKUTUAN',
'parlimen': 'LEMBAH PANTAI',
'dun': 'NAN',
},
594: {'negeri': 'WILAYAH PERSEKUTUAN', 'parlimen': 'SEPUTEH', 'dun': 'NAN'},
595: {'negeri': 'WILAYAH PERSEKUTUAN', 'parlimen': 'CHERAS', 'dun': 'NAN'},
596: {
'negeri': 'WILAYAH PERSEKUTUAN',
'parlimen': 'BANDAR TUN RAZAK',
'dun': 'NAN',
},
597: {
'negeri': 'WILAYAH PERSEKUTUAN',
'parlimen': 'PUTRAJAYA',
'dun': 'NAN',
},
598: {'negeri': 'WILAYAH PERSEKUTUAN', 'parlimen': 'LABUAN', 'dun': 'NAN'},
}
| 46.052876 | 80 | 0.527002 |
958537c71891988b7a77d4b7e40a346c1b067a3b | 8,027 | py | Python | torch2trt/converters/interpolate.py | d1ngn1gefe1/torch2trt | 4e9af7da09ce158d6d87f6a30b886fb46c33c815 | [
"MIT"
] | 1 | 2021-09-07T10:48:16.000Z | 2021-09-07T10:48:16.000Z | torch2trt/converters/interpolate.py | maronuu/torch2trt | 311f328cd45799ad8d72f1bebcc818d71c301f62 | [
"MIT"
] | null | null | null | torch2trt/converters/interpolate.py | maronuu/torch2trt | 311f328cd45799ad8d72f1bebcc818d71c301f62 | [
"MIT"
] | 1 | 2021-09-29T19:22:44.000Z | 2021-09-29T19:22:44.000Z | import torch.nn.functional as F
import torch.nn as nn
from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import collections.abc  # the Sequence ABC lives in collections.abc; the bare collections.Sequence alias was removed in Python 3.10
def has_interpolate_plugin():
try:
from torch2trt.plugins import InterpolatePlugin
return True
except:
return False
def get_interpolate_plugin(size, mode, align_corners):
from torch2trt.plugins import InterpolatePlugin
PLUGIN_NAME = 'interpolate'
registry = trt.get_plugin_registry()
creator = [c for c in registry.plugin_creator_list if c.name == PLUGIN_NAME and c.plugin_namespace == 'torch2trt'][0]
torch2trt_plugin = InterpolatePlugin(size=size, mode=mode, align_corners=align_corners)
return creator.deserialize_plugin(PLUGIN_NAME, torch2trt_plugin.serializeToString())
@tensorrt_converter('torch.nn.functional.interpolate', enabled=trt_version() < '7.1' and has_interpolate_plugin())
def convert_interpolate_plugin(ctx):
input = ctx.method_args[0]
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
try:
mode = get_arg(ctx, 'mode', pos=3, default='nearest')
except KeyError:
mode = 'nearest'
try:
align_corners = get_arg(ctx, 'align_corners', pos=4, default=None)
except KeyError:
align_corners = False
# currently only works for NCHW
size = list(output.shape[2:])
plugin = get_interpolate_plugin(size=size, mode=mode, align_corners=align_corners)
layer = ctx.network.add_plugin_v2([input_trt], plugin)
output._trt = layer.get_output(0)
@tensorrt_converter('torch.nn.functional.interpolate', enabled=trt_version() >= '7.1')
@tensorrt_converter('torch.nn.functional.upsample', enabled=trt_version() >= '7.1')
def convert_interpolate_trt7(ctx):
#parse args
input = get_arg(ctx, 'input', pos=0, default=None)
size = get_arg(ctx, 'size', pos=1, default=None)
scale_factor=get_arg(ctx, 'scale_factor', pos=2, default=None)
mode = get_arg(ctx, 'mode', pos=3, default='nearest')
align_corners = get_arg(ctx, 'align_corners', pos=4, default=None)
input_dim = input.dim() - 2
input_trt = add_missing_trt_tensors(ctx.network, [input])[0]
output = ctx.method_return
layer = ctx.network.add_resize(input=input_trt)
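    # Note: the target shape/scales configured below cover only (C, H, W); the
    # batch dimension appears to be treated as implicit here, so `shape` keeps
    # the channel count from input.size(1) and `scales` gets a leading 1 that
    # leaves the channel dimension unscaled.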
shape = size
if shape != None:
        if isinstance(shape, collections.abc.Sequence):
shape = [input.size(1)] + list(shape)
else:
shape = [input.size(1)] + [shape] * input_dim
layer.shape = shape
scales = scale_factor
if scales != None:
        if not isinstance(scales, collections.abc.Sequence):
scales = [scales] * input_dim
layer.scales = [1] + list(scales)
resize_mode = mode
if resize_mode.lower() in ["linear","bilinear","trilinear"]:
layer.resize_mode = trt.ResizeMode.LINEAR
else:
layer.resize_mode=trt.ResizeMode.NEAREST
if align_corners != None:
layer.align_corners = align_corners
output._trt = layer.get_output(0)
class Interpolate(torch.nn.Module):
def __init__(self, size=None,scale_factor=None, mode=None, align_corners=None):
super(Interpolate, self).__init__()
## Use either size or scale factor.
self.size = size
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def forward(self, x):
return F.interpolate(x, size=self.size, scale_factor=self.scale_factor,mode=self.mode, align_corners=self.align_corners)
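# Usage sketch for the Interpolate helper above (mirrors the tests below):
# pass either a target size or a scale factor, never both, e.g.
#   up = Interpolate(scale_factor=2.0, mode='bilinear', align_corners=False)
#   y = up(x)  # x is an NCHW tensor; y has twice the spatial resolution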
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_interpolate_nearest():
return Interpolate(size=(224, 224), mode='nearest', align_corners=None)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_interpolate_bilinear():
return Interpolate(size=(224, 224), mode= 'bilinear', align_corners=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_interpolate_bicubic():
return Interpolate(size=(224, 224), mode='bicubic',align_corners= False)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_interpolate_area():
return Interpolate(size=(56, 56), mode='area',align_corners= None)
@add_module_test(torch.float32, torch.device('cuda'), [(1, 10, 112, 112)], enabled=trt_version() < '7.1' and has_interpolate_plugin())
def test_upsample_scale_factor2():
return nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1,2,12,12)], enabled=trt_version() >= '7.1')
def test_nearest_mode():
return torch.nn.Upsample(scale_factor=2, mode="nearest")
@add_module_test(torch.float32, torch.device('cuda'), [(1,4,12,12)], enabled=trt_version() >= '7.1')
def test_bilinear_mode():
return torch.nn.Upsample(scale_factor=3, mode="bilinear",align_corners=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,12,12)], enabled=trt_version() >= '7.1')
def test_align_corner():
return torch.nn.Upsample(scale_factor=2.0, mode="bilinear", align_corners=True)
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,12,12)], enabled=trt_version() >= '7.1')
def test_align_corner_functional():
return Interpolate(scale_factor=2.0, mode="bilinear", align_corners=True)
@add_module_test(torch.float32, torch.device('cuda'), [(1,5,13,13)], enabled=trt_version() >= '7.1')
def test_bilinear_mode_odd_input_shape():
return torch.nn.Upsample(scale_factor=2,mode="bilinear",align_corners=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1,4,12,12)], enabled=trt_version() >= '7.1')
def test_size_parameter():
return torch.nn.Upsample(size=3,mode="nearest")
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,13,13)], enabled=trt_version() >= '7.1')
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,1,1)], enabled=trt_version() >= '7.1')
def test_size_parameter_odd_input():
return torch.nn.Upsample(size=[6,3],mode="nearest")
@add_module_test(torch.float32, torch.device('cuda'), [(1,4,6,6,6)], enabled=trt_version() >= '7.1')
def test_nearest_mode_3d():
return torch.nn.Upsample(scale_factor=2, mode="nearest")
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,5,5,5)], enabled=trt_version() >= '7.1')
def test_bilinear_mode_3d():
return torch.nn.Upsample(scale_factor=3, mode="trilinear",align_corners=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1,4,8,8,8)], enabled=trt_version() >= '7.1')
def test_align_corner_3d():
return torch.nn.Upsample(scale_factor=4, mode="trilinear", align_corners=True)
@add_module_test(torch.float32, torch.device('cuda'), [(1,6,7,7,7)], enabled=trt_version() >= '7.1')
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,2,4,4)], enabled=trt_version() >= '7.1')
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,1,1,1)], enabled=trt_version() >= '7.1')
def test_bilinear_mode_odd_input_shape_3d():
return torch.nn.Upsample(scale_factor=2, mode="trilinear",align_corners=False)
@add_module_test(torch.float32, torch.device('cuda'), [(1,1,12,12,12)], enabled=trt_version() >= '7.1')
def test_size_parameter_3d():
return torch.nn.Upsample(size=3,mode="trilinear", align_corners=True)
@add_module_test(torch.float32, torch.device('cuda'), [(1,3,7,9,5)], enabled=trt_version() >= '7.1')
@add_module_test(torch.float32, torch.device('cuda'), [(1,4,3,5,1)], enabled=trt_version() >= '7.1')
def test_size_parameter_odd_input_3d():
return torch.nn.Upsample(size=[11,14,17],mode="trilinear", align_corners=False)
| 43.155914 | 134 | 0.694531 |
22e0e471c8ddf87c0e4650cf40bb2a098d80610b | 5,615 | py | Python | ep.py | tommarcoen/ep-jails | 03514b9674db1d4d631da92ce5d31d7a73f721ea | [
"BSD-2-Clause"
] | 1 | 2020-08-03T18:57:42.000Z | 2020-08-03T18:57:42.000Z | ep.py | vonbeitthia/ep-jails | 03514b9674db1d4d631da92ce5d31d7a73f721ea | [
"BSD-2-Clause"
] | 6 | 2019-04-28T11:27:30.000Z | 2019-04-28T17:10:41.000Z | ep.py | vonbeitthia/ep-jails | 03514b9674db1d4d631da92ce5d31d7a73f721ea | [
"BSD-2-Clause"
] | 1 | 2020-08-03T18:57:33.000Z | 2020-08-03T18:57:33.000Z | #!/usr/bin/env python3
#-
# Copyright (c) 2019 Tom Marcoen
# All rights reserved.
#-
# This script assumes the bridges (IF_BRIDGE(4)) have been created
# in advance, e.g. in /etc/rc.conf, and will only create a pair of
# virtual back-to-back connected Ethernet interfaces for use with
# jails.
#
# Excerpt /etc/rc.conf:
#
# cloned_interfaces="bridge0 bridge1"
# ifconfig_bridge0_name="b0_wan"
# ifconfig_bridge1_name="b1_dmz"
#
# Excerpt /etc/jail.conf:
#
# XXX {
# vnet;
# vnet.interface = e0a_wan_XXX, e1a_dmz_XXX;
# exec.prestart = "ep.py create XXX wan dmz";
# exec.poststop = "ep.py destroy XXX wan dmz";
#-
import argparse
from pprint import pprint
from re import sub
from subprocess import Popen, PIPE, STDOUT
from sys import exit
IF_NAMESIZE = 16 # /sys/net/if.h
debug_level = 0
# usage: ep.py create [-h] name bridge [bridge ...]
#
# For each bridge listed, create an epair and assign one end to the jail
# and the other to the bridge.
#
# The default (long) names are: e<i>[ab]_<bridge>_<name>
# The short names are: <name><i>[ab]
#
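# Example (derived from the code below): `ep.py create XXX wan dmz` renames the
# epairs to e0a_wan_XXX/e0b_wan_XXX and e1a_dmz_XXX/e1b_dmz_XXX; with -s the
# same call yields XXX0a/XXX0b and XXX1a/XXX1b instead.
#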
def create(args):
jail = args['name']
if (debug_level>=2): print("[DEBUG] Jail name: {}".format(jail))
# Loop through the list of bridges and create an epair for each.
for i, bridge in enumerate(args['bridge']):
if (debug_level>=2): print("[DEBUG] Bridge: {}".format(bridge))
# Create a new epair
output = Popen('ifconfig epair create'.split(),
stdout=PIPE,
stderr=STDOUT)
stdout,stderr = output.communicate()
epair = stdout.decode('UTF-8').strip()
if (debug_level>=2): print("[DEBUG] epair: {}".format(epair))
# Rename the epair and bring the interfaces up
if(not args['short_names']):
new_a = 'e' + str(i) + 'a_' + bridge + '_' + jail
new_b = 'e' + str(i) + 'b_' + bridge + '_' + jail
if (len(new_a)>=IF_NAMESIZE):
print("[ERROR] Interface name too long. Using short names instead.")
args['short_names'] = True
if(args['short_names']):
new_a = jail + str(i) + 'a'
new_b = jail + str(i) + 'b'
if (debug_level>=2): print("[DEBUG] new_a: {}".format(new_a))
if (debug_level>=2): print("[DEBUG] new_b: {}".format(new_b))
if (debug_level>=1): print("[INFO] Creating {}...".format(new_a))
output = Popen('ifconfig {} name {} up'.format(epair,new_a).split(),
stdout=PIPE,
stderr=STDOUT)
stdout,stderr = output.communicate()
if (debug_level>=2):
print("[DEBUG] out: {}".format(stdout))
print("[DEBUG] err: {}".format(stderr))
if (debug_level>=1): print("[INFO] Creating {}...".format(new_b))
output = Popen('ifconfig {} name {} up'.format(sub('a$','b', epair),new_b).split(),
stdout=PIPE,
stderr=STDOUT)
# Attach one end to the bridge
if (args['aside']):
output = Popen('ifconfig {} addm {}'.format(bridge, new_a).split(),
stdout=PIPE,
stderr=STDOUT)
else:
output = Popen('ifconfig {} addm {}'.format(bridge, new_b).split(),
stdout=PIPE,
stderr=STDOUT)
return
def destroy(args):
jail = args['name']
if (debug_level>=2): print("[DEBUG] Jail: {}".format(jail))
for i, bridge in enumerate(args['bridge']):
if (debug_level>=2): print("[DEBUG] Bridge: {}".format(bridge))
        if args['short_names']:  # match interfaces created with -s
            intf = jail + str(i) + 'a'
        else:
            intf = 'e' + str(i) + 'a_' + bridge + '_' + jail
if (debug_level>=1): print('[INFO] Destroying {}...'.format(intf))
output = Popen('ifconfig {} destroy'.format(intf).split(),
stdout=PIPE,
stderr=STDOUT)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create and destroy epair interfaces for jails.')
# Option: verbosity in debug level
parser.add_argument('-v', '--verbose',
action='count', default=0,
help='Increase the verbosity level by adding this argument multiple times.')
# Option: use the 'A' side of the epair instead of the default 'B' pair
parser.add_argument('-a', '--aside',
action='store_true',
help="Connect the 'A' side of the epair to the bridge.")
# Option: use short names instead of the default long names
parser.add_argument('-s', '--short_names',
action='store_true',
help="Use short names instead of the default long names.")
# We have two commands: create new interfaces or destroy existing ones.
subparsers = parser.add_subparsers(title='Commands',dest='cmd')
# Command: create
parser_create = subparsers.add_parser(
'create',
help='Create epair interfaces for the given jail.')
parser_create.add_argument('name', help='The name of the jail')
parser_create.add_argument('bridge', nargs='+')
# Command: destroy
parser_destroy = subparsers.add_parser(
'destroy',
help='Destroy the epair interfaces for the given jail.')
parser_destroy.add_argument('name', help='The name of the jail')
parser_destroy.add_argument('bridge', nargs='+')
# Parse the argument and call the function create() or destroy()
args = parser.parse_args()
debug_level = vars(args)['verbose']
# There must be a better way to write this...
if (vars(args)['cmd'] == 'create'):
create(vars(args))
elif (vars(args)['cmd'] == 'destroy'):
destroy(vars(args))
| 38.724138 | 98 | 0.597685 |
3f697a47c4418df58f94f56e14d951ea272c1370 | 729 | py | Python | python/problem0035.py | rado0x54/project-euler | cf3b128784922d19f17e940aa4be727752b29bf2 | [
"MIT"
] | null | null | null | python/problem0035.py | rado0x54/project-euler | cf3b128784922d19f17e940aa4be727752b29bf2 | [
"MIT"
] | null | null | null | python/problem0035.py | rado0x54/project-euler | cf3b128784922d19f17e940aa4be727752b29bf2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Project Euler - Problem 35 Module"""
import pelib
FPC = pelib.FastPrimeChecker()
def is_circular_primes(number):
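    # Illustrative check: 197 is a circular prime because its rotations 971 and
    # 719 are also prime, whereas 19 is not, since the rotation 91 = 7 * 13.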
str_number = str(number)
# we already know that number is prime
for i in range(1, len(str_number)):
# shift
shifted = int(str_number[i:] + str_number[:i])
if not FPC.is_prime(shifted):
return False
return True
def problem35(limit):
"""Problem 35 - Circular primes"""
result = 0
for p in pelib.primes_sieve(limit):
if is_circular_primes(p):
result += 1
return result
def run():
"""Default Run Method"""
return problem35(1000000)
if __name__ == '__main__':
print("Result: ", run())
| 20.25 | 54 | 0.618656 |
6da789318ddb3d30545f84b5b13e2b21ab70d7e0 | 816 | py | Python | src/app/service/paykeep/urls.py | Fassial/IOTDC2020-WHU | 3010f954f22e4ace968dfbd0baee22b830db9127 | [
"MIT"
] | 1 | 2021-06-16T16:44:58.000Z | 2021-06-16T16:44:58.000Z | src/app/service/paykeep/urls.py | Fassial/IOTDC2020-WHU | 3010f954f22e4ace968dfbd0baee22b830db9127 | [
"MIT"
] | null | null | null | src/app/service/paykeep/urls.py | Fassial/IOTDC2020-WHU | 3010f954f22e4ace968dfbd0baee22b830db9127 | [
"MIT"
] | 2 | 2020-04-22T12:55:29.000Z | 2020-05-03T00:16:32.000Z | """paykeep URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from paykeep_service.views import pay
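# Hypothetical sketch of the class-based pattern described in the docstring
# above; PayView is not defined in this project and is shown only for
# illustration:
#   from paykeep_service.views import PayView
#   urlpatterns = [path('pay_status/', PayView.as_view(), name='pay_status')]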
urlpatterns = [
path('admin/', admin.site.urls),
path('add_pay/', pay),
]
| 31.384615 | 77 | 0.708333 |
0b2414c6c98c10a4ce77079b4e770fa41937d125 | 18,942 | py | Python | sympy/simplify/gammasimp.py | harsh-98/sympy | 53fc684467088cdf0acccb6ad770cbde97e32268 | [
"BSD-3-Clause"
] | null | null | null | sympy/simplify/gammasimp.py | harsh-98/sympy | 53fc684467088cdf0acccb6ad770cbde97e32268 | [
"BSD-3-Clause"
] | null | null | null | sympy/simplify/gammasimp.py | harsh-98/sympy | 53fc684467088cdf0acccb6ad770cbde97e32268 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, division
from sympy.core import Function, S, Mul, Pow, Add
from sympy.core.compatibility import ordered, default_sort_key
from sympy.core.basic import preorder_traversal
from sympy.core.function import count_ops, expand_func
from sympy.functions.combinatorial.factorials import (binomial,
CombinatorialFunction, factorial)
from sympy.functions import gamma, sqrt, sin
from sympy.polys import factor, cancel
from sympy.utilities.iterables import sift, uniq
def gammasimp(expr):
r"""
Simplify expressions with gamma functions.
This function takes as input an expression containing gamma
functions or functions that can be rewritten in terms of gamma
functions and tries to minimize the number of those functions and
reduce the size of their arguments.
    The algorithm works by rewriting all gamma functions as expressions
    involving rising factorials (Pochhammer symbols) and applying
    recurrence relations and other transformations applicable to rising
    factorials, to reduce their arguments, possibly letting the resulting
    rising factorials cancel. Rising factorials with the second argument
    being an integer are expanded into polynomial forms and finally all
    other rising factorials are rewritten in terms of gamma functions.
Then the following two steps are performed.
1. Reduce the number of gammas by applying the reflection theorem
gamma(x)*gamma(1-x) == pi/sin(pi*x).
2. Reduce the number of gammas by applying the multiplication theorem
gamma(x)*gamma(x+1/n)*...*gamma(x+(n-1)/n) == C*gamma(n*x).
It then reduces the number of prefactors by absorbing them into gammas
where possible and expands gammas with rational argument.
    All transformation rules can be found (or were derived from) here:
1. http://functions.wolfram.com/GammaBetaErf/Pochhammer/17/01/02/
2. http://functions.wolfram.com/GammaBetaErf/Pochhammer/27/01/0005/
Examples
========
>>> from sympy.simplify import gammasimp
>>> from sympy import gamma, factorial, Symbol
>>> from sympy.abc import x
>>> n = Symbol('n', integer = True)
>>> gammasimp(gamma(x)/gamma(x - 3))
(x - 3)*(x - 2)*(x - 1)
>>> gammasimp(gamma(n + 3))
gamma(n + 3)
"""
expr = expr.rewrite(gamma)
return _gammasimp(expr, as_comb = False)
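# A rough illustration of the reflection step described in the docstring (not
# a doctest; exact output formatting may vary across SymPy versions):
#
#   >>> gammasimp(gamma(x)*gamma(1 - x))   # expected: pi/sin(pi*x)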
def _gammasimp(expr, as_comb):
"""
Helper function for gammasimp and combsimp.
Simplifies expressions written in terms of gamma function. If
as_comb is True, it tries to preserve integer arguments. See
docstring of gammasimp for more information. This was part of
combsimp() in combsimp.py.
"""
expr = expr.replace(gamma,
lambda n: _rf(1, (n - 1).expand()))
if as_comb:
expr = expr.replace(_rf,
lambda a, b: binomial(a + b - 1, b)*gamma(b + 1))
else:
expr = expr.replace(_rf,
lambda a, b: gamma(a + b)/gamma(a))
def rule(n, k):
coeff, rewrite = S.One, False
cn, _n = n.as_coeff_Add()
if _n and cn.is_Integer and cn:
coeff *= _rf(_n + 1, cn)/_rf(_n - k + 1, cn)
rewrite = True
n = _n
# this sort of binomial has already been removed by
# rising factorials but is left here in case the order
# of rule application is changed
if k.is_Add:
ck, _k = k.as_coeff_Add()
if _k and ck.is_Integer and ck:
coeff *= _rf(n - ck - _k + 1, ck)/_rf(_k + 1, ck)
rewrite = True
k = _k
if count_ops(k) > count_ops(n - k):
rewrite = True
k = n - k
if rewrite:
return coeff*binomial(n, k)
expr = expr.replace(binomial, rule)
def rule_gamma(expr, level=0):
""" Simplify products of gamma functions further. """
if expr.is_Atom:
return expr
def gamma_rat(x):
# helper to simplify ratios of gammas
was = x.count(gamma)
xx = x.replace(gamma, lambda n: _rf(1, (n - 1).expand()
).replace(_rf, lambda a, b: gamma(a + b)/gamma(a)))
if xx.count(gamma) < was:
x = xx
return x
def gamma_factor(x):
# return True if there is a gamma factor in shallow args
if isinstance(x, gamma):
return True
if x.is_Add or x.is_Mul:
return any(gamma_factor(xi) for xi in x.args)
if x.is_Pow and (x.exp.is_integer or x.base.is_positive):
return gamma_factor(x.base)
return False
# recursion step
if level == 0:
expr = expr.func(*[rule_gamma(x, level + 1) for x in expr.args])
level += 1
if not expr.is_Mul:
return expr
# non-commutative step
if level == 1:
args, nc = expr.args_cnc()
if not args:
return expr
if nc:
return rule_gamma(Mul._from_args(args), level + 1)*Mul._from_args(nc)
level += 1
# pure gamma handling, not factor absorption
if level == 2:
T, F = sift(expr.args, gamma_factor, binary=True)
gamma_ind = Mul(*F)
d = Mul(*T)
nd, dd = d.as_numer_denom()
for ipass in range(2):
args = list(ordered(Mul.make_args(nd)))
for i, ni in enumerate(args):
if ni.is_Add:
ni, dd = Add(*[
rule_gamma(gamma_rat(a/dd), level + 1) for a in ni.args]
).as_numer_denom()
args[i] = ni
if not dd.has(gamma):
break
nd = Mul(*args)
if ipass == 0 and not gamma_factor(nd):
break
nd, dd = dd, nd # now process in reversed order
expr = gamma_ind*nd/dd
if not (expr.is_Mul and (gamma_factor(dd) or gamma_factor(nd))):
return expr
level += 1
# iteration until constant
if level == 3:
while True:
was = expr
expr = rule_gamma(expr, 4)
if expr == was:
return expr
numer_gammas = []
denom_gammas = []
numer_others = []
denom_others = []
def explicate(p):
if p is S.One:
return None, []
b, e = p.as_base_exp()
if e.is_Integer:
if isinstance(b, gamma):
return True, [b.args[0]]*e
else:
return False, [b]*e
else:
return False, [p]
newargs = list(ordered(expr.args))
while newargs:
n, d = newargs.pop().as_numer_denom()
isg, l = explicate(n)
if isg:
numer_gammas.extend(l)
elif isg is False:
numer_others.extend(l)
isg, l = explicate(d)
if isg:
denom_gammas.extend(l)
elif isg is False:
denom_others.extend(l)
# =========== level 2 work: pure gamma manipulation =========
if not as_comb:
# Try to reduce the number of gamma factors by applying the
# reflection formula gamma(x)*gamma(1-x) = pi/sin(pi*x)
for gammas, numer, denom in [(
numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
new = []
while gammas:
g1 = gammas.pop()
if g1.is_integer:
new.append(g1)
continue
for i, g2 in enumerate(gammas):
n = g1 + g2 - 1
if not n.is_Integer:
continue
numer.append(S.Pi)
denom.append(sin(S.Pi*g1))
gammas.pop(i)
if n > 0:
for k in range(n):
numer.append(1 - g1 + k)
elif n < 0:
for k in range(-n):
denom.append(-g1 - k)
break
else:
new.append(g1)
# /!\ updating IN PLACE
gammas[:] = new
# Try to reduce the number of gammas by using the duplication
# theorem to cancel an upper and lower: gamma(2*s)/gamma(s) =
# 2**(2*s + 1)/(4*sqrt(pi))*gamma(s + 1/2). Although this could
# be done with higher argument ratios like gamma(3*x)/gamma(x),
# this would not reduce the number of gammas as in this case.
for ng, dg, no, do in [(numer_gammas, denom_gammas, numer_others,
denom_others),
(denom_gammas, numer_gammas, denom_others,
numer_others)]:
while True:
for x in ng:
for y in dg:
n = x - 2*y
if n.is_Integer:
break
else:
continue
break
else:
break
ng.remove(x)
dg.remove(y)
if n > 0:
for k in range(n):
no.append(2*y + k)
elif n < 0:
for k in range(-n):
do.append(2*y - 1 - k)
ng.append(y + S(1)/2)
no.append(2**(2*y - 1))
do.append(sqrt(S.Pi))
# Try to reduce the number of gamma factors by applying the
# multiplication theorem (used when n gammas with args differing
# by 1/n mod 1 are encountered).
#
# run of 2 with args differing by 1/2
#
# >>> gammasimp(gamma(x)*gamma(x+S.Half))
# 2*sqrt(2)*2**(-2*x - 1/2)*sqrt(pi)*gamma(2*x)
#
# run of 3 args differing by 1/3 (mod 1)
#
# >>> gammasimp(gamma(x)*gamma(x+S(1)/3)*gamma(x+S(2)/3))
# 6*3**(-3*x - 1/2)*pi*gamma(3*x)
# >>> gammasimp(gamma(x)*gamma(x+S(1)/3)*gamma(x+S(5)/3))
# 2*3**(-3*x - 1/2)*pi*(3*x + 2)*gamma(3*x)
#
def _run(coeffs):
# find runs in coeffs such that the difference in terms (mod 1)
# of t1, t2, ..., tn is 1/n
u = list(uniq(coeffs))
for i in range(len(u)):
dj = ([((u[j] - u[i]) % 1, j) for j in range(i + 1, len(u))])
for one, j in dj:
if one.p == 1 and one.q != 1:
n = one.q
got = [i]
get = list(range(1, n))
for d, j in dj:
m = n*d
if m.is_Integer and m in get:
get.remove(m)
got.append(j)
if not get:
break
else:
continue
for i, j in enumerate(got):
c = u[j]
coeffs.remove(c)
got[i] = c
return one.q, got[0], got[1:]
def _mult_thm(gammas, numer, denom):
# pull off and analyze the leading coefficient from each gamma arg
# looking for runs in those Rationals
# expr -> coeff + resid -> rats[resid] = coeff
rats = {}
for g in gammas:
c, resid = g.as_coeff_Add()
rats.setdefault(resid, []).append(c)
# look for runs in Rationals for each resid
keys = sorted(rats, key=default_sort_key)
for resid in keys:
coeffs = list(sorted(rats[resid]))
new = []
while True:
run = _run(coeffs)
if run is None:
break
# process the sequence that was found:
# 1) convert all the gamma functions to have the right
# argument (could be off by an integer)
# 2) append the factors corresponding to the theorem
# 3) append the new gamma function
n, ui, other = run
# (1)
for u in other:
con = resid + u - 1
for k in range(int(u - ui)):
numer.append(con - k)
con = n*(resid + ui) # for (2) and (3)
# (2)
numer.append((2*S.Pi)**(S(n - 1)/2)*
n**(S(1)/2 - con))
# (3)
new.append(con)
# restore resid to coeffs
rats[resid] = [resid + c for c in coeffs] + new
# rebuild the gamma arguments
g = []
for resid in keys:
g += rats[resid]
# /!\ updating IN PLACE
gammas[:] = g
for l, numer, denom in [(numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
_mult_thm(l, numer, denom)
        # =========== level >= 2 work: factor absorption =========
if level >= 2:
# Try to absorb factors into the gammas: x*gamma(x) -> gamma(x + 1)
# and gamma(x)/(x - 1) -> gamma(x - 1)
# This code (in particular repeated calls to find_fuzzy) can be very
# slow.
def find_fuzzy(l, x):
if not l:
return
S1, T1 = compute_ST(x)
for y in l:
S2, T2 = inv[y]
if T1 != T2 or (not S1.intersection(S2) and
(S1 != set() or S2 != set())):
continue
# XXX we want some simplification (e.g. cancel or
# simplify) but no matter what it's slow.
a = len(cancel(x/y).free_symbols)
b = len(x.free_symbols)
c = len(y.free_symbols)
# TODO is there a better heuristic?
if a == 0 and (b > 0 or c > 0):
return y
# We thus try to avoid expensive calls by building the following
# "invariants": For every factor or gamma function argument
# - the set of free symbols S
# - the set of functional components T
# We will only try to absorb if T1==T2 and (S1 intersect S2 != emptyset
# or S1 == S2 == emptyset)
inv = {}
def compute_ST(expr):
if expr in inv:
return inv[expr]
return (expr.free_symbols, expr.atoms(Function).union(
set(e.exp for e in expr.atoms(Pow))))
def update_ST(expr):
inv[expr] = compute_ST(expr)
for expr in numer_gammas + denom_gammas + numer_others + denom_others:
update_ST(expr)
for gammas, numer, denom in [(
numer_gammas, numer_others, denom_others),
(denom_gammas, denom_others, numer_others)]:
new = []
while gammas:
g = gammas.pop()
cont = True
while cont:
cont = False
y = find_fuzzy(numer, g)
if y is not None:
numer.remove(y)
if y != g:
numer.append(y/g)
update_ST(y/g)
g += 1
cont = True
y = find_fuzzy(denom, g - 1)
if y is not None:
denom.remove(y)
if y != g - 1:
numer.append((g - 1)/y)
update_ST((g - 1)/y)
g -= 1
cont = True
new.append(g)
# /!\ updating IN PLACE
gammas[:] = new
# =========== rebuild expr ==================================
return Mul(*[gamma(g) for g in numer_gammas]) \
/ Mul(*[gamma(g) for g in denom_gammas]) \
* Mul(*numer_others) / Mul(*denom_others)
# (for some reason we cannot use Basic.replace in this case)
was = factor(expr)
expr = rule_gamma(was)
if expr != was:
expr = factor(expr)
expr = expr.replace(gamma,
lambda n: expand_func(gamma(n)) if n.is_Rational else gamma(n))
return expr
class _rf(Function):
@classmethod
def eval(cls, a, b):
if b.is_Integer:
if not b:
return S.One
n, result = int(b), S.One
if n > 0:
for i in range(n):
result *= a + i
return result
elif n < 0:
for i in range(1, -n + 1):
result *= a - i
return 1/result
else:
if b.is_Add:
c, _b = b.as_coeff_Add()
if c.is_Integer:
if c > 0:
return _rf(a, _b)*_rf(a + _b, c)
elif c < 0:
return _rf(a, _b)/_rf(a + _b + c, -c)
if a.is_Add:
c, _a = a.as_coeff_Add()
if c.is_Integer:
if c > 0:
return _rf(_a, b)*_rf(_a + b, c)/_rf(_a, c)
elif c < 0:
return _rf(_a, b)*_rf(_a + c, -c)/_rf(_a + b + c, -c)
| 36.923977 | 85 | 0.445043 |
bcfc4cccc1bcdd2abb8f058576684687173610e2 | 1,400 | py | Python | day20/particles.py | ecly/adventofcode2017 | b96b450a517baa2f3615eec7138491b3c3e22604 | [
"MIT"
] | null | null | null | day20/particles.py | ecly/adventofcode2017 | b96b450a517baa2f3615eec7138491b3c3e22604 | [
"MIT"
] | null | null | null | day20/particles.py | ecly/adventofcode2017 | b96b450a517baa2f3615eec7138491b3c3e22604 | [
"MIT"
] | null | null | null | import copy
import collections
import re
def parse(file):
reg = 'p=<(.*)>, v=<(.*)>, a=<(.*)>'
particles = []
for line in file:
res = re.search(reg, line)
p = list(map(int, res.group(1).split(',')))
v = list(map(int, res.group(2).split(',')))
a = list(map(int, res.group(3).split(',')))
particles.append([p,v,a])
return particles
def step(particle):
particle[1] = [x + dx for x, dx in zip(particle[1], particle[2])]
particle[0] = [x + dx for x, dx in zip(particle[0], particle[1])]
return particle
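# Example: a particle with p=<3,0,0>, v=<2,0,0>, a=<-1,0,0> has, after one
# tick, v=<1,0,0> and then p=<4,0,0> (velocity is updated before position).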
def first(particles):
for _ in range(1000):
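        # Note: map() is lazy, so the 1000 step() applications below are only
        # evaluated when the distances are materialized after the loop.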
particles = map(lambda p: step(p), particles)
dists = list(map(lambda p: sum(map(abs, p[0])), particles))
return dists.index(min(dists))
def second(particles):
for _ in range(1000):
particles = list(map(lambda p: step(p), particles))
positions = collections.defaultdict(list)
for i, p in enumerate(particles):
positions[tuple(p[0])].append(i)
collisions = set()
for lst in positions.values():
if len(lst) > 1:
collisions.update(lst)
particles = [p for i, p in enumerate(particles) if i not in collisions]
return len(particles)
with open('input.in') as f:
particles = parse(f)
print(first(copy.deepcopy(particles)))
print(second(particles))
| 28.571429 | 79 | 0.58 |
ecf466ed39d3a04d1eccde5a168fbddb86feaa86 | 3,348 | py | Python | sempler/test/test_generators.py | juangamella/sempler | 90d01c32d9c8d56653c17e607ce9dc5ebd7ad04d | [
"BSD-3-Clause"
] | 4 | 2020-12-01T05:19:39.000Z | 2022-01-09T12:19:18.000Z | sempler/test/test_generators.py | juangamella/sempler | 90d01c32d9c8d56653c17e607ce9dc5ebd7ad04d | [
"BSD-3-Clause"
] | 3 | 2020-08-13T16:52:35.000Z | 2020-09-28T11:36:36.000Z | sempler/test/test_generators.py | juangamella/sempler | 90d01c32d9c8d56653c17e607ce9dc5ebd7ad04d | [
"BSD-3-Clause"
] | 1 | 2022-03-11T23:44:11.000Z | 2022-03-11T23:44:11.000Z | # Copyright 2021 Juan L Gamella
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#---------------------------------------------------------------------
# Unit testing for module utils
import unittest
import numpy as np
import sempler.generators
import sempler.utils as utils
# Tests for the DAG generation
class GeneratorTests(unittest.TestCase):
def test_avg_degree(self):
p = 1000
for k in range(1,5):
W = sempler.generators.dag_avg_deg(p, k, 1, 2)
av_deg = np.sum(W > 0) * 2 / p
self.assertEqual(len(W), p)
self.assertTrue(av_deg - k < 0.5)
self.assertTrue(utils.is_dag(W))
def test_disconnected_graph(self):
W = sempler.generators.dag_avg_deg(10, 0, 1, 1)
self.assertEqual(np.sum(W), 0)
def test_full_dag(self):
for p in range(10):
W = sempler.generators.dag_full(p)
self.assertEqual(p*(p-1)/2, W.sum())
def test_intervention_targets(self):
possible_targets = set(range(10))
# Test random-sized interventions
interventions = sempler.generators.intervention_targets(10, 100, (0,3))
for intervention in interventions:
self.assertLessEqual(len(intervention), 3)
self.assertGreaterEqual(len(intervention), 0)
self.assertEqual(len(intervention), len(set(intervention) & possible_targets))
# Test empty-sized interventions
interventions = sempler.generators.intervention_targets(10, 100, (0,0))
for intervention in interventions:
self.assertEqual(len(intervention), 0)
# Test single-target interventions
interventions = sempler.generators.intervention_targets(10, 100, 1)
for intervention in interventions:
self.assertEqual(len(intervention), 1)
self.assertEqual(len(intervention), len(set(intervention) & possible_targets))
| 43.480519 | 90 | 0.701016 |
5f33da6c19464c9d4a064fee8477e45eecd485e6 | 538 | py | Python | party_profit.py | GYosifov88/Python-Fundamentals | b46ba2822bd2dac6ff46830c6a520e559b448442 | [
"MIT"
] | null | null | null | party_profit.py | GYosifov88/Python-Fundamentals | b46ba2822bd2dac6ff46830c6a520e559b448442 | [
"MIT"
] | null | null | null | party_profit.py | GYosifov88/Python-Fundamentals | b46ba2822bd2dac6ff46830c6a520e559b448442 | [
"MIT"
] | null | null | null | import math
group_size = int(input())
days = int(input())
coins = 0
coin_per_person = 0
for i in range(1, days + 1):
if i % 10 == 0:
group_size -= 2
if i % 15 == 0:
group_size += 5
coins += 50 - (group_size * 2)
if i % 3 == 0:
coins -= 3 * group_size
if i % 5 == 0:
coins += 20 * group_size
if i % 3 == 0 and i % 5 == 0:
coins -= 2 * group_size
coin_per_person = math.floor(coins / group_size)
print(f"{group_size} companions received {coin_per_person} coins each.")
084fcd7ee8d6d66cf76c2493f01c53b4cb0c55b2 | 4,691 | py | Python | old/pyllusion/Delboeuf.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | 17 | 2020-09-30T07:00:57.000Z | 2022-03-01T19:01:27.000Z | old/pyllusion/Delboeuf.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | 11 | 2020-10-05T09:43:26.000Z | 2022-03-16T07:08:04.000Z | old/pyllusion/Delboeuf.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | 3 | 2021-01-15T07:31:09.000Z | 2022-02-11T06:46:23.000Z | """
The Delboeuf illusion.
"""
import numpy as np
import pandas as pd
import neuropsydia as n
def delboeuf_compute(difficulty=0, illusion=0, minimum_size=2.5, distance=5, distance_auto=True, background_color="white"):
"""
Delboeuf Illusion
Parameters
----------
    difficulty : float
        Relative size difference between the two inner circles (0 means equal
        sizes; positive values enlarge the left inner circle, negative values
        enlarge the right one).
    illusion : float
        Strength of the bias applied to the outer circles (0 means no bias;
        positive values produce an incongruent illusion, negative values a
        congruent one).
minimum_size : float
Size of smaller inner circle.
distance : float
distance between circles.
distance_auto : bool
If true, distance is between edges (fixed spacing), if false, between centers (fixed location).
background_color : str
Background color.
"""
if difficulty > 0: # if right is smaller
inner_size_right = minimum_size
inner_size_left = inner_size_right + inner_size_right * abs(difficulty)
outer_size_left = inner_size_left + (inner_size_left/10)
outer_size_right = inner_size_right + (inner_size_right/10)
if illusion > 0:
illusion_type = "Incongruent"
outer_size_left = outer_size_left + outer_size_left * abs(illusion)
else:
illusion_type = "Congruent"
outer_size_right = outer_size_right + outer_size_right * abs(illusion)
else:
inner_size_left = minimum_size
inner_size_right = inner_size_left + inner_size_left * abs(difficulty)
outer_size_left = inner_size_left + (inner_size_left/10)
outer_size_right = inner_size_right + (inner_size_right/10)
if illusion > 0:
illusion_type = "Incongruent"
outer_size_right = outer_size_right + outer_size_right * abs(illusion)
else:
illusion_type = "Congruent"
outer_size_left = outer_size_left + outer_size_left * abs(illusion)
inner_size_smaller = min([inner_size_left, inner_size_right])
inner_size_larger = max([inner_size_left, inner_size_right])
outer_size_smaller = min([outer_size_left, outer_size_right])
outer_size_larger = max([outer_size_left, outer_size_right])
if distance_auto is False:
distance_centers = distance
position_left = 0 - distance_centers/2
position_right = 0 + distance_centers/2
distance_edges_inner = distance_centers - (inner_size_left/2 + inner_size_right/2)
distance_edges_outer = distance_centers - (outer_size_left/2 + outer_size_right/2)
else:
distance_edges_outer = distance
distance_centers = distance_edges_outer + (inner_size_left/2 + inner_size_right/2)
distance_edges_inner = distance_centers - (outer_size_left/2 + outer_size_right/2)
position_left = 0-distance_centers/2
position_right = 0+distance_centers/2
parameters = {"Illusion": illusion,
"Illusion_Absolute": abs(illusion),
"Illusion_Type": illusion_type,
"Difficulty": difficulty,
"Difficulty_Absolute": abs(difficulty),
"Difficulty_Ratio": inner_size_larger/inner_size_smaller,
"Difficulty_Diff": inner_size_larger-inner_size_smaller,
"Size_Inner_Left": inner_size_left,
"Size_Inner_Right": inner_size_right,
"Size_Outer_Left": outer_size_left,
"Size_Outer_Right": outer_size_right,
"Distance_Centers": distance_centers,
"Distance_Edges_Inner": distance_edges_inner,
"Distance_Edges_Outer": distance_edges_outer,
"Auto_Distance": distance_auto,
"Size_Inner_Smaller": inner_size_smaller,
"Size_Inner_Larger": inner_size_larger,
"Size_Outer_Smaller": outer_size_smaller,
"Size_Outer_Larger": outer_size_larger,
"Position_Left": position_left,
"Position_Right": position_right,
"Background_Color": background_color
}
return(parameters)
def delboeuf_display(parameters=None):
"""
"""
n.circle(x=parameters["Position_Left"], size=parameters["Size_Outer_Left"], fill_color=parameters["Background_Color"], line_color="black", thickness=0.05)
n.circle(x=parameters["Position_Left"], size=parameters["Size_Inner_Left"], fill_color="red", line_color="white")
n.circle(x=parameters["Position_Right"], size=parameters["Size_Outer_Right"], fill_color=parameters["Background_Color"], line_color="black", thickness=0.05)
n.circle(x=parameters["Position_Right"], size=parameters["Size_Inner_Right"], fill_color="red", line_color="white")
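# Hypothetical usage sketch (an assumption, not part of the original module):
# delboeuf_display() draws into an already-open neuropsydia window, so a
# session would look roughly like the following; the exact window-management
# calls (start/refresh/close) are assumptions about the neuropsydia API.
#
#   n.start()
#   params = delboeuf_compute(difficulty=0.5, illusion=1)
#   delboeuf_display(params)
#   n.refresh()
#   n.close()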
| 35.80916 | 160 | 0.66084 |
9032b144ba6baa29b3c290e5db821f6a24e91947 | 5,658 | py | Python | MDT_ProstateX/experiments/exp0/default_configs.py | ihsgnef/prostate_lesion_detection | 94b2c3a80f7c3ed311cbe2d497c7283ea9e7bc92 | [
"MIT"
] | null | null | null | MDT_ProstateX/experiments/exp0/default_configs.py | ihsgnef/prostate_lesion_detection | 94b2c3a80f7c3ed311cbe2d497c7283ea9e7bc92 | [
"MIT"
] | null | null | null | MDT_ProstateX/experiments/exp0/default_configs.py | ihsgnef/prostate_lesion_detection | 94b2c3a80f7c3ed311cbe2d497c7283ea9e7bc92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Default Configurations script. Avoids changing configs of all experiments if general settings are to be changed."""
import os
class DefaultConfigs:
def __init__(self, model, server_env=False, dim=2):
self.server_env = server_env
#########################
# I/O #
#########################
self.model = model
self.dim = dim
# int [0 < dataset_size]. select n patients from dataset for prototyping.
self.select_prototype_subset = None
# some default paths.
self.backbone_path = 'models/backbone.py'
self.source_dir = os.path.dirname(os.path.realpath(__file__)) #current dir.
self.input_df_name = 'info_df.pickle'
self.model_path = 'models/{}.py'.format(self.model)
if server_env:
self.source_dir = '/home/jaegerp/code/mamma_code/medicaldetectiontoolkit'
#########################
# Data Loader #
#########################
#random seed for fold_generator and batch_generator.
self.seed = 0
#number of threads for multithreaded batch generation.
self.n_workers = 16 if server_env else os.cpu_count()-1
# if True, segmentation losses learn all categories, else only foreground vs. background.
self.class_specific_seg_flag = False
#########################
# Architecture #
#########################
self.weight_decay = 0.0
# what weight or layer types to exclude from weight decay. options: ["bias", "norm"].
self.exclude_from_wd = ("norm",)
# nonlinearity to be applied after convs with nonlinearity. one of 'relu' or 'leaky_relu'
self.relu = 'relu'
# if True initializes weights as specified in model script. else use default Pytorch init.
self.custom_init = False
# if True adds high-res decoder levels to feature pyramid: P1 + P0. (e.g. set to true in retina_unet configs)
self.operate_stride1 = False
#########################
# Schedule #
#########################
# number of folds in cross validation.
self.n_cv_splits = 5
# number of probabilistic samples in validation.
self.n_probabilistic_samples = None
#########################
# Testing / Plotting #
#########################
# perform mirroring at test time. (only XY. Z not done to not blow up predictions times).
self.test_aug = True
# if True, test data lies in a separate folder and is not part of the cross validation.
self.hold_out_test_set = False
# if hold_out_test_set provided, ensemble predictions over models of all trained cv-folds.
# implications for hold-out test sets: if True, evaluate folds separately on the test set, aggregate only the
# evaluations. if False, aggregate the raw predictions across all folds, then evaluate.
self.ensemble_folds = False
# color specifications for all box_types in prediction_plot.
self.box_color_palette = {'det': 'b', 'gt': 'r', 'neg_class': 'purple',
'prop': 'w', 'pos_class': 'g', 'pos_anchor': 'c', 'neg_anchor': 'c'}
# scan over confidence score in evaluation to optimize it on the validation set.
self.scan_det_thresh = False
# plots roc-curves / prc-curves in evaluation.
self.plot_stat_curves = False
# evaluates average precision per image and averages over images. instead computing one ap over data set.
self.per_patient_ap = False
# threshold for clustering 2D box predictions to 3D Cubes. Overlap is computed in XY.
self.merge_3D_iou = 0.1
# monitor any value from training.
self.n_monitoring_figures = 1
# dict to assign specific plot_values to monitor_figures > 0. {1: ['class_loss'], 2: ['kl_loss', 'kl_sigmas']}
self.assign_values_to_extra_figure = {}
# save predictions to csv file in experiment dir.
self.save_preds_to_csv = True
# select a maximum number of patient cases to test. number or "all" for all
self.max_test_patients = "all"
#########################
# MRCNN #
#########################
# if True, mask loss is not applied. used for data sets, where no pixel-wise annotations are provided.
self.frcnn_mode = False
# if True, unmolds masks in Mask R-CNN to full-res for plotting/monitoring.
self.return_masks_in_val = False
self.return_masks_in_test = False # needed if doing instance segmentation. evaluation not yet implemented.
# add P6 to Feature Pyramid Network.
self.sixth_pooling = False
# for probabilistic detection
self.n_latent_dims = 0
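# Illustrative note (an assumption, not part of the original file): experiment
# configs in this setup typically subclass DefaultConfigs and override fields,
# along the lines of:
#
#   class Configs(DefaultConfigs):
#       def __init__(self, server_env=False):
#           super().__init__('retina_unet', server_env, dim=3)
#           self.n_workers = 4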
| 39.291667 | 118 | 0.605691 |
9cd81ec46ebbc392faeba0ec25c403986377251a | 47,226 | py | Python | edb/pgsql/compiler/pathctx.py | aaronbrighton/edgedb | 4aacd1d4e248ae0d483c075ba93fc462da291ef4 | [
"Apache-2.0"
] | 1 | 2019-11-14T07:08:55.000Z | 2019-11-14T07:08:55.000Z | edb/pgsql/compiler/pathctx.py | aaronbrighton/edgedb | 4aacd1d4e248ae0d483c075ba93fc462da291ef4 | [
"Apache-2.0"
] | null | null | null | edb/pgsql/compiler/pathctx.py | aaronbrighton/edgedb | 4aacd1d4e248ae0d483c075ba93fc462da291ef4 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helpers to manage statement path contexts."""
from __future__ import annotations
import functools
from typing import *
from edb.common import enum as s_enum
from edb.ir import ast as irast
from edb.ir import typeutils as irtyputils
from edb.schema import pointers as s_pointers
from edb.pgsql import ast as pgast
from edb.pgsql import types as pg_types
from . import astutils
from . import context
from . import output
class PathAspect(s_enum.StrEnum):
IDENTITY = 'identity'
VALUE = 'value'
SOURCE = 'source'
SERIALIZED = 'serialized'
# A mapping of more specific aspect -> less specific aspect for objects
OBJECT_ASPECT_SPECIFICITY_MAP = {
PathAspect.IDENTITY: PathAspect.VALUE,
PathAspect.VALUE: PathAspect.SOURCE,
PathAspect.SERIALIZED: PathAspect.SOURCE,
}
# A mapping of more specific aspect -> less specific aspect for primitives
PRIMITIVE_ASPECT_SPECIFICITY_MAP = {
PathAspect.SERIALIZED: PathAspect.VALUE,
}
def get_less_specific_aspect(
path_id: irast.PathId,
aspect: str,
) -> Optional[str]:
if path_id.is_objtype_path():
mapping = OBJECT_ASPECT_SPECIFICITY_MAP
else:
mapping = PRIMITIVE_ASPECT_SPECIFICITY_MAP
less_specific_aspect = mapping.get(PathAspect(aspect))
if less_specific_aspect is not None:
return str(less_specific_aspect)
else:
return None
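# In practice (restating the mappings above): for an object path 'identity'
# degrades to 'value' and 'value' to 'source', while for a primitive path
# only 'serialized' degrades, to 'value'.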
def map_path_id(
path_id: irast.PathId,
path_id_map: Dict[irast.PathId, irast.PathId]) -> irast.PathId:
    # Try the longest (most specific) outer prefixes first.
    sorted_map = sorted(
        path_id_map.items(), key=lambda kv: len(kv[0]), reverse=True)
for outer_id, inner_id in sorted_map:
new_path_id = path_id.replace_prefix(
outer_id, inner_id, permissive_ptr_path=True)
if new_path_id != path_id:
path_id = new_path_id
break
return path_id
def reverse_map_path_id(
path_id: irast.PathId,
path_id_map: Dict[irast.PathId, irast.PathId]) -> irast.PathId:
for outer_id, inner_id in path_id_map.items():
new_path_id = path_id.replace_prefix(inner_id, outer_id)
if new_path_id != path_id:
path_id = new_path_id
break
return path_id
def put_path_id_map(
rel: pgast.Query,
outer_path_id: irast.PathId,
inner_path_id: irast.PathId,
) -> None:
inner_path_id = map_path_id(inner_path_id, rel.view_path_id_map)
rel.view_path_id_map[outer_path_id] = inner_path_id
def get_path_var(
rel: pgast.Query, path_id: irast.PathId, *,
flavor: str='normal',
aspect: str, env: context.Environment) -> pgast.BaseExpr:
"""Return a value expression for a given *path_id* in a given *rel*."""
if isinstance(rel, pgast.CommonTableExpr):
rel = rel.query
if flavor == 'normal':
# Check if we already have a var, before remapping the path_id.
# This is useful for serialized aspect disambiguation in tuples,
# since process_set_as_tuple() records serialized vars with
# original path_id.
if (path_id, aspect) in rel.path_namespace:
return rel.path_namespace[path_id, aspect]
if rel.view_path_id_map:
path_id = map_path_id(path_id, rel.view_path_id_map)
if (path_id, aspect) in rel.path_namespace:
return rel.path_namespace[path_id, aspect]
elif flavor == 'packed':
if (
rel.packed_path_namespace
and (path_id, aspect) in rel.packed_path_namespace
):
return rel.packed_path_namespace[path_id, aspect]
if astutils.is_set_op_query(rel):
return _get_path_var_in_setop(
rel, path_id, aspect=aspect, flavor=flavor, env=env)
ptrref = path_id.rptr()
ptrref_dir = path_id.rptr_dir()
is_type_intersection = path_id.is_type_intersection_path()
src_path_id: Optional[irast.PathId] = None
if ptrref is not None and not is_type_intersection:
ptr_info = pg_types.get_ptrref_storage_info(
ptrref, resolve_type=False, link_bias=False, allow_missing=True)
ptr_dir = path_id.rptr_dir()
is_inbound = ptr_dir == s_pointers.PointerDirection.Inbound
if is_inbound:
src_path_id = path_id
else:
src_path_id = path_id.src_path()
assert src_path_id is not None
src_rptr = src_path_id.rptr()
if (
irtyputils.is_id_ptrref(ptrref)
and (
src_rptr is None
or ptrref_dir is not s_pointers.PointerDirection.Inbound
)
):
# When there is a reference to the id property of
# an object which is linked to by a link stored
# inline, we want to route the reference to the
# inline attribute. For example,
# Foo.__type__.id gets resolved to the Foo.__type__
# column. This can only be done if Foo is visible
# in scope, and Foo.__type__ is not a computable.
pid = src_path_id
while pid.is_type_intersection_path():
# Skip type intersection step(s).
src_pid = pid.src_path()
if src_pid is not None:
src_rptr = src_pid.rptr()
pid = src_pid
else:
break
if (src_rptr is not None
and not irtyputils.is_computable_ptrref(src_rptr)
and env.ptrref_source_visibility.get(src_rptr)):
src_ptr_info = pg_types.get_ptrref_storage_info(
src_rptr, resolve_type=False, link_bias=False,
allow_missing=True)
if (src_ptr_info
and src_ptr_info.table_type == 'ObjectType'):
src_path_id = src_path_id.src_path()
ptr_info = src_ptr_info
else:
ptr_info = None
ptr_dir = None
var: Optional[pgast.BaseExpr]
if ptrref is None:
if len(path_id) == 1:
# This is an scalar set derived from an expression.
src_path_id = path_id
elif ptrref.source_ptr is not None:
if ptr_info and ptr_info.table_type != 'link' and not is_inbound:
# This is a link prop that is stored in source rel,
# step back to link source rvar.
_prefix_pid = path_id.src_path()
assert _prefix_pid is not None
src_path_id = _prefix_pid.src_path()
elif is_type_intersection:
src_path_id = path_id
assert src_path_id is not None
# Find which rvar will have path_id as an output
src_aspect, rel_rvar, found_path_var = _find_rel_rvar(
rel, path_id, src_path_id, aspect=aspect, flavor=flavor, env=env)
if found_path_var:
return found_path_var
if rel_rvar is None:
raise LookupError(
f'there is no range var for '
f'{src_path_id} {src_aspect} in {rel}')
if isinstance(rel_rvar, pgast.IntersectionRangeVar):
if (
(path_id.is_objtype_path() and src_path_id == path_id)
or (ptrref is not None and irtyputils.is_id_ptrref(ptrref))
):
rel_rvar = rel_rvar.component_rvars[-1]
else:
# Intersection rvars are basically JOINs of the relevant
# parts of the type intersection, and so we need to make
# sure we pick the correct component relation of that JOIN.
rel_rvar = _find_rvar_in_intersection_by_typeref(
path_id,
rel_rvar.component_rvars,
)
source_rel = rel_rvar.query
if isinstance(ptrref, irast.PointerRef) and rel_rvar.typeref is not None:
assert ptrref_dir
actual_ptrref = irtyputils.maybe_find_actual_ptrref(
rel_rvar.typeref, ptrref, dir=ptrref_dir)
if actual_ptrref is not None:
ptr_info = pg_types.get_ptrref_storage_info(
actual_ptrref, resolve_type=False, link_bias=False)
outvar = get_path_output(
source_rel, path_id, ptr_info=ptr_info,
aspect=aspect, flavor=flavor, env=env)
var = astutils.get_rvar_var(rel_rvar, outvar)
put_path_var(rel, path_id, var, aspect=aspect, flavor=flavor, env=env)
if isinstance(var, pgast.TupleVar):
for element in var.elements:
put_path_var_if_not_exists(rel, element.path_id, element.val,
flavor=flavor,
aspect=aspect, env=env)
return var
def _find_rel_rvar(
rel: pgast.Query, path_id: irast.PathId, src_path_id: irast.PathId, *,
aspect: str, flavor: str, env: context.Environment,
) -> Tuple[str, Optional[pgast.PathRangeVar], Optional[pgast.BaseExpr]]:
"""Rummage around rel looking for an appropriate rvar for path_id.
Somewhat unfortunately, some checks to find the actual path var
(in a particular tuple case) need to occur in the middle of the
rvar rel search, so we can also find the actual path var in passing.
"""
src_aspect = aspect
rel_rvar = maybe_get_path_rvar(
rel, path_id, aspect=aspect, flavor=flavor, env=env)
if rel_rvar is None:
alt_aspect = get_less_specific_aspect(path_id, aspect)
if alt_aspect is not None:
rel_rvar = maybe_get_path_rvar(
rel, path_id, aspect=alt_aspect, env=env)
else:
alt_aspect = None
if rel_rvar is None:
if flavor == 'packed':
src_aspect = aspect
elif src_path_id.is_objtype_path():
src_aspect = 'source'
else:
src_aspect = aspect
if src_path_id.is_tuple_path():
if src_aspect == 'identity':
src_aspect = 'value'
if (var := _find_in_output_tuple(
rel, path_id, src_aspect, env=env)):
return src_aspect, None, var
rel_rvar = maybe_get_path_rvar(
rel, src_path_id, aspect=src_aspect, env=env)
if rel_rvar is None:
_src_path_id_prefix = src_path_id.src_path()
if _src_path_id_prefix is not None:
rel_rvar = maybe_get_path_rvar(
rel, _src_path_id_prefix, aspect=src_aspect, env=env)
else:
rel_rvar = maybe_get_path_rvar(
rel, src_path_id, aspect=src_aspect, env=env)
if (rel_rvar is None
and src_aspect != 'source' and path_id != src_path_id):
rel_rvar = maybe_get_path_rvar(
rel, src_path_id, aspect='source', env=env)
if rel_rvar is None and alt_aspect is not None:
# There is no source range var for the requested aspect,
# check if there is a cached var with less specificity.
assert flavor == 'normal'
var = rel.path_namespace.get((path_id, alt_aspect))
if var is not None:
put_path_var(rel, path_id, var, aspect=aspect, env=env)
return src_aspect, None, var
return src_aspect, rel_rvar, None
def _get_path_var_in_setop(
rel: pgast.Query, path_id: irast.PathId, *,
aspect: str, flavor: str, env: context.Environment,
) -> pgast.BaseExpr:
test_vals = []
if aspect in ('value', 'serialized'):
test_cb = functools.partial(
maybe_get_path_var, env=env, path_id=path_id, aspect=aspect)
test_vals = astutils.for_each_query_in_set(rel, test_cb)
# In order to ensure output balance, we only want to output
# a TupleVar if *every* subquery outputs a TupleVar.
# If some but not all output TupleVars, we need to fix up
# the output TupleVars by outputting them as a real tuple.
# This is needed for cases like `(Foo.bar UNION (1,2))`.
if (
any(isinstance(x, pgast.TupleVarBase) for x in test_vals)
and not all(isinstance(x, pgast.TupleVarBase) for x in test_vals)
):
def fixup(subrel: pgast.Query) -> None:
cur = get_path_var_and_fix_tuple(
subrel, env=env, path_id=path_id, aspect=aspect)
assert flavor == 'normal'
if isinstance(cur, pgast.TupleVarBase):
new = output.output_as_value(cur, env=env)
new_path_id = map_path_id(path_id, subrel.view_path_id_map)
put_path_var(
subrel, new_path_id, new,
force=True, env=env, aspect=aspect)
astutils.for_each_query_in_set(rel, fixup)
# We disable the find_path_output optimization when doing
# UNIONs to avoid situations where they have different numbers
# of columns.
cb = functools.partial(
get_path_output_or_null,
env=env,
disable_output_fusion=True,
path_id=path_id,
aspect=aspect,
flavor=flavor)
outputs = astutils.for_each_query_in_set(rel, cb)
counts = astutils.for_each_query_in_set(
rel, lambda x: len(x.target_list))
assert counts == [counts[0]] * len(counts)
first: Optional[pgast.OutputVar] = None
optional = False
all_null = True
nullable = False
for colref, is_null in outputs:
if colref.nullable:
nullable = True
if first is None:
first = colref
if is_null:
optional = True
else:
all_null = False
if all_null:
raise LookupError(
f'cannot find refs for '
f'path {path_id} {aspect} in {rel}')
if first is None:
raise AssertionError(
f'union did not produce any outputs')
# Path vars produced by UNION expressions can be "optional",
# i.e the record is accepted as-is when such var is NULL.
# This is necessary to correctly join heterogeneous UNIONs.
var = astutils.strip_output_var(
first, optional=optional, nullable=optional or nullable)
put_path_var(rel, path_id, var, aspect=aspect, flavor=flavor, env=env)
return var
def _find_rvar_in_intersection_by_typeref(
path_id: irast.PathId,
component_rvars: Sequence[pgast.PathRangeVar],
) -> pgast.PathRangeVar:
assert component_rvars
pid_rptr = path_id.rptr()
if pid_rptr is not None:
if pid_rptr.material_ptr is not None:
pid_rptr = pid_rptr.material_ptr
tref = pid_rptr.out_source
else:
tref = path_id.target
for component_rvar in component_rvars:
if (
component_rvar.typeref is not None
and irtyputils.type_contains(tref, component_rvar.typeref)
):
rel_rvar = component_rvar
break
else:
raise AssertionError(
f'no rvar in intersection matches path id {path_id}'
)
return rel_rvar
def _find_in_output_tuple(
rel: pgast.Query,
path_id: irast.PathId,
aspect: str,
env: context.Environment) -> Optional[pgast.BaseExpr]:
"""Try indirecting a source tuple already present as an output.
Normally tuple indirections are handled by
process_set_as_tuple_indirection, but UNIONing an explicit tuple with a
tuple coming from a base relation (like `(Foo.bar UNION (1,2)).0`)
can lead to us looking for a tuple path in relations that only have
the actual full tuple.
(See test_edgeql_coalesce_tuple_{08,09}).
We handle this by checking whether some prefix of the tuple path
is present in the path_outputs.
This is sufficient because the relevant cases are all caused by
set ops, and the "fixup" done in set op cases ensures that the
tuple will be already present.
"""
steps = []
src_path_id = path_id.src_path()
ptrref = path_id.rptr()
while (
src_path_id
and src_path_id.is_tuple_path()
and isinstance(ptrref, irast.TupleIndirectionPointerRef)
):
steps.append((ptrref.shortname.name, src_path_id))
if (
(var := rel.path_namespace.get((src_path_id, aspect)))
and not isinstance(var, pgast.TupleVarBase)
):
for name, src in reversed(steps):
var = astutils.tuple_getattr(var, src.target, name)
put_path_var(rel, path_id, var, aspect=aspect, env=env)
return var
ptrref = src_path_id.rptr()
src_path_id = src_path_id.src_path()
return None
def get_path_identity_var(
rel: pgast.Query, path_id: irast.PathId, *,
env: context.Environment) -> pgast.BaseExpr:
return get_path_var(rel, path_id, aspect='identity', env=env)
def get_path_value_var(
rel: pgast.Query, path_id: irast.PathId, *,
env: context.Environment) -> pgast.BaseExpr:
return get_path_var(rel, path_id, aspect='value', env=env)
def is_relation_rvar(
rvar: pgast.BaseRangeVar) -> bool:
return (
isinstance(rvar, pgast.RelRangeVar) and
is_terminal_relation(rvar.query)
)
def is_terminal_relation(
rel: pgast.BaseRelation) -> bool:
return isinstance(rel, (pgast.Relation, pgast.NullRelation))
def is_values_relation(
rel: pgast.BaseRelation) -> bool:
return bool(getattr(rel, 'values', None))
def maybe_get_path_var(
rel: pgast.Query, path_id: irast.PathId, *, aspect: str,
env: context.Environment) -> Optional[pgast.BaseExpr]:
try:
return get_path_var(rel, path_id, aspect=aspect, env=env)
except LookupError:
return None
def maybe_get_path_identity_var(
rel: pgast.Query,
path_id: irast.PathId, *,
env: context.Environment) -> Optional[pgast.BaseExpr]:
try:
return get_path_var(rel, path_id, aspect='identity', env=env)
except LookupError:
return None
def maybe_get_path_value_var(
rel: pgast.Query,
path_id: irast.PathId, *,
env: context.Environment) -> Optional[pgast.BaseExpr]:
try:
return get_path_var(rel, path_id, aspect='value', env=env)
except LookupError:
return None
def maybe_get_path_serialized_var(
rel: pgast.Query,
path_id: irast.PathId, *,
env: context.Environment) -> Optional[pgast.BaseExpr]:
try:
return get_path_var(rel, path_id, aspect='serialized', env=env)
except LookupError:
return None
def put_path_var(
rel: pgast.BaseRelation, path_id: irast.PathId, var: pgast.BaseExpr, *,
aspect: str, flavor: str='normal', force: bool=False,
env: context.Environment) -> None:
if flavor == 'packed':
if rel.packed_path_namespace is None:
rel.packed_path_namespace = {}
path_namespace = rel.packed_path_namespace
else:
path_namespace = rel.path_namespace
if (path_id, aspect) in path_namespace and not force:
raise KeyError(
f'{aspect} of {path_id} is already present in {rel}')
path_namespace[path_id, aspect] = var
def put_path_var_if_not_exists(
rel: pgast.Query, path_id: irast.PathId, var: pgast.BaseExpr, *,
flavor: str='normal',
aspect: str, env: context.Environment) -> None:
try:
put_path_var(rel, path_id, var, aspect=aspect, flavor=flavor, env=env)
except KeyError:
pass
def put_path_identity_var(
rel: pgast.BaseRelation, path_id: irast.PathId, var: pgast.BaseExpr, *,
force: bool=False, env: context.Environment) -> None:
put_path_var(rel, path_id, var, aspect='identity', force=force, env=env)
def put_path_value_var(
rel: pgast.BaseRelation, path_id: irast.PathId, var: pgast.BaseExpr, *,
force: bool = False, env: context.Environment) -> None:
put_path_var(rel, path_id, var, aspect='value', force=force, env=env)
def put_path_serialized_var(
rel: pgast.BaseRelation, path_id: irast.PathId, var: pgast.BaseExpr, *,
force: bool = False, env: context.Environment) -> None:
put_path_var(rel, path_id, var, aspect='serialized', force=force, env=env)
def put_path_value_var_if_not_exists(
rel: pgast.BaseRelation, path_id: irast.PathId, var: pgast.BaseExpr, *,
force: bool = False, env: context.Environment) -> None:
try:
put_path_var(rel, path_id, var, aspect='value', force=force, env=env)
except KeyError:
pass
def put_path_serialized_var_if_not_exists(
rel: pgast.BaseRelation, path_id: irast.PathId, var: pgast.BaseExpr, *,
force: bool = False, env: context.Environment) -> None:
try:
put_path_var(rel, path_id, var, aspect='serialized',
force=force, env=env)
except KeyError:
pass
def put_path_bond(
stmt: pgast.BaseRelation, path_id: irast.PathId) -> None:
stmt.path_scope.add(path_id)
def put_rvar_path_bond(
rvar: pgast.PathRangeVar, path_id: irast.PathId) -> None:
put_path_bond(rvar.query, path_id)
def get_path_output_alias(
path_id: irast.PathId, aspect: str, *,
env: context.Environment) -> str:
rptr = path_id.rptr()
if rptr is not None:
alias_base = rptr.shortname.name
elif path_id.is_collection_path():
assert path_id.target.collection is not None
alias_base = path_id.target.collection
else:
alias_base = path_id.target_name_hint.name
return env.aliases.get(f'{alias_base}_{aspect}')
def get_rvar_path_var(
rvar: pgast.PathRangeVar, path_id: irast.PathId, aspect: str, *,
flavor: str='normal',
env: context.Environment) -> pgast.OutputVar:
"""Return ColumnRef for a given *path_id* in a given *range var*."""
if (path_id, aspect) in (outs := rvar.query.get_path_outputs(flavor)):
outvar = outs[path_id, aspect]
elif is_relation_rvar(rvar):
assert flavor == 'normal'
ptr_si: Optional[pg_types.PointerStorageInfo]
if (
(rptr := path_id.rptr()) is not None
and rvar.typeref is not None
and rvar.query.path_id
and rvar.query.path_id != path_id
and (not rvar.query.path_id.is_type_intersection_path()
or rvar.query.path_id.src_path() != path_id)
):
ptrref_dir = path_id.rptr_dir()
assert ptrref_dir
actual_rptr = irtyputils.maybe_find_actual_ptrref(
rvar.typeref, rptr, dir=ptrref_dir
)
if actual_rptr is not None:
ptr_si = pg_types.get_ptrref_storage_info(actual_rptr)
else:
ptr_si = None
else:
ptr_si = None
outvar = _get_rel_path_output(
rvar.query, path_id, ptr_info=ptr_si,
aspect=aspect, flavor=flavor, env=env)
else:
# Range is another query.
outvar = get_path_output(
rvar.query, path_id, aspect=aspect, flavor=flavor, env=env)
return astutils.get_rvar_var(rvar, outvar)
def put_rvar_path_output(
rvar: pgast.PathRangeVar, path_id: irast.PathId, aspect: str,
var: pgast.OutputVar, *, env: context.Environment) -> None:
_put_path_output_var(rvar.query, path_id, aspect, var, env=env)
def get_rvar_path_identity_var(
rvar: pgast.PathRangeVar, path_id: irast.PathId, *,
env: context.Environment) -> pgast.OutputVar:
return get_rvar_path_var(rvar, path_id, aspect='identity', env=env)
def maybe_get_rvar_path_identity_var(
rvar: pgast.PathRangeVar, path_id: irast.PathId, *,
env: context.Environment) -> Optional[pgast.OutputVar]:
try:
return get_rvar_path_var(rvar, path_id, aspect='identity', env=env)
except LookupError:
return None
def get_rvar_path_value_var(
rvar: pgast.PathRangeVar, path_id: irast.PathId, *,
env: context.Environment) -> pgast.OutputVar:
return get_rvar_path_var(rvar, path_id, aspect='value', env=env)
def maybe_get_rvar_path_value_var(
rvar: pgast.PathRangeVar, path_id: irast.PathId, *,
env: context.Environment) -> Optional[pgast.OutputVar]:
try:
return get_rvar_path_var(rvar, path_id, aspect='value', env=env)
except LookupError:
return None
def get_rvar_output_var_as_col_list(
rvar: pgast.PathRangeVar, outvar: pgast.OutputVar, aspect: str, *,
env: context.Environment) -> List[pgast.OutputVar]:
cols: List[pgast.OutputVar]
if isinstance(outvar, pgast.ColumnRef):
cols = [outvar]
elif isinstance(outvar, pgast.TupleVarBase):
cols = []
for el in outvar.elements:
col = get_rvar_path_var(rvar, el.path_id, aspect=aspect, env=env)
cols.extend(get_rvar_output_var_as_col_list(
rvar, col, aspect=aspect, env=env))
else:
raise RuntimeError(f'unexpected OutputVar: {outvar!r}')
return cols
def put_path_rvar(
stmt: pgast.Query, path_id: irast.PathId, rvar: pgast.PathRangeVar, *,
flavor: str='normal',
aspect: str, env: context.Environment) -> None:
assert isinstance(path_id, irast.PathId)
stmt.get_rvar_map(flavor)[path_id, aspect] = rvar
    # Normally, masked paths (i.e. paths that are only behind a fence below)
    # will not be exposed in a query namespace.  However, when the masked
    # path is the *main* path of a set, it must still be exposed, but no
    # further than the immediate parent query.
try:
query = rvar.query
except NotImplementedError:
pass
else:
if path_id in query.path_id_mask:
stmt.path_id_mask.add(path_id)
def put_path_value_rvar(
stmt: pgast.Query, path_id: irast.PathId, rvar: pgast.PathRangeVar, *,
flavor: str='normal',
env: context.Environment) -> None:
put_path_rvar(stmt, path_id, rvar, aspect='value', flavor=flavor, env=env)
def put_path_source_rvar(
stmt: pgast.Query, path_id: irast.PathId, rvar: pgast.PathRangeVar, *,
flavor: str='normal',
env: context.Environment) -> None:
put_path_rvar(stmt, path_id, rvar, aspect='source', flavor=flavor, env=env)
def has_rvar(
stmt: pgast.Query, rvar: pgast.PathRangeVar, *,
env: context.Environment) -> bool:
return any(
rvar in set(stmt.get_rvar_map(flavor).values())
for flavor in ('normal', 'packed')
)
def put_path_rvar_if_not_exists(
stmt: pgast.Query, path_id: irast.PathId, rvar: pgast.PathRangeVar, *,
flavor: str='normal',
aspect: str, env: context.Environment) -> None:
if (path_id, aspect) not in stmt.get_rvar_map(flavor):
put_path_rvar(
stmt, path_id, rvar, aspect=aspect, flavor=flavor, env=env)
def get_path_rvar(
stmt: pgast.Query, path_id: irast.PathId, *,
flavor: str='normal',
aspect: str, env: context.Environment) -> pgast.PathRangeVar:
rvar = maybe_get_path_rvar(
stmt, path_id, aspect=aspect, flavor=flavor, env=env)
if rvar is None:
raise LookupError(
f'there is no range var for {path_id} {aspect} in {stmt}')
return rvar
def maybe_get_path_rvar(
stmt: pgast.Query, path_id: irast.PathId, *, aspect: str,
flavor: str='normal',
env: context.Environment) -> Optional[pgast.PathRangeVar]:
rvar = env.external_rvars.get((path_id, aspect))
path_rvar_map = stmt.maybe_get_rvar_map(flavor)
if path_rvar_map is not None:
if rvar is None and path_rvar_map:
rvar = path_rvar_map.get((path_id, aspect))
if rvar is None and aspect == 'identity':
rvar = path_rvar_map.get((path_id, 'value'))
return rvar
def list_path_aspects(
stmt: pgast.Query, path_id: irast.PathId, *,
env: context.Environment) -> Set[str]:
path_id = map_path_id(path_id, stmt.view_path_id_map)
aspects = set()
for rvar_path_id, aspect in stmt.path_rvar_map:
if path_id == rvar_path_id:
aspects.add(aspect)
for ns_path_id, aspect in stmt.path_namespace:
if path_id == ns_path_id:
aspects.add(aspect)
for ns_path_id, aspect in stmt.path_outputs:
if path_id == ns_path_id:
aspects.add(aspect)
return aspects
def maybe_get_path_value_rvar(
stmt: pgast.Query, path_id: irast.PathId, *,
env: context.Environment) -> Optional[pgast.BaseRangeVar]:
return maybe_get_path_rvar(stmt, path_id, aspect='value', env=env)
def _same_expr(expr1: pgast.BaseExpr, expr2: pgast.BaseExpr) -> bool:
if (isinstance(expr1, pgast.ColumnRef) and
isinstance(expr2, pgast.ColumnRef)):
return expr1.name == expr2.name
else:
return expr1 == expr2
def put_path_packed_output(
rel: pgast.EdgeQLPathInfo, path_id: irast.PathId,
val: pgast.OutputVar, aspect: str='value') -> None:
if rel.packed_path_outputs is None:
rel.packed_path_outputs = {}
rel.packed_path_outputs[path_id, aspect] = val
def _put_path_output_var(
rel: pgast.BaseRelation, path_id: irast.PathId, aspect: str,
var: pgast.OutputVar, *, flavor: str='normal',
env: context.Environment) -> None:
if flavor == 'packed':
put_path_packed_output(rel, path_id, var, aspect)
else:
rel.path_outputs[path_id, aspect] = var
def _get_rel_object_id_output(
rel: pgast.BaseRelation, path_id: irast.PathId, *,
aspect: str,
ptr_info: Optional[pg_types.PointerStorageInfo]=None,
env: context.Environment) -> pgast.OutputVar:
var = rel.path_outputs.get((path_id, aspect))
if var is not None:
return var
if isinstance(rel, pgast.NullRelation):
name = env.aliases.get('id')
val = pgast.TypeCast(
arg=pgast.NullConstant(),
type_name=pgast.TypeName(
name=('uuid',),
)
)
rel.target_list.append(pgast.ResTarget(name=name, val=val))
result = pgast.ColumnRef(name=[name], nullable=True)
else:
result = pgast.ColumnRef(name=['id'], nullable=False)
_put_path_output_var(rel, path_id, aspect, result, env=env)
return result
def _get_rel_path_output(
rel: pgast.BaseRelation, path_id: irast.PathId, *,
aspect: str,
flavor: str,
ptr_info: Optional[pg_types.PointerStorageInfo]=None,
env: context.Environment) -> pgast.OutputVar:
if path_id.is_objtype_path():
if aspect == 'identity':
aspect = 'value'
if aspect != 'value':
raise LookupError(
f'invalid request for non-scalar path {path_id} {aspect}')
if (path_id == rel.path_id or
(rel.path_id and
rel.path_id.is_type_intersection_path() and
path_id == rel.path_id.src_path())):
return _get_rel_object_id_output(
rel, path_id, aspect=aspect, env=env)
else:
if aspect == 'identity':
raise LookupError(
f'invalid request for scalar path {path_id} {aspect}')
elif aspect == 'serialized':
aspect = 'value'
var = rel.path_outputs.get((path_id, aspect))
if var is not None:
return var
ptrref = path_id.rptr()
rptr_dir = path_id.rptr_dir()
if (rptr_dir is not None and
rptr_dir != s_pointers.PointerDirection.Outbound):
raise LookupError(
f'{path_id} is an inbound pointer and cannot be resolved '
f'on a base relation')
if isinstance(rel, pgast.NullRelation):
if ptrref is not None:
target = ptrref.out_target
else:
target = path_id.target
pg_type = pg_types.pg_type_from_ir_typeref(target)
if ptr_info is not None:
name = env.aliases.get(ptr_info.column_name)
else:
name = env.aliases.get('v')
val = pgast.TypeCast(
arg=pgast.NullConstant(),
type_name=pgast.TypeName(
name=pg_type,
)
)
rel.target_list.append(pgast.ResTarget(name=name, val=val))
result = pgast.ColumnRef(name=[name], nullable=True)
else:
if ptrref is None:
raise ValueError(
f'could not resolve trailing pointer class for {path_id}')
assert not ptrref.is_computable
if ptr_info is None:
ptr_info = pg_types.get_ptrref_storage_info(
ptrref, resolve_type=False, link_bias=False)
result = pgast.ColumnRef(
name=[ptr_info.column_name],
nullable=not ptrref.required)
_put_path_output_var(rel, path_id, aspect, result, flavor=flavor, env=env)
return result
def find_path_output(
rel: pgast.BaseRelation, path_id: irast.PathId, ref: pgast.BaseExpr, *,
env: context.Environment) -> Optional[pgast.OutputVar]:
if isinstance(ref, pgast.TupleVarBase):
return None
for key, other_ref in rel.path_namespace.items():
if _same_expr(other_ref, ref) and key in rel.path_outputs:
return rel.path_outputs.get(key)
else:
return None
def get_path_output(
rel: pgast.BaseRelation, path_id: irast.PathId, *,
aspect: str, allow_nullable: bool=True,
disable_output_fusion: bool=False,
ptr_info: Optional[pg_types.PointerStorageInfo]=None,
flavor: str='normal',
env: context.Environment) -> pgast.OutputVar:
if isinstance(rel, pgast.Query) and flavor == 'normal':
path_id = map_path_id(path_id, rel.view_path_id_map)
return _get_path_output(rel, path_id=path_id, aspect=aspect,
disable_output_fusion=disable_output_fusion,
ptr_info=ptr_info, allow_nullable=allow_nullable,
flavor=flavor,
env=env)
def _get_path_output(
rel: pgast.BaseRelation, path_id: irast.PathId, *,
aspect: str, allow_nullable: bool=True,
disable_output_fusion: bool=False,
ptr_info: Optional[pg_types.PointerStorageInfo]=None,
flavor: str,
env: context.Environment) -> pgast.OutputVar:
if flavor == 'packed':
result = (rel.packed_path_outputs.get((path_id, aspect))
if rel.packed_path_outputs else None)
else:
result = rel.path_outputs.get((path_id, aspect))
if result is not None:
return result
ref: pgast.BaseExpr
alias = None
rptr = path_id.rptr()
if rptr is not None and irtyputils.is_id_ptrref(rptr) and not (
(src_path_id := path_id.src_path())
and (src_rptr := src_path_id.rptr())
and (
src_rptr.is_computable
or src_rptr.out_cardinality.is_multi()
)
):
# A value reference to Object.id is the same as a value
# reference to the Object itself. (Though we want to only
# apply this in the cases that process_set_as_path does this
# optimization, which means not for multi props.)
src_path_id = path_id.src_path()
assert src_path_id is not None
id_output = maybe_get_path_output(rel, src_path_id,
aspect='value',
allow_nullable=allow_nullable,
ptr_info=ptr_info, env=env)
if id_output is not None:
_put_path_output_var(rel, path_id, aspect, id_output, env=env)
return id_output
if is_terminal_relation(rel):
return _get_rel_path_output(rel, path_id, aspect=aspect, flavor=flavor,
ptr_info=ptr_info, env=env)
assert isinstance(rel, pgast.Query)
if is_values_relation(rel):
# The VALUES() construct seems to always expose its
# value as "column1".
alias = 'column1'
ref = pgast.ColumnRef(name=[alias])
else:
ref = get_path_var(rel, path_id, aspect=aspect, flavor=flavor, env=env)
# As an optimization, look to see if the same expression is being
# output on a different aspect. This can save us needing to do the
# work twice in the query.
other_output = find_path_output(rel, path_id, ref, env=env)
if other_output is not None and not disable_output_fusion:
_put_path_output_var(
rel, path_id, aspect, other_output, flavor=flavor, env=env)
return other_output
if isinstance(ref, pgast.TupleVarBase):
elements = []
for el in ref.elements:
el_path_id = reverse_map_path_id(
el.path_id, rel.view_path_id_map)
try:
# Similarly to get_path_var(), check for outer path_id
# first for tuple serialized var disambiguation.
element = _get_path_output(
rel, el_path_id, aspect=aspect,
disable_output_fusion=disable_output_fusion,
flavor=flavor,
allow_nullable=allow_nullable, env=env)
except LookupError:
element = get_path_output(
rel, el_path_id, aspect=aspect,
disable_output_fusion=disable_output_fusion,
flavor=flavor,
allow_nullable=allow_nullable, env=env)
elements.append(pgast.TupleElementBase(
path_id=el_path_id, name=element))
result = pgast.TupleVarBase(
elements=elements,
named=ref.named,
typeref=ref.typeref,
is_packed_multi=ref.is_packed_multi,
)
else:
if astutils.is_set_op_query(rel):
assert isinstance(ref, pgast.OutputVar)
result = astutils.strip_output_var(ref)
else:
assert isinstance(rel, pgast.ReturningQuery), \
"expected ReturningQuery"
if alias is None:
alias = get_path_output_alias(path_id, aspect, env=env)
restarget = pgast.ResTarget(
name=alias, val=ref, ser_safe=getattr(ref, 'ser_safe', False))
rel.target_list.append(restarget)
nullable = is_nullable(ref, env=env)
optional = None
is_packed_multi = False
if isinstance(ref, pgast.ColumnRef):
optional = ref.optional
is_packed_multi = ref.is_packed_multi
if nullable and not allow_nullable:
assert isinstance(rel, pgast.SelectStmt), \
"expected SelectStmt"
var = get_path_var(rel, path_id, aspect=aspect, env=env)
rel.where_clause = astutils.extend_binop(
rel.where_clause,
pgast.NullTest(arg=var, negated=True)
)
nullable = False
result = pgast.ColumnRef(
name=[alias], nullable=nullable, optional=optional,
is_packed_multi=is_packed_multi)
_put_path_output_var(rel, path_id, aspect, result, flavor=flavor, env=env)
if (path_id.is_objtype_path()
and not isinstance(result, pgast.TupleVarBase)):
equiv_aspect = None
if aspect == 'identity':
equiv_aspect = 'value'
elif aspect == 'value':
equiv_aspect = 'identity'
if (equiv_aspect is not None
and (path_id, equiv_aspect) not in rel.path_outputs):
_put_path_output_var(
rel, path_id, equiv_aspect, result, flavor=flavor, env=env)
return result
def maybe_get_path_output(
rel: pgast.BaseRelation, path_id: irast.PathId, *,
aspect: str, allow_nullable: bool=True,
disable_output_fusion: bool=False,
ptr_info: Optional[pg_types.PointerStorageInfo]=None,
flavor: str='normal',
env: context.Environment) -> Optional[pgast.OutputVar]:
try:
return get_path_output(rel, path_id=path_id, aspect=aspect,
allow_nullable=allow_nullable,
disable_output_fusion=disable_output_fusion,
flavor=flavor,
ptr_info=ptr_info, env=env)
except LookupError:
return None
def get_path_identity_output(
rel: pgast.Query, path_id: irast.PathId, *,
env: context.Environment) -> pgast.OutputVar:
return get_path_output(rel, path_id, aspect='identity', env=env)
def get_path_value_output(
rel: pgast.Query, path_id: irast.PathId, *,
env: context.Environment) -> pgast.OutputVar:
return get_path_output(rel, path_id, aspect='value', env=env)
def get_path_serialized_or_value_var(
rel: pgast.Query, path_id: irast.PathId, *,
env: context.Environment) -> pgast.BaseExpr:
ref = maybe_get_path_serialized_var(rel, path_id, env=env)
if ref is None:
ref = get_path_value_var(rel, path_id, env=env)
return ref
def fix_tuple(
rel: pgast.Query, ref: pgast.BaseExpr, *,
aspect: str, output: bool=False,
env: context.Environment) -> pgast.BaseExpr:
if (
isinstance(ref, pgast.TupleVarBase)
and not isinstance(ref, pgast.TupleVar)
):
elements = []
for el in ref.elements:
assert el.path_id is not None
val = _get_and_fix_tuple(
rel, el.path_id, aspect=aspect, output=output, env=env)
elements.append(
pgast.TupleElement(
path_id=el.path_id, name=el.name, val=val))
ref = pgast.TupleVar(
elements,
named=ref.named,
typeref=ref.typeref,
)
return ref
def _get_and_fix_tuple(
rel: pgast.Query, path_id: irast.PathId, *,
output: bool=False,
aspect: str, env: context.Environment) -> pgast.BaseExpr:
ref = (
get_path_output(rel, path_id, aspect=aspect, env=env)
if output else
get_path_var(rel, path_id, aspect=aspect, env=env)
)
return fix_tuple(rel, ref, aspect=aspect, output=output, env=env)
def get_path_var_and_fix_tuple(
rel: pgast.Query, path_id: irast.PathId, *,
aspect: str, env: context.Environment) -> pgast.BaseExpr:
return _get_and_fix_tuple(
rel, path_id, output=False, aspect=aspect, env=env)
def get_path_output_and_fix_tuple(
rel: pgast.Query, path_id: irast.PathId, *,
aspect: str, env: context.Environment) -> pgast.BaseExpr:
return _get_and_fix_tuple(
rel, path_id, output=True, aspect=aspect, env=env)
def get_path_serialized_output(
rel: pgast.Query, path_id: irast.PathId, *,
env: context.Environment) -> pgast.OutputVar:
# Serialized output is a special case, we don't
# want this behaviour to be recursive, so it
# must be kept outside of get_path_output() generic.
aspect = 'serialized'
path_id = map_path_id(path_id, rel.view_path_id_map)
result = rel.path_outputs.get((path_id, aspect))
if result is not None:
return result
ref = get_path_serialized_or_value_var(rel, path_id, env=env)
if (
isinstance(ref, pgast.TupleVarBase)
and not isinstance(ref, pgast.TupleVar)
):
elements = []
for el in ref.elements:
assert el.path_id is not None
val = get_path_serialized_or_value_var(rel, el.path_id, env=env)
elements.append(
pgast.TupleElement(
path_id=el.path_id, name=el.name, val=val))
ref = pgast.TupleVar(
elements,
named=ref.named,
typeref=ref.typeref,
)
refexpr = output.serialize_expr(ref, path_id=path_id, env=env)
alias = get_path_output_alias(path_id, aspect, env=env)
restarget = pgast.ResTarget(name=alias, val=refexpr, ser_safe=True)
rel.target_list.append(restarget)
result = pgast.ColumnRef(
name=[alias], nullable=refexpr.nullable, ser_safe=True)
_put_path_output_var(rel, path_id, aspect, result, env=env)
return result
def get_path_output_or_null(
rel: pgast.Query, path_id: irast.PathId, *,
disable_output_fusion: bool=False,
flavor: str='normal',
aspect: str, env: context.Environment) -> \
Tuple[pgast.OutputVar, bool]:
path_id = map_path_id(path_id, rel.view_path_id_map)
ref = maybe_get_path_output(
rel, path_id,
disable_output_fusion=disable_output_fusion,
flavor=flavor,
aspect=aspect, env=env)
if ref is not None:
return ref, False
alt_aspect = get_less_specific_aspect(path_id, aspect)
if alt_aspect is not None and flavor == 'normal':
# If disable_output_fusion is true, we need to be careful
# to not reuse an existing column
if disable_output_fusion:
preexisting = rel.path_outputs.pop((path_id, alt_aspect), None)
ref = maybe_get_path_output(
rel, path_id,
disable_output_fusion=disable_output_fusion,
aspect=alt_aspect, env=env)
if disable_output_fusion:
# Put back the path_output to whatever it was before
if not preexisting:
rel.path_outputs.pop((path_id, alt_aspect), None)
else:
rel.path_outputs[(path_id, alt_aspect)] = preexisting
if ref is not None:
_put_path_output_var(rel, path_id, aspect, ref, env=env)
return ref, False
alias = env.aliases.get('null')
restarget = pgast.ResTarget(
name=alias,
val=pgast.NullConstant())
rel.target_list.append(restarget)
ref = pgast.ColumnRef(name=[alias], nullable=True)
_put_path_output_var(rel, path_id, aspect, ref, flavor=flavor, env=env)
return ref, True
def is_nullable(
expr: pgast.BaseExpr, *,
env: context.Environment) -> Optional[bool]:
try:
return expr.nullable
except AttributeError:
if isinstance(expr, pgast.ReturningQuery):
tl_len = len(expr.target_list)
if tl_len != 1:
raise RuntimeError(
f'subquery used as a value returns {tl_len} columns')
return is_nullable(expr.target_list[0].val, env=env)
else:
raise
| 33.902369 | 79 | 0.628658 |
0bee298764962086d60be9a73ebdc67487e2315f | 7,782 | py | Python | deep_hk/train.py | nsblunt/deep-hk-map | aa0b647e1302fc9677400b7add1a9f889afa70fe | [
"Apache-2.0"
] | null | null | null | deep_hk/train.py | nsblunt/deep-hk-map | aa0b647e1302fc9677400b7add1a9f889afa70fe | [
"Apache-2.0"
] | null | null | null | deep_hk/train.py | nsblunt/deep-hk-map | aa0b647e1302fc9677400b7add1a9f889afa70fe | [
"Apache-2.0"
] | 1 | 2021-07-06T21:38:33.000Z | 2021-07-06T21:38:33.000Z | """Functions to train models of maps between lattice model properties."""
from deep_hk.data import MultipleDatasets
import time
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
class Infidelity(nn.Module):
r"""A loss function that uses the infidelity of a wave function,
defined as:
I(\psi_p, \psi_e) = 1 - |\langle \psi_p | \psi_e \rangle|.
In other words, unity minus the overlap of the predicted and exact
wave functions.
"""
def __init__(self):
"""Initialise the object."""
super(Infidelity, self).__init__()
def forward(self, outputs, labels):
"""Calculate the infidelity using the provided outputs and labels.
Args
----
outputs : torch tensor
The batch of output data.
labels : torch tensor
The batch of labels being targeted.
"""
dot_products = torch.sum(outputs * labels, dim=1)
loss = 1 - torch.mean(torch.abs(dot_products))
return loss
def criterion_list(criterion, outputs, labels, device):
"""Apply the criterion to outputs and labels, which are both lists.
Args
----
criterion : torch criterion object
Used to measure the loss function between predicted and targeted
data.
outputs : list of 1d torch tensors
Tensors holding the input data points in their rows. Each tensor
corresponds to data of a given input size.
labels : list of 1d torch tensors
Tensors holding the labels for the inputs in their rows. Each
tensor corresponds to data of a given input size.
device : torch.device object
The device on which outputs and labels are held (cpu or cuda).
"""
ndata = 0
loss = torch.FloatTensor([0.0])
loss = loss.to(device)
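    # Weight each tensor's contribution by its batch size so the result is an
    # average over all data points rather than over input-size groups.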
for output, label in zip(outputs, labels):
nbatch = output.size()[0]
loss += nbatch * criterion(output, label)
ndata += nbatch
loss /= ndata
return loss
def collate_as_list_of_tensors(batch):
"""Merge a batch of data into lists of 2d tensors, each tensor
corresponding to a given input size.
Args
----
batch : list of tuples
Contains the data for the batch. The list length is equal to the
number of data points in the batch. For each data point, the
tuple contains the input as its first element, and the label as
its second element.
"""
input_sizes = []
# Indexed by the input size:
inputs = {}
labels = {}
for input, label in batch:
input_size = len(input)
        if input_size in input_sizes:
            inputs[input_size].append(input)
            labels[input_size].append(label)
        else:
            input_sizes.append(input_size)
            inputs[input_size] = [input]
            labels[input_size] = [label]
# Merge the entries for a given input size into a 2d tensor.
# Do this for every input size, and store the results in a list.
inputs_list = []
labels_list = []
for input_size, v in inputs.items():
nbatch = len(v)
inputs_merged = torch.cat(v)
inputs_merged = inputs_merged.view(nbatch, input_size)
inputs_list.append(inputs_merged)
for ninput, v in labels.items():
nbatch = len(v)
        label_size = len(v[0])
labels_merged = torch.cat(v)
labels_merged = labels_merged.view(nbatch, label_size)
labels_list.append(labels_merged)
return inputs_list, labels_list
def print_header(data_validation=None):
"""Print the header for the table of data during training.
Args
----
data_validation : Tuple of Data objects
The validation data set(s).
"""
if data_validation is None:
print('# 1. Epoch' + 2*' ' + '2. Train. Loss' + 3*' ' +
'3. Epoch time')
else:
print('# 1. Epoch' + 2*' ' + '2. Train. Loss', end='')
col_ind = 3
for i in range(len(data_validation)):
print(' {:1d}. Valid. loss {:1d}'.format(col_ind, i), end='')
col_ind += 1
print(' {:1d}. Epoch time'.format(col_ind))
def train(net,
data_train,
data_validation,
criterion,
optimizer,
nepochs,
batch_size,
device=torch.device('cpu'),
save_net=False,
save_root='./network',
save_net_every=100):
"""Train the network.
Args
----
net : network object
The neural network to be trained.
data_train : Data object or tuple of Data objects
The training data set(s).
data_validation : Tuple of Data objects
The validation data set(s).
criterion : torch criterion object
Used to measure the loss function between predicted and targeted
data.
optimizer : torch optimizer object
Implements the optimization algorithm, such as Adam.
nepochs : int
The number of epochs to perform.
batch_size : int
The number of data points passed in each batch.
device : torch.device object
The device on which training will be performed.
save_net : bool
If True, save the network state to a file at regular intervals.
save_root : string
The path and root of the filenames where networks will be saved, if
save_net is True.
save_net_every : int
The frequency (in epochs) at which the network will be saved to a
file, if save_net is True.
"""
print_header(data_validation)
# For consistency, we just convert data_train to a tuple if
# it isn't already.
if not isinstance(data_train, tuple):
data_train = (data_train,)
# Concatenate data sets, in case we have multiple in use.
data_train_all = MultipleDatasets(data_train)
input_sizes = [dat.ninput for dat in data_train]
output_sizes = [dat.noutput for dat in data_train]
# Are all input/output sizes the same?
fixed_ninput = input_sizes.count(input_sizes[0]) == len(input_sizes)
fixed_noutput = output_sizes.count(output_sizes[0]) == len(output_sizes)
data_loader = DataLoader(
data_train_all,
batch_size=batch_size,
shuffle=False,
num_workers=0,
collate_fn=collate_as_list_of_tensors)
# Train the network.
for epoch in range(nepochs):
start_time = time.time()
total_loss = 0.0
nbatches = 0
for batch_inputs, batch_labels in data_loader:
optimizer.zero_grad()
# Inputs and labels are a list of 2d tensors.
# Each corresponds to all data for a given input size.
outputs = []
labels = [x.to(device) for x in batch_labels]
for inputs in batch_inputs:
# inputs is a 2d tensor for a single input size.
inputs = inputs.to(device)
outputs.append(net(inputs))
loss = criterion_list(criterion, outputs, labels, device)
loss.backward()
optimizer.step()
total_loss += loss.item()
nbatches += 1
av_loss = total_loss/nbatches
if data_validation is None:
end_time = time.time()
epoch_time = end_time - start_time
print('{:10d} {:12.8f} {:12.8f}'.format(
epoch,
av_loss,
epoch_time
), flush=True)
else:
# Calculate the loss for validation data.
valid_loss = []
for data in data_validation:
valid_inputs = data.inputs.to(device)
valid_labels = data.labels.to(device)
valid_outputs = net(valid_inputs)
loss = criterion(valid_outputs, valid_labels)
loss = loss.to(torch.device('cpu'))
loss = loss.item()
valid_loss.append(loss)
end_time = time.time()
epoch_time = end_time - start_time
print('{:10d} {:12.8f}'.format(epoch, av_loss), end='')
for loss in valid_loss:
print(' {:12.8f}'.format(loss), end='')
print(' {:12.8f}'.format(epoch_time), flush=True)
if save_net:
if epoch % save_net_every == save_net_every-1:
nepochs_done = epoch+1
filename = save_root + '_' + str(nepochs_done) + '.pt'
net.save(filename)
print(flush=True)
| 30.637795 | 74 | 0.662298 |
84c1551a392299577c69055b99393b38685bcee5 | 842 | py | Python | app/api_1_0/comment.py | Ivicel/flasky | aa6940b43a7bd32e4721db4867a789c1344b8962 | [
"Apache-2.0"
] | null | null | null | app/api_1_0/comment.py | Ivicel/flasky | aa6940b43a7bd32e4721db4867a789c1344b8962 | [
"Apache-2.0"
] | null | null | null | app/api_1_0/comment.py | Ivicel/flasky | aa6940b43a7bd32e4721db4867a789c1344b8962 | [
"Apache-2.0"
] | null | null | null | from . import api
from ..models import Comment
from flask import request, current_app, jsonify, url_for
@api.route('/comments/')
def get_comments():
page = request.args.get('page', 1, type=int)
pagination = Comment.query.order_by(Comment.timestamp.asc()).paginate(page=page,
per_page=current_app.config['POST_PER_PAGE'], error_out=False)
prev = None
if pagination.has_prev:
prev = url_for('api.get_comments', page=page - 1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_comments', page=page + 1, _external=True)
return jsonify({
'comments': [comment.to_json() for comment in pagination.items],
'prev': prev,
'next': next,
'comments_count': pagination.total
})
@api.route('/comment/<int:id>')
def get_comment(id):
comment = Comment.query.get_or_404(id)
    return jsonify(comment.to_json())
| 31.185185 | 81 | 0.729216 |
03ef9594f1a3e733d31f579d9b9c1a8a31aad654 | 918 | py | Python | test_import.py | low2by/holofan | 48e1ba50be8e969d053ecaa61dbc9c571ef6c8fc | [
"MIT"
] | null | null | null | test_import.py | low2by/holofan | 48e1ba50be8e969d053ecaa61dbc9c571ef6c8fc | [
"MIT"
] | null | null | null | test_import.py | low2by/holofan | 48e1ba50be8e969d053ecaa61dbc9c571ef6c8fc | [
"MIT"
] | null | null | null | import timeit
import math
from encoder_test import Encoder
rpm = 600.0;
radius_mm = 200.0;
def main():
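    # Time 10000 encoder reads, then use the average read time together with the
    # configured rpm and radius to estimate samples per revolution, angular travel
    # and end-point travel between samples.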
encoder = Encoder()
count = 0
total = 0
for i in range(10000):
start_time = timeit.default_timer()
encoder.readpos();
total += timeit.default_timer() - start_time
count += 1;
sample_time = total/count
print("average time for one sample(average of 100000):" + str(sample_time) + " microseconds")
rotation_time_sec = ((1/rpm)*60);
    samp_per_revolution = rotation_time_sec/sample_time
print("samples per revolution: " + str(samp_per_revolution))
angular_travel = ((rpm*360)/60)*sample_time
print("degrees turned between samples: " + str(angular_travel) + "degrees")
dist = radius_mm * angular_travel * math.pi/180
print("distance traveled at ends between samples: " + str(dist) + "mm")
if __name__ == "__main__":
main()
| 30.6 | 97 | 0.66122 |
e1fe73f58982a26c7cf022dad53d911e51e981f5 | 12,069 | py | Python | examples/lightning_base.py | IBM/LongAnswer | 4cf8f0a111a127af2ac671a6efbd28bd782cb4d9 | [
"Apache-2.0"
] | null | null | null | examples/lightning_base.py | IBM/LongAnswer | 4cf8f0a111a127af2ac671a6efbd28bd782cb4d9 | [
"Apache-2.0"
] | null | null | null | examples/lightning_base.py | IBM/LongAnswer | 4cf8f0a111a127af2ac671a6efbd28bd782cb4d9 | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import os
import random
import numpy as np
import pytorch_lightning as pl
import torch
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
logger = logging.getLogger(__name__)
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
}
def set_seed(args: argparse.Namespace):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
class BaseTransformer(pl.LightningModule):
def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", **config_kwargs):
"Initialize a model."
super().__init__()
self.hparams = hparams
cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
self.config = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
**({"num_labels": num_labels} if num_labels is not None else {}),
cache_dir=cache_dir,
**config_kwargs,
)
self.tokenizer = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
cache_dir=cache_dir,
)
self.model = MODEL_MODES[mode].from_pretrained(
self.hparams.model_name_or_path,
from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
config=self.config,
cache_dir=cache_dir,
)
self.tokenizer.add_tokens(['[SEP]'])#multi-sentence', ':snt1', ':snt2', ':snt3', ':snt4', ':snt5', ':snt5', ':snt6', ':snt7', ':snt8', ':snt9', ':root', ':ARG1', ':mod', ':op1', ':ARG0', ':ARG0-of', ':name', ':op2', ':ARG2', ':ARG1-of', ':purpose', ':prep-in', ':time', ':li', ':quant', ':unit', ':poss', ':ARG3', ':location', ':domain', ':part-of', ':manner', ':polarity', ':condition', ':ARG4', ':extent', ':time-of', ':location-of', ':op3', ':beneficiary', ':topic', ':degree', ':ARG2-of', ':example', ':extent-of', ':month', ':day', ':op4', ':ARG5', ':manner-of', ':concession', ':duration', ':path', ':mode', ':medium', ':ord', ':value', ':destination', ':source', ':direction', ':instrument-of', ':consist-of', ':dayperiod', ':frequency', ':year', ':quant-of', ':weekday', ':compared-to', ':prep-on', ':ARG3-of', ':degree-of', ':prep-as', ':instrument', ':op5', ':prep-from', ':prep-to', ':century', ':era', ':condition-of', ':op6', ':op7', ':concession-of', ':polite', ':age', ':prep-with', ':decade', ':poss-of', ':prep-without', ':prep-in-addition-to', ':accompanier', ':ord-of', ':direction-of', ':prep-against', ':prep-at', ':subevent-of', ':snt10', ':snt11', ':duration-of', ':prep-for', ':source-of', ':frequency-of', ':topic-of', ':season', ':path-of', ':op8', ':op9', ':prep-among', ':prep-on-behalf-of', ':subevent', ':part', ':ARG4-of', ':beneficiary-of', ':scale', ':example-of', ':prep-by', ':range', ':purpose-of', ':destination-of', ':op10', ':op1-of', ':name-of', ':medium-of', ':prep-along-with', ':conj-as-if', ':timezone', ':prep-under', ':accompanier-of', ':age-of', ':op11', ':op12', ':op13', ':op14', ':op15', ':prep-amid', ':prep-toward', ':prep-out-of', ':prep-into', ':domain-of', ':ARG7', ':quarter', ':ARG5-of', ':op16', ':op17', ':op18', ':op19', ':op20', ':ARG8', ':ARG9', ':calendar', ':year2', ':ARG6', ':subset-of', ':prep-with-of'])
self.model.resize_token_embeddings(len(self.tokenizer))
def is_logger(self):
return self.trainer.proc_rank <= 0
def configure_optimizers(self):
"Prepare optimizer and schedule (linear warmup and decay)"
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
self.opt = optimizer
return [optimizer]
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, second_order_closure=None):
if self.trainer.use_tpu:
xm.optimizer_step(optimizer)
else:
optimizer.step()
optimizer.zero_grad()
self.lr_scheduler.step()
def get_tqdm_dict(self):
avg_loss = getattr(self.trainer, "avg_loss", 0.0)
tqdm_dict = {"loss": "{:.3f}".format(avg_loss), "lr": self.lr_scheduler.get_last_lr()[-1]}
return tqdm_dict
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_end(self, outputs):
return self.validation_end(outputs)
def train_dataloader(self):
train_batch_size = self.hparams.train_batch_size
dataloader = self.load_dataset("train", train_batch_size)
t_total = (
(len(dataloader.dataset) // (train_batch_size * max(1, self.hparams.n_gpu)))
// self.hparams.gradient_accumulation_steps
* float(self.hparams.num_train_epochs)
)
scheduler = get_linear_schedule_with_warmup(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=t_total
)
self.lr_scheduler = scheduler
return dataloader
def val_dataloader(self):
return self.load_dataset("dev", self.hparams.eval_batch_size)
def test_dataloader(self):
return self.load_dataset("test", self.hparams.eval_batch_size)
def _feature_file(self, mode):
return os.path.join(
self.hparams.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
str(self.hparams.max_seq_length),
),
)
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument(
"--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform."
)
parser.add_argument("--train_batch_size", default=32, type=int)
parser.add_argument("--eval_batch_size", default=32, type=int)
class LoggingCallback(pl.Callback):
def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
logger.info("***** Validation results *****")
if pl_module.is_logger():
metrics = trainer.callback_metrics
# Log results
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
logger.info("{} = {}\n".format(key, str(metrics[key])))
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
logger.info("***** Test results *****")
if pl_module.is_logger():
metrics = trainer.callback_metrics
# Log and save results to file
output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
logger.info("{} = {}\n".format(key, str(metrics[key])))
writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir):
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O2",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--n_gpu", type=int, default=1)
parser.add_argument("--n_tpu_cores", type=int, default=0)
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
def generic_train(model: BaseTransformer, args: argparse.Namespace):
# init model
set_seed(args)
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
checkpoint_callback = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=5
)
train_params = dict(
accumulate_grad_batches=args.gradient_accumulation_steps,
gpus=args.n_gpu,
max_epochs=args.num_train_epochs,
early_stop_callback=False,
gradient_clip_val=args.max_grad_norm,
checkpoint_callback=checkpoint_callback,
callbacks=[LoggingCallback()],
)
if args.fp16:
train_params["use_amp"] = args.fp16
train_params["amp_level"] = args.fp16_opt_level
if args.n_tpu_cores > 0:
global xm
import torch_xla.core.xla_model as xm
train_params["num_tpu_cores"] = args.n_tpu_cores
train_params["gpus"] = 0
if args.n_gpu > 1:
train_params["distributed_backend"] = "ddp"
trainer = pl.Trainer(**train_params)
if args.do_train:
trainer.fit(model)
return trainer
| 42.646643 | 1,879 | 0.625155 |
c95840de16706dede668f10fdb5378c1b5ef0e21 | 1,435 | py | Python | python_webex/v1/People.py | Paul-weqe/python-webex-bot | 95dac9085d7b7d107565039c2ab9a2769027ff31 | [
"MIT"
] | 10 | 2019-04-05T04:00:13.000Z | 2021-08-24T16:06:10.000Z | python_webex/v1/People.py | Paul-weqe/python-webex-bot | 95dac9085d7b7d107565039c2ab9a2769027ff31 | [
"MIT"
] | 8 | 2019-07-18T21:05:28.000Z | 2020-10-30T14:40:39.000Z | python_webex/v1/People.py | Paul-weqe/python-webex-bot | 95dac9085d7b7d107565039c2ab9a2769027ff31 | [
"MIT"
] | 5 | 2019-06-15T13:09:50.000Z | 2021-04-27T16:55:17.000Z | import sys
import requests
class People:
def get_people(self, email=None):
"""
gets a list of people with a particular attribute
uses https://api.ciscospark.com/people - GET request
"""
if email is None:
sys.exit("'email' is a required field")
url_route = "people"
params = {
"email": email
}
data = requests.get(self.URL + url_route, headers=self.headers, params=params)
return data
def get_person_details(self, person_id=None):
"""
returns specific information of the person with ID personId
uses https://api.ciscospark.com/people/{ personId } - GET request
API reference can be found in: https://developer.webex.com/docs/api/v1/people/get-person-details
"""
if person_id is None:
sys.exit("'personId' is a required field")
url_route = "people"
data = requests.get(self.URL + url_route + "/" + person_id, headers=self.headers)
return data
def get_own_details(self):
"""
        gets the bot's own information
uses https://api.ciscospark.com/people - GET request
API reference can be found in: https://developer.webex.com/docs/api/v1/people/get-my-own-details
"""
url_route = "people/me"
data = requests.get(self.URL + url_route, headers=self.headers)
return data
| 28.7 | 105 | 0.606969 |
cb5ba846073f57666096f87d32db2e2b83b81c32 | 2,921 | py | Python | options/options_guided_adaptation.py | LeoZDong/shape2prog | 2185d1d4eb7a1c4c55e644c6af477fd8e8e70241 | [
"BSD-2-Clause"
] | null | null | null | options/options_guided_adaptation.py | LeoZDong/shape2prog | 2185d1d4eb7a1c4c55e644c6af477fd8e8e70241 | [
"BSD-2-Clause"
] | null | null | null | options/options_guided_adaptation.py | LeoZDong/shape2prog | 2185d1d4eb7a1c4c55e644c6af477fd8e8e70241 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import os
import argparse
import socket
import torch
def get_parser():
"""
a parser for Guided Adaptation
"""
parser = argparse.ArgumentParser(description="arguments for Guided Adaptation")
# optimization
parser.add_argument('--learning_rate', type=float, default=0.00002, help='learning rate for GA')
parser.add_argument('--weight_decay', type=float, default=0.00001, help='weight decay')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for Adam')
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam')
parser.add_argument('--epochs', type=int, default=20, help='epochs for GA')
parser.add_argument('--grad_clip', type=float, default=0.1, help='gradient clip threshold')
# print freq
parser.add_argument('--info_interval', type=int, default=10, help='freq for printing info')
parser.add_argument('--save_interval', type=int, default=5, help='freq for saving model')
# data info
parser.add_argument('--scale_down', action='store_true', default=False)
parser.add_argument('--batch_size', type=int, default=32, help='batch size of GA')
parser.add_argument('--num_workers', type=int, default=4, help='number of workers')
parser.add_argument('--data_folder', type=str, default='./data/', help='directory to data')
parser.add_argument('--cls', type=str, default='chair',
help='furniture classes: chair, table, bed, sofa, cabinet, bench')
# model info
parser.add_argument('--p_gen_path', type=str, default='./model/program_generator.t7',
help='path to the program generator')
parser.add_argument('--p_exe_path', type=str, default='./model/program_executor.t7',
help='path to the program executor')
parser.add_argument('--model_name', type=str, default='GA', help='folder name to save model')
return parser
def parse():
"""
parse and modify the options accordingly
"""
parser = get_parser()
opt = parser.parse_args()
opt.save_folder = os.path.join('./model', 'ckpts_{}_{}'.format(opt.model_name, opt.cls))
opt.train_file = os.path.join('data', '{}_training.h5'.format(opt.cls))
opt.val_file = os.path.join('data', '{}_testing.h5'.format(opt.cls))
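    # Per-class training schedule: chair/table keep the defaults, sofa/cabinet/bench
    # get at least 60 epochs, and bed uses a higher learning rate with at least 150 epochs.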
if opt.cls in ['chair', 'table']:
pass
elif opt.cls in ['sofa', 'cabinet', 'bench']:
opt.epochs = max(60, opt.epochs)
elif opt.cls in ['bed']:
opt.learning_rate = 0.0001
opt.epochs = max(150, opt.epochs)
else:
raise NotImplementedError('{} is invalid class'.format(opt.cls))
opt.is_cuda = torch.cuda.is_available()
return opt
if __name__ == '__main__':
opt = parse()
print('===== arguments: guided adaptation =====')
for key, val in vars(opt).items():
print("{:20} {}".format(key, val))
| 35.621951 | 100 | 0.653886 |
e785a5fee00e75d91f45b8e29c1d09a893216bf6 | 454 | py | Python | quiz/forms.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | quiz/forms.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | quiz/forms.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | from django.forms import ModelForm
from django.forms.models import modelformset_factory
from .models import Quiz
from keystroke.models import KeystrokeTestType
class QuizForm(ModelForm):
def __init__(self, *args, **kwargs):
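        # Remove the custom 'owner' kwarg before delegating to ModelForm.__init__,
        # which does not expect it.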
self.user = kwargs.pop('owner')
super(QuizForm, self).__init__(*args, **kwargs)
class Meta:
model = Quiz
fields = '__all__'
exclude = ['course']
QuizFormSet = modelformset_factory(Quiz, form=QuizForm)
| 23.894737 | 55 | 0.735683 |
8d7440853d0b9e1ed44e01b981f70f48f2727ea2 | 4,177 | py | Python | Analyzer/core.py | theetje/Twitter-Analyzer | f67fe87f2b32a16341f1b18370b041b4be03a571 | [
"MIT"
] | null | null | null | Analyzer/core.py | theetje/Twitter-Analyzer | f67fe87f2b32a16341f1b18370b041b4be03a571 | [
"MIT"
] | null | null | null | Analyzer/core.py | theetje/Twitter-Analyzer | f67fe87f2b32a16341f1b18370b041b4be03a571 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# helpers
import csv
import helpers
from pprint import pprint
# models
from Models.base import Base
from Models.Tweet import Tweet
# sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# statsmodels numpy, matplotlib and pandas for analysis
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.tsa.api import VAR, DynamicVAR
from settings import *
def start():
"""Start the Data Analyzer"""
engine = create_engine('sqlite:///' + word_to_analyze + '.sqlite')
session = sessionmaker()
session.configure(bind=engine)
Base.metadata.create_all(engine)
s = session()
if count_words:
positive_counter = 0
negative_counter = 0
for positive_words, negative_words in s.query(
Tweet.positive_words,
Tweet.negative_words):
positive_counter += positive_words
negative_counter += negative_words
print(word_to_analyze
+ " had "
+ str(positive_counter)
+ " positive words and "
+ str(negative_counter)
+ " negative words.")
if count_rows:
print("Number of tweets used from " + word_to_analyze + ": ")
print(helpers.countRows(s, Tweet))
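    # Build the normalised sentiment series X_t and stock-return series R_t, then
    # align them into a two-column array for the VAR and Granger-causality tests below.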
norm_Xt_dict = helpers.getXFromData(s, Tweet, True)
norm_Rt_dict = helpers.getRFromCSV('2017/10/01',
'2017/12/31',
'data/stock/'
+ word_to_analyze
+ '-stock-data'
+ '.csv',
True)
combined_2d_results_log = helpers.combineRtandXt(norm_Xt_dict, norm_Rt_dict)
# VAR
if test_var:
pd_data = pd.DataFrame(combined_2d_results_log, columns=['Rt', 'Xt'])
var_result = VAR(pd_data).fit(maxlag)
print(var_result.summary())
var_result.test_causality('Rt', 'Xt')
        # EXAMPLE OF HOW DESCRIPTIVE STATISTICS CAN BE PLOTTED:
# fig = plt.subplots()
# fig = var_result.plot_sample_acorr()
# ax.set_ylabel("Y lable")
# ax.set_xlabel("X lable")
# ax.set_title("Title")
# plt.show()
# GRANGER CAUSALITY ANALYSIS
if test_granger:
result = sm.tsa.stattools.grangercausalitytests(combined_2d_results_log,
maxlag,
addconst=True,
verbose=True)
# PLOT DATA
if plot_figure:
Xt_dict = helpers.getXFromData(s, Tweet)
Rt_dict = helpers.getRFromCSV('2017/10/01',
'2017/12/31',
'data/stock/'
+ word_to_analyze
+ '-stock-data'
+ '.csv')
Xt_df = pd.DataFrame(list(Xt_dict.items()), columns=['Date', 'Xt'])
Xt_df['Date'] = pd.to_datetime(Xt_df['Date'])
Rt_df = pd.DataFrame(list(Rt_dict.items()), columns=['Date', 'Rt'])
Rt_df['Date'] = pd.to_datetime(Rt_df['Date'])
Xt_df = Xt_df.sort_values('Date', ascending=True)
plt.plot(Xt_df['Date'], Xt_df['Xt'], label='Twitter sentiment',
color='black')
plt.xticks(rotation='horizontal')
Rt_df = Rt_df.sort_values('Date', ascending=True)
plt.plot(Rt_df['Date'], Rt_df['Rt'], label='Stock return',
dashes=[6, 2],
color='black')
plt.legend([Xt_df, Rt_df], ['Twitter sentiment', 'Stock return'])
plt.xticks(rotation='horizontal')
        if word_to_analyze == 'ibm':
plt.suptitle(word_to_analyze.upper(), fontsize=20)
else:
plt.suptitle(word_to_analyze.title(), fontsize=20)
plt.show()
| 33.95935 | 80 | 0.526933 |
5f74f159bd07d69a084b0dc0509363f5aaa88bdd | 1,716 | py | Python | objectModel/Python/cdm/utilities/__init__.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/cdm/utilities/__init__.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | 3 | 2021-05-11T23:57:12.000Z | 2021-08-04T05:03:05.000Z | objectModel/Python/cdm/utilities/__init__.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | null | null | null | # ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.
# All rights reserved.
# ----------------------------------------------------------------------
from . import lang_utils
from . import primitive_appliers
from . import string_utils
from . import time_utils
from .applier_context import ApplierContext
from .applier_state import ApplierState
from .attribute_context_parameters import AttributeContextParameters
from .attribute_resolution_applier import AttributeResolutionApplier
from .attribute_resolution_directive_set import AttributeResolutionDirectiveSet
from .copy_options import CopyOptions
from .docs_result import DocsResult
from .event_callback import EventCallback
from .exceptions import CdmError
from .friendly_format_node import FriendlyFormatNode
from .identifier_ref import IdentifierRef
from .jobject import JObject
from .ref_counted import RefCounted
from .resolve_context_scope import ResolveContextScope
from .resolve_options import ResolveOptions
from .symbol_set import SymbolSet
from .trait_to_property_map import TraitToPropertyMap
from .visit_callback import VisitCallback
from .logging import logger
__all__ = [
'ApplierContext',
'ApplierState',
'AttributeContextParameters',
'AttributeResolutionApplier',
'AttributeResolutionDirectiveSet',
'CdmError',
'CopyOptions',
'DocsResult',
'EventCallback',
'FriendlyFormatNode',
'IdentifierRef',
'JObject',
'lang_utils',
'logger',
'primitive_appliers',
'RefCounted',
'ResolveContextScope',
'ResolveOptions',
'string_utils',
'SymbolSet',
'time_utils',
'TraitToPropertyMap',
'VisitCallback',
]
| 30.105263 | 79 | 0.725524 |
7581d15ed4249eef74451306a80cac8705c9ff6b | 1,489 | py | Python | tests/test_main.py | phbernardes/djantic | c916b175149f0c3f6ef112e7c42cac14379d6b93 | [
"MIT"
] | null | null | null | tests/test_main.py | phbernardes/djantic | c916b175149f0c3f6ef112e7c42cac14379d6b93 | [
"MIT"
] | null | null | null | tests/test_main.py | phbernardes/djantic | c916b175149f0c3f6ef112e7c42cac14379d6b93 | [
"MIT"
] | null | null | null | import pytest
from pydantic import ConfigError
from testapp.models import User
from djantic import ModelSchema
@pytest.mark.django_db
def test_config_errors():
"""
Test the model config error exceptions.
"""
with pytest.raises(
ConfigError, match="(Is `Config` class defined?)"
):
class InvalidModelErrorSchema(ModelSchema):
pass
with pytest.raises(
ConfigError, match="(Is `Config.model` a valid Django model class?)"
):
class InvalidModelErrorSchema(ModelSchema):
class Config:
model = "Ok"
with pytest.raises(
ConfigError,
match="Only one of 'include' or 'exclude' should be set in configuration.",
):
class IncludeExcludeErrorSchema(ModelSchema):
class Config:
model = User
include = ["id"]
exclude = ["first_name"]
@pytest.mark.django_db
def test_get_field_names():
"""
Test retrieving the field names for a model.
"""
class UserSchema(ModelSchema):
class Config:
model = User
include = ["id"]
assert UserSchema.get_field_names() == ["id"]
class UserSchema(ModelSchema):
class Config:
model = User
exclude = ["id"]
assert UserSchema.get_field_names() == [
"profile",
"first_name",
"last_name",
"email",
"created_at",
"updated_at",
]
| 22.223881 | 83 | 0.57824 |
63dbe9abdc6dbbe605d0ec9c81efc8195f995918 | 29,319 | py | Python | scalecodec/base.py | polkascan/scale-codec | 3b12396e9043cab7900347c25033fd306abdb640 | [
"Apache-2.0"
] | null | null | null | scalecodec/base.py | polkascan/scale-codec | 3b12396e9043cab7900347c25033fd306abdb640 | [
"Apache-2.0"
] | null | null | null | scalecodec/base.py | polkascan/scale-codec | 3b12396e9043cab7900347c25033fd306abdb640 | [
"Apache-2.0"
] | null | null | null | # Python SCALE Codec Library
#
# Copyright 2018-2020 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings
from abc import ABC, abstractmethod
from typing import Optional, TYPE_CHECKING, Union
from scalecodec.exceptions import RemainingScaleBytesNotEmptyException, InvalidScaleTypeValueException
if TYPE_CHECKING:
from scalecodec.types import GenericMetadataVersioned, GenericRegistryType
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
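        # Use 'config_id' (when supplied) as the cache key so each runtime
        # configuration gets its own singleton instance.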
if 'config_id' in kwargs:
instance_key = kwargs['config_id']
else:
instance_key = cls
if instance_key not in cls._instances:
cls._instances[instance_key] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[instance_key]
class RuntimeConfigurationObject:
@classmethod
def all_subclasses(cls, class_):
return set(class_.__subclasses__()).union(
[s for c in class_.__subclasses__() for s in cls.all_subclasses(c)])
def __init__(self, config_id=None, ss58_format=None, only_primitives_on_init=False, implements_scale_info=False):
self.config_id = config_id
self.type_registry = {'types': {}}
self.__initial_state = False
self.clear_type_registry()
self.active_spec_version_id = None
self.chain_id = None
self.only_primitives_on_init = only_primitives_on_init
self.ss58_format = ss58_format
self.implements_scale_info = implements_scale_info
@classmethod
def convert_type_string(cls, name):
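        # Normalise Rust/Substrate type strings (strip 'T::' prefixes, trait
        # qualifiers and similar noise) to the plain names used as keys in the
        # type registry.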
name = re.sub(r'T::', "", name)
name = re.sub(r'^T::', "", name, flags=re.IGNORECASE)
name = re.sub(r'<T>', "", name, flags=re.IGNORECASE)
name = re.sub(r'<T as Trait>::', "", name, flags=re.IGNORECASE)
name = re.sub(r'<T as Trait<I>>::', "", name, flags=re.IGNORECASE)
name = re.sub(r'<T as Config>::', "", name, flags=re.IGNORECASE)
name = re.sub(r'<T as Config<I>>::', "", name, flags=re.IGNORECASE)
name = re.sub(r'\n', "", name)
name = re.sub(r'^(grandpa|session|slashing|limits|beefy_primitives|xcm::opaque)::', "", name)
name = re.sub(r'VecDeque<', "Vec<", name, flags=re.IGNORECASE)
name = re.sub(r'^Box<(.+)>$', r'\1', name, flags=re.IGNORECASE)
if name == '()':
return "Null"
if name.lower() in ['vec<u8>', '&[u8]', "& 'static[u8]"]:
return "Bytes"
if name.lower() == '<lookup as staticlookup>::source':
return 'LookupSource'
if name.lower() == '<balance as hascompact>::type':
return 'Compact<Balance>'
if name.lower() == '<blocknumber as hascompact>::type':
return 'Compact<BlockNumber>'
if name.lower() == '<moment as hascompact>::type':
return 'Compact<Moment>'
if name.lower() == '<inherentofflinereport as inherentofflinereport>::inherent':
return 'InherentOfflineReport'
return name
def get_decoder_class(self, type_string: str, spec_version_id='default'):
if type_string.strip() == '':
return None
if self.implements_scale_info is False:
type_string = self.convert_type_string(type_string)
decoder_class = self.type_registry.get('types', {}).get(type_string.lower(), None)
if not decoder_class:
            # Type string containing a subtype
if type_string[-1:] == '>':
# Extract sub types
type_parts = re.match(r'^([^<]*)<(.+)>$', type_string)
if type_parts:
type_parts = type_parts.groups()
if type_parts:
# Create dynamic class for Part1<Part2> based on Part1 and set class variable Part2 as sub_type
base_class = self.type_registry.get('types', {}).get(type_parts[0].lower(), None)
if base_class:
decoder_class = type(type_string, (base_class,), {'sub_type': type_parts[1]})
# Custom tuples
elif type_string != '()' and type_string[0] == '(' and type_string[-1] == ')':
decoder_class = type(type_string, (self.get_decoder_class('tuple'),), {
'type_string': type_string
})
decoder_class.build_type_mapping()
elif type_string[0] == '[' and type_string[-1] == ']':
type_parts = re.match(r'^\[([A-Za-z0-9]+); ([0-9]+)\]$', type_string)
if type_parts:
type_parts = type_parts.groups()
if type_parts:
# Create dynamic class for e.g. [u8; 4] resulting in array of u8 with 4 elements
decoder_class = type(type_string, (self.get_decoder_class('FixedLengthArray'),), {
'sub_type': type_parts[0],
'element_count': int(type_parts[1])
})
if decoder_class:
# Attach RuntimeConfigurationObject to new class
decoder_class.runtime_config = self
return decoder_class
def create_scale_object(self, type_string: str, data=None, **kwargs) -> 'ScaleType':
"""
Returns
-------
ScaleType
"""
decoder_class = self.get_decoder_class(type_string)
if decoder_class:
return decoder_class(data=data, **kwargs)
raise NotImplementedError('Decoder class for "{}" not found'.format(type_string))
def clear_type_registry(self):
if not self.__initial_state:
self.type_registry = {'types': {}}
            # Class names that contain '<' are excluded because of a side effect that is introduced in
# get_decoder_class: "Create dynamic class for Part1<Part2> based on Part1 and set class variable Part2 as
# sub_type" which won't get reset because class definitions always remain globally
self.type_registry['types'].update(
{
cls.__name__.lower(): cls for cls in self.all_subclasses(ScaleDecoder)
if '<' not in cls.__name__ and '::' not in cls.__name__
}
)
self.__initial_state = True
def update_type_registry_types(self, types_dict):
from scalecodec.types import Enum, Struct, Set, Tuple
self.__initial_state = False
for type_string, decoder_class_data in types_dict.items():
if type(decoder_class_data) == dict:
# Create dynamic decoder class
base_cls = None
if decoder_class_data.get('base_class'):
base_cls = self.get_decoder_class(decoder_class_data['base_class'])
if base_cls is None:
raise ValueError(f"Specified base_class '{decoder_class_data['base_class']}' for type " +
f"'{type_string}' not found")
if decoder_class_data['type'] == 'struct':
if base_cls is None:
base_cls = Struct
decoder_class = type(type_string, (base_cls,), {
'type_mapping': decoder_class_data.get('type_mapping')
})
elif decoder_class_data['type'] == 'tuple':
if base_cls is None:
base_cls = Tuple
decoder_class = type(type_string, (base_cls,), {
'type_mapping': decoder_class_data.get('type_mapping')
})
elif decoder_class_data['type'] == 'enum':
if base_cls is None:
base_cls = Enum
value_list = decoder_class_data.get('value_list')
if type(value_list) is dict:
# Transform value_list with explictly specified index numbers
value_list = {i: v for v, i in value_list.items()}
decoder_class = type(type_string, (base_cls,), {
'value_list': value_list,
'type_mapping': decoder_class_data.get('type_mapping')
})
elif decoder_class_data['type'] == 'set':
if base_cls is None:
base_cls = Set
decoder_class = type(type_string, (base_cls,), {
'value_list': decoder_class_data.get('value_list'),
'value_type': decoder_class_data.get('value_type', 'u64')
})
else:
raise NotImplementedError("Dynamic decoding type '{}' not supported".format(
decoder_class_data['type'])
)
else:
decoder_class = self.get_decoder_class(decoder_class_data)
self.type_registry['types'][type_string.lower()] = decoder_class
def update_type_registry(self, type_registry):
# Set runtime ID if set
self.active_spec_version_id = type_registry.get('runtime_id')
# Set chain ID if set
self.chain_id = type_registry.get('chain_id')
self.type_registry['versioning'] = type_registry.get('versioning')
self.type_registry['runtime_upgrades'] = type_registry.get('runtime_upgrades')
# Update types
if 'types' in type_registry:
self.update_type_registry_types(type_registry.get('types'))
def set_active_spec_version_id(self, spec_version_id):
if spec_version_id != self.active_spec_version_id:
self.active_spec_version_id = spec_version_id
# Updated type registry with versioned types
for versioning_item in self.type_registry.get('versioning') or []:
# Check if versioning item is in current version range
if versioning_item['runtime_range'][0] <= spec_version_id and \
(not versioning_item['runtime_range'][1] or versioning_item['runtime_range'][1] >= spec_version_id):
# Update types in type registry
self.update_type_registry_types(versioning_item['types'])
def get_runtime_id_from_upgrades(self, block_number: int) -> Optional[int]:
"""
Retrieve runtime_id for given block_number if runtime_upgrades are specified in the type registry
Parameters
----------
block_number
Returns
-------
Runtime id
"""
if self.type_registry.get('runtime_upgrades'):
if block_number > self.type_registry['runtime_upgrades'][-1][0]:
return
for max_block_number, runtime_id in reversed(self.type_registry['runtime_upgrades']):
if block_number >= max_block_number and runtime_id != -1:
return runtime_id
def set_runtime_upgrades_head(self, block_number: int):
"""
Sets head for given block_number to last runtime_id in runtime_upgrades cache
Parameters
----------
block_number
Returns
-------
"""
if self.type_registry.get('runtime_upgrades'):
if self.type_registry['runtime_upgrades'][-1][1] == -1:
self.type_registry['runtime_upgrades'][-1][0] = block_number
elif block_number > self.type_registry['runtime_upgrades'][-1][0]:
self.type_registry['runtime_upgrades'].append([block_number, -1])
def get_decoder_class_for_scale_info_definition(
self, type_string: str, scale_info_type: 'GenericRegistryType', prefix: str
):
decoder_class = None
base_decoder_class = None
# Check if base decoder class is defined for path
if 'path' in scale_info_type.value and len(scale_info_type.value['path']) > 0:
path_string = '::'.join(scale_info_type.value["path"])
base_decoder_class = self.get_decoder_class(path_string)
if base_decoder_class is None:
# Try generic type
catch_all_path = '*::' * (len(scale_info_type.value['path']) - 1) + scale_info_type.value["path"][-1]
base_decoder_class = self.get_decoder_class(catch_all_path)
if base_decoder_class and hasattr(base_decoder_class, 'process_scale_info_definition'):
# if process_scale_info_definition is implemented result is final
decoder_class = type(type_string, (base_decoder_class,), {})
decoder_class.process_scale_info_definition(scale_info_type, prefix)
# Link ScaleInfo RegistryType to decoder class
decoder_class.scale_info_type = scale_info_type
return decoder_class
if "primitive" in scale_info_type.value["def"]:
decoder_class = self.get_decoder_class(scale_info_type.value["def"]["primitive"])
elif 'array' in scale_info_type.value['def']:
if base_decoder_class is None:
base_decoder_class = self.get_decoder_class('FixedLengthArray')
decoder_class = type(type_string, (base_decoder_class,), {
'sub_type': f"{prefix}::{scale_info_type.value['def']['array']['type']}",
'element_count': scale_info_type.value['def']['array']['len']
})
elif 'composite' in scale_info_type.value['def']:
type_mapping = []
base_type_string = 'Tuple'
if 'fields' in scale_info_type.value['def']['composite']:
fields = scale_info_type.value['def']['composite']['fields']
if all([f.get('name') for f in fields]):
base_type_string = 'Struct'
type_mapping = [[field['name'], f"{prefix}::{field['type']}"] for field in fields]
else:
base_type_string = 'Tuple'
type_mapping = [f"{prefix}::{field['type']}" for field in fields]
if base_decoder_class is None:
base_decoder_class = self.get_decoder_class(base_type_string)
decoder_class = type(type_string, (base_decoder_class,), {
'type_mapping': type_mapping
})
elif 'sequence' in scale_info_type.value['def']:
# Vec
decoder_class = type(type_string, (self.get_decoder_class('Vec'),), {
'sub_type': f"{prefix}::{scale_info_type.value['def']['sequence']['type']}"
})
elif 'variant' in scale_info_type.value['def']:
# Enum
type_mapping = []
variants = scale_info_type.value['def']['variant']['variants']
if len(variants) > 0:
# Create placeholder list
variant_length = max([v['index'] for v in variants]) + 1
type_mapping = [(None, 'Null')] * variant_length
for variant in variants:
if 'fields' in variant:
if len(variant['fields']) == 0:
enum_value = 'Null'
elif len(variant['fields']) == 1:
enum_value = f"{prefix}::{variant['fields'][0]['type']}"
else:
field_str = ', '.join([f"{prefix}::{f['type']}" for f in variant['fields']])
enum_value = f"({field_str})"
else:
enum_value = 'Null'
# Put mapping in right order in list
type_mapping[variant['index']] = (variant['name'], enum_value)
if base_decoder_class is None:
base_decoder_class = self.get_decoder_class("Enum")
decoder_class = type(type_string, (base_decoder_class,), {
'type_mapping': type_mapping
})
elif 'tuple' in scale_info_type.value['def']:
type_mapping = [f"{prefix}::{f}" for f in scale_info_type.value['def']['tuple']]
decoder_class = type(type_string, (self.get_decoder_class('Tuple'),), {
'type_mapping': type_mapping
})
elif 'compact' in scale_info_type.value['def']:
# Compact
decoder_class = type(type_string, (self.get_decoder_class('Compact'),), {
'sub_type': f"{prefix}::{scale_info_type.value['def']['compact']['type']}"
})
elif 'phantom' in scale_info_type.value['def']:
decoder_class = type(type_string, (self.get_decoder_class('Null'),), {})
elif 'bitsequence' in scale_info_type.value['def']:
decoder_class = type(type_string, (self.get_decoder_class('BitVec'),), {})
else:
raise NotImplementedError(f"RegistryTypeDef {scale_info_type.value['def']} not implemented")
# if 'path' in scale_info_type.value:
# decoder_class.type_string = '::'.join(scale_info_type.value['path'])
# Link ScaleInfo RegistryType to decoder class
decoder_class.scale_info_type = scale_info_type
return decoder_class
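# Illustrative sketch of the mapping above (the type id and field names are
# made-up examples, not values from a real registry): a scale-info definition
#   {'def': {'composite': {'fields': [{'name': 'balance', 'type': 4},
#                                     {'name': 'nonce', 'type': 4}]}}}
# would produce a Struct subclass with
#   type_mapping = [['balance', 'scale_info::4'], ['nonce', 'scale_info::4']]
# while the same fields without names fall back to a Tuple subclass with
#   type_mapping = ['scale_info::4', 'scale_info::4']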
def update_from_scale_info_types(self, scale_info_types: list, prefix: str = None):
if prefix is None:
prefix = 'scale_info'
for scale_info_type in scale_info_types:
idx = scale_info_type['id'].value
type_string = f"{prefix}::{idx}"
decoder_class = self.get_decoder_class_for_scale_info_definition(
type_string, scale_info_type['type'], prefix
)
if decoder_class is None:
raise NotImplementedError(f"No decoding class found for scale type {idx}")
if decoder_class:
self.type_registry['types'][type_string] = decoder_class
if len(scale_info_type['type'].value.get('path', [])) > 0:
path_string = '::'.join(scale_info_type['type'].value['path']).lower()
self.type_registry['types'][path_string] = decoder_class
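# Registration sketch (illustrative id and path): a portable-registry entry
# with id 7 and path ['pallet_balances', 'AccountData'] is registered twice,
# as 'scale_info::7' and as 'pallet_balances::accountdata', so either string
# can later be used to look up the decoder class.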
def add_portable_registry(self, metadata: 'GenericMetadataVersioned', prefix=None):
scale_info_types = metadata.portable_registry.value_object['types'].value_object
self.update_from_scale_info_types(scale_info_types, prefix=prefix)
# TODO process extrinsic types
pass
def add_contract_metadata_dict_to_type_registry(self, metadata_dict):
# TODO
prefix = f"ink::{metadata_dict['source']['hash']}"
return self.update_from_scale_info_types(metadata_dict['types'], prefix=prefix)
class ScaleBytes:
def __init__(self, data: Union[str, bytes, bytearray]):
self.offset = 0
if type(data) is bytearray:
self.data = data
elif type(data) is bytes:
self.data = bytearray(data)
elif type(data) is str and data[0:2] == '0x':
self.data = bytearray.fromhex(data[2:])
else:
raise ValueError("Provided data is not in supported format: provided '{}'".format(type(data)))
self.length = len(self.data)
def get_next_bytes(self, length: int) -> bytearray:
data = self.data[self.offset:self.offset + length]
self.offset += length
return data
def get_remaining_bytes(self) -> bytearray:
data = self.data[self.offset:]
self.offset = self.length
return data
def get_remaining_length(self) -> int:
return self.length - self.offset
def reset(self):
self.offset = 0
def __str__(self):
return "0x{}".format(self.data.hex())
def __eq__(self, other):
if not hasattr(other, 'data'):
return False
return self.data == other.data
def __len__(self):
return len(self.data)
def __repr__(self):
return "<{}(data=0x{})>".format(self.__class__.__name__, self.data.hex())
def __add__(self, data):
if type(data) == ScaleBytes:
return ScaleBytes(self.data + data.data)
if type(data) == bytes:
data = bytearray(data)
elif type(data) == str and data[0:2] == '0x':
data = bytearray.fromhex(data[2:])
if type(data) == bytearray:
return ScaleBytes(self.data + data)
def to_hex(self) -> str:
return f'0x{self.data.hex()}'
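# Minimal usage sketch for ScaleBytes (illustrative values): the container
# keeps an internal offset, so successive reads consume the bytearray.
#
#   scale_data = ScaleBytes('0x0c0102ff')
#   scale_data.get_next_bytes(1)       # bytearray(b'\x0c'), offset moves to 1
#   scale_data.get_remaining_length()  # 3
#   scale_data + '0xdeadbeef'          # new ScaleBytes('0x0c0102ffdeadbeef')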
class ScaleDecoder(ABC):
type_string = None
type_mapping = None
debug = False
sub_type = None
runtime_config = None
def __init__(self, data: ScaleBytes, sub_type: str = None, runtime_config: RuntimeConfigurationObject = None):
if sub_type:
self.sub_type = sub_type
if self.type_mapping is None and self.type_string:
self.build_type_mapping()
if data:
assert(type(data) == ScaleBytes)
if runtime_config:
self.runtime_config = runtime_config
if not self.runtime_config:
# if no runtime config is provided, fall back on the singleton
self.runtime_config = RuntimeConfiguration()
self.data = data
self.value_object = None
self.value_serialized = None
self.decoded = False
self.data_start_offset = None
self.data_end_offset = None
@property
def value(self):
# TODO fix
# if not self.decoded:
# self.decode()
return self.value_serialized
@value.setter
def value(self, value):
self.value_serialized = value
@staticmethod
def is_primitive(type_string: str) -> bool:
return type_string in ('bool', 'u8', 'u16', 'u32', 'u64', 'u128', 'u256', 'i8', 'i16', 'i32', 'i64', 'i128',
'i256', 'h160', 'h256', 'h512', '[u8; 4]', '[u8; 8]', '[u8; 16]', '[u8; 32]',
'&[u8]')
@classmethod
def build_type_mapping(cls):
if cls.type_string and cls.type_string[0] == '(' and cls.type_string[-1] == ')':
type_mapping = ()
tuple_contents = cls.type_string[1:-1]
# replace subtype types
sub_types = re.search(r'([A-Za-z]+[<][^>]*[>])', tuple_contents)
if sub_types:
sub_types = sub_types.groups()
for sub_type in sub_types:
tuple_contents = tuple_contents.replace(sub_type, sub_type.replace(',', '|'))
for tuple_element in tuple_contents.split(','):
type_mapping += (tuple_element.strip().replace('|', ','),)
cls.type_mapping = type_mapping
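# Example of what build_type_mapping produces (illustrative type string): for
# type_string = '(u32, BTreeMap<u8, u8>)' the comma inside the parameterised
# sub type is masked with '|' before splitting, so the result is
#   type_mapping = ('u32', 'BTreeMap<u8, u8>')
# instead of the tuple being split in the middle of 'BTreeMap<u8, u8>'.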
def get_next_bytes(self, length) -> bytearray:
data = self.data.get_next_bytes(length)
return data
def get_next_u8(self) -> int:
return int.from_bytes(self.get_next_bytes(1), byteorder='little')
def get_next_bool(self) -> bool:
data = self.get_next_bytes(1)
if data not in [b'\x00', b'\x01']:
raise InvalidScaleTypeValueException('Invalid value for datatype "bool"')
return data == b'\x01'
def get_remaining_bytes(self) -> bytearray:
data = self.data.get_remaining_bytes()
return data
def get_used_bytes(self) -> bytearray:
return self.data.data[self.data_start_offset:self.data_end_offset]
@abstractmethod
def process(self):
raise NotImplementedError
def decode(self, data: ScaleBytes = None, check_remaining=True):
if data is not None:
self.decoded = False
self.data = data
if not self.decoded:
self.data_start_offset = self.data.offset
self.value_serialized = self.process()
self.decoded = True
if self.value_object is None:
# Default for value_object if not explicitly defined
self.value_object = self.value_serialized
self.data_end_offset = self.data.offset
if check_remaining and self.data.offset != self.data.length:
raise RemainingScaleBytesNotEmptyException(
f'Decoding <{self.__class__.__name__}> - Current offset: {self.data.offset} / length: {self.data.length}'
)
if self.data.offset > self.data.length:
raise RemainingScaleBytesNotEmptyException(
f'Decoding <{self.__class__.__name__}> - No more bytes available (needed: {self.data.offset} / total: {self.data.length})'
)
return self.value
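# Decoding sketch (illustrative; create_scale_object is the factory also used
# by process_type below):
#   obj = RuntimeConfiguration().create_scale_object('u16', ScaleBytes('0x2efb'))
#   obj.decode()  # 64302, i.e. 0xfb2e read little-endian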
def __str__(self):
return str(self.serialize()) or ''
def __repr__(self):
return "<{}(value={})>".format(self.__class__.__name__, self.serialize())
def encode(self, value=None):
if value and issubclass(self.__class__, value.__class__):
# Accept instance of current class directly
self.data = value.data
self.value_object = value.value_object
self.value_serialized = value.value_serialized
return value.data
if value is not None:
self.value_serialized = value
self.decoded = True
self.data = self.process_encode(self.value_serialized)
if self.value_object is None:
self.value_object = self.value_serialized
return self.data
def process_encode(self, value) -> ScaleBytes:
raise NotImplementedError("Encoding not implemented for this ScaleType")
@classmethod
def get_decoder_class(cls, type_string, data=None, runtime_config=None, **kwargs):
"""
Parameters
----------
type_string
data
runtime_config
kwargs
Returns
-------
ScaleType
"""
warnings.warn("Use RuntimeConfigurationObject.create_scale_object() instead", DeprecationWarning)
if not runtime_config:
runtime_config = RuntimeConfiguration()
decoder_class = runtime_config.get_decoder_class(
type_string,
spec_version_id=kwargs.get('spec_version_id', 'default')
)
if decoder_class:
return decoder_class(data=data, runtime_config=runtime_config, **kwargs)
raise NotImplementedError('Decoder class for "{}" not found'.format(type_string))
# TODO rename to decode_type (confusing when encoding is introduced)
def process_type(self, type_string, **kwargs):
obj = self.runtime_config.create_scale_object(type_string, self.data, **kwargs)
obj.decode(check_remaining=False)
return obj
def serialize(self):
return self.value_serialized
@classmethod
def convert_type(cls, name):
return RuntimeConfigurationObject.convert_type_string(name)
class RuntimeConfiguration(RuntimeConfigurationObject, metaclass=Singleton):
pass
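# Because of the Singleton metaclass every RuntimeConfiguration() call returns
# the same shared instance (illustratively, RuntimeConfiguration() is
# RuntimeConfiguration() evaluates to True), so type registrations made
# anywhere in the process are globally visible, whereas a plain
# RuntimeConfigurationObject can be instantiated per use.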
class ScaleType(ScaleDecoder, ABC):
scale_info_type: 'GenericRegistryType' = None
def __init__(self, data=None, sub_type=None, metadata=None, runtime_config=None):
"""
Parameters
----------
data: ScaleBytes
sub_type: str
metadata: VersionedMetadata
runtime_config: RuntimeConfigurationObject
"""
self.metadata = metadata
# Container for meta information
self.meta_info: dict = {}
if not data:
data = ScaleBytes(bytearray())
super().__init__(data, sub_type, runtime_config=runtime_config)
def __getitem__(self, item):
return self.value_object[item]
def __iter__(self):
for item in self.value_object:
yield item
def __eq__(self, other):
if isinstance(other, ScaleType):
return other.value_serialized == self.value_serialized
else:
return other == self.value_serialized
def __gt__(self, other):
if isinstance(other, ScaleType):
return self.value_serialized > other.value_serialized
else:
return self.value_serialized > other
def __ge__(self, other):
if isinstance(other, ScaleType):
return self.value_serialized >= other.value_serialized
else:
return self.value_serialized >= other
def __lt__(self, other):
if isinstance(other, ScaleType):
return self.value_serialized < other.value_serialized
else:
return self.value_serialized < other
def __le__(self, other):
if isinstance(other, ScaleType):
return self.value_serialized <= other.value_serialized
else:
return self.value_serialized <= other
| 35.40942 | 142 | 0.59105 |
69e5823a9aa5ef7dddb7417ebde3d1d35b2df08e | 146 | py | Python | cliente/apps.py | Sergio-Basaure/taller_titulo_admin | 8e77d967e5818abb8e974e93ee64e226d64d73f5 | ["MIT"] | null | null | null | cliente/apps.py | Sergio-Basaure/taller_titulo_admin | 8e77d967e5818abb8e974e93ee64e226d64d73f5 | ["MIT"] | null | null | null | cliente/apps.py | Sergio-Basaure/taller_titulo_admin | 8e77d967e5818abb8e974e93ee64e226d64d73f5 | ["MIT"] | 1 | 2021-08-09T00:55:17.000Z | 2021-08-09T00:55:17.000Z | from django.apps import AppConfig
class ClienteConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'cliente'
| 20.857143 | 56 | 0.760274 |
14479613c8e93e97569fd08ad6820147404b479e | 4,193 | py | Python | RecoMuon/MuonIdentification/python/muons1stStep_cfi.py | mneukum/cmssw | a6a429a1b19b769c77ca2b76d8cc6e8151ed526d | ["Apache-2.0"] | 1 | 2020-10-08T06:48:26.000Z | 2020-10-08T06:48:26.000Z | RecoMuon/MuonIdentification/python/muons1stStep_cfi.py | Seyoung-Han/cmssw | 92e2e874dac6175ec18131c15a88daf6c7741f58 | ["Apache-2.0"] | null | null | null | RecoMuon/MuonIdentification/python/muons1stStep_cfi.py | Seyoung-Han/cmssw | 92e2e874dac6175ec18131c15a88daf6c7741f58 | ["Apache-2.0"] | null | null | null | import FWCore.ParameterSet.Config as cms
from RecoMuon.MuonIdentification.isolation_cff import *
from RecoMuon.MuonIdentification.caloCompatibility_cff import *
from RecoMuon.MuonIdentification.MuonTimingFiller_cfi import *
from RecoMuon.MuonIdentification.MuonShowerDigiFiller_cfi import *
from RecoMuon.MuonIdentification.TrackerKinkFinder_cfi import *
from TrackingTools.TrackAssociator.default_cfi import *
muons1stStep = cms.EDProducer("MuonIdProducer",
# MuonCaloCompatibility
MuonCaloCompatibilityBlock,
# TrackDetectorAssociator
TrackAssociatorParameterBlock,
# MuonIsolation
MIdIsoExtractorPSetBlock,
# MuonTiming
TimingFillerBlock,
# MuonShowerDigi
MuonShowerDigiFillerBlock,
# Kink finder
TrackerKinkFinderParametersBlock,
fillEnergy = cms.bool(True),
storeCrossedHcalRecHits = cms.bool(True),
# OR
maxAbsPullX = cms.double(3.0),
maxAbsEta = cms.double(3.0),
# Selection parameters
minPt = cms.double(0.5),
inputCollectionTypes = cms.vstring('inner tracks',
'links',
'outer tracks',
'tev firstHit',
'tev picky',
'tev dyt'),
addExtraSoftMuons = cms.bool(False),
fillGlobalTrackRefits = cms.bool(True),
# internal
debugWithTruthMatching = cms.bool(False),
# input tracks
inputCollectionLabels = cms.VInputTag(cms.InputTag("generalTracks"), cms.InputTag("globalMuons"), cms.InputTag("standAloneMuons","UpdatedAtVtx"),
cms.InputTag("tevMuons","firstHit"),cms.InputTag("tevMuons","picky"),cms.InputTag("tevMuons","dyt")),
fillCaloCompatibility = cms.bool(True),
# OR
maxAbsPullY = cms.double(9999.0),
# AND
maxAbsDy = cms.double(9999.0),
minP = cms.double(2.5),
minPCaloMuon = cms.double(1e9),
# Match parameters
maxAbsDx = cms.double(3.0),
fillIsolation = cms.bool(True),
writeIsoDeposits = cms.bool(True),
minNumberOfMatches = cms.int32(1),
fillMatching = cms.bool(True),
fillShowerDigis = cms.bool(True),
# global fit for candidate p4 requirements
ptThresholdToFillCandidateP4WithGlobalFit = cms.double(200.0),
sigmaThresholdToFillCandidateP4WithGlobalFit = cms.double(2.0),
# global quality
fillGlobalTrackQuality = cms.bool(False), #input depends on external module output --> set to True where the sequence is defined
globalTrackQualityInputTag = cms.InputTag('glbTrackQual'),
# tracker kink finding
fillTrackerKink = cms.bool(True),
# calo muons
minCaloCompatibility = cms.double(0.6),
# arbitration cleaning
runArbitrationCleaner = cms.bool(True),
arbitrationCleanerOptions = cms.PSet( ME1a = cms.bool(True),
Overlap = cms.bool(True),
Clustering = cms.bool(True),
OverlapDPhi = cms.double(0.0786), # 4.5 degrees
OverlapDTheta = cms.double(0.02), # 1.14 degrees
ClusterDPhi = cms.double(0.6), # 34 degrees
ClusterDTheta = cms.double(0.02) # 1.14
),
# tracker muon arbitration
arbitrateTrackerMuons = cms.bool(True)
)
from Configuration.Eras.Modifier_run3_GEM_cff import run3_GEM
run3_GEM.toModify( muons1stStep, TrackAssociatorParameters = dict(useGEM = True ) )
from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon
phase2_muon.toModify( muons1stStep, TrackAssociatorParameters = dict(useME0 = True ) )
muonEcalDetIds = cms.EDProducer("InterestingEcalDetIdProducer",
inputCollection = cms.InputTag("muons1stStep")
)
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
pp_on_AA_2018.toModify(muons1stStep, minPt = 0.8)
from Configuration.ProcessModifiers.recoFromReco_cff import recoFromReco
recoFromReco.toModify(muons1stStep,fillShowerDigis = False)
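# A cfi fragment like this is normally pulled into a job configuration with
# something along the lines of (illustrative):
#   process.load("RecoMuon.MuonIdentification.muons1stStep_cfi")
# after which individual parameters can still be overridden, e.g.
#   process.muons1stStep.minPt = cms.double(1.0)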
| 39.933333 | 149 | 0.652039 |
cf5726d10aab559d17cce23ac9203f5ae299dbad | 85,452 | py | Python | nova/virt/vmwareapi/vmops.py | bopopescu/nova-17 | 2724155f4ac64aa0ef7dc25c1bf38d3c41f95b7b | ["Apache-2.0"] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | nova/virt/vmwareapi/vmops.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | ["Apache-2.0"] | null | null | null | nova/virt/vmwareapi/vmops.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | ["Apache-2.0"] | 1 | 2020-07-24T01:18:11.000Z | 2020-07-24T01:18:11.000Z | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""
import collections
import os
import time
import decorator
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from nova.api.metadata import base as instance_metadata
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import utils
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt import hardware
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
vmops_opts = [
cfg.StrOpt('cache_prefix',
help='The prefix for Where cached images are stored. This is '
'NOT the full path - just a folder prefix. '
'This should only be used when a datastore cache should '
'be shared between compute nodes. Note: this should only '
'be used when the compute nodes have a shared file '
'system.'),
]
CONF = cfg.CONF
CONF.register_opts(vmops_opts, 'vmware')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('remove_unused_base_images', 'nova.virt.imagecache')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
'suspended': power_state.SUSPENDED}
RESIZE_TOTAL_STEPS = 6
DcInfo = collections.namedtuple('DcInfo',
['ref', 'name', 'vmFolder'])
class VirtualMachineInstanceConfigInfo(object):
"""Parameters needed to create and configure a new instance."""
def __init__(self, instance, image_info, datastore, dc_info, image_cache):
# Some methods called during spawn take the instance parameter purely
# for logging purposes.
# TODO(vui) Clean them up, so we no longer need to keep this variable
self.instance = instance
self.ii = image_info
self.root_gb = instance.root_gb
self.datastore = datastore
self.dc_info = dc_info
self._image_cache = image_cache
@property
def cache_image_folder(self):
if self.ii.image_id is None:
return
return self._image_cache.get_image_cache_folder(
self.datastore, self.ii.image_id)
@property
def cache_image_path(self):
if self.ii.image_id is None:
return
cached_image_file_name = "%s.%s" % (self.ii.image_id,
self.ii.file_type)
return self.cache_image_folder.join(cached_image_file_name)
# Note(vui): See https://bugs.launchpad.net/nova/+bug/1363349
# for cases where mocking time.sleep() can have unintended effects on code
# not under test. For now, unblock the affected test cases by providing
# a wrapper function to work around needing to mock time.sleep()
def _time_sleep_wrapper(delay):
time.sleep(delay)
@decorator.decorator
def retry_if_task_in_progress(f, *args, **kwargs):
retries = max(CONF.vmware.api_retry_count, 1)
delay = 1
for attempt in range(1, retries + 1):
if attempt != 1:
_time_sleep_wrapper(delay)
delay = min(2 * delay, 60)
try:
f(*args, **kwargs)
return
except vexc.TaskInProgress:
pass
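# The back-off above doubles on every failed attempt and is capped at 60s;
# e.g. with CONF.vmware.api_retry_count = 5 (illustrative value) the sleeps
# before retries are 1, 2, 4 and 8 seconds.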
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session, virtapi, volumeops, cluster=None,
datastore_regex=None):
"""Initializer."""
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops
self._cluster = cluster
self._root_resource_pool = vm_util.get_res_pool_ref(self._session,
self._cluster)
self._datastore_regex = datastore_regex
self._base_folder = self._get_base_folder()
self._tmp_folder = 'vmware_temp'
self._datastore_dc_mapping = {}
self._datastore_browser_mapping = {}
self._imagecache = imagecache.ImageCacheManager(self._session,
self._base_folder)
def _get_base_folder(self):
# Enable more than one compute node to run on the same host
if CONF.vmware.cache_prefix:
base_folder = '%s%s' % (CONF.vmware.cache_prefix,
CONF.image_cache_subdirectory_name)
# Ensure that the base folder is unique per compute node
elif CONF.remove_unused_base_images:
base_folder = '%s%s' % (CONF.my_ip,
CONF.image_cache_subdirectory_name)
else:
# Aging disable ensures backward compatibility
base_folder = CONF.image_cache_subdirectory_name
return base_folder
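# Resulting cache folder names (illustrative, assuming the default
# image_cache_subdirectory_name of '_base'): cache_prefix = 'nfs_' yields
# 'nfs__base'; no prefix with image aging enabled and my_ip = '192.168.0.10'
# yields '192.168.0.10_base', keeping each compute node's cache distinct.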
def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
service_content = self._session.vim.service_content
LOG.debug("Extending root virtual disk to %s", requested_size,
instance=instance)
vmdk_extend_task = self._session._call_method(
self._session.vim,
"ExtendVirtualDisk_Task",
service_content.virtualDiskManager,
name=name,
datacenter=dc_ref,
newCapacityKb=requested_size,
eagerZero=False)
try:
self._session._wait_for_task(vmdk_extend_task)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Extending virtual disk failed with error: %s'),
e, instance=instance)
# Clean up files created during the extend operation
files = [name.replace(".vmdk", "-flat.vmdk"), name]
for file in files:
ds_path = ds_obj.DatastorePath.parse(file)
self._delete_datastore_file(ds_path, dc_ref)
LOG.debug("Extended root virtual disk", instance=instance)
def _delete_datastore_file(self, datastore_path, dc_ref):
try:
ds_util.file_delete(self._session, datastore_path, dc_ref)
except (vexc.CannotDeleteFileException,
vexc.FileFaultException,
vexc.FileLockedException,
vexc.FileNotFoundException):
LOG.debug("Unable to delete %(ds)s. There may be more than "
"one process or thread trying to delete the file",
{'ds': datastore_path},
exc_info=True)
def _extend_if_required(self, dc_info, image_info, instance,
root_vmdk_path):
"""Increase the size of the root vmdk if necessary."""
if instance.root_gb * units.Gi > image_info.file_size:
size_in_kb = instance.root_gb * units.Mi
self._extend_virtual_disk(instance, size_in_kb,
root_vmdk_path, dc_info.ref)
def _configure_config_drive(self, instance, vm_ref, dc_info, datastore,
injected_files, admin_password):
session_vim = self._session.vim
cookies = session_vim.client.options.transport.cookiejar
uploaded_iso_path = self._create_config_drive(instance,
injected_files,
admin_password,
datastore.name,
dc_info.name,
instance.uuid,
cookies)
uploaded_iso_path = datastore.build_path(uploaded_iso_path)
self._attach_cdrom_to_vm(
vm_ref, instance,
datastore.ref,
str(uploaded_iso_path))
def build_virtual_machine(self, instance, image_info, dc_info, datastore,
network_info, extra_specs):
vif_infos = vmwarevif.get_vif_info(self._session,
self._cluster,
utils.is_neutron(),
image_info.vif_model,
network_info)
if extra_specs.storage_policy:
profile_spec = vm_util.get_storage_profile_spec(
self._session, extra_specs.storage_policy)
else:
profile_spec = None
# Get the create vm config spec
client_factory = self._session.vim.client.factory
config_spec = vm_util.get_vm_create_spec(client_factory,
instance,
datastore.name,
vif_infos,
extra_specs,
image_info.os_type,
profile_spec=profile_spec)
# Create the VM
vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder,
config_spec, self._root_resource_pool)
return vm_ref
def _get_extra_specs(self, flavor):
extra_specs = vm_util.ExtraSpecs()
for (key, type) in (('cpu_limit', int),
('cpu_reservation', int),
('cpu_shares_level', str),
('cpu_shares_share', int)):
value = flavor.extra_specs.get('quota:' + key)
if value:
setattr(extra_specs.cpu_limits, key, type(value))
hw_version = flavor.extra_specs.get('vmware:hw_version')
extra_specs.hw_version = hw_version
if CONF.vmware.pbm_enabled:
storage_policy = flavor.extra_specs.get('vmware:storage_policy',
CONF.vmware.pbm_default_policy)
extra_specs.storage_policy = storage_policy
return extra_specs
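# Example flavor extra_specs consumed above (hypothetical values):
#   {'quota:cpu_limit': '2000', 'quota:cpu_reservation': '1000',
#    'quota:cpu_shares_level': 'custom', 'quota:cpu_shares_share': '1024',
#    'vmware:hw_version': 'vmx-10', 'vmware:storage_policy': 'gold'}
# Each 'quota:*' value is cast with the listed type before being stored on
# extra_specs.cpu_limits.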
def _fetch_image_as_file(self, context, vi, image_ds_loc):
"""Download image as an individual file to host via HTTP PUT."""
session = self._session
session_vim = session.vim
cookies = session_vim.client.options.transport.cookiejar
LOG.debug("Downloading image file data %(image_id)s to "
"%(file_path)s on the data store "
"%(datastore_name)s",
{'image_id': vi.ii.image_id,
'file_path': image_ds_loc,
'datastore_name': vi.datastore.name},
instance=vi.instance)
images.fetch_image(
context,
vi.instance,
session._host,
session._port,
vi.dc_info.name,
vi.datastore.name,
image_ds_loc.rel_path,
cookies=cookies)
def _fetch_image_as_vapp(self, context, vi, image_ds_loc):
"""Download stream optimized image to host as a vApp."""
# The directory of the imported disk is the unique name
# of the VM use to import it with.
vm_name = image_ds_loc.parent.basename
LOG.debug("Downloading stream optimized image %(image_id)s to "
"%(file_path)s on the data store "
"%(datastore_name)s as vApp",
{'image_id': vi.ii.image_id,
'file_path': image_ds_loc,
'datastore_name': vi.datastore.name},
instance=vi.instance)
images.fetch_image_stream_optimized(
context,
vi.instance,
self._session,
vm_name,
vi.datastore.name,
vi.dc_info.vmFolder,
self._root_resource_pool)
def _fetch_image_as_ova(self, context, vi, image_ds_loc):
"""Download root disk of an OVA image as streamOptimized."""
# The directory of the imported disk is the unique name
# of the VM use to import it with.
vm_name = image_ds_loc.parent.basename
images.fetch_image_ova(context,
vi.instance,
self._session,
vm_name,
vi.datastore.name,
vi.dc_info.vmFolder,
self._root_resource_pool)
def _prepare_sparse_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, "tmp-sparse.vmdk")
return tmp_dir_loc, tmp_image_ds_loc
def _prepare_flat_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
ds_util.mkdir(self._session, tmp_image_ds_loc.parent, vi.dc_info.ref)
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(tmp_image_ds_loc),
vi.ii.file_size_in_kb)
flat_vmdk_name = vi.cache_image_path.basename.replace('.vmdk',
'-flat.vmdk')
flat_vmdk_ds_loc = tmp_dir_loc.join(vi.ii.image_id, flat_vmdk_name)
self._delete_datastore_file(str(flat_vmdk_ds_loc), vi.dc_info.ref)
return tmp_dir_loc, flat_vmdk_ds_loc
def _prepare_stream_optimized_image(self, vi):
vm_name = "%s_%s" % (constants.IMAGE_VM_PREFIX,
uuidutils.generate_uuid())
tmp_dir_loc = vi.datastore.build_path(vm_name)
tmp_image_ds_loc = tmp_dir_loc.join("%s.vmdk" % tmp_dir_loc.basename)
return tmp_dir_loc, tmp_image_ds_loc
def _prepare_iso_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
return tmp_dir_loc, tmp_image_ds_loc
def _move_to_cache(self, dc_ref, src_folder_ds_path, dst_folder_ds_path):
try:
ds_util.file_move(self._session, dc_ref,
src_folder_ds_path, dst_folder_ds_path)
except vexc.FileAlreadyExistsException:
# Folder move has failed. This may be due to the fact that a
# process or thread has already completed the operation.
# Since image caching is synchronized, this can only happen
# due to action external to the process.
# In the event of a FileAlreadyExists we continue,
# all other exceptions will be raised.
LOG.warning(_LW("Destination %s already exists! Concurrent moves "
"can lead to unexpected results."),
dst_folder_ds_path)
def _cache_sparse_image(self, vi, tmp_image_ds_loc):
tmp_dir_loc = tmp_image_ds_loc.parent.parent
converted_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
# converts fetched image to preallocated disk
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(tmp_image_ds_loc),
str(converted_image_ds_loc))
self._delete_datastore_file(str(tmp_image_ds_loc), vi.dc_info.ref)
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_flat_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_stream_optimized_image(self, vi, tmp_image_ds_loc):
dst_path = vi.cache_image_folder.join("%s.vmdk" % vi.ii.image_id)
ds_util.mkdir(self._session, vi.cache_image_folder, vi.dc_info.ref)
try:
ds_util.disk_move(self._session, vi.dc_info.ref,
tmp_image_ds_loc, dst_path)
except vexc.FileAlreadyExistsException:
pass
def _cache_iso_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _get_vm_config_info(self, instance, image_info,
storage_policy=None):
"""Captures all relevant information from the spawn parameters."""
if (instance.root_gb != 0 and
image_info.file_size > instance.root_gb * units.Gi):
reason = _("Image disk size greater than requested disk size")
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
allowed_ds_types = ds_util.get_allowed_datastore_types(
image_info.disk_type)
datastore = ds_util.get_datastore(self._session,
self._cluster,
self._datastore_regex,
storage_policy,
allowed_ds_types)
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
return VirtualMachineInstanceConfigInfo(instance,
image_info,
datastore,
dc_info,
self._imagecache)
def _get_image_callbacks(self, vi):
disk_type = vi.ii.disk_type
if vi.ii.is_ova:
image_fetch = self._fetch_image_as_ova
elif disk_type == constants.DISK_TYPE_STREAM_OPTIMIZED:
image_fetch = self._fetch_image_as_vapp
else:
image_fetch = self._fetch_image_as_file
if vi.ii.is_iso:
image_prepare = self._prepare_iso_image
image_cache = self._cache_iso_image
elif disk_type == constants.DISK_TYPE_SPARSE:
image_prepare = self._prepare_sparse_image
image_cache = self._cache_sparse_image
elif disk_type == constants.DISK_TYPE_STREAM_OPTIMIZED:
image_prepare = self._prepare_stream_optimized_image
image_cache = self._cache_stream_optimized_image
elif disk_type in constants.SUPPORTED_FLAT_VARIANTS:
image_prepare = self._prepare_flat_image
image_cache = self._cache_flat_image
else:
reason = _("disk type '%s' not supported") % disk_type
raise exception.InvalidDiskInfo(reason=reason)
return image_prepare, image_fetch, image_cache
def _fetch_image_if_missing(self, context, vi):
image_prepare, image_fetch, image_cache = self._get_image_callbacks(vi)
LOG.debug("Processing image %s", vi.ii.image_id, instance=vi.instance)
with lockutils.lock(str(vi.cache_image_path),
lock_file_prefix='nova-vmware-fetch_image'):
self.check_cache_folder(vi.datastore.name, vi.datastore.ref)
ds_browser = self._get_ds_browser(vi.datastore.ref)
if not ds_util.file_exists(self._session, ds_browser,
vi.cache_image_folder,
vi.cache_image_path.basename):
LOG.debug("Preparing fetch location", instance=vi.instance)
tmp_dir_loc, tmp_image_ds_loc = image_prepare(vi)
LOG.debug("Fetch image to %s", tmp_image_ds_loc,
instance=vi.instance)
image_fetch(context, vi, tmp_image_ds_loc)
LOG.debug("Caching image", instance=vi.instance)
image_cache(vi, tmp_image_ds_loc)
LOG.debug("Cleaning up location %s", str(tmp_dir_loc),
instance=vi.instance)
self._delete_datastore_file(str(tmp_dir_loc), vi.dc_info.ref)
def _create_and_attach_ephemeral_disk(self, instance, vm_ref, dc_info,
size, adapter_type, path):
disk_type = constants.DISK_TYPE_THIN
vm_util.create_virtual_disk(
self._session, dc_info.ref,
adapter_type,
disk_type,
path,
size)
self._volumeops.attach_disk_to_vm(
vm_ref, instance,
adapter_type, disk_type,
path, size, False)
def _create_ephemeral(self, bdi, instance, vm_ref, dc_info,
datastore, folder, adapter_type):
ephemerals = None
if bdi is not None:
ephemerals = driver.block_device_info_get_ephemerals(bdi)
for idx, eph in enumerate(ephemerals):
size = eph['size'] * units.Mi
at = eph.get('disk_bus', adapter_type)
filename = vm_util.get_ephemeral_name(idx)
path = str(ds_obj.DatastorePath(datastore.name, folder,
filename))
self._create_and_attach_ephemeral_disk(instance, vm_ref,
dc_info, size,
at, path)
# There may be block devices defined but no ephemerals. In this case
# we need to allocate an ephemeral disk if required
if not ephemerals and instance.ephemeral_gb:
size = instance.ephemeral_gb * units.Mi
filename = vm_util.get_ephemeral_name(0)
path = str(ds_obj.DatastorePath(datastore.name, folder,
filename))
self._create_and_attach_ephemeral_disk(instance, vm_ref,
dc_info, size,
adapter_type, path)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None,
power_on=True):
client_factory = self._session.vim.client.factory
image_info = images.VMwareImage.from_image(instance.image_ref,
image_meta)
extra_specs = self._get_extra_specs(instance.flavor)
vi = self._get_vm_config_info(instance, image_info,
extra_specs.storage_policy)
# Creates the virtual machine. The virtual machine reference returned
# is unique within Virtual Center.
vm_ref = self.build_virtual_machine(instance,
image_info,
vi.dc_info,
vi.datastore,
network_info,
extra_specs)
# Cache the vm_ref. This saves a remote call to the VC. This uses the
# instance uuid.
vm_util.vm_ref_cache_update(instance.uuid, vm_ref)
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info,
vm_ref=vm_ref)
# Set the vnc configuration of the instance, vnc port starts from 5900
if CONF.vnc_enabled:
self._get_and_set_vnc_config(client_factory, instance, vm_ref)
block_device_mapping = []
if block_device_info is not None:
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if instance.image_ref:
self._imagecache.enlist_image(
image_info.image_id, vi.datastore, vi.dc_info.ref)
self._fetch_image_if_missing(context, vi)
if image_info.is_iso:
self._use_iso_image(vm_ref, vi)
elif image_info.linked_clone:
self._use_disk_image_as_linked_clone(vm_ref, vi)
else:
self._use_disk_image_as_full_clone(vm_ref, vi)
if len(block_device_mapping) > 0:
msg = "Block device information present: %s" % block_device_info
# NOTE(mriedem): block_device_info can contain an auth_password
# so we have to scrub the message before logging it.
LOG.debug(strutils.mask_password(msg), instance=instance)
# Before attempting to attach any volume, make sure the
# block_device_mapping (i.e. disk_bus) is valid
self._is_bdm_valid(block_device_mapping)
for disk in block_device_mapping:
connection_info = disk['connection_info']
adapter_type = disk.get('disk_bus') or vi.ii.adapter_type
# TODO(hartsocks): instance is unnecessary, remove it
# we still use instance in many locations for no other purpose
# than logging, can we simplify this?
if disk.get('boot_index') == 0:
self._volumeops.attach_root_volume(connection_info,
instance, vi.datastore.ref, adapter_type)
else:
self._volumeops.attach_volume(connection_info,
instance, adapter_type)
# Create ephemeral disks
self._create_ephemeral(block_device_info, instance, vm_ref,
vi.dc_info, vi.datastore, instance.uuid,
vi.ii.adapter_type)
if configdrive.required_by(instance):
self._configure_config_drive(
instance, vm_ref, vi.dc_info, vi.datastore,
injected_files, admin_password)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def _is_bdm_valid(self, block_device_mapping):
"""Checks if the block device mapping is valid."""
valid_bus = (constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_IDE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL)
for disk in block_device_mapping:
adapter_type = disk.get('disk_bus')
if (adapter_type is not None and adapter_type not in valid_bus):
raise exception.UnsupportedHardware(model=adapter_type,
virt="vmware")
def _create_config_drive(self, instance, injected_files, admin_password,
data_store_name, dc_name, upload_folder, cookies):
if CONF.config_drive_format != 'iso9660':
reason = (_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
raise exception.InstancePowerOnFailure(reason=reason)
LOG.info(_LI('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
try:
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive.iso')
cdb.make_drive(tmp_file)
upload_iso_path = "%s/configdrive.iso" % (
upload_folder)
images.upload_iso_to_datastore(
tmp_file, instance,
host=self._session._host,
port=self._session._port,
data_center_name=dc_name,
datastore_name=data_store_name,
cookies=cookies,
file_path=upload_iso_path)
return upload_iso_path
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed with error: %s'),
e, instance=instance)
def _attach_cdrom_to_vm(self, vm_ref, instance,
datastore, file_path):
"""Attach cdrom to VM by reconfiguration."""
client_factory = self._session.vim.client.factory
devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
client_factory,
devices,
'ide')
cdrom_attach_config_spec = vm_util.get_cdrom_attach_config_spec(
client_factory, datastore, file_path,
controller_key, unit_number)
if controller_spec:
cdrom_attach_config_spec.deviceChange.append(controller_spec)
LOG.debug("Reconfiguring VM instance to attach cdrom %s",
file_path, instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec)
LOG.debug("Reconfigured VM instance to attach cdrom %s",
file_path, instance=instance)
def _create_vm_snapshot(self, instance, vm_ref):
LOG.debug("Creating Snapshot of the VM instance", instance=instance)
snapshot_task = self._session._call_method(
self._session.vim,
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance.uuid,
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
self._session._wait_for_task(snapshot_task)
LOG.debug("Created Snapshot of the VM instance", instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
snapshot_task, "Task", "info")
snapshot = task_info.result
return snapshot
@retry_if_task_in_progress
def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
LOG.debug("Deleting Snapshot of the VM instance", instance=instance)
delete_snapshot_task = self._session._call_method(
self._session.vim,
"RemoveSnapshot_Task", snapshot,
removeChildren=False, consolidate=True)
self._session._wait_for_task(delete_snapshot_task)
LOG.debug("Deleted Snapshot of the VM instance", instance=instance)
def _create_linked_clone_from_snapshot(self, instance,
vm_ref, snapshot_ref, dc_info):
"""Create linked clone VM to be deployed to same ds as source VM
"""
client_factory = self._session.vim.client.factory
rel_spec = vm_util.relocate_vm_spec(
client_factory,
datastore=None,
host=None,
disk_move_type="createNewChildDiskBacking")
clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec,
power_on=False, snapshot=snapshot_ref, template=True)
vm_name = "%s_%s" % (constants.SNAPSHOT_VM_PREFIX,
uuidutils.generate_uuid())
LOG.debug("Creating linked-clone VM from snapshot", instance=instance)
vm_clone_task = self._session._call_method(
self._session.vim,
"CloneVM_Task",
vm_ref,
folder=dc_info.vmFolder,
name=vm_name,
spec=clone_spec)
self._session._wait_for_task(vm_clone_task)
LOG.info(_LI("Created linked-clone VM from snapshot"),
instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
vm_clone_task, "Task", "info")
return task_info.result
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
Steps followed are:
1. Get the name of the vmdk file which the VM points to right now.
Can be a chain of snapshots, so we need to know the last in the
chain.
2. Create the snapshot. A new vmdk is created which the VM points to
now. The earlier vmdk becomes read-only.
3. Creates a linked clone VM from the snapshot
4. Exports the disk in the linked clone VM as a streamOptimized disk.
5. Delete the linked clone VM
6. Deletes the snapshot in the original instance.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
def _get_vm_and_vmdk_attribs():
# Get the vmdk info that the VM is pointing to
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
instance.uuid)
if not vmdk.path:
LOG.debug("No root disk defined. Unable to snapshot.",
instance=instance)
raise error_util.NoRootDiskDefined()
lst_properties = ["datastore", "summary.config.guestId"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session,
props)
os_type = query['summary.config.guestId']
datastores = query['datastore']
return (vmdk, datastores, os_type)
vmdk, datastores, os_type = _get_vm_and_vmdk_attribs()
ds_ref = datastores.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
# TODO(vui): convert to creating plain vm clone and uploading from it
# instead of using live vm snapshot.
snapshot_ref = self._create_vm_snapshot(instance, vm_ref)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_vm_ref = None
try:
# Create a temporary VM (linked clone from snapshot), then export
# the VM's root disk to glance via HttpNfc API
snapshot_vm_ref = self._create_linked_clone_from_snapshot(
instance, vm_ref, snapshot_ref, dc_info)
images.upload_image_stream_optimized(
context, image_id, instance, self._session, vm=snapshot_vm_ref,
vmdk_size=vmdk.capacity_in_bytes)
finally:
if snapshot_vm_ref:
vm_util.destroy_vm(self._session, instance, snapshot_vm_ref)
# Deleting the snapshot after destroying the temporary VM created
# based on it allows the instance vm's disks to be consolidated.
# TODO(vui) Add handling for when vmdk volume is attached.
self._delete_vm_snapshot(instance, vm_ref, snapshot_ref)
def reboot(self, instance, network_info, reboot_type="SOFT"):
"""Reboot a VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session, props)
pwr_state = query['runtime.powerState']
tools_status = query['summary.guest.toolsStatus']
tools_running_status = query['summary.guest.toolsRunningStatus']
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
reason = _("instance is not powered on")
raise exception.InstanceRebootFailure(reason=reason)
# If latest vmware tools are installed in the VM, and that the tools
# are running, then only do a guest reboot. Otherwise do a hard reset.
if (tools_status == "toolsOk" and
tools_running_status == "guestToolsRunning" and
reboot_type == "SOFT"):
LOG.debug("Rebooting guest OS of VM", instance=instance)
self._session._call_method(self._session.vim, "RebootGuest",
vm_ref)
LOG.debug("Rebooted guest OS of VM", instance=instance)
else:
LOG.debug("Doing hard reboot of VM", instance=instance)
reset_task = self._session._call_method(self._session.vim,
"ResetVM_Task", vm_ref)
self._session._wait_for_task(reset_task)
LOG.debug("Did hard reboot of VM", instance=instance)
def _destroy_instance(self, instance, destroy_disks=True):
# Destroy a VM instance
try:
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["config.files.vmPathName", "runtime.powerState",
"datastore"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, props)
pwr_state = query['runtime.powerState']
vm_config_pathname = query.get('config.files.vmPathName')
vm_ds_path = None
if vm_config_pathname is not None:
vm_ds_path = ds_obj.DatastorePath.parse(
vm_config_pathname)
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
vm_util.power_off_instance(self._session, instance, vm_ref)
# Un-register the VM
try:
LOG.debug("Unregistering the VM", instance=instance)
self._session._call_method(self._session.vim,
"UnregisterVM", vm_ref)
LOG.debug("Unregistered the VM", instance=instance)
except Exception as excep:
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, got "
"this exception while un-registering the VM: "
"%s"), excep)
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks and vm_ds_path:
try:
dir_ds_compliant_path = vm_ds_path.parent
LOG.debug("Deleting contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
ds_ref_ret = query['datastore']
ds_ref = ds_ref_ret.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
ds_util.file_delete(self._session,
dir_ds_compliant_path,
dc_info.ref)
LOG.debug("Deleted contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
except Exception:
LOG.warning(_LW("In vmwareapi:vmops:_destroy_instance, "
"exception while deleting the VM contents "
"from the disk"), exc_info=True)
except exception.InstanceNotFound:
LOG.warning(_LW('Instance does not exist on backend'),
instance=instance)
except Exception:
LOG.exception(_LE('Destroy instance failed'),
instance=instance)
finally:
vm_util.vm_ref_cache_delete(instance.uuid)
def destroy(self, instance, destroy_disks=True):
"""Destroy a VM instance.
Steps followed for each VM are:
1. Power off, if it is in poweredOn state.
2. Un-register.
3. Delete the contents of the folder holding the VM related data.
"""
if instance.task_state == task_states.RESIZE_REVERTING:
return
# If there is a rescue VM then we need to destroy that one too.
LOG.debug("Destroying instance", instance=instance)
self._destroy_instance(instance, destroy_disks=destroy_disks)
LOG.debug("Instance destroyed", instance=instance)
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
def unpause(self, instance):
msg = _("unpause not supported for vmwareapi")
raise NotImplementedError(msg)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be suspended.
if pwr_state == "poweredOn":
LOG.debug("Suspending the VM", instance=instance)
suspend_task = self._session._call_method(self._session.vim,
"SuspendVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Suspended the VM", instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
else:
LOG.debug("VM was already in suspended state. So returning "
"without doing anything", instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug("Resuming the VM", instance=instance)
suspend_task = self._session._call_method(
self._session.vim,
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Resumed the VM", instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
def _get_rescue_device(self, instance, vm_ref):
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
return vm_util.find_rescue_device(hardware_devices,
instance)
def rescue(self, context, instance, network_info, image_meta):
"""Rescue the specified instance.
Attach the image that the instance was created from and boot from it.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Get the root disk vmdk object
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
ds_ref = vmdk.device.backing.datastore
datastore = ds_util.get_datastore_by_ref(self._session, ds_ref)
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
# Get the image details of the instance
image_info = images.VMwareImage.from_image(instance.image_ref,
image_meta)
vi = VirtualMachineInstanceConfigInfo(instance,
image_info,
datastore,
dc_info,
self._imagecache)
vm_util.power_off_instance(self._session, instance, vm_ref)
# Get the rescue disk path
rescue_disk_path = datastore.build_path(instance.uuid,
"%s-rescue.%s" % (image_info.image_id, image_info.file_type))
# Copy the cached image to the be the rescue disk. This will be used
# as the rescue disk for the instance.
ds_util.disk_copy(self._session, dc_info.ref,
vi.cache_image_path, rescue_disk_path)
# Attach the rescue disk to the instance
self._volumeops.attach_disk_to_vm(vm_ref, instance, vmdk.adapter_type,
vmdk.disk_type, rescue_disk_path)
# Get the rescue device and configure the boot order to
# boot from this device
rescue_device = self._get_rescue_device(instance, vm_ref)
factory = self._session.vim.client.factory
boot_spec = vm_util.get_vm_boot_spec(factory, rescue_device)
# Update the VM with the new boot order and power on
vm_util.reconfigure_vm(self._session, vm_ref, boot_spec)
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def unrescue(self, instance, power_on=True):
"""Unrescue the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Get the rescue device and detach it from the instance.
try:
rescue_device = self._get_rescue_device(instance, vm_ref)
except exception.NotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to access the rescue disk'),
instance=instance)
vm_util.power_off_instance(self._session, instance, vm_ref)
self._volumeops.detach_disk_from_vm(vm_ref, instance, rescue_device,
destroy_disk=True)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def power_off(self, instance):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
"""
vm_util.power_off_instance(self._session, instance)
def power_on(self, instance):
vm_util.power_on_instance(self._session, instance)
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the clone disk step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
instance_uuid = instance.uuid
LOG.debug("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d",
{'instance_uuid': instance_uuid, 'progress': progress},
instance=instance)
instance.progress = progress
instance.save()
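# For example (illustrative), with RESIZE_TOTAL_STEPS = 6 a call with step=3
# stores round(3.0 / 6 * 100) = 50 as the instance progress.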
def _resize_vm(self, vm_ref, flavor):
"""Resizes the VM according to the flavor."""
client_factory = self._session.vim.client.factory
extra_specs = self._get_extra_specs(flavor)
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
int(flavor['vcpus']),
int(flavor['memory_mb']),
extra_specs)
vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
def _resize_disk(self, instance, vm_ref, vmdk, flavor):
if (flavor['root_gb'] > instance.root_gb and
flavor['root_gb'] > vmdk.capacity_in_bytes / units.Gi):
root_disk_in_kb = flavor['root_gb'] * units.Mi
ds_ref = vmdk.device.backing.datastore
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
resized_disk = str(ds_obj.DatastorePath(datastore, folder,
'resized.vmdk'))
ds_util.disk_copy(self._session, dc_info.ref, vmdk.path,
str(resized_disk))
self._extend_virtual_disk(instance, root_disk_in_kb, resized_disk,
dc_info.ref)
self._volumeops.detach_disk_from_vm(vm_ref, instance, vmdk.device)
original_disk = str(ds_obj.DatastorePath(datastore, folder,
'original.vmdk'))
ds_util.disk_move(self._session, dc_info.ref, vmdk.path,
original_disk)
ds_util.disk_move(self._session, dc_info.ref, resized_disk,
vmdk.path)
self._volumeops.attach_disk_to_vm(vm_ref, instance,
vmdk.adapter_type,
vmdk.disk_type, vmdk.path)
def _remove_ephemerals(self, vm_ref):
devices = vm_util.get_ephemerals(self._session, vm_ref)
if devices:
vm_util.detach_devices_from_vm(self._session, vm_ref, devices)
def _resize_create_ephemerals(self, vm_ref, instance, block_device_info):
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
ds_ref = vmdk.device.backing.datastore
datastore = ds_util.get_datastore_by_ref(self._session, ds_ref)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
self._create_ephemeral(block_device_info, instance, vm_ref,
dc_info, datastore, folder, vmdk.adapter_type)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
# Checks if the migration needs a disk resize down.
if (flavor['root_gb'] < instance.root_gb or
flavor['root_gb'] < vmdk.capacity_in_bytes / units.Gi):
reason = _("Unable to shrink disk.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# TODO(garyk): treat dest parameter. Migration needs to be treated.
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
# 1. Power off the instance
vm_util.power_off_instance(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Reconfigure the VM properties
self._resize_vm(vm_ref, flavor)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Reconfigure the disk properties
self._resize_disk(instance, vm_ref, vmdk, flavor)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Purge ephemeral disks
self._remove_ephemerals(vm_ref)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
ds_ref = vmdk.device.backing.datastore
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
original_disk = ds_obj.DatastorePath(datastore, folder,
'original.vmdk')
ds_browser = self._get_ds_browser(ds_ref)
if ds_util.file_exists(self._session, ds_browser,
original_disk.parent,
original_disk.basename):
ds_util.disk_delete(self._session, dc_info.ref,
str(original_disk))
def finish_revert_migration(self, context, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that the VM is off
vm_util.power_off_instance(self._session, instance, vm_ref)
client_factory = self._session.vim.client.factory
# Reconfigure the VM properties
extra_specs = self._get_extra_specs(instance.flavor)
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
int(instance.vcpus),
int(instance.memory_mb),
extra_specs)
vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
# Reconfigure the disks if necessary
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
ds_ref = vmdk.device.backing.datastore
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
original_disk = ds_obj.DatastorePath(datastore, folder,
'original.vmdk')
ds_browser = self._get_ds_browser(ds_ref)
if ds_util.file_exists(self._session, ds_browser,
original_disk.parent,
original_disk.basename):
self._volumeops.detach_disk_from_vm(vm_ref, instance, vmdk.device)
ds_util.disk_delete(self._session, dc_info.ref, vmdk.path)
ds_util.disk_move(self._session, dc_info.ref,
str(original_disk), vmdk.path)
self._volumeops.attach_disk_to_vm(vm_ref, instance,
vmdk.adapter_type,
vmdk.disk_type, vmdk.path)
# Reconfigure ephemerals
self._remove_ephemerals(vm_ref)
self._resize_create_ephemerals(vm_ref, instance, block_device_info)
if power_on:
vm_util.power_on_instance(self._session, instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# 5. Update ephemerals if necessary
self._resize_create_ephemerals(vm_ref, instance, block_device_info)
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
# 6. Start VM
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=6,
total_steps=RESIZE_TOTAL_STEPS)
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False):
"""Spawning live_migration operation for distributing high-load."""
vm_ref = vm_util.get_vm_ref(self._session, instance_ref)
host_ref = self._get_host_ref_from_name(dest)
if host_ref is None:
raise exception.HostNotFound(host=dest)
LOG.debug("Migrating VM to host %s", dest, instance=instance_ref)
try:
vm_migrate_task = self._session._call_method(
self._session.vim,
"MigrateVM_Task", vm_ref,
host=host_ref,
priority="defaultPriority")
self._session._wait_for_task(vm_migrate_task)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance_ref, dest, block_migration)
post_method(context, instance_ref, dest, block_migration)
LOG.debug("Migrated VM to host %s", dest, instance=instance_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_LI("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds"), instances_info)
for instance in instances:
LOG.info(_LI("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance):
"""Return data about the VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
"runtime.powerState"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, vm_props)
max_mem = int(query.get('summary.config.memorySizeMB', 0)) * 1024
num_cpu = int(query.get('summary.config.numCpu', 0))
return hardware.InstanceInfo(
state=VMWARE_POWER_STATES[query['runtime.powerState']],
max_mem_kb=max_mem,
mem_kb=max_mem,
num_cpu=num_cpu)
def _get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config",
"summary.quickStats",
"summary.runtime"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session,
vm_props)
data = {}
        # All of the values received are objects. Convert them to dictionaries
for value in query.values():
prop_dict = vim_util.object_to_dict(value, list_depth=1)
data.update(prop_dict)
return data
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
        # Add a namespace to all of the diagnostics
return {'vmware:' + k: v for k, v in data.items()}
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
state = data.get('powerState')
if state:
state = power_state.STATE_MAP[VMWARE_POWER_STATES[state]]
uptime = data.get('uptimeSeconds', 0)
config_drive = configdrive.required_by(instance)
diags = diagnostics.Diagnostics(state=state,
driver='vmwareapi',
config_drive=config_drive,
hypervisor_os='esxi',
uptime=uptime)
diags.memory_details.maximum = data.get('memorySizeMB', 0)
diags.memory_details.used = data.get('guestMemoryUsage', 0)
# TODO(garyk): add in cpu, nic and disk stats
return diags
def _get_vnc_console_connection(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
opt_value = self._session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
vm_util.VNC_CONFIG_KEY)
if opt_value:
port = int(opt_value.value)
else:
raise exception.ConsoleTypeUnavailable(console_type='vnc')
return {'port': port,
'internal_access_path': None}
@staticmethod
def _get_machine_id_str(network_info):
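        # Builds one ';'-separated record per interface in the form
        # mac;ip;netmask;gateway;broadcast;dns, with records joined by '#'.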
machine_id_str = ''
for vif in network_info:
# TODO(vish): add support for dns2
# TODO(sateesh): add support for injection of ipv6 configuration
network = vif['network']
ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
if len(subnets_v4) > 0:
if len(subnets_v4[0]['ips']) > 0:
ip_v4 = subnets_v4[0]['ips'][0]
if len(subnets_v4[0]['dns']) > 0:
dns = subnets_v4[0]['dns'][0]['address']
netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
gateway_v4 = subnets_v4[0]['gateway']['address']
broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
interface_str = ";".join([vif['address'],
ip_v4 and ip_v4['address'] or '',
netmask_v4 or '',
gateway_v4 or '',
broadcast_v4 or '',
dns or ''])
machine_id_str = machine_id_str + interface_str + '#'
return machine_id_str
def _set_machine_id(self, client_factory, instance, network_info,
vm_ref=None):
"""Set the machine id of the VM for guest tools to pick up
and reconfigure the network interfaces.
"""
if vm_ref is None:
vm_ref = vm_util.get_vm_ref(self._session, instance)
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory,
self._get_machine_id_str(network_info))
LOG.debug("Reconfiguring VM instance to set the machine id",
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, machine_id_change_spec)
LOG.debug("Reconfigured VM instance to set the machine id",
instance=instance)
@utils.synchronized('vmware.get_and_set_vnc_port')
def _get_and_set_vnc_config(self, client_factory, instance, vm_ref):
"""Set the vnc configuration of the VM."""
port = vm_util.get_vnc_port(self._session)
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port)
LOG.debug("Reconfiguring VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, vnc_config_spec)
LOG.debug("Reconfigured VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
def _get_ds_browser(self, ds_ref):
ds_browser = self._datastore_browser_mapping.get(ds_ref.value)
if not ds_browser:
ds_browser = self._session._call_method(
vim_util, "get_dynamic_property", ds_ref, "Datastore",
"browser")
self._datastore_browser_mapping[ds_ref.value] = ds_browser
return ds_browser
def _get_host_ref_from_name(self, host_name):
"""Get reference to the host with the name specified."""
host_objs = self._session._call_method(vim_util, "get_objects",
"HostSystem", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, host_objs)
for host in host_objs:
if hasattr(host, 'propSet'):
if host.propSet[0].val == host_name:
return host.obj
return None
def _create_folder_if_missing(self, ds_name, ds_ref, folder):
"""Create a folder if it does not exist.
        Currently there are two folders that are required on the datastore
- base folder - the folder to store cached images
- temp folder - the folder used for snapshot management and
image uploading
        This method is used to manage those folders, ensuring that they are
        created if they are missing.
        The ds_util method mkdir is used to check if the folder exists. If it
        throws a 'FileAlreadyExistsException' then the folder already exists
        on the datastore.
"""
path = ds_obj.DatastorePath(ds_name, folder)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
LOG.debug("Folder %s created.", path)
except vexc.FileAlreadyExistsException:
# NOTE(hartsocks): if the folder already exists, that
# just means the folder was prepped by another process.
pass
def check_cache_folder(self, ds_name, ds_ref):
"""Check that the cache folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._base_folder)
def check_temp_folder(self, ds_name, ds_ref):
"""Check that the temp folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._tmp_folder)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
client_factory = self._session.vim.client.factory
self._set_machine_id(client_factory, instance, network_info)
def manage_image_cache(self, context, instances):
if not CONF.remove_unused_base_images:
LOG.debug("Image aging disabled. Aging will not be done.")
return
datastores = ds_util.get_available_datastores(self._session,
self._cluster,
self._datastore_regex)
datastores_info = []
for ds in datastores:
dc_info = self.get_datacenter_ref_and_name(ds.ref)
datastores_info.append((ds, dc_info))
self._imagecache.update(context, instances, datastores_info)
def _get_valid_vms_from_retrieve_result(self, retrieve_result):
"""Returns list of valid vms from RetrieveResult object."""
lst_vm_names = []
while retrieve_result:
token = vm_util._get_token(retrieve_result)
for vm in retrieve_result.objects:
vm_name = None
conn_state = None
for prop in vm.propSet:
if prop.name == "name":
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
# Ignoring the orphaned or inaccessible VMs
if (conn_state not in ["orphaned", "inaccessible"] and
uuidutils.is_uuid_like(vm_name)):
lst_vm_names.append(vm_name)
if token:
retrieve_result = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return lst_vm_names
def instance_exists(self, instance):
try:
vm_util.get_vm_ref(self._session, instance)
return True
except exception.InstanceNotFound:
return False
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
vif_model = image_meta.get("hw_vif_model",
constants.DEFAULT_VIF_MODEL)
vif_model = vm_util.convert_vif_model(vif_model)
vif_info = vmwarevif.get_vif_dict(self._session, self._cluster,
vif_model, utils.is_neutron(), vif)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_attach_port_index(self._session, vm_ref)
client_factory = self._session.vim.client.factory
attach_config_spec = vm_util.get_network_attach_config_spec(
client_factory, vif_info, port_index)
LOG.debug("Reconfiguring VM to attach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
attach_config_spec)
except Exception as e:
LOG.error(_LE('Attaching network adapter failed. Exception: '
' %s'),
e, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
LOG.debug("Reconfigured VM to attach interface", instance=instance)
def detach_interface(self, instance, vif):
"""Detach an interface from the instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_vm_detach_port_index(self._session,
vm_ref,
vif['id'])
if port_index is None:
msg = _("No device with interface-id %s exists on "
"VM") % vif['id']
raise exception.NotFound(msg)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
device = vmwarevif.get_network_device(hardware_devices,
vif['address'])
if device is None:
msg = _("No device with MAC address %s exists on the "
"VM") % vif['address']
raise exception.NotFound(msg)
client_factory = self._session.vim.client.factory
detach_config_spec = vm_util.get_network_detach_config_spec(
client_factory, device, port_index)
LOG.debug("Reconfiguring VM to detach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
detach_config_spec)
except Exception as e:
LOG.error(_LE('Detaching network adapter failed. Exception: '
'%s'),
e, instance=instance)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
LOG.debug("Reconfigured VM to detach interface", instance=instance)
def _use_disk_image_as_full_clone(self, vm_ref, vi):
"""Uses cached image disk by copying it into the VM directory."""
instance_folder = vi.instance.uuid
root_disk_name = "%s.vmdk" % vi.instance.uuid
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(root_disk_ds_loc))
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(root_disk_ds_loc))
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, False)
def _sized_image_exists(self, sized_disk_ds_loc, ds_ref):
ds_browser = self._get_ds_browser(ds_ref)
return ds_util.file_exists(
self._session, ds_browser, sized_disk_ds_loc.parent,
sized_disk_ds_loc.basename)
def _use_disk_image_as_linked_clone(self, vm_ref, vi):
"""Uses cached image as parent of a COW child in the VM directory."""
sized_image_disk_name = "%s.vmdk" % vi.ii.image_id
if vi.root_gb > 0:
sized_image_disk_name = "%s.%s.vmdk" % (vi.ii.image_id, vi.root_gb)
sized_disk_ds_loc = vi.cache_image_folder.join(sized_image_disk_name)
# Ensure only a single thread extends the image at once.
# We do this by taking a lock on the name of the extended
# image. This allows multiple threads to create resized
# copies simultaneously, as long as they are different
# sizes. Threads attempting to create the same resized copy
# will be serialized, with only the first actually creating
# the copy.
#
# Note that the object is in a per-nova cache directory,
# so inter-nova locking is not a concern. Consequently we
# can safely use simple thread locks.
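        # (Illustrative: two requests resizing an image to 20Gb serialize on
        # the lock for '<image id>.20.vmdk', while a 40Gb resize of the same
        # image proceeds under a different lock.)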
with lockutils.lock(str(sized_disk_ds_loc),
lock_file_prefix='nova-vmware-image'):
if not self._sized_image_exists(sized_disk_ds_loc,
vi.datastore.ref):
LOG.debug("Copying root disk of size %sGb", vi.root_gb,
instance=vi.instance)
try:
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(sized_disk_ds_loc))
except Exception as e:
LOG.warning(_LW("Root disk file creation "
"failed - %s"), e)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to copy cached '
'image %(source)s to '
'%(dest)s for resize: '
'%(error)s'),
{'source': vi.cache_image_path,
'dest': sized_disk_ds_loc,
'error': e})
try:
ds_util.file_delete(self._session,
sized_disk_ds_loc,
vi.dc_info.ref)
except vexc.FileNotFoundException:
# File was never created: cleanup not
# required
pass
# Resize the copy to the appropriate size. No need
# for cleanup up here, as _extend_virtual_disk
# already does it
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(sized_disk_ds_loc))
# Associate the sized image disk to the VM by attaching to the VM a
# COW child of said disk.
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(sized_disk_ds_loc),
vi.root_gb * units.Mi, vi.ii.linked_clone)
def _use_iso_image(self, vm_ref, vi):
"""Uses cached image as a bootable virtual cdrom."""
self._attach_cdrom_to_vm(
vm_ref, vi.instance, vi.datastore.ref,
str(vi.cache_image_path))
# Optionally create and attach blank disk
if vi.root_gb > 0:
instance_folder = vi.instance.uuid
root_disk_name = "%s.vmdk" % vi.instance.uuid
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
# It is pointless to COW a blank disk
linked_clone = False
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi)
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, linked_clone)
def _update_datacenter_cache_from_objects(self, dcs):
"""Updates the datastore/datacenter cache."""
while dcs:
token = vm_util._get_token(dcs)
for dco in dcs.objects:
dc_ref = dco.obj
ds_refs = []
prop_dict = vm_util.propset_dict(dco.propSet)
name = prop_dict.get('name')
vmFolder = prop_dict.get('vmFolder')
datastore_refs = prop_dict.get('datastore')
if datastore_refs:
datastore_refs = datastore_refs.ManagedObjectReference
for ds in datastore_refs:
ds_refs.append(ds.value)
else:
LOG.debug("Datacenter %s doesn't have any datastore "
"associated with it, ignoring it", name)
for ds_ref in ds_refs:
self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref,
name=name, vmFolder=vmFolder)
if token:
dcs = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
if not dc_info:
dcs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name", "datastore", "vmFolder"])
self._update_datacenter_cache_from_objects(dcs)
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
return dc_info
def list_instances(self):
"""Lists the VM instances that are registered with vCenter cluster."""
properties = ['name', 'runtime.connectionState']
LOG.debug("Getting list of instances from cluster %s",
self._cluster)
vms = []
if self._root_resource_pool:
vms = self._session._call_method(
vim_util, 'get_inner_objects', self._root_resource_pool, 'vm',
'VirtualMachine', properties)
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug("Got total of %s instances", str(len(lst_vm_names)))
return lst_vm_names
def get_vnc_console(self, instance):
"""Return connection info for a vnc console using vCenter logic."""
# vCenter does not run virtual machines and does not run
# a VNC proxy. Instead, you need to tell OpenStack to talk
# directly to the ESX host running the VM you are attempting
# to connect to via VNC.
vnc_console = self._get_vnc_console_connection(instance)
host_name = vm_util.get_host_name_for_vm(
self._session,
instance)
vnc_console['host'] = host_name
# NOTE: VM can move hosts in some situations. Debug for admins.
LOG.debug("VM %(uuid)s is currently on host %(host_name)s",
{'uuid': instance.name, 'host_name': host_name},
instance=instance)
return ctype.ConsoleVNC(**vnc_console)
| 46.797371 | 79 | 0.574334 |
9f7a30d5dc366a9bf92329c43514efd5543db63f | 2,131 | py | Python | structs.py | LHGames-2017/wannable_anonymous | 581870aff1424190e800468279eb18dae43b1035 | [
"MIT"
] | null | null | null | structs.py | LHGames-2017/wannable_anonymous | 581870aff1424190e800468279eb18dae43b1035 | [
"MIT"
] | null | null | null | structs.py | LHGames-2017/wannable_anonymous | 581870aff1424190e800468279eb18dae43b1035 | [
"MIT"
] | null | null | null | import math
class ActionTypes():
DefaultAction, MoveAction, AttackAction, CollectAction, UpgradeAction, StealAction, PurchaseAction, HealAction = range(8)
class UpgradeType():
CarryingCapacity, AttackPower, Defence, MaximumHealth, CollectingSpeed = range(5)
class TileType():
Tile, Wall, House, Lava, Resource, Shop = range(6)
class TileContent():
Empty, Wall, House, Lava, Resource, Shop, Player = range(7)
class Point(object):
# Constructor
def __init__(self, X=0, Y=0):
self.X = X
self.Y = Y
# Overloaded operators
def __add__(self, point):
return Point(self.X + point.X, self.Y + point.Y)
def __sub__(self, point):
return Point(self.X - point.X, self.Y - point.Y)
def __str__(self):
return "{{{0}, {1}}}".format(self.X, self.Y)
# Distance between two Points
def Distance(self, p1, p2):
delta_x = p1.X - p2.X
delta_y = p1.Y - p2.Y
return math.sqrt(math.pow(delta_x, 2) + math.pow(delta_y, 2))
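# Illustrative examples (values assumed): Point(1, 2) + Point(3, 4) gives
# Point(4, 6), and Point().Distance(Point(0, 0), Point(3, 4)) returns 5.0.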
class GameInfo(object):
def __init__(self, json_dict):
self.__dict__ = json_dict
self.HouseLocation = Point(json_dict["HouseLocation"])
self.Map = None
self.Players = dict()
class Tile(object):
def __init__(self, content=None, x=0, y=0):
self.Content = content
self.X = x
self.Y = y
class Player(object):
def __init__(self, health, maxHealth, position, houseLocation, score, carriedRessources,
carryingCapacity=1000):
self.Health = health
self.MaxHealth = maxHealth
self.Position = position
self.HouseLocation = houseLocation
self.Score = score
self.CarriedRessources = carriedRessources
self.CarryingCapacity = carryingCapacity
class PlayerInfo(object):
def __init__(self, health, maxHealth, position):
self.Health = health
self.MaxHealth = maxHealth
self.Position = position
class ActionContent(object):
def __init__(self, action_name, content):
self.ActionName = action_name
self.Content = str(content)
| 25.070588 | 125 | 0.640544 |
443406d783e5cc0fe4123a74e8309c82ffb2ba40 | 111 | py | Python | tests/conftest.py | dpausp/redmineapi-tools | 6eed069d23340c9984914ae84f52d2eff6503d98 | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | dpausp/redmineapi-tools | 6eed069d23340c9984914ae84f52d2eff6503d98 | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | dpausp/redmineapi-tools | 6eed069d23340c9984914ae84f52d2eff6503d98 | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
import sys
sys.path.append(str(Path(__file__).parent.parent / "src"))
print(sys.path)
| 22.2 | 58 | 0.765766 |
727df87ff876191ee6a7b5336c346cf9f024b923 | 3,857 | py | Python | multizip.py | hasindu-madushan/MultiZip | 3fa5cd678beb77d07bdbe901fe4261d6aa2ddaf8 | [
"Apache-2.0"
] | null | null | null | multizip.py | hasindu-madushan/MultiZip | 3fa5cd678beb77d07bdbe901fe4261d6aa2ddaf8 | [
"Apache-2.0"
] | null | null | null | multizip.py | hasindu-madushan/MultiZip | 3fa5cd678beb77d07bdbe901fe4261d6aa2ddaf8 | [
"Apache-2.0"
] | null | null | null | #!python3
#======================================================================
# Author : Hasindu Madushan
# Date : 08 sep 2020
# Module : multizip
# File : multizip.py
# verison: 0.9v
#======================================================================
import multizipcore as core
import zlib
class Multizip:
def __init__(self, file_name):
self._file_name = file_name
self._cdhs = []
self._lfhs = []
self.main_disk = core._Disk(file_name) # load the main disk
offset_encd = self.main_disk.content.find(core._end_of_central_directory_header_signature) # the offset of the encd
# if no encd found the zip file is not valid
if offset_encd == -1:
print("Error: Zip file is not valid")
exit(-1)
self._encd = core._EndCentralDirectoryRecord( self.main_disk.content, offset_encd )
self.main_disk_id = self._encd.this_disk
core._Disk.main_disk_id = self.main_disk_id
core._Disk.main_disk = self.main_disk
self._loadCentralDirHeaders()
@staticmethod
def _loadRange( start_disk, start_offset, length, decompress=False):
res = b''
current_size = 0
disk = start_disk
offset = start_offset
        while True:
            append_size = length - current_size
            if append_size <= disk.size - offset:
                # The remaining bytes fit on the current disk.
                res += disk.content[offset : offset + append_size]
                break
            # Take the rest of the current disk and continue on the next one.
            current_size += disk.size - offset
            res += disk.content[offset:]
            disk = disk.load( disk.disk_id + 1 )
            offset = 0
if decompress:
return zlib.decompress( res, wbits=-15 )
return res
def listFileNames(self):
names = []
for cdh in self._cdhs:
file_name = cdh.file_name.split('/')[-1]
if file_name != '':
names.append(file_name)
return names
def extractFile( self , file_name ):
for cdh in self._cdhs:
if file_name == cdh.file_name.split('/')[-1]:
disk = self.main_disk.load( cdh.local_header_disk )
lfh = core._LocalFileHeader( disk.content, cdh.offset_local_header)
content = Multizip._loadRange( disk, lfh.offset_file_data, lfh.compressed_size , decompress=True)
with open( file_name, 'wb+') as f:
f.write( content )
return
        print("File not found: " + file_name)
def _loadCentralDirHeaders( self ):
self._cdhs.append( core._CentralDirectoryHeader(self.main_disk.content, self._encd.offset_central_dir) )
k = self._cdhs[0].len
i = 1
#!!! It was assumed that all the central dirs are in the main disk
while True:
offset = self._encd.offset_central_dir + k
if self.main_disk.content[offset:offset + 4] != core._central_directory_header_signature:
break
self._cdhs.append( core._CentralDirectoryHeader(self.main_disk.content, offset ) )
k += self._cdhs[i].len
#print(self._cdhs[i].file_name)
i += 1
| 32.141667 | 127 | 0.480425 |
579173100fe340ed042e5c03ec09e33b40f17581 | 2,303 | py | Python | python/oneflow/compatible/single_client/test/ops/test_interface_op_read_and_write.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 3,285 | 2020-07-31T05:51:22.000Z | 2022-03-31T15:20:16.000Z | python/oneflow/compatible/single_client/test/ops/test_interface_op_read_and_write.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 2,417 | 2020-07-31T06:28:58.000Z | 2022-03-31T23:04:14.000Z | python/oneflow/compatible/single_client/test/ops/test_interface_op_read_and_write.py | wangyuyue/oneflow | 0a71c22fe8355392acc8dc0e301589faee4c4832 | [
"Apache-2.0"
] | 520 | 2020-07-31T05:52:42.000Z | 2022-03-29T02:38:11.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp
@flow.unittest.skip_unless_1n2d()
class TestInterfaceOpReadAndWrite(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test(test_case):
flow.config.gpu_device_num(2)
@flow.global_function()
def add() -> tp.Numpy:
with flow.scope.placement("gpu", "0:0-1"):
x = flow.get_variable(
name="x",
shape=(2, 3),
initializer=flow.random_uniform_initializer(),
)
y = flow.get_variable(
name="y",
shape=(2, 3),
initializer=flow.random_uniform_initializer(),
)
return flow.math.add_n([x, y])
flow.train.CheckPoint().init()
if flow.eager_execution_enabled():
add()
x_value = np.random.random((2, 3)).astype(np.float32)
y_value = np.random.random((2, 3)).astype(np.float32)
flow.experimental.set_interface_blob_value("x", x_value)
flow.experimental.set_interface_blob_value("y", y_value)
test_case.assertTrue(
np.array_equal(x_value, flow.experimental.get_interface_blob_value("x"))
)
test_case.assertTrue(
np.array_equal(y_value, flow.experimental.get_interface_blob_value("y"))
)
test_case.assertTrue(np.array_equal(add(), x_value + y_value))
if __name__ == "__main__":
unittest.main()
| 34.893939 | 84 | 0.654364 |
d84d7b0d2a22cb2f7760c0790025451c088ba4eb | 2,860 | py | Python | config/settings/local.py | johanoh/fitness_app | bd8bc093058ab9f3873132d93ac3262d117546e0 | [
"MIT"
] | null | null | null | config/settings/local.py | johanoh/fitness_app | bd8bc093058ab9f3873132d93ac3262d117546e0 | [
"MIT"
] | null | null | null | config/settings/local.py | johanoh/fitness_app | bd8bc093058ab9f3873132d93ac3262d117546e0 | [
"MIT"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="fVt9kdBODh21UxZLkr3dI7O4WKa1Z1kSAvZNcpmPN47sxlANHjoENbGiLcrZwQmX",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development
INSTALLED_APPS = ["whitenoise.runserver_nostatic"] + INSTALLED_APPS # noqa F405
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
if env("USE_DOCKER") == "yes":
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]
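    # e.g. a container address such as 172.18.0.5 contributes 172.18.0.1 (the
    # Docker bridge gateway), so the debug toolbar also renders for requests
    # arriving from the host.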
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Celery
# ------------------------------------------------------------------------------
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates
CELERY_TASK_EAGER_PROPAGATES = True
# Your stuff...
# ------------------------------------------------------------------------------
| 41.449275 | 97 | 0.576573 |
0f26cd388beb22d971dd692d278c7d94afbbb8ae | 6,882 | gyp | Python | third_party/cacheinvalidation/cacheinvalidation.gyp | Gitman1989/chromium | 2b1cceae1075ef012fb225deec8b4c8bbe4bc897 | [
"BSD-3-Clause"
] | 2 | 2017-09-02T19:08:28.000Z | 2021-11-15T15:15:14.000Z | third_party/cacheinvalidation/cacheinvalidation.gyp | Gitman1989/chromium | 2b1cceae1075ef012fb225deec8b4c8bbe4bc897 | [
"BSD-3-Clause"
] | null | null | null | third_party/cacheinvalidation/cacheinvalidation.gyp | Gitman1989/chromium | 2b1cceae1075ef012fb225deec8b4c8bbe4bc897 | [
"BSD-3-Clause"
] | 1 | 2020-04-13T05:45:10.000Z | 2020-04-13T05:45:10.000Z | # Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(akalin): Make it so that warnings are errors on Windows.
# TODO(akalin): Clean up warnings on Windows.
{
'variables': {
# The root directory for the proto files.
'proto_dir_root': 'files/src',
# The relative path of the cacheinvalidation proto files from
# proto_dir_root.
# TODO(akalin): Add a RULE_INPUT_DIR predefined variable to gyp so
# we don't need this variable.
'proto_dir_relpath': 'google/cacheinvalidation',
# Where files generated from proto files are put.
'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out',
},
'targets': [
# The rule/action to generate files from the cacheinvalidation proto
# files.
{
'target_name': 'cacheinvalidation_proto',
'type': 'none',
'sources': [
'<(proto_dir_root)/google/cacheinvalidation/internal.proto',
'<(proto_dir_root)/google/cacheinvalidation/ticl_persistence.proto',
'<(proto_dir_root)/google/cacheinvalidation/types.proto',
],
# TODO(akalin): This block was copied from the sync_proto target
# from chrome.gyp. Decomp the shared blocks out somehow.
'rules': [
{
'rule_name': 'genproto',
'extension': 'proto',
'inputs': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
],
'outputs': [
'<(protoc_out_dir)/<(proto_dir_relpath)/<(RULE_INPUT_ROOT).pb.h',
'<(protoc_out_dir)/<(proto_dir_relpath)/<(RULE_INPUT_ROOT).pb.cc',
],
'action': [
'<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
'--proto_path=<(proto_dir_root)',
# This path needs to be prefixed by proto_path, so we can't
# use RULE_INPUT_PATH (which is an absolute path).
'<(proto_dir_root)/<(proto_dir_relpath)/<(RULE_INPUT_NAME)',
'--cpp_out=<(protoc_out_dir)',
],
'message': 'Generating C++ code from <(RULE_INPUT_PATH)',
},
],
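      # For example, the rule above turns types.proto into
      # <(protoc_out_dir)/google/cacheinvalidation/types.pb.h and types.pb.cc.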
'dependencies': [
'../../third_party/protobuf/protobuf.gyp:protoc#host',
],
# This target exports a hard dependency because it generates header
# files.
'hard_dependency': 1,
},
# The main cache invalidation library. External clients should depend
# only on this.
{
'target_name': 'cacheinvalidation',
'type': '<(library)',
'sources': [
'<(protoc_out_dir)/<(proto_dir_relpath)/internal.pb.h',
'<(protoc_out_dir)/<(proto_dir_relpath)/internal.pb.cc',
'<(protoc_out_dir)/<(proto_dir_relpath)/ticl_persistence.pb.h',
'<(protoc_out_dir)/<(proto_dir_relpath)/ticl_persistence.pb.cc',
'<(protoc_out_dir)/<(proto_dir_relpath)/types.pb.h',
'<(protoc_out_dir)/<(proto_dir_relpath)/types.pb.cc',
'overrides/google/cacheinvalidation/callback.h',
'overrides/google/cacheinvalidation/compiler-specific.h',
'overrides/google/cacheinvalidation/gmock.h',
'overrides/google/cacheinvalidation/googletest.h',
'overrides/google/cacheinvalidation/hash_map.h',
'overrides/google/cacheinvalidation/logging.h',
'overrides/google/cacheinvalidation/md5.h',
'overrides/google/cacheinvalidation/mutex.h',
'overrides/google/cacheinvalidation/random.h',
'overrides/google/cacheinvalidation/scoped_ptr.h',
        'overrides/google/cacheinvalidation/stl-namespace.h',
        'overrides/google/cacheinvalidation/string_util.h',
'overrides/google/cacheinvalidation/time.h',
'files/src/google/cacheinvalidation/invalidation-client-impl.cc',
'files/src/google/cacheinvalidation/invalidation-client-impl.h',
'files/src/google/cacheinvalidation/invalidation-client.cc',
'files/src/google/cacheinvalidation/invalidation-client.h',
'files/src/google/cacheinvalidation/invalidation-types.h',
'files/src/google/cacheinvalidation/log-macro.h',
'files/src/google/cacheinvalidation/network-manager.cc',
'files/src/google/cacheinvalidation/network-manager.h',
'files/src/google/cacheinvalidation/persistence-manager.cc',
'files/src/google/cacheinvalidation/persistence-manager.h',
'files/src/google/cacheinvalidation/persistence-utils.cc',
'files/src/google/cacheinvalidation/persistence-utils.h',
'files/src/google/cacheinvalidation/proto-converter.cc',
'files/src/google/cacheinvalidation/proto-converter.h',
'files/src/google/cacheinvalidation/registration-update-manager.cc',
'files/src/google/cacheinvalidation/registration-update-manager.h',
'files/src/google/cacheinvalidation/session-manager.cc',
'files/src/google/cacheinvalidation/session-manager.h',
'files/src/google/cacheinvalidation/throttle.cc',
'files/src/google/cacheinvalidation/throttle.h',
'files/src/google/cacheinvalidation/version-manager.cc',
'files/src/google/cacheinvalidation/version-manager.h',
],
'include_dirs': [
'<(protoc_out_dir)',
'./overrides',
'./files/src',
],
'dependencies': [
'../../base/base.gyp:base',
'../../third_party/protobuf/protobuf.gyp:protobuf_lite',
'cacheinvalidation_proto',
],
# This target exports a hard dependency because depedents require
# cacheinvalidation_proto to compile.
'hard_dependency': 1,
'direct_dependent_settings': {
'include_dirs': [
'<(protoc_out_dir)',
'./overrides',
'./files/src',
],
},
'export_dependent_settings': [
'../../third_party/protobuf/protobuf.gyp:protobuf_lite',
'cacheinvalidation_proto',
],
},
# Unittests for the cache invalidation library.
{
'target_name': 'cacheinvalidation_unittests',
'type': 'executable',
'sources': [
'../../base/test/run_all_unittests.cc',
'files/src/google/cacheinvalidation/system-resources-for-test.h',
'files/src/google/cacheinvalidation/invalidation-client-impl_test.cc',
'files/src/google/cacheinvalidation/persistence-manager_test.cc',
'files/src/google/cacheinvalidation/persistence-utils_test.cc',
'files/src/google/cacheinvalidation/throttle_test.cc',
],
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
'cacheinvalidation',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| 42.220859 | 78 | 0.648939 |
1269906134ce5f730cf8d8fbeff2dd4a59fa0df6 | 2,144 | py | Python | zerver/tests/test_email_log.py | gagansh7171/zulip | 58a44698963ffcd21b57937a4913562a032c5631 | [
"Apache-2.0"
] | 1 | 2020-09-19T09:18:39.000Z | 2020-09-19T09:18:39.000Z | zerver/tests/test_email_log.py | gagansh7171/zulip | 58a44698963ffcd21b57937a4913562a032c5631 | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_email_log.py | gagansh7171/zulip | 58a44698963ffcd21b57937a4913562a032c5631 | [
"Apache-2.0"
] | null | null | null | import os
from unittest import mock
from django.conf import settings
from zerver.lib.test_classes import ZulipTestCase
from zproject.email_backends import get_forward_address
class EmailLogTest(ZulipTestCase):
def test_generate_and_clear_email_log(self) -> None:
with self.settings(EMAIL_BACKEND='zproject.email_backends.EmailLogBackEnd'), \
mock.patch('zproject.email_backends.EmailBackend.send_messages'), \
self.assertLogs(level="INFO") as m, \
self.settings(DEVELOPMENT_LOG_EMAILS=True):
result = self.client_get('/emails/generate/')
self.assertEqual(result.status_code, 302)
self.assertIn('emails', result['Location'])
result = self.client_get("/emails/")
self.assert_in_success_response(["All the emails sent in the Zulip"], result)
result = self.client_get('/emails/clear/')
self.assertEqual(result.status_code, 302)
result = self.client_get(result['Location'])
self.assertIn('manually generate most of the emails by clicking', str(result.content))
output_log = "INFO:root:Emails sent in development are available at http://testserver/emails"
self.assertEqual(m.output, [output_log for i in range(15)])
def test_forward_address_details(self) -> None:
forward_address = "forward-to@example.com"
result = self.client_post("/emails/", {"forward_address": forward_address})
self.assert_json_success(result)
self.assertEqual(get_forward_address(), forward_address)
with self.settings(EMAIL_BACKEND='zproject.email_backends.EmailLogBackEnd'):
with mock.patch('zproject.email_backends.EmailBackend.send_messages'):
result = self.client_get('/emails/generate/')
self.assertEqual(result.status_code, 302)
self.assertIn('emails', result['Location'])
result = self.client_get(result['Location'])
self.assert_in_success_response([forward_address], result)
os.remove(settings.FORWARD_ADDRESS_CONFIG_FILE)
| 47.644444 | 105 | 0.678172 |
e7bc1d76066199a987941b509d5a9f7d031fa968 | 2,276 | py | Python | moving.py | E15dev/maze-animation | 5419222d616efc67ee4e56949123ed4f1b046c51 | [
"MIT"
] | null | null | null | moving.py | E15dev/maze-animation | 5419222d616efc67ee4e56949123ed4f1b046c51 | [
"MIT"
] | null | null | null | moving.py | E15dev/maze-animation | 5419222d616efc67ee4e56949123ed4f1b046c51 | [
"MIT"
] | null | null | null | import pygame
import time
import random
import math
from pynput.keyboard import *
def refresh():
pygame.display.flip()
return 0
def set_window_fps(fps):
pygame.display.set_caption('pygame render ' + str(fps) + ' fps')
def get_state(x, y):
random.seed((x * 0xFFFFFE) + y)
x = random.randrange(0, 2) # values for 0 to 1
return bool(x)
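# Note: get_state is deterministic because the generator is re-seeded from the
# cell coordinates, e.g. get_state(50, 75) always yields the same value, which
# keeps the maze pattern stable while the view scrolls.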
def draw():
for x in range(0, width + 100, cell_size):
for y in range(0, height + 100, cell_size):
if get_state(x + xm, y + ym):
pygame.draw.line(screen, (255, 255, 255), (x, y), (x+cell_size, y+cell_size))
else:
pygame.draw.line(screen, (255, 255, 255), (x + cell_size, y), (x, y + cell_size))
# draw map center
pygame.draw.line(screen, (255, 0, 0), (center_width - xm - 5, center_height - ym - 5), (center_width - xm + 5, center_height - ym + 5))
pygame.draw.line(screen, (255, 0, 0), (center_width - xm - 5, center_height - ym + 5), (center_width - xm + 5, center_height - ym - 5))
def press_on(key):
global move_x, move_y
key = str(key)
if key == 'Key.left':
move_x -= 1
elif key == 'Key.up':
move_y += 1
elif key == 'Key.right':
move_x += 1
elif key == 'Key.down':
move_y -= 1
return 0
# config
cell_size = 25
xm = 0
ym = 0
move_x = 0
move_y = 0
(width, height) = (500, 500) # screen resolution
# init
center_width = math.floor(width / 2)
center_height = math.floor(width / 2)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('The game')
radius = 50
iteration = 0
frames_time = []
# more config
screen.fill((0, 0, 0))
pygame.display.flip()
with Listener(on_press = press_on) as listener:
running = True
while running:
iteration += 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
try:
fps = 1 / (time.time() - last_fps)
        except (NameError, ZeroDivisionError):
            # No previous timestamp on the first frame, or a zero time delta.
            fps = "error"
set_window_fps(fps)
last_fps = time.time()
screen.fill((0, 0, 0))
draw()
refresh() # swap buffer to screen
xm -= move_x*cell_size
move_x = 0
ym += move_y*cell_size
move_y = 0
print('end')
| 24.473118 | 139 | 0.576011 |
08f2e57a133b008df9dbbdb39fc2e944830fba16 | 205 | py | Python | awards/forms.py | Sundaybrian/prodev | ed51fbd7c70941de36f7bc59c1940acfafaecf72 | [
"MIT"
] | null | null | null | awards/forms.py | Sundaybrian/prodev | ed51fbd7c70941de36f7bc59c1940acfafaecf72 | [
"MIT"
] | 7 | 2020-06-05T22:53:10.000Z | 2022-02-10T08:29:14.000Z | awards/forms.py | Sundaybrian/prodev | ed51fbd7c70941de36f7bc59c1940acfafaecf72 | [
"MIT"
] | null | null | null | from django import forms
from .models import Review
class NewReviewForm(forms.ModelForm):
'''
form to create a rating
'''
class Meta:
model=Review
exclude=['post','judge']
| 18.636364 | 37 | 0.634146 |
d1d96662e59585463bd7daac126eec3318d56dc9 | 492 | py | Python | 102_user_display_and_settings_module/main/migrations/0004_auto_20190925_2131.py | prototypefund/ampel2go_community | 08759509287efef08218a4eb6e6e2b029b862b4a | [
"Apache-2.0"
] | 2 | 2020-08-08T15:38:08.000Z | 2020-11-18T13:13:45.000Z | 102_user_display_and_settings_module/main/migrations/0004_auto_20190925_2131.py | prototypefund/ampel2go_community | 08759509287efef08218a4eb6e6e2b029b862b4a | [
"Apache-2.0"
] | 1 | 2021-09-22T19:46:49.000Z | 2021-09-22T19:46:49.000Z | 102_user_display_and_settings_module/main/migrations/0004_auto_20190925_2131.py | prototypefund/ampel2go_community | 08759509287efef08218a4eb6e6e2b029b862b4a | [
"Apache-2.0"
] | 1 | 2022-02-16T09:52:31.000Z | 2022-02-16T09:52:31.000Z | # Generated by Django 2.2.5 on 2019-09-25 19:31
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0003_auto_20190925_2129'),
]
operations = [
migrations.AlterField(
model_name='tutorial',
name='tutorial_published',
field=models.DateTimeField(default=datetime.datetime(2019, 9, 25, 21, 31, 33, 43516), verbose_name='date published'),
),
]
| 24.6 | 129 | 0.638211 |
d43223a631d14adea16f7b098efbc154f340158a | 38,800 | py | Python | musx/pitch.py | ricktaube/musx | 5fb116b1a1ade9ef42a9a3c8311c604795e0af6a | [
"BSD-3-Clause"
] | 9 | 2021-06-03T21:36:53.000Z | 2021-06-13T01:53:17.000Z | musx/pitch.py | musx-admin/musx | 5fb116b1a1ade9ef42a9a3c8311c604795e0af6a | [
"BSD-3-Clause"
] | 2 | 2021-06-03T18:38:57.000Z | 2021-06-13T10:46:28.000Z | musx/pitch.py | musx-admin/musx | 5fb116b1a1ade9ef42a9a3c8311c604795e0af6a | [
"BSD-3-Clause"
] | 1 | 2022-02-12T23:04:27.000Z | 2022-02-12T23:04:27.000Z | ###############################################################################
"""
Defines the equal tempered chromatic scale over 11 octaves and provides a
mapping between alternate representations of pitch material: Pitch instances,
hertz frequency, key numbers, and pitch names.
The Pitch class represent equal tempered pitches and returns information
in hertz, keynum, pitch class, Pnum and pitch name formats. Pitches
can be compared using standard math relations and maintain proper spelling
when complemented or transposed by an Interval.
The keynum, pitch and hertz functions provide mapping between the three
alternate representations of frequency:
* `keynum()` : converts a pitch name or hertz value into a key number.
* `pitch()` : converts a hertz value, key number or pitch name into a Pitch.
* `hertz()` : converts a pitch name or key number into a hertz value.
The functions can map individual values, lists of values, and string
sequences of values.
**Lists and string sequences:**
The three functions convert individual values, lists of values and string
sequences containing values. In a string sequence use spaces
to delimit the items:
* A string sequence of key numbers: '0 40 21 33 87 12'
* A string sequence of hertz values: '440 880 220.12'
* A string sequence of pitch names: 'c4 d4 eb4 f#4 g3'
In any string sequence you can repeat an item by appending
one or more commas to its rightside. For example, the motive of Beethoven's 5th Symphony
'G4 G4 G4 Eb4' can be written as a pitch sequence 'G4,, Eb4',
a key number sequence '67,, 63', or a hertz sequence '392,, 311'.
A string sequence of pitches also supports "sticky" octave numbers: once
an octave number is provided it remains in effect until a different octave is given.
For example a diatonic scale from A3 to A4 is 'a3 b c4 d e f g a'.
**Rests:**
The special string name 'R' or 'r' represents a musical rest. The key number of
a rest is -1, its hertz value is 0.0 and its Pitch is an empty pitch: Pitch().
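**Examples:**
Assuming standard A440 equal temperament, `keynum('C4')` returns 60,
`hertz('A4')` returns 440.0, and `pitch(69)` returns a Pitch representing A4.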
"""
__pdoc__ = {
'parse_number_sequence': False,
'parse_pitch_sequence': False,
'chromatic_scale': False,
'build_chromatic_scale': False
}
from enum import IntEnum
from collections import namedtuple
import math
from . import tools
PitchBase = namedtuple('PitchBase', ['letter', 'accidental', 'octave'])
PitchBase.__doc__ = """Base class for the immutable implementation of Pitch."""
class Pitch (PitchBase):
"""
Creates a Pitch from a string or list, if neither is provided
an empty Pitch is returned. The legal constructor forms are:
* Pitch(string) - creates a Pitch from a pitch name string.
* Pitch([l, a, o]) - creates a Pitch from a three element
pitch list containing a letter, accidental and octave index
(see below).
* Pitch() - creates an empty Pitch.
The format of a Pitch name string is:
```
<pitch> := <letter>, [<accidental>], <octave>
<letter> := 'C' | 'D' | 'E' | 'F' | 'G' | 'A' | 'B' |
'c' | 'd' | 'e' | 'f' | 'g' | 'a' | 'b'
<accidental> := <2flat> | <flat> | <natural> | <sharp> | <2sharp>
<2flat> := 'bb' | 'ff'
<flat> := 'b' | 'f'
<natural> := ''
<sharp> := '#' | 's'
<2sharp> := '##' | 'ss'
<octave> := '00' | '0' | '1' | '2' | '3' | '4' | '5' |
'6' | '7' | '8' | '9'
```
Parameters
----------
arg : string | list | None
A pitch name string, a list of three pitch indexes, or None.
Returns
-------
A new Pitch instance.
Raises
------
    * TypeError if arg is an invalid pitch list.
* TypeError if arg is an invalid pitch.
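    Examples
    --------
    Pitch("C#5"), Pitch("bf2") and Pitch([0, 3, 6]) are all valid calls; the
    list form [0, 3, 6] spells C#5 (letter index 0, accidental index 3,
    octave index 6).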
"""
# Pitch letter constants (0-6).
_let_C, _let_D, _let_E, _let_F, _let_G, _let_A, _let_B = range(7)
# Maps pitch-letter names onto zero based indexes.
_letter_map = {"C": _let_C, "D": _let_D, "E": _let_E, "F": _let_F,
"G": _let_G, "A": _let_A, "B": _let_B,
"c": _let_C, "d": _let_D, "e": _let_E, "f": _let_F,
"g": _let_G, "a": _let_A, "b": _let_B
}
# Octave constants for code readability.
_oct_00, _oct_0, _oct_1, _oct_2, _oct_3, _oct_4, _oct_5, _oct_6, _oct_7, _oct_8, _oct_9 = range(11)
# Maps octave names onto zero based indexes.
_octave_map = {"00": _oct_00, "0": _oct_0, "1": _oct_1, "2": _oct_2, "3": _oct_3, "4": _oct_4,
"5": _oct_5, "6": _oct_6, "7": _oct_7, "8": _oct_8, "9": _oct_9}
# Accidental constants for code readability.
_acc_2f, _acc_f, _acc_n, _acc_s, _acc_2s = range(5)
# Maps accidental names onto zero based indexes.
_accidental_map = {"bb": _acc_2f, "b": _acc_f, "": _acc_n, "#": _acc_s, "##": _acc_2s,
"ff": _acc_2f, "f": _acc_f, "n": _acc_n, "s": _acc_s, "ss": _acc_2s}
_enharmonic_map = [{_acc_s: 'B#', _acc_n: 'C', _acc_2f: 'Dbb'},
{_acc_2s: 'B##', _acc_s: 'C#', _acc_f: 'Db'},
{_acc_2s: 'C##', _acc_n: 'D', _acc_2f: 'Ebb'},
{_acc_s: 'D#', _acc_f: 'Eb', _acc_2f: 'Fbb'},
{_acc_2s: 'D##', _acc_n: 'E', _acc_f: 'Fb'},
{_acc_s: 'E#', _acc_n: 'F', _acc_2f: 'Gbb'},
{_acc_2s: 'E##', _acc_s: 'F#', _acc_f: 'Gb'},
{_acc_2s: 'F##', _acc_n: 'G', _acc_2f: 'Abb'},
{_acc_s: 'G#', _acc_f: 'Ab'},
{_acc_2s: 'G##', _acc_n: 'A', _acc_2f: 'Bbb'},
{_acc_s: 'A#', _acc_f: 'Bb', _acc_2f: 'Cbb'},
{_acc_2s: 'A##', _acc_n: 'B', _acc_f: 'Cb'}]
# Reverse map of pitch indexes 0-6 onto their canonical names.
_letter_names = ['C', 'D', 'E', 'F', 'G', 'A', 'B']
# Reverse map of accidental indexes 0-4 onto their symbolic names.
_accidental_names = ['bb', 'b', '', '#', '##']
# Reverse map of pitch indexes 0-4 onto their safe names.
_accidental_safe_names = ['ff', 'f', '', 's', 'ss']
# Reverse map of pitch indexes 0-10 onto their canonical names.
_octave_names = ['00', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# Diatonic letter distances in semitones.
_letter_spans = [0, 2, 4, 5, 7, 9, 11]
# ## The minimum pnum identifier value.
# _min_pcid = (_let_C << 4 | _acc_2f)
#
# ## The maximum pnum identifier value.
# _max_pcid = (_let_B << 4 | _acc_2s)
pnums = IntEnum('Pnum',
[(lj + aj, ((li << 4) + ai))
for li, lj in enumerate(["C", "D", "E", "F", "G", "A", "B"])
for ai, aj in enumerate(["ff", "f", "", "s", "ss"])])
"""
    A class variable that holds an IntEnum of all possible letter-and-accidental
    combinations Cff up to Bss. (Since the accidental character # is illegal as a
    python enum name, pnums use the 'safe' versions of the accidental
    names: 'ff' up to 'ss'.)
A pnum value is a one byte integer 'llllaaaa', where 'llll' is its
letter index 0-6, and 'aaaa' is its accidental index 0-4. Pnums can be
compared using regular math relations.
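    For example, the pnum for Cs (C-sharp) combines letter index 0 and
    accidental index 3, giving the value (0 << 4) + 3 == 3.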
"""
def __new__(cls, arg=None):
# Check for valid types and lengths up front.
if arg is None:
return cls._values_to_pitch(arg, arg, arg)
if isinstance(arg, list):
if len(arg) == 3 and all(isinstance(a, int) for a in arg):
return cls._values_to_pitch(*arg)
else:
raise TypeError(f'{arg} is an invalid pitch list.')
if isinstance(arg, str) and len(arg) >= 2:
return cls._string_to_pitch(arg)
raise TypeError(f"'{arg}' is an invalid pitch.")
@classmethod
def _string_to_pitch(cls, arg):
"""
A private method that accepts a pitch string and parses it into three
integer index values: letter, accidental, and octave. If all three values can
        be parsed from the string they are then passed to the _values_to_pitch()
method to assign them to the instance's attributes. A ValueError
should be raised for any value that cannot be parsed from the string. See:
_values_to_pitch().
Parameter
---------
arg : string
The string to convert to a pitch.
Returns
-------
A new Pitch instance.
Raises
------
* ValueError is arg is not a valid pitch name.
"""
strlen = len(arg)
index = 0
letter = cls._letter_map.get(arg[index].upper())
if letter is None:
raise ValueError(f"'{arg}' is not a valid pitch name.")
while index < strlen and not arg[index].isdigit():
index += 1
if index == strlen:
raise ValueError(f"'{arg}' is not a valid pitch name.")
octave = cls._octave_map.get(arg[index::])
if octave is None:
raise ValueError(f"'{arg}' is not a valid pitch name.")
accidental = cls._acc_n # default accidental natural
if index > 1:
accidental = cls._accidental_map.get(arg[1:index])
if accidental is None:
raise ValueError(f"'{arg}' is not a valid pitch name.")
return cls._values_to_pitch(letter, accidental, octave)
@classmethod
def _values_to_pitch(cls, let, acc, ova):
"""
A private method that checks three values (letter, accidental and octave) to make
sure they are either valid index values for the letter, accidental and octave
attributes or they are None. The valid integer values are:
* A letter index 0-6 corresponding to the pitch letter names ['C', 'D', 'E', 'F', 'G', 'A', 'B'].
* An accidental index 0-4 corresponding to symbolic accidental names ['bb', 'b', '', '#', '##']
or 'safe' accidental names ['ff', 'f', 'n', 's', 'ss'].
* An octave index 0-10 corresponding to the pitch octave names ['00', '0', '1', '2', '3',
'4', '5', '6', '7', '8', '9'].
If any value is out of range the method will raise a ValueError for that value. If
all values are legal the method will make the following 'edge case' tests:
* Values cannot produce a pitch below midi key number 0 (lowest pitch is 'C00')
* Values cannot produce a pitch above midi key number 127 (highest pitches are 'G9' and 'Abb9')
        If all the edge case checks pass then _values_to_pitch() calls the super's __new__() method:
        super(Pitch, cls).__new__(cls, let, acc, ova)
        otherwise it raises a ValueError for the offending values. NOTE: _values_to_pitch
        is the only method in this class that calls the super method.
Parameter
---------
let : string
The pitch letter string to convert to a pitch.
acc : string
The accidental string to convert to a pitch.
ova : string
The octave string to convert to a pitch.
Returns
-------
A new Pitch instance.
Raises
------
* ValueError is arg is not a valid pitch name.
"""
if let is None:
return super(Pitch, cls).__new__(cls, None, None, None)
if 0 <= let <= 6:
if 0 <= acc <= 4:
if 0 <= ova <= 10:
if ova == 0 and let == cls._let_C and acc < cls._acc_n:
nam = cls._letter_names[let] + cls._accidental_names[acc] + cls._octave_names[ova]
raise ValueError(f"Pitch '{nam}': midi number below 0.")
if ova == 10 and cls._letter_spans[let] + acc-2 > 7:
nam = cls._letter_names[let] + cls._accidental_names[acc] + cls._octave_names[ova]
raise ValueError(f"Pitch '{nam}': midi number exceeds 127.")
return super(Pitch, cls).__new__(cls, let, acc, ova)
else:
raise ValueError(f"'{ova}' is not a valid pitch octave 0-10.")
else:
raise ValueError(f"'{acc}' is not a valid pitch accidental 0-4.")
else:
raise ValueError(f"'{let}' is not a valid pitch letter.")
def __repr__(self):
"""
        Returns an external form that, if evaluated, will create
a Pitch with the same content as this pitch.
"""
s = self.string()
if s:
return f'Pitch("{s}")'
return 'Pitch()'
    __str__ = __repr__
# def __str__(self):
# """
# Returns a string displaying information about the pitch within angle
# brackets. Information includes the class name, the pitch text, and
# the id of the object. It is important that you implement the __str__
# method precisely. In particular, for __str__ you want to see
# '<', '>', '0x' in your output string. The format of your output
# strings from your version of this function must look EXACTLY the
# same as in the two examples below.
# Example
# -------
# >>> str(Pitch("C#6"))
# '<Pitch: C#6 0x7fdb17e2e950>'
# >>> str(Pitch())
# '<Pitch: empty 0x7fdb1898fa70>'
# """
# s = self.string()
# return f'<Pitch: {s if s else "empty"} {hex(id(self))}>'
def __lt__(self, other):
"""
Implements Pitch < Pitch.
This method should call self.pos() and other.pos() to get the two
values to compare.
Parameters
----------
other : Pitch
The pitch to compare with this pitch.
Returns
-------
True if this Pitch is less than the other.
Raises
------
* TypeError if other is not a Pitch.
"""
if isinstance(other, Pitch):
return self.pos() < other.pos()
raise TypeError(f'{other} is not a Pitch.')
def __le__(self, other):
"""
Implements Pitch <= Pitch.
This method should call self.pos() and other.pos() to get the values
to compare.
Parameters
----------
other : Pitch
The pitch to compare with this pitch.
Returns
-------
True if this Pitch is less than or equal to the other.
Raises
------
* TypeError if other is not a Pitch.
"""
if isinstance(other, Pitch):
return self.pos() <= other.pos()
raise TypeError(f'{other} is not a Pitch.')
def __eq__(self, other):
"""
Implements Pitch == Pitch.
This method should call self.pos() and other.pos() to get the values
to compare.
Parameters
----------
other : Pitch
The pitch to compare with this pitch.
Returns
-------
True if this Pitch is equal to the other.
Raises
------
* TypeError if other is not a Pitch.
"""
if isinstance(other, Pitch):
return self.pos() == other.pos()
raise TypeError(f'{other} is not a Pitch.')
def __ne__(self, other):
"""
Implements Pitch != Pitch.
This method should call self.pos() and other.pos() to get the values
to compare.
Parameters
----------
other : Pitch
The pitch to compare with this pitch.
Returns
-------
True if this Pitch is not equal to the other.
Raises
------
* TypeError if other is not a Pitch.
"""
if isinstance(other, Pitch):
return self.pos() != other.pos()
raise TypeError(f'{other} is not a Pitch.')
def __ge__(self, other):
"""
Implements Pitch >= Pitch.
This method should call self.pos() and other.pos() to get the values
to compare.
Parameters
----------
other : Pitch
The pitch to compare with this pitch.
Returns
-------
True if this Pitch is greater than or equal to the other.
Raises
------
* TypeError if other is not a Pitch.
"""
if isinstance(other, Pitch):
return self.pos() >= other.pos()
raise TypeError(f'{other} is not a Pitch.')
def __gt__(self, other):
"""
Implements Pitch > Pitch.
This method should call self.pos() and other.pos() to get the values
to compare.
Parameters
----------
other : Pitch
The pitch to compare with this pitch.
Returns
-------
True if this Pitch is greater than the other.
Raises
------
* TypeError if other is not a Pitch.
"""
if isinstance(other, Pitch):
return self.pos() > other.pos()
raise TypeError(f'{other} is not a Pitch.')
def pos(self):
"""
Returns a unique integer representing this pitch's position in
the octave-letter-accidental space. The expression to calculate
this value is `(octave<<8) + (letter<<4) + accidental`.
"""
return (self.octave << 8) + (self.letter << 4) + self.accidental
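    # Illustrative sketch (assuming the class index tables used above, where
    # octave name "4" is index 5, letter C is index 0 and '#' is accidental index 3):
    #   Pitch("C#4").pos() -> (5<<8) + (0<<4) + 3 = 1283
    # Note the ordering follows spelling, not midi key: Pitch("B#3") < Pitch("C4")
    # here even though both spell key number 60.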
def is_empty(self):
"""
Returns true if the Pitch is empty. A pitch is empty if its letter,
accidental and octave attributes are None. Only one of these attributes
needs to be checked because __new__ will only create a Pitch if all
three are legal values or all three are None.
"""
return self.letter is None
def string(self):
"""
Returns a string containing the pitch name including the letter,
accidental, and octave. For example, Pitch("C#7").string() would
return 'C#7'.
"""
if self.is_empty():
return ''
s = self._letter_names[self.letter]
s += self._accidental_names[self.accidental]
s += self._octave_names[self.octave]
return s
def keynum(self):
"""Returns the midi key number of the Pitch."""
deg = self._letter_spans[self.letter]
# convert accidental index into semitone shift, e.g. double flat == -2.
acc = self.accidental - 2
return (12 * self.octave) + deg + acc
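    # Worked examples (assuming octave name "4" is index 5 in _octave_names):
    #   Pitch("C#4").keynum() -> 12*5 + 0 + 1 = 61
    #   Pitch("A4").keynum()  -> 12*5 + 9 + 0 = 69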
def pnum(self):
"""
Returns the pnum (pitch class enum) of the Pitch. Pnums enumerate and
order the letter and accidental of a Pitch so they can be compared,
e.g.: C < C# < Dbb. See also: `pnums`.
"""
return self.pnums((self.letter << 4) + self.accidental)
def pc(self):
"""Returns the pitch class (0-11) of the Pitch."""
return self.keynum() % 12
def hertz(self):
"""Returns the hertz value of the Pitch."""
k = self.keynum()
return 440.0 * math.pow(2, ((k - 69) / 12))
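    # Worked example of the equal-tempered formula above:
    #   Pitch("A4").keynum() is 69, so hertz() -> 440.0 * 2**((69-69)/12) = 440.0
    #   Pitch("A5").keynum() is 81, so hertz() -> 440.0 * 2**(12/12)      = 880.0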
@classmethod
def from_keynum(cls, keynum, acci=None):
"""
A class method that creates a Pitch for the specified midi key number.
Parameters
----------
keynum : int
A valid keynum 0-127.
acci : string
The accidental to use. If no accidental is provided a default is
            chosen from `C C# D Eb E F F# G Ab A Bb B`
Returns
-------
A new Pitch with an appropriate spelling.
Raises
------
A ValueError if the midi key number is invalid or if the pitch requested does not support the specified accidental.
"""
if not (isinstance(keynum, int) and 0 <= keynum <= 127):
raise ValueError(f"Invalid midi key number: {keynum}.")
o, i = divmod(keynum, 12)
if acci is None:
acci = ['', '#', '', 'b', '', '', '#', '', 'b', '', 'b', ''][i]
a = cls._accidental_map.get(acci)
if a is None:
raise ValueError(f"'{acci}' is not a valid accidental.")
# s = cls._enharmonic_map[i][a]
s = cls._enharmonic_map[i].get(a)
if s is None:
raise ValueError(f'No pitch for keynum {keynum} and accidental {acci}')
if s in ['B#', 'B##']:
o -= 1
elif s in ['Cb', 'Cbb']:
o += 1
return Pitch(s + cls._octave_names[o])
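    # Illustrative results (assuming the _accidental_map/_enharmonic_map tables
    # referenced above):
    #   Pitch.from_keynum(60)      -> Pitch("C4")
    #   Pitch.from_keynum(60, '#') -> Pitch("B#3")   (octave adjusted for B#)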
# The chromatic scale and the functions pitch(), keynum() and hertz()
chromatic_scale = {}
"""
A hash table (dictionary) that maps between note names, midi key
numbers and hertz values. The table's dictionary keys consist of all
integer midi key numbers and all string pitch spellings of all midi key
numbers. See build_chromatic_scale() for more information.
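For example, once the table has been built by build_chromatic_scale():
`chromatic_scale['C4'] -> [60, 261.6255653005986, Pitch("C4")]` and
`chromatic_scale[60] -> [[Pitch("Dbb4"), '', Pitch("C4"), Pitch("B#3"), ''], 261.6255653005986]`.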
"""
def keynum(ref, filt=round):
"""
Returns key numbers from a pitch name, hertz value, a list
of the same, or a string sequence of the same.
Parameters
----------
ref : string | int | float | list
A pitch name, hertz value, list of the same, or a string
containing a sequence of the same separated by spaces.
filt : function | None
        A function of one argument that maps a floating point key number
        to an integer. The default is the built-in round, but math.floor and
math.ceil could also be used. If filt is None then a floating
point key number is returned. In a floating point key number, the
first two digits of the fractional portion are interpreted as
the number of cents above the integer midi value.
Returns
-------
If ref is a pitch its hash table value is returned.
If ref is a hertz value its key number is calculated, filtered
and returned (see the filt parameter).
If ref is a python list of pitch names or hertz values
then a list of key numbers is returned.
If ref is a string of hertz values
delimited by spaces then a list of key numbers is returned.
If ref is a string of pitch names then a list of key numbers
are returned. Items in the list can be directly repeated by
appending ',' to an item for each repetition. If the items are
pitch names then if a pitch does not contain an explicit octave
then it inherits the previous octave number in the list.
    If no octave number is provided, the middle C octave (4)
    is used as the initial octave.
Raises
------
ValueError if ref is not a pitch name, hertz value or list of the same.
Examples
--------
```python
>>> keynum('C#4')
61
>>> keynum(100)
43
>>> keynum(['Cb4', 'D#6'])
[59, 87]
>>> keynum([100, 200, 300])
[43, 55, 62]
>>> keynum('cs4 d e,, f g3 a b')
[61, 62, 64, 64, 64, 65, 55, 57, 59]
>>> keynum('100, 200, 300')
[43, 43, 55, 55, 62]
```
"""
if isinstance(ref, str):
ref = ref.strip() # remove whitespace from start and end
if ref:
if ref[0].isalpha(): # should be a pitch
try: # try to return a single keynum
return chromatic_scale[ref][0]
except KeyError:
# ref was not a single pitch. if ref contains a
# space then take it to be a list of pitch names
# otherwise its an error
if ' ' in ref:
ref = parse_pitch_sequence(ref)
elif ref[0].isdigit(): # should be hertz
# if ref contains a space then take it to be a list
# of hertz values otherwise convert to a float.
if ' ' in ref:
ref = parse_number_sequence(ref)
else:
ref = float(ref)
# ref is hertz and so not in the hashtable
if isinstance(ref, (float, int)):
keyn = 69 + math.log2(ref / 440.0) * 12
if filt:
keyn = filt(keyn)
if 0 <= keyn < 128:
return keyn #filt(keyn) if filt else keyn
if isinstance(ref, list):
return [keynum(x, filt) for x in ref]
raise ValueError(f"invalid keynum input: '{ref}'.")
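# Quick sanity examples for keynum() (illustrative, not executed here):
#   keynum(440.0)            -> 69
#   keynum(880.0)            -> 81
#   keynum(452.0, filt=None) -> about 69.47, i.e. roughly 47 cents above key 69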
def hertz(ref, filt=round):
"""
Returns hertz values from a pitch name, key number, a list
of the same, or a string sequence of the same.
Parameters
----------
ref : string, int or float
A pitch name or midi key number to convert to a hertz value.
filt : function | None
        A function of one argument that maps a floating point key number
        to an integer.
Returns
-------
If ref is a pitch name or key number its hertz hash table
value is returned.
If ref is a python list of pitch names or key numbers
then a list of hertz values is returned.
If ref is a string of key numbers
delimited by spaces then a list of hertz values is returned.
If ref is a string of pitch names then a list of hertz values
are returned. Items in this list can be directly repeated by appending
',' to an item for each repetition. If the items are pitch names then
if a pitch does not contain an explicit octave then it inherits the
    previous octave number in the list. If no octave number is provided,
    the middle C octave (4) is used as the initial octave.
Raises
------
ValueError if ref is not a pitch name, key number or list of the same.
Examples
--------
```python
>>> hertz('C#4')
277.1826309768721
>>> hertz(100)
2637.02045530296
>>> hertz(['Cb4', 'D#6'])
[246.94165062806206, 1244.5079348883237]
>>> hertz([48, 60, 72])
[130.8127826502993, 261.6255653005986, 523.2511306011972]
>>> hertz('cs4 d b3')
[277.1826309768721, 293.6647679174076, 246.94165062806206]
>>> hertz('48, 60')
[130.8127826502993, 130.8127826502993, 261.6255653005986]
```
"""
if isinstance(ref, str):
ref = ref.strip()
if ref:
if ref[0].isalpha(): # should be a pitch
try: # try common case of a single pitch
                    return chromatic_scale[ref][1] # try to return a single hertz
                except KeyError: # keep going if the string isn't a pitch
pass
ref = parse_pitch_sequence(ref)
elif ref[0].isdigit(): # should be a keynum
ref = parse_number_sequence(ref)
if isinstance(ref, float):
ref = filt(ref) if filt else int(ref)
if isinstance(ref, int):
return chromatic_scale[ref][1] # KeyError if int isnt valid keynum
if isinstance(ref, list):
return [hertz(x, filt) for x in ref]
raise ValueError(f"invalid hertz input: '{ref}'.")
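# Quick sanity examples for hertz() (illustrative, not executed here):
#   hertz(69)      -> 440.0
#   hertz('a4')    -> 440.0          (lower-case spellings are valid table keys)
#   hertz('69 81') -> [440.0, 880.0]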
def pitch(ref, filt=round, *, hz=False, acc=[]):
"""
Returns the pitch name from a hertz value, key number, a
list of the same, or a string sequence of the same.
Parameters
----------
ref : int or float
A key number or hertz value, depending on the value of the
hz parameter.
filt : function | None
A function of one argument that maps a floating point key number
to an integer.
hz : True | False
If True then ref is accepted as a hertz value otherwise it is
assumed to be a key number. The default value is False.
acc : int | list
An ordered preference list of accidentals to use in the pitch spelling.
Values range from -2 (double flat) to 2 (double sharp) with 0 being
no accidental.
Returns
-------
If ref is a key number its hash table value is returned.
If ref is a hertz value its key number is calculated, filtered
to an int value and its hash table value is returned.
If ref is a python list of key numbers their hash values are returned.
If ref is a python list of hertz values they are converted to
key numbers and then processed as described in the previous point.
If ref is a string sequence of hertz values or key numbers they are
converted to a python list and treated as described above.
Examples
--------
>>> pitch(60)
Pitch("C4")
>>> pitch(60, acc=[1])
Pitch("B#3")
>>> pitch(60, acc=[-2])
Pitch("Dbb4")
>>> pitch(440*3/2, hz=True)
Pitch("E5")
>>> pitch([48, 60, 72])
[Pitch("C3"), Pitch("C4"), Pitch("C5")]
>>> pitch("67,, 63")
[Pitch("G4"), Pitch("G4"), Pitch("G4"), Pitch("Eb4")]
"""
#print('** in pitch: ',ref)
# if parsing hertz first convert to keynum or list of keynums
if hz:
ref = keynum(ref)
# ref is float keynum, convert to int
if isinstance(ref, float):
ref = filt(ref)
# ref is an integer keynum, look up the pitch
if isinstance(ref, int):
try:
data = chromatic_scale[ref]
if data:
if not acc:
# default prefers sharps for C# and F# otherwise flats.
acc = [2, 3, 1, 4, 0] if ref % 12 in [1, 6] else [2, 1, 3, 0, 4]
else:
if not isinstance(acc,list):
acc = [acc]
acc = [a + 2 for a in acc] # convert -2..2 to 0..4
try:
return next(data[0][i] for i in acc if data[0][i])
except StopIteration as err:
                    raise ValueError(f"No pitch for accidentals {acc}.") from err
except KeyError as err:
raise ValueError(f"no table entry for midi note {ref}.") from err
# ref is a string sequence of keynums, a string sequence
# of pitch names, or a pitch name.
if isinstance(ref, str):
#print('** is str, ref=', ref)
ref = ref.strip()
if ref:
if ref[0].isalpha(): # should be a pitch
try:
#print('** trying')
##return chromatic_scale[ref][1] # try to return a single pitch
return chromatic_scale[ref][2] # try to return a single pitch
except KeyError:
pass
#print('** parse pitch seq')
ref = parse_pitch_sequence(ref)
            elif ref[0].isdigit(): # should be a keynum sequence
#print('** parse number seq')
ref = parse_number_sequence(ref)
# ref is a list of keynums or a list of pitch names
if isinstance(ref, list):
#print('** processing list:', ref)
return [pitch(x, filt, hz=False, acc=acc) for x in ref]
raise ValueError(f"invalid keynum input: '{ref}'.")
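# Quick sanity examples for pitch() (illustrative, not executed here):
#   pitch(69)             -> Pitch("A4")
#   pitch(61)             -> Pitch("C#4")   (sharps preferred for pitch class 1)
#   pitch(440.0, hz=True) -> Pitch("A4")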
def parse_pitch_sequence(string):
seq = tools.parse_string_sequence(string)
oct = '4'
for i,p in enumerate(seq):
if not (p[0] in 'CcDdEeFfGgAaBbRr'):
raise ValueError(f"invalid pitch: '{p}'.")
# o holds octave number, or '' if no octave
o = p[len(p.rstrip('0123456789')):]
if o:
oct = o # update octave to carry forward
else:
seq[i] = p+oct # add octave to pitch
return seq
def parse_number_sequence(string):
seq = tools.parse_string_sequence(string)
for i,p in enumerate(seq):
if not p[0] in '0123456789+-.':
raise ValueError(f"invalid numeric: '{p}'.")
seq[i] = float(p) if ('.' in p) else int(p)
return seq
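# Examples of the two sequence parsers (assuming tools.parse_string_sequence
# splits on whitespace and expands trailing commas into repetitions):
#   parse_pitch_sequence('cs4 d e')  -> ['cs4', 'd4', 'e4']   (octave 4 carries forward)
#   parse_number_sequence('60 61.5') -> [60, 61.5]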
def scale(start, length, *steps, fit=None):
"""
Returns a list of key numbers beginning on start and incremented by
successive interval increments. The step values loop if the
length of the scale is greater than the number of intervals in steps.
Parameters
----------
start : int | float
The initial key number that starts the scale.
length : int
The length of the scale including the start.
steps : ints | floats | list | tuple
An in-line (variadic) series of increments defining the
intervals beween the key numbers in the scale. This series
can also be specified as a single list or tuple of increments.
fit : None | [lb, ub, mode]
Limits placed on the range of the scale. If the value is None there
are no limits. Otherwise fit should be a list or tuple of 3
elements containing a lower bound, upper bound and string mode.
See: `musx.tools.fit()` for more information.
Returns
-------
A list of key numbers defining the scale.
Examples
--------
```python
# 16 note (3 octave) pentatonic scale on C#4
>>> scale(61, 16, 2, 3, 2, 2, 3)
    [61, 63, 66, 68, 70, 73, 75, 78, 80, 82, 85, 87, 90, 92, 94, 97]
# one octave of the octotonic scale
>>> scale(60, 9, 1, 2)
[60, 61, 63, 64, 66, 67, 69, 70, 72]
# interval cycle
>>> scale(60, 12, (1, 2, 3, 4, 5))
    [60, 61, 63, 66, 70, 75, 76, 78, 81, 85, 90, 91]
# cycle of fifths compressed to one octave
>>> pitch(scale(0, 12, 7, fit=[60, 72, 'wrap']))
['C5', 'G4', 'D4', 'A4', 'E4', 'B4', 'F#4', 'C#4', 'Ab4', 'Eb4', 'Bb4', 'F4']
```
"""
series = [start]
numsteps = len(steps)
if fit:
fit = list(fit) # copy it
series[0] = tools.fit(series[0], *fit)
# the variadic steps is a list of values or a list of one list.
if numsteps == 1 and isinstance(steps[0], (list, tuple)):
steps = steps[0]
numsteps = len(steps)
for i in range(0, length-1):
knum = series[-1] + steps[i % numsteps]
if fit:
knum = tools.fit(knum, *fit)
series.append(knum)
return series
def build_chromatic_scale():
"""
Returns a hash table (dictionary) with entries that map between
pitch names, Pitch instances, midi key numbers, and hertz values. The hash keys
are all possible MIDI key numbers 0 to 127 and all possible pitch
names for each MIDI key number. See module documentation for more information.
For a dictionary key that is a pitch name, its dictionary value will
    be a three element list containing the pitch's integer keynum, its hertz
    value, and its Pitch instance:
`<pitch_name>: [<keynum>, <hertz>, <Pitch>]`
For a dictionary key that is an integer midi key number, its dictionary value
will be a two element list:
`<keynum> : [[<Pitch_bb>, <Pitch_b>, <Pitch>, <Pitch_#>, <Pitch_##>], <hertz>]`
The first element in the list is a sublist of length five containing all
possible Pitch objects for the given key number. The five list
locations represent accidental ordering from double-flat to double-sharp
spellings: if a pitch spelling uses one sharp it would be added at index 3,
and if a pitch spelling is not possible for a given accidental position that
position will hold an empty string. The second element in the value list is
the key number's hertz value.
To calculate a hertz value from a key number use the formula:
`hertz = 440.0 * 2 ** ((keynum - 69) / 12)`
To calculate a key number from a hertz value use the reverse formula:
`keynum = 69 + log(hertz / 440, 2) * 12`
"""
# letter names for note entries
letter_names = ["C", "D", "E", "F", "G", "A", "B"]
# symbolic accidental names for notes, "" is for a diatonic note without accidental
accidental_names = ["bb", "b", "", "#", "##"]
# safe accidental name variants
accidental_safe_name = {"bb": "ff", "b": "f", "": "", "#": "s", "##": "ss"}
# lowest octave name is 00 instead of -1.
octave_names = ["00", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# semi-tone shifts applied to diatonic notes within the octave
letter_steps = [0, 2, 4, 5, 7, 9, 11]
# semi-tone shift applied to diatonic notes for notes with accidentals
accidental_steps = [-2, -1, 0, 1, 2]
# init table with midi keys, each holding a note array and frequency.
# the note array will hold all chromatic note names for the midi key.
table = {key: [['','','','',''], 440.0 * 2 ** ((key-69.0)/12)] for key in range(128)}
# iterate all the octave for midi key numbers 0-127.
for octave_index, octave_name in enumerate(octave_names):
# starting key number for notes in the octave
octave_midi = octave_index * 12
# iterate all the diatonic letter names for notes.
for letter_index, letter_name in enumerate(letter_names):
# the midi key number of the diatonic note
letter_midi = octave_midi + letter_steps[letter_index]
# iterate the accidentals and create all possible note names for the letter
for accidental_index, accidental_name in enumerate(accidental_names):
# get the semitone amount to shift the letter note by
accidental_step = accidental_steps[accidental_index]
# calculate the midi key number for the note
note_midi = letter_midi + accidental_step
# stop notes outside of midi range (there are only a few)
if 0 <= note_midi <= 127:
accidental_name = accidental_names[accidental_index]
# create the official note name
note_name1 = letter_name + accidental_name + octave_name
# create the Pitch for the official name.
note_pitch = Pitch(note_name1)
# create variants (lower case letter names, safe accidental names)
note_name2 = letter_name.lower() + accidental_name + octave_name
note_name3 = letter_name + accidental_safe_name[accidental_name] + octave_name
note_name4 = letter_name.lower() + accidental_safe_name[accidental_name] + octave_name
# fetch the midi data from the table
midi_data = table[note_midi]
# add the note to the note array
##midi_data[0][accidental_index] = note_name1
midi_data[0][accidental_index] = note_pitch
# get the frequency from the midi data and add it to the note data.
note_freq = table[note_midi][1]
# add the hash entry for the note name
##table[note_name1] = [note_midi, note_freq]
table[note_name1] = [note_midi, note_freq, note_pitch]
# add the variants (lower case letter, safe accidentals)
table[note_name2] = table[note_name1]
table[note_name3] = table[note_name1]
table[note_name4] = table[note_name1]
# add entries for musical rests
r = Pitch()
table['R'] = [-1, 0.0, r]
table['r'] = table['R']
#table[-1] = [[r,r,r,r,r], 0.0]
return table
# Build the hash table
chromatic_scale = build_chromatic_scale() | 37.890625 | 123 | 0.575052 |
daa77be867bb98d9ca43cf57b8ed19f7bb405bd5 | 2,806 | py | Python | FusionIIIT/applications/academic_procedures/urls.py | devPrashantKumar/Fusion | a56cd0a9faa0c88e170ac8cbc316bd71f7f8b27c | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/applications/academic_procedures/urls.py | devPrashantKumar/Fusion | a56cd0a9faa0c88e170ac8cbc316bd71f7f8b27c | [
"bzip2-1.0.6"
] | null | null | null | FusionIIIT/applications/academic_procedures/urls.py | devPrashantKumar/Fusion | a56cd0a9faa0c88e170ac8cbc316bd71f7f8b27c | [
"bzip2-1.0.6"
] | null | null | null | from django.conf.urls import url
from . import views
appname = 'procedures'
urlpatterns = [
url(r'^$', views.academic_procedures_redirect, name='redirect'),
url(r'^main/', views.academic_procedures, name='procedures'),
url(r'^register/', views.register, name='register'),
url(r'^pre_registration/', views.pre_registration, name='pre_register'),
url(r'^final_registration/', views.final_registration, name='final_register'),
url(r'^addCourse/', views.add_courses, name='addCourse'),
url(r'^drop_course/', views.drop_course, name='drop_course'),
url(r'^branch-change/', views.approve_branch_change, name='branch_change'),
url(r'^brach-change-request/', views.branch_change_request, name='branch_change_request'),
url(r'^acad_person/verifyCourse/$', views.verify_course, name='verifyCourse'),
url(r'^acad_person/student_list$', views.student_list, name='studentlist'),
url(r'^acad_person/$', views.acad_person, name='acad_person'),
url(r'^acad_person/verifyCourse/drop/$', views.dropcourseadmin, name='dropcourseadmin'),
url(r'^branch-validate', views.approve_branch_change, name='branch_validate'),
url(r'^acad_person/branch_change/$', views.acad_branch_change, name='acad_branch_change'),
url(r'^stu/', views.academic_procedures_student),
url(r'^fac/', views.academic_procedures_faculty, name='faculty_procedures'),
url(r'^addThesis/$', views.add_thesis, name='add_thesis'),
url(r'^process_verification_request/$', views.process_verification_request),
url(r'^teaching_credit/$', views.teaching_credit_register),
url(r'^course_marks_data/$', views.course_marks_data),
url(r'^submit_marks/$', views.submit_marks),
url(r'^verify_course_marks_data/$', views.verify_course_marks_data),
url(r'^verify_marks/$', views.verify_marks),
url(r'^announce_results/$', views.announce_results),
url(r'^generate_grade_pdf/$', views.generate_grade_pdf),
url(r'^manual_grade_submission/$', views.manual_grade_submission),
url(r'^generate_result_pdf/$', views.generate_result_pdf),
url(r'^generate_grade_sheet_pdf/$', views.generate_grade_sheet_pdf),
url(r'^test/$', views.test),
url(r'^bonafide_pdf/$',views.Bonafide_form),
url(r'^test_ret/$', views.test_ret),
url(r'^faculty_data/$', views.facultyData, name='faculty_data'),
url(r'^ACF/$', views.ACF, name='ACF'),
url(r'^MTSGF/$', views.MTSGF),
url(r'^PHDPE/$', views.PHDPE),
url(r'^update_assistantship/$', views.update_assistantship),
url(r'^update_mtechsg/$', views.update_mtechsg),
url(r'^update_phdform/$' , views.update_phdform),
url(r'^update_dues/$' , views.update_dues),
url(r'^dues_pdf/$' , views.dues_pdf),
url(r'^acad_person/gen_course_list$', views.gen_course_list, name='gen_course_list'),
] | 57.265306 | 94 | 0.717035 |
8712bb8aaca8c48fb14cd98fc029db7e138b7853 | 11,971 | py | Python | gluoncv/auto/tasks/image_classification.py | mzolfaghari/gluon-cv | 6727e1e9f18b1919e79bb132ab9e873ffed41388 | [
"Apache-2.0"
] | null | null | null | gluoncv/auto/tasks/image_classification.py | mzolfaghari/gluon-cv | 6727e1e9f18b1919e79bb132ab9e873ffed41388 | [
"Apache-2.0"
] | null | null | null | gluoncv/auto/tasks/image_classification.py | mzolfaghari/gluon-cv | 6727e1e9f18b1919e79bb132ab9e873ffed41388 | [
"Apache-2.0"
] | null | null | null | """Auto pipeline for image classification task"""
# pylint: disable=bad-whitespace,missing-class-docstring
import logging
import copy
import time
import pprint
import pickle
from typing import Union, Tuple
from autocfg import dataclass
import numpy as np
import pandas as pd
import autogluon.core as ag
from autogluon.core.decorator import sample_config
from autogluon.core.scheduler.resource import get_cpu_count, get_gpu_count
from autogluon.core.task.base import BaseTask
from autogluon.core.searcher import RandomSearcher
from ..estimators.base_estimator import BaseEstimator
from ..estimators import ImageClassificationEstimator
from .utils import config_to_nested
from .dataset import ImageClassificationDataset
__all__ = ['ImageClassification']
@dataclass
class LiteConfig:
model : Union[str, ag.Space, type(None)] = ag.Categorical('resnet18_v1b', 'mobilenetv3_small')
lr : Union[ag.Space, float] = 1e-2
num_trials : int = 1
epochs : Union[ag.Space, int] = 5
batch_size : Union[ag.Space, int] = 8
nthreads_per_trial : int = 32
ngpus_per_trial : int = 0
time_limits : int = 3600
search_strategy : Union[str, ag.Space] = 'random'
dist_ip_addrs : Union[type(None), list, Tuple] = None
@dataclass
class DefaultConfig:
model : Union[ag.Space, str] = ag.Categorical('resnet50_v1b', 'resnest50')
lr : Union[ag.Space, float] = ag.Categorical(1e-2, 5e-2)
num_trials : int = 3
epochs : Union[ag.Space, int] = 15
batch_size : Union[ag.Space, int] = 16
nthreads_per_trial : int = 128
ngpus_per_trial : int = 8
time_limits : int = 3600
search_strategy : Union[str, ag.Space] = 'random'
dist_ip_addrs : Union[type(None), list, Tuple] = None
@ag.args()
def _train_image_classification(args, reporter):
"""
Parameters
----------
args: <class 'autogluon.utils.edict.EasyDict'>
"""
# train, val data
train_data = args.pop('train_data')
val_data = args.pop('val_data')
# convert user defined config to nested form
args = config_to_nested(args)
tic = time.time()
try:
estimator_cls = args.pop('estimator', None)
assert estimator_cls == ImageClassificationEstimator
custom_net = args.pop('custom_net', None)
custom_optimizer = args.pop('custom_optimizer', None)
estimator = estimator_cls(args, reporter=reporter,
net=custom_net, optimizer=custom_optimizer)
# training
result = estimator.fit(train_data=train_data, val_data=val_data)
# pylint: disable=bare-except
except:
import traceback
return {'traceback': traceback.format_exc(), 'args': str(args),
'time': time.time() - tic, 'train_acc': -1, 'valid_acc': -1}
# TODO: checkpointing needs to be done in a better way
# unique_checkpoint = 'train_image_classification_' + str(uuid.uuid4()) + '.pkl'
# estimator.save(unique_checkpoint)
result.update({'model_checkpoint': pickle.dumps(estimator)})
return result
class ImageClassification(BaseTask):
"""Whole Image Classification general task.
Parameters
----------
config : dict
The configurations, can be nested dict.
logger : logging.Logger
The desired logger object, use `None` for module specific logger with default setting.
net : mx.gluon.Block
The custom network. If defined, the model name in config will be ignored so your
custom network will be used for training rather than pulling it from model zoo.
"""
Dataset = ImageClassificationDataset
def __init__(self, config=None, estimator=None, logger=None):
super(ImageClassification, self).__init__()
self._fit_summary = {}
self._logger = logger if logger is not None else logging.getLogger(__name__)
self._logger.setLevel(logging.INFO)
# cpu and gpu setting
cpu_count = get_cpu_count()
gpu_count = get_gpu_count()
# default settings
if not config:
if gpu_count < 1:
self._logger.info('No GPU detected/allowed, using most conservative search space.')
config = LiteConfig()
else:
config = DefaultConfig()
config = config.asdict()
else:
if not config.get('dist_ip_addrs', None):
ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
if ngpus_per_trial < 1:
self._logger.info('No GPU detected/allowed, using most conservative search space.')
default_config = LiteConfig()
else:
default_config = DefaultConfig()
config = default_config.merge(config, allow_new_key=True).asdict()
# adjust cpu/gpu resources
if not config.get('dist_ip_addrs', None):
nthreads_per_trial = config.get('nthreads_per_trial', cpu_count)
if nthreads_per_trial > cpu_count:
nthreads_per_trial = cpu_count
ngpus_per_trial = config.get('ngpus_per_trial', gpu_count)
if ngpus_per_trial > gpu_count:
ngpus_per_trial = gpu_count
                self._logger.warning(
                    "The number of requested GPUs is greater than the number of available GPUs. "
                    "Reduce the number to %d", ngpus_per_trial)
else:
raise ValueError('Please specify `nthreads_per_trial` and `ngpus_per_trial` '
'given that dist workers are available')
# additional configs
config['num_workers'] = nthreads_per_trial
config['gpus'] = [int(i) for i in range(ngpus_per_trial)]
if config['gpus']:
config['batch_size'] = config.get('batch_size', 8) * len(config['gpus'])
self._logger.info('Increase batch size to %d based on the number of gpus %d',
config['batch_size'], len(config['gpus']))
config['seed'] = config.get('seed', np.random.randint(32,767))
self._config = config
# scheduler options
self.search_strategy = config.get('search_strategy', 'random')
self.scheduler_options = {
'resource': {'num_cpus': nthreads_per_trial, 'num_gpus': ngpus_per_trial},
'checkpoint': config.get('checkpoint', 'checkpoint/exp1.ag'),
'num_trials': config.get('num_trials', 2),
'time_out': config.get('time_limits', 60 * 60),
'resume': (len(config.get('resume', '')) > 0),
'visualizer': config.get('visualizer', 'none'),
'time_attr': 'epoch',
'reward_attr': 'acc_reward',
'dist_ip_addrs': config.get('dist_ip_addrs', None),
'searcher': self.search_strategy,
'search_options': config.get('search_options', None)}
if self.search_strategy == 'hyperband':
self.scheduler_options.update({
'searcher': 'random',
'max_t': config.get('epochs', 50),
'grace_period': config.get('grace_period', config.get('epochs', 50) // 4)})
def fit(self, train_data, val_data=None, train_size=0.9, random_state=None):
"""Fit auto estimator given the input data.
Parameters
----------
train_data : pd.DataFrame or iterator
Training data.
val_data : pd.DataFrame or iterator, optional
Validation data, optional. If `train_data` is DataFrame, `val_data` will be split from
`train_data` given `train_size`.
train_size : float
The portion of train data split from original `train_data` if `val_data` is not provided.
random_state : int
Random state for splitting, for `np.random.seed`.
Returns
-------
Estimator
The estimator obtained by training on the specified dataset.
"""
# split train/val before HPO to make fair comparisons
if not isinstance(train_data, pd.DataFrame):
assert val_data is not None, \
"Please provide `val_data` as we do not know how to split `train_data` of type: \
{}".format(type(train_data))
if val_data is None:
assert 0 <= train_size <= 1.0
if random_state:
np.random.seed(random_state)
split_mask = np.random.rand(len(train_data)) < train_size
train = train_data[split_mask]
val = train_data[~split_mask]
self._logger.info('Randomly split train_data into train[%d]/validation[%d] splits.',
len(train), len(val))
train_data, val_data = train, val
# automatically suggest some hyperparameters based on the dataset statistics(experimental)
estimator = self._config.get('estimator', None)
if estimator is None:
estimator = [ImageClassificationEstimator]
self._config['estimator'] = ag.Categorical(*estimator)
# register args
config = self._config.copy()
config['train_data'] = train_data
config['val_data'] = val_data
_train_image_classification.register_args(**config)
start_time = time.time()
self._fit_summary = {}
if config.get('num_trials', 1) < 2:
rand_config = RandomSearcher(_train_image_classification.cs).get_config()
self._logger.info("Starting fit without HPO")
results = _train_image_classification(_train_image_classification.args, rand_config)
best_config = sample_config(_train_image_classification.args, rand_config)
best_config.pop('train_data', None)
best_config.pop('val_data', None)
self._fit_summary.update({'train_acc': results.get('train_acc', -1),
'valid_acc': results.get('valid_acc', -1),
'total_time': results.get('time', time.time() - start_time),
'best_config': best_config})
else:
self._logger.info("Starting HPO experiments")
results = self.run_fit(_train_image_classification, self.search_strategy,
self.scheduler_options)
end_time = time.time()
self._logger.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> finish model fitting")
self._logger.info("total runtime is %.2f s", end_time - start_time)
if config.get('num_trials', 1) > 1:
best_config = sample_config(_train_image_classification.args, results['best_config'])
# convert best config to nested form
best_config = config_to_nested(best_config)
best_config.pop('train_data', None)
best_config.pop('val_data', None)
self._fit_summary.update({'train_acc': results.get('train_acc', -1),
'valid_acc': results.get('valid_acc', results.get('best_reward', -1)),
'total_time': results.get('total_time', time.time() - start_time),
'best_config': best_config})
self._logger.info(pprint.pformat(self._fit_summary, indent=2))
# TODO: checkpointing needs to be done in a better way
model_checkpoint = results.get('model_checkpoint', None)
if model_checkpoint is None:
raise RuntimeError(f'Unexpected error happened during fit: {pprint.pformat(results, indent=2)}')
estimator = pickle.loads(results['model_checkpoint'])
return estimator
def fit_summary(self):
return copy.copy(self._fit_summary)
@classmethod
def load(cls, filename):
obj = BaseEstimator.load(filename)
# make sure not accidentally loading e.g. classification model
assert isinstance(obj, ImageClassificationEstimator)
return obj
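# Minimal usage sketch (illustrative only; assumes `train_df` is a DataFrame in
# the format expected by ImageClassificationDataset and that MXNet/AutoGluon are
# installed):
#   task = ImageClassification({'model': 'resnet50_v1b', 'num_trials': 1, 'epochs': 5})
#   estimator = task.fit(train_df)   # splits 90/10 into train/validation by default
#   print(task.fit_summary())        # train_acc, valid_acc, total_time, best_config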
| 43.216606 | 108 | 0.62192 |
760075b978a22edb68146f3f871a065080758f65 | 534 | py | Python | omm/__init__.py | mkelley/onkyo-music-machine | 3d77d46554ca258f0ee224c1283ac55201c2d3a6 | [
"MIT"
] | null | null | null | omm/__init__.py | mkelley/onkyo-music-machine | 3d77d46554ca258f0ee224c1283ac55201c2d3a6 | [
"MIT"
] | null | null | null | omm/__init__.py | mkelley/onkyo-music-machine | 3d77d46554ca258f0ee224c1283ac55201c2d3a6 | [
"MIT"
] | null | null | null | import argparse
import connexion
from .command import OnkyoCommand
GPIO = None
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--port', type=int, default=8000)
parser.add_argument('--gpio', type=int, default=25)
return parser.parse_args()
def main():
"""Command-line script for omm server."""
global GPIO
args = _parse_args()
GPIO = args.gpio
app = connexion.FlaskApp(__name__, specification_dir='api')
app.add_api('openapi.yaml')
app.run(port=args.port)
| 20.538462 | 63 | 0.689139 |
df313a47413654adb075610189510143c8c25c2b | 341 | py | Python | psyneulink/globals/preferences/__init__.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | psyneulink/globals/preferences/__init__.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | psyneulink/globals/preferences/__init__.py | mihaic/psyneulink | 3d2fc3117c82bccc92fc585add330b0f9b35c830 | [
"Apache-2.0"
] | null | null | null | from . import componentpreferenceset
from . import mechanismpreferenceset
from . import preferenceset
from .componentpreferenceset import *
from .mechanismpreferenceset import *
from .preferenceset import *
__all__ = list(componentpreferenceset.__all__)
__all__.extend(mechanismpreferenceset.__all__)
__all__.extend(preferenceset.__all__)
| 28.416667 | 46 | 0.847507 |
5f581c985c5b1827cae9dc8796bc6799cf1f9c9e | 531 | py | Python | mapsite/mapapp/models.py | pkolios/map-spa | 7f066377711aeb2864205b49874c48c83dc3f04f | [
"MIT"
] | null | null | null | mapsite/mapapp/models.py | pkolios/map-spa | 7f066377711aeb2864205b49874c48c83dc3f04f | [
"MIT"
] | null | null | null | mapsite/mapapp/models.py | pkolios/map-spa | 7f066377711aeb2864205b49874c48c83dc3f04f | [
"MIT"
] | null | null | null | from django.db import models
from django.forms.models import model_to_dict
class Address(models.Model):
"""Represents the address data stored locally."""
full_address = models.CharField(max_length=250)
lat = models.DecimalField(max_digits=9, decimal_places=6)
lon = models.DecimalField(max_digits=9, decimal_places=6)
class Meta:
ordering = ['-pk']
def __str__(self):
return self.full_address
def to_dict(self):
"""Cast model to dict."""
return model_to_dict(self)
| 25.285714 | 61 | 0.687382 |
d742094b58bdc173f6358dc9170590e72d1a440c | 22,465 | py | Python | saleor/webhook/event_types.py | flamingowebjo/saleor | ecba9e1c10d32cd66c63a986e4cb907c421ce303 | [
"CC-BY-4.0"
] | null | null | null | saleor/webhook/event_types.py | flamingowebjo/saleor | ecba9e1c10d32cd66c63a986e4cb907c421ce303 | [
"CC-BY-4.0"
] | null | null | null | saleor/webhook/event_types.py | flamingowebjo/saleor | ecba9e1c10d32cd66c63a986e4cb907c421ce303 | [
"CC-BY-4.0"
] | null | null | null | from ..core.permissions import (
AccountPermissions,
AppPermission,
ChannelPermissions,
CheckoutPermissions,
DiscountPermissions,
GiftcardPermissions,
MenuPermissions,
OrderPermissions,
PagePermissions,
PaymentPermissions,
ProductPermissions,
ShippingPermissions,
SitePermissions,
)
class WebhookEventAsyncType:
ANY = "any_events"
APP_INSTALLED = "app_installed"
APP_UPDATED = "app_updated"
APP_DELETED = "app_deleted"
APP_STATUS_CHANGED = "app_status_changed"
CATEGORY_CREATED = "category_created"
CATEGORY_UPDATED = "category_updated"
CATEGORY_DELETED = "category_deleted"
CHANNEL_CREATED = "channel_created"
CHANNEL_UPDATED = "channel_updated"
CHANNEL_DELETED = "channel_deleted"
CHANNEL_STATUS_CHANGED = "channel_status_changed"
GIFT_CARD_CREATED = "gift_card_created"
GIFT_CARD_UPDATED = "gift_card_updated"
GIFT_CARD_DELETED = "gift_card_deleted"
GIFT_CARD_STATUS_CHANGED = "gift_card_status_changed"
MENU_CREATED = "menu_created"
MENU_UPDATED = "menu_updated"
MENU_DELETED = "menu_deleted"
MENU_ITEM_CREATED = "menu_item_created"
MENU_ITEM_UPDATED = "menu_item_updated"
MENU_ITEM_DELETED = "menu_item_deleted"
ORDER_CREATED = "order_created"
ORDER_CONFIRMED = "order_confirmed"
ORDER_FULLY_PAID = "order_fully_paid"
ORDER_UPDATED = "order_updated"
ORDER_CANCELLED = "order_cancelled"
FULFILLMENT_CANCELED = "fulfillment_canceled"
ORDER_FULFILLED = "order_fulfilled"
DRAFT_ORDER_CREATED = "draft_order_created"
DRAFT_ORDER_UPDATED = "draft_order_updated"
DRAFT_ORDER_DELETED = "draft_order_deleted"
SALE_CREATED = "sale_created"
SALE_UPDATED = "sale_updated"
SALE_DELETED = "sale_deleted"
INVOICE_REQUESTED = "invoice_requested"
INVOICE_DELETED = "invoice_deleted"
INVOICE_SENT = "invoice_sent"
FULFILLMENT_CREATED = "fulfillment_created"
CUSTOMER_CREATED = "customer_created"
CUSTOMER_UPDATED = "customer_updated"
COLLECTION_CREATED = "collection_created"
COLLECTION_UPDATED = "collection_updated"
COLLECTION_DELETED = "collection_deleted"
PRODUCT_CREATED = "product_created"
PRODUCT_UPDATED = "product_updated"
PRODUCT_DELETED = "product_deleted"
PRODUCT_VARIANT_CREATED = "product_variant_created"
PRODUCT_VARIANT_UPDATED = "product_variant_updated"
PRODUCT_VARIANT_DELETED = "product_variant_deleted"
PRODUCT_VARIANT_OUT_OF_STOCK = "product_variant_out_of_stock"
PRODUCT_VARIANT_BACK_IN_STOCK = "product_variant_back_in_stock"
CHECKOUT_CREATED = "checkout_created"
CHECKOUT_UPDATED = "checkout_updated"
NOTIFY_USER = "notify_user"
PAGE_CREATED = "page_created"
PAGE_UPDATED = "page_updated"
PAGE_DELETED = "page_deleted"
SHIPPING_PRICE_CREATED = "shipping_price_created"
SHIPPING_PRICE_UPDATED = "shipping_price_updated"
SHIPPING_PRICE_DELETED = "shipping_price_deleted"
SHIPPING_ZONE_CREATED = "shipping_zone_created"
SHIPPING_ZONE_UPDATED = "shipping_zone_updated"
SHIPPING_ZONE_DELETED = "shipping_zone_deleted"
TRANSACTION_ACTION_REQUEST = "transaction_action_request"
TRANSLATION_CREATED = "translation_created"
TRANSLATION_UPDATED = "translation_updated"
WAREHOUSE_CREATED = "warehouse_created"
WAREHOUSE_UPDATED = "warehouse_updated"
WAREHOUSE_DELETED = "warehouse_deleted"
VOUCHER_CREATED = "voucher_created"
VOUCHER_UPDATED = "voucher_updated"
VOUCHER_DELETED = "voucher_deleted"
OBSERVABILITY = "observability"
DISPLAY_LABELS = {
ANY: "Any events",
APP_INSTALLED: "App created",
APP_UPDATED: "App updated",
APP_DELETED: "App deleted",
APP_STATUS_CHANGED: "App status changed",
CATEGORY_CREATED: "Category created",
CATEGORY_UPDATED: "Category updated",
CATEGORY_DELETED: "Category deleted",
CHANNEL_CREATED: "Channel created",
CHANNEL_UPDATED: "Channel updated",
CHANNEL_DELETED: "Channel deleted",
CHANNEL_STATUS_CHANGED: "Channel status changed",
GIFT_CARD_CREATED: "Gift card created",
GIFT_CARD_UPDATED: "Gift card updated",
GIFT_CARD_DELETED: "Gift card deleted",
GIFT_CARD_STATUS_CHANGED: "Gift card status changed",
MENU_CREATED: "Menu created",
MENU_UPDATED: "Menu updated",
MENU_DELETED: "Menu deleted",
MENU_ITEM_CREATED: "Menu item created",
MENU_ITEM_UPDATED: "Menu item updated",
MENU_ITEM_DELETED: "Menu item deleted",
ORDER_CREATED: "Order created",
ORDER_CONFIRMED: "Order confirmed",
ORDER_FULLY_PAID: "Order paid",
ORDER_UPDATED: "Order updated",
ORDER_CANCELLED: "Order cancelled",
ORDER_FULFILLED: "Order fulfilled",
DRAFT_ORDER_CREATED: "Draft order created",
DRAFT_ORDER_UPDATED: "Draft order updated",
DRAFT_ORDER_DELETED: "Draft order deleted",
SALE_CREATED: "Sale created",
SALE_UPDATED: "Sale updated",
SALE_DELETED: "Sale deleted",
INVOICE_REQUESTED: "Invoice requested",
INVOICE_DELETED: "Invoice deleted",
INVOICE_SENT: "Invoice sent",
CUSTOMER_CREATED: "Customer created",
CUSTOMER_UPDATED: "Customer updated",
COLLECTION_CREATED: "Collection created",
COLLECTION_UPDATED: "Collection updated",
COLLECTION_DELETED: "Collection deleted",
PRODUCT_CREATED: "Product created",
PRODUCT_UPDATED: "Product updated",
PRODUCT_DELETED: "Product deleted",
PRODUCT_VARIANT_CREATED: "Product variant created",
PRODUCT_VARIANT_UPDATED: "Product variant updated",
PRODUCT_VARIANT_DELETED: "Product variant deleted",
PRODUCT_VARIANT_OUT_OF_STOCK: "Product variant stock changed",
PRODUCT_VARIANT_BACK_IN_STOCK: "Product variant back in stock",
CHECKOUT_CREATED: "Checkout created",
CHECKOUT_UPDATED: "Checkout updated",
FULFILLMENT_CREATED: "Fulfillment_created",
FULFILLMENT_CANCELED: "Fulfillment_cancelled",
NOTIFY_USER: "Notify user",
PAGE_CREATED: "Page Created",
PAGE_UPDATED: "Page Updated",
PAGE_DELETED: "Page Deleted",
SHIPPING_PRICE_CREATED: "Shipping price created",
SHIPPING_PRICE_UPDATED: "Shipping price updated",
SHIPPING_PRICE_DELETED: "Shipping price deleted",
SHIPPING_ZONE_CREATED: "Shipping zone created",
SHIPPING_ZONE_UPDATED: "Shipping zone updated",
SHIPPING_ZONE_DELETED: "Shipping zone deleted",
TRANSACTION_ACTION_REQUEST: "Payment action request",
TRANSLATION_CREATED: "Create translation",
TRANSLATION_UPDATED: "Update translation",
WAREHOUSE_CREATED: "Warehouse created",
WAREHOUSE_UPDATED: "Warehouse updated",
WAREHOUSE_DELETED: "Warehouse deleted",
VOUCHER_CREATED: "Voucher created",
VOUCHER_UPDATED: "Voucher updated",
VOUCHER_DELETED: "Voucher deleted",
OBSERVABILITY: "Observability",
}
CHOICES = [
(ANY, DISPLAY_LABELS[ANY]),
(APP_INSTALLED, DISPLAY_LABELS[APP_INSTALLED]),
(APP_UPDATED, DISPLAY_LABELS[APP_UPDATED]),
(APP_DELETED, DISPLAY_LABELS[APP_DELETED]),
(APP_STATUS_CHANGED, DISPLAY_LABELS[APP_STATUS_CHANGED]),
(CATEGORY_CREATED, DISPLAY_LABELS[CATEGORY_CREATED]),
(CATEGORY_UPDATED, DISPLAY_LABELS[CATEGORY_UPDATED]),
(CATEGORY_DELETED, DISPLAY_LABELS[CATEGORY_DELETED]),
(CHANNEL_CREATED, DISPLAY_LABELS[CHANNEL_CREATED]),
(CHANNEL_UPDATED, DISPLAY_LABELS[CHANNEL_UPDATED]),
(CHANNEL_DELETED, DISPLAY_LABELS[CHANNEL_DELETED]),
(CHANNEL_STATUS_CHANGED, DISPLAY_LABELS[CHANNEL_STATUS_CHANGED]),
(GIFT_CARD_CREATED, DISPLAY_LABELS[GIFT_CARD_CREATED]),
(GIFT_CARD_UPDATED, DISPLAY_LABELS[GIFT_CARD_UPDATED]),
(GIFT_CARD_DELETED, DISPLAY_LABELS[GIFT_CARD_DELETED]),
(GIFT_CARD_STATUS_CHANGED, DISPLAY_LABELS[GIFT_CARD_STATUS_CHANGED]),
(MENU_CREATED, DISPLAY_LABELS[MENU_CREATED]),
(MENU_UPDATED, DISPLAY_LABELS[MENU_UPDATED]),
(MENU_DELETED, DISPLAY_LABELS[MENU_DELETED]),
(MENU_ITEM_CREATED, DISPLAY_LABELS[MENU_ITEM_CREATED]),
(MENU_ITEM_UPDATED, DISPLAY_LABELS[MENU_ITEM_UPDATED]),
(MENU_ITEM_DELETED, DISPLAY_LABELS[MENU_ITEM_DELETED]),
(ORDER_CREATED, DISPLAY_LABELS[ORDER_CREATED]),
(ORDER_CONFIRMED, DISPLAY_LABELS[ORDER_CONFIRMED]),
(ORDER_FULLY_PAID, DISPLAY_LABELS[ORDER_FULLY_PAID]),
(ORDER_UPDATED, DISPLAY_LABELS[ORDER_UPDATED]),
(ORDER_CANCELLED, DISPLAY_LABELS[ORDER_CANCELLED]),
(ORDER_FULFILLED, DISPLAY_LABELS[ORDER_FULFILLED]),
(DRAFT_ORDER_CREATED, DISPLAY_LABELS[DRAFT_ORDER_CREATED]),
(DRAFT_ORDER_UPDATED, DISPLAY_LABELS[DRAFT_ORDER_UPDATED]),
(DRAFT_ORDER_DELETED, DISPLAY_LABELS[DRAFT_ORDER_DELETED]),
(SALE_CREATED, DISPLAY_LABELS[SALE_CREATED]),
(SALE_UPDATED, DISPLAY_LABELS[SALE_UPDATED]),
(SALE_DELETED, DISPLAY_LABELS[SALE_DELETED]),
(INVOICE_REQUESTED, DISPLAY_LABELS[INVOICE_REQUESTED]),
(INVOICE_DELETED, DISPLAY_LABELS[INVOICE_DELETED]),
(INVOICE_SENT, DISPLAY_LABELS[INVOICE_SENT]),
(CUSTOMER_CREATED, DISPLAY_LABELS[CUSTOMER_CREATED]),
(CUSTOMER_UPDATED, DISPLAY_LABELS[CUSTOMER_UPDATED]),
(COLLECTION_CREATED, DISPLAY_LABELS[COLLECTION_CREATED]),
(COLLECTION_UPDATED, DISPLAY_LABELS[COLLECTION_UPDATED]),
(COLLECTION_DELETED, DISPLAY_LABELS[COLLECTION_DELETED]),
(PRODUCT_CREATED, DISPLAY_LABELS[PRODUCT_CREATED]),
(PRODUCT_UPDATED, DISPLAY_LABELS[PRODUCT_UPDATED]),
(PRODUCT_DELETED, DISPLAY_LABELS[PRODUCT_DELETED]),
(PRODUCT_VARIANT_CREATED, DISPLAY_LABELS[PRODUCT_VARIANT_CREATED]),
(PRODUCT_VARIANT_UPDATED, DISPLAY_LABELS[PRODUCT_VARIANT_UPDATED]),
(PRODUCT_VARIANT_DELETED, DISPLAY_LABELS[PRODUCT_VARIANT_DELETED]),
(PRODUCT_VARIANT_OUT_OF_STOCK, DISPLAY_LABELS[PRODUCT_VARIANT_OUT_OF_STOCK]),
(PRODUCT_VARIANT_BACK_IN_STOCK, DISPLAY_LABELS[PRODUCT_VARIANT_BACK_IN_STOCK]),
(CHECKOUT_CREATED, DISPLAY_LABELS[CHECKOUT_CREATED]),
(CHECKOUT_UPDATED, DISPLAY_LABELS[CHECKOUT_UPDATED]),
(FULFILLMENT_CREATED, DISPLAY_LABELS[FULFILLMENT_CREATED]),
(FULFILLMENT_CANCELED, DISPLAY_LABELS[FULFILLMENT_CANCELED]),
(NOTIFY_USER, DISPLAY_LABELS[NOTIFY_USER]),
(PAGE_CREATED, DISPLAY_LABELS[PAGE_CREATED]),
(PAGE_UPDATED, DISPLAY_LABELS[PAGE_UPDATED]),
(PAGE_DELETED, DISPLAY_LABELS[PAGE_DELETED]),
(SHIPPING_PRICE_CREATED, DISPLAY_LABELS[SHIPPING_PRICE_CREATED]),
(SHIPPING_PRICE_UPDATED, DISPLAY_LABELS[SHIPPING_PRICE_UPDATED]),
(SHIPPING_PRICE_DELETED, DISPLAY_LABELS[SHIPPING_PRICE_DELETED]),
(SHIPPING_ZONE_CREATED, DISPLAY_LABELS[SHIPPING_ZONE_CREATED]),
(SHIPPING_ZONE_UPDATED, DISPLAY_LABELS[SHIPPING_ZONE_UPDATED]),
(SHIPPING_ZONE_DELETED, DISPLAY_LABELS[SHIPPING_ZONE_DELETED]),
(TRANSACTION_ACTION_REQUEST, DISPLAY_LABELS[TRANSACTION_ACTION_REQUEST]),
(TRANSLATION_CREATED, DISPLAY_LABELS[TRANSLATION_CREATED]),
(TRANSLATION_UPDATED, DISPLAY_LABELS[TRANSLATION_UPDATED]),
(WAREHOUSE_CREATED, DISPLAY_LABELS[WAREHOUSE_CREATED]),
(WAREHOUSE_UPDATED, DISPLAY_LABELS[WAREHOUSE_UPDATED]),
(WAREHOUSE_DELETED, DISPLAY_LABELS[WAREHOUSE_DELETED]),
(VOUCHER_CREATED, DISPLAY_LABELS[VOUCHER_CREATED]),
(VOUCHER_UPDATED, DISPLAY_LABELS[VOUCHER_UPDATED]),
(VOUCHER_DELETED, DISPLAY_LABELS[VOUCHER_DELETED]),
(OBSERVABILITY, DISPLAY_LABELS[OBSERVABILITY]),
]
ALL = [event[0] for event in CHOICES]
PERMISSIONS = {
APP_INSTALLED: AppPermission.MANAGE_APPS,
APP_UPDATED: AppPermission.MANAGE_APPS,
APP_DELETED: AppPermission.MANAGE_APPS,
APP_STATUS_CHANGED: AppPermission.MANAGE_APPS,
CATEGORY_CREATED: ProductPermissions.MANAGE_PRODUCTS,
CATEGORY_UPDATED: ProductPermissions.MANAGE_PRODUCTS,
CATEGORY_DELETED: ProductPermissions.MANAGE_PRODUCTS,
CHANNEL_CREATED: ChannelPermissions.MANAGE_CHANNELS,
CHANNEL_UPDATED: ChannelPermissions.MANAGE_CHANNELS,
CHANNEL_DELETED: ChannelPermissions.MANAGE_CHANNELS,
CHANNEL_STATUS_CHANGED: ChannelPermissions.MANAGE_CHANNELS,
GIFT_CARD_CREATED: GiftcardPermissions.MANAGE_GIFT_CARD,
GIFT_CARD_UPDATED: GiftcardPermissions.MANAGE_GIFT_CARD,
GIFT_CARD_DELETED: GiftcardPermissions.MANAGE_GIFT_CARD,
GIFT_CARD_STATUS_CHANGED: GiftcardPermissions.MANAGE_GIFT_CARD,
MENU_CREATED: MenuPermissions.MANAGE_MENUS,
MENU_UPDATED: MenuPermissions.MANAGE_MENUS,
MENU_DELETED: MenuPermissions.MANAGE_MENUS,
MENU_ITEM_CREATED: MenuPermissions.MANAGE_MENUS,
MENU_ITEM_UPDATED: MenuPermissions.MANAGE_MENUS,
MENU_ITEM_DELETED: MenuPermissions.MANAGE_MENUS,
ORDER_CREATED: OrderPermissions.MANAGE_ORDERS,
ORDER_CONFIRMED: OrderPermissions.MANAGE_ORDERS,
ORDER_FULLY_PAID: OrderPermissions.MANAGE_ORDERS,
ORDER_UPDATED: OrderPermissions.MANAGE_ORDERS,
ORDER_CANCELLED: OrderPermissions.MANAGE_ORDERS,
ORDER_FULFILLED: OrderPermissions.MANAGE_ORDERS,
DRAFT_ORDER_CREATED: OrderPermissions.MANAGE_ORDERS,
DRAFT_ORDER_DELETED: OrderPermissions.MANAGE_ORDERS,
DRAFT_ORDER_UPDATED: OrderPermissions.MANAGE_ORDERS,
SALE_CREATED: DiscountPermissions.MANAGE_DISCOUNTS,
SALE_UPDATED: DiscountPermissions.MANAGE_DISCOUNTS,
SALE_DELETED: DiscountPermissions.MANAGE_DISCOUNTS,
INVOICE_REQUESTED: OrderPermissions.MANAGE_ORDERS,
INVOICE_DELETED: OrderPermissions.MANAGE_ORDERS,
INVOICE_SENT: OrderPermissions.MANAGE_ORDERS,
CUSTOMER_CREATED: AccountPermissions.MANAGE_USERS,
CUSTOMER_UPDATED: AccountPermissions.MANAGE_USERS,
COLLECTION_CREATED: ProductPermissions.MANAGE_PRODUCTS,
COLLECTION_UPDATED: ProductPermissions.MANAGE_PRODUCTS,
COLLECTION_DELETED: ProductPermissions.MANAGE_PRODUCTS,
PRODUCT_CREATED: ProductPermissions.MANAGE_PRODUCTS,
PRODUCT_UPDATED: ProductPermissions.MANAGE_PRODUCTS,
PRODUCT_DELETED: ProductPermissions.MANAGE_PRODUCTS,
PRODUCT_VARIANT_CREATED: ProductPermissions.MANAGE_PRODUCTS,
PRODUCT_VARIANT_UPDATED: ProductPermissions.MANAGE_PRODUCTS,
PRODUCT_VARIANT_DELETED: ProductPermissions.MANAGE_PRODUCTS,
PRODUCT_VARIANT_BACK_IN_STOCK: ProductPermissions.MANAGE_PRODUCTS,
PRODUCT_VARIANT_OUT_OF_STOCK: ProductPermissions.MANAGE_PRODUCTS,
CHECKOUT_CREATED: CheckoutPermissions.MANAGE_CHECKOUTS,
CHECKOUT_UPDATED: CheckoutPermissions.MANAGE_CHECKOUTS,
FULFILLMENT_CREATED: OrderPermissions.MANAGE_ORDERS,
FULFILLMENT_CANCELED: OrderPermissions.MANAGE_ORDERS,
NOTIFY_USER: AccountPermissions.MANAGE_USERS,
PAGE_CREATED: PagePermissions.MANAGE_PAGES,
PAGE_UPDATED: PagePermissions.MANAGE_PAGES,
PAGE_DELETED: PagePermissions.MANAGE_PAGES,
SHIPPING_PRICE_CREATED: ShippingPermissions.MANAGE_SHIPPING,
SHIPPING_PRICE_UPDATED: ShippingPermissions.MANAGE_SHIPPING,
SHIPPING_PRICE_DELETED: ShippingPermissions.MANAGE_SHIPPING,
SHIPPING_ZONE_CREATED: ShippingPermissions.MANAGE_SHIPPING,
SHIPPING_ZONE_UPDATED: ShippingPermissions.MANAGE_SHIPPING,
SHIPPING_ZONE_DELETED: ShippingPermissions.MANAGE_SHIPPING,
TRANSACTION_ACTION_REQUEST: PaymentPermissions.HANDLE_PAYMENTS,
TRANSLATION_CREATED: SitePermissions.MANAGE_TRANSLATIONS,
TRANSLATION_UPDATED: SitePermissions.MANAGE_TRANSLATIONS,
WAREHOUSE_CREATED: ProductPermissions.MANAGE_PRODUCTS,
WAREHOUSE_UPDATED: ProductPermissions.MANAGE_PRODUCTS,
WAREHOUSE_DELETED: ProductPermissions.MANAGE_PRODUCTS,
VOUCHER_CREATED: DiscountPermissions.MANAGE_DISCOUNTS,
VOUCHER_UPDATED: DiscountPermissions.MANAGE_DISCOUNTS,
VOUCHER_DELETED: DiscountPermissions.MANAGE_DISCOUNTS,
OBSERVABILITY: AppPermission.MANAGE_OBSERVABILITY,
}
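# Example lookups against the maps defined above (illustrative only):
#   WebhookEventAsyncType.PERMISSIONS[WebhookEventAsyncType.ORDER_CREATED]
#       -> OrderPermissions.MANAGE_ORDERS
#   WebhookEventAsyncType.DISPLAY_LABELS[WebhookEventAsyncType.ORDER_CREATED]
#       -> "Order created"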
class WebhookEventSyncType:
PAYMENT_LIST_GATEWAYS = "payment_list_gateways"
PAYMENT_AUTHORIZE = "payment_authorize"
PAYMENT_CAPTURE = "payment_capture"
PAYMENT_REFUND = "payment_refund"
PAYMENT_VOID = "payment_void"
PAYMENT_CONFIRM = "payment_confirm"
PAYMENT_PROCESS = "payment_process"
SHIPPING_LIST_METHODS_FOR_CHECKOUT = "shipping_list_methods_for_checkout"
CHECKOUT_FILTER_SHIPPING_METHODS = "checkout_filter_shipping_methods"
ORDER_FILTER_SHIPPING_METHODS = "order_filter_shipping_methods"
DISPLAY_LABELS = {
PAYMENT_AUTHORIZE: "Authorize payment",
PAYMENT_CAPTURE: "Capture payment",
PAYMENT_CONFIRM: "Confirm payment",
PAYMENT_LIST_GATEWAYS: "List payment gateways",
PAYMENT_PROCESS: "Process payment",
PAYMENT_REFUND: "Refund payment",
PAYMENT_VOID: "Void payment",
SHIPPING_LIST_METHODS_FOR_CHECKOUT: "Shipping list methods for checkout",
ORDER_FILTER_SHIPPING_METHODS: "Filter order shipping methods",
CHECKOUT_FILTER_SHIPPING_METHODS: "Filter checkout shipping methods",
}
CHOICES = [
(PAYMENT_AUTHORIZE, DISPLAY_LABELS[PAYMENT_AUTHORIZE]),
(PAYMENT_CAPTURE, DISPLAY_LABELS[PAYMENT_CAPTURE]),
(PAYMENT_CONFIRM, DISPLAY_LABELS[PAYMENT_CONFIRM]),
(PAYMENT_LIST_GATEWAYS, DISPLAY_LABELS[PAYMENT_LIST_GATEWAYS]),
(PAYMENT_PROCESS, DISPLAY_LABELS[PAYMENT_PROCESS]),
(PAYMENT_REFUND, DISPLAY_LABELS[PAYMENT_REFUND]),
(PAYMENT_VOID, DISPLAY_LABELS[PAYMENT_VOID]),
(
SHIPPING_LIST_METHODS_FOR_CHECKOUT,
DISPLAY_LABELS[SHIPPING_LIST_METHODS_FOR_CHECKOUT],
),
(ORDER_FILTER_SHIPPING_METHODS, DISPLAY_LABELS[ORDER_FILTER_SHIPPING_METHODS]),
(
CHECKOUT_FILTER_SHIPPING_METHODS,
DISPLAY_LABELS[CHECKOUT_FILTER_SHIPPING_METHODS],
),
]
ALL = [event[0] for event in CHOICES]
PAYMENT_EVENTS = [
PAYMENT_AUTHORIZE,
PAYMENT_CAPTURE,
PAYMENT_CONFIRM,
PAYMENT_LIST_GATEWAYS,
PAYMENT_PROCESS,
PAYMENT_REFUND,
PAYMENT_VOID,
]
PERMISSIONS = {
PAYMENT_AUTHORIZE: PaymentPermissions.HANDLE_PAYMENTS,
PAYMENT_CAPTURE: PaymentPermissions.HANDLE_PAYMENTS,
PAYMENT_CONFIRM: PaymentPermissions.HANDLE_PAYMENTS,
PAYMENT_LIST_GATEWAYS: PaymentPermissions.HANDLE_PAYMENTS,
PAYMENT_PROCESS: PaymentPermissions.HANDLE_PAYMENTS,
PAYMENT_REFUND: PaymentPermissions.HANDLE_PAYMENTS,
PAYMENT_VOID: PaymentPermissions.HANDLE_PAYMENTS,
SHIPPING_LIST_METHODS_FOR_CHECKOUT: ShippingPermissions.MANAGE_SHIPPING,
ORDER_FILTER_SHIPPING_METHODS: OrderPermissions.MANAGE_ORDERS,
CHECKOUT_FILTER_SHIPPING_METHODS: CheckoutPermissions.MANAGE_CHECKOUTS,
}
SUBSCRIBABLE_EVENTS = [
WebhookEventAsyncType.APP_INSTALLED,
WebhookEventAsyncType.APP_UPDATED,
WebhookEventAsyncType.APP_DELETED,
WebhookEventAsyncType.APP_STATUS_CHANGED,
WebhookEventAsyncType.CATEGORY_CREATED,
WebhookEventAsyncType.CATEGORY_UPDATED,
WebhookEventAsyncType.CATEGORY_DELETED,
WebhookEventAsyncType.CHANNEL_CREATED,
WebhookEventAsyncType.CHANNEL_UPDATED,
WebhookEventAsyncType.CHANNEL_DELETED,
WebhookEventAsyncType.CHANNEL_STATUS_CHANGED,
WebhookEventAsyncType.GIFT_CARD_CREATED,
WebhookEventAsyncType.GIFT_CARD_UPDATED,
WebhookEventAsyncType.GIFT_CARD_DELETED,
WebhookEventAsyncType.GIFT_CARD_STATUS_CHANGED,
WebhookEventAsyncType.MENU_CREATED,
WebhookEventAsyncType.MENU_UPDATED,
WebhookEventAsyncType.MENU_DELETED,
WebhookEventAsyncType.MENU_ITEM_CREATED,
WebhookEventAsyncType.MENU_ITEM_UPDATED,
WebhookEventAsyncType.MENU_ITEM_DELETED,
WebhookEventAsyncType.ORDER_CREATED,
WebhookEventAsyncType.ORDER_UPDATED,
WebhookEventAsyncType.ORDER_CONFIRMED,
WebhookEventAsyncType.ORDER_FULLY_PAID,
WebhookEventAsyncType.ORDER_FULFILLED,
WebhookEventAsyncType.ORDER_CANCELLED,
WebhookEventAsyncType.DRAFT_ORDER_CREATED,
WebhookEventAsyncType.DRAFT_ORDER_UPDATED,
WebhookEventAsyncType.DRAFT_ORDER_DELETED,
WebhookEventAsyncType.PRODUCT_CREATED,
WebhookEventAsyncType.PRODUCT_UPDATED,
WebhookEventAsyncType.PRODUCT_DELETED,
WebhookEventAsyncType.PRODUCT_VARIANT_DELETED,
WebhookEventAsyncType.PRODUCT_VARIANT_CREATED,
WebhookEventAsyncType.PRODUCT_VARIANT_UPDATED,
WebhookEventAsyncType.PRODUCT_VARIANT_BACK_IN_STOCK,
WebhookEventAsyncType.PRODUCT_VARIANT_OUT_OF_STOCK,
WebhookEventAsyncType.SALE_CREATED,
WebhookEventAsyncType.SALE_UPDATED,
WebhookEventAsyncType.SALE_DELETED,
WebhookEventAsyncType.INVOICE_REQUESTED,
WebhookEventAsyncType.INVOICE_DELETED,
WebhookEventAsyncType.INVOICE_SENT,
WebhookEventAsyncType.FULFILLMENT_CREATED,
WebhookEventAsyncType.FULFILLMENT_CANCELED,
WebhookEventAsyncType.CUSTOMER_CREATED,
WebhookEventAsyncType.CUSTOMER_UPDATED,
WebhookEventAsyncType.COLLECTION_CREATED,
WebhookEventAsyncType.COLLECTION_UPDATED,
WebhookEventAsyncType.COLLECTION_DELETED,
WebhookEventAsyncType.CHECKOUT_CREATED,
WebhookEventAsyncType.CHECKOUT_UPDATED,
WebhookEventAsyncType.PAGE_CREATED,
WebhookEventAsyncType.PAGE_UPDATED,
WebhookEventAsyncType.PAGE_DELETED,
WebhookEventAsyncType.SHIPPING_PRICE_CREATED,
WebhookEventAsyncType.SHIPPING_PRICE_UPDATED,
WebhookEventAsyncType.SHIPPING_PRICE_DELETED,
WebhookEventAsyncType.SHIPPING_ZONE_CREATED,
WebhookEventAsyncType.SHIPPING_ZONE_UPDATED,
WebhookEventAsyncType.SHIPPING_ZONE_DELETED,
WebhookEventAsyncType.TRANSACTION_ACTION_REQUEST,
WebhookEventAsyncType.TRANSLATION_CREATED,
WebhookEventAsyncType.TRANSLATION_UPDATED,
WebhookEventAsyncType.WAREHOUSE_CREATED,
WebhookEventAsyncType.WAREHOUSE_UPDATED,
WebhookEventAsyncType.WAREHOUSE_DELETED,
WebhookEventAsyncType.VOUCHER_CREATED,
WebhookEventAsyncType.VOUCHER_UPDATED,
WebhookEventAsyncType.VOUCHER_DELETED,
]
| 45.292339 | 87 | 0.756421 |
4906c9c134b668a4a31a82f176a7e86432de681e | 12,166 | py | Python | lmfit/astutils.py | tritemio/lmfit-py | 9002013a853efa50d11fdbcfafa4b17216bcc3ff | [
"BSD-3-Clause"
] | null | null | null | lmfit/astutils.py | tritemio/lmfit-py | 9002013a853efa50d11fdbcfafa4b17216bcc3ff | [
"BSD-3-Clause"
] | null | null | null | lmfit/astutils.py | tritemio/lmfit-py | 9002013a853efa50d11fdbcfafa4b17216bcc3ff | [
"BSD-3-Clause"
] | null | null | null | """
utility functions for asteval
Matthew Newville <newville@cars.uchicago.edu>,
The University of Chicago
"""
from __future__ import division, print_function
import re
import ast
from sys import exc_info
RESERVED_WORDS = ('and', 'as', 'assert', 'break', 'class', 'continue',
'def', 'del', 'elif', 'else', 'except', 'exec',
'finally', 'for', 'from', 'global', 'if', 'import',
'in', 'is', 'lambda', 'not', 'or', 'pass', 'print',
'raise', 'return', 'try', 'while', 'with', 'True',
'False', 'None', 'eval', 'execfile', '__import__',
'__package__')
NAME_MATCH = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$").match
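# NAME_MATCH screens symbol names, accepting only plain Python identifiers, e.g.:
#   NAME_MATCH('valid_name1') -> a match object (truthy)
#   NAME_MATCH('1invalid')    -> None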
UNSAFE_ATTRS = ('__subclasses__', '__bases__', '__globals__', '__code__',
'__closure__', '__func__', '__self__', '__module__',
'__dict__', '__class__', '__call__', '__get__',
'__getattribute__', '__subclasshook__', '__new__',
'__init__', 'func_globals', 'func_code', 'func_closure',
'im_class', 'im_func', 'im_self', 'gi_code', 'gi_frame',
'__asteval__')
# inherit these from python's __builtins__
FROM_PY = ('ArithmeticError', 'AssertionError', 'AttributeError',
'BaseException', 'BufferError', 'BytesWarning',
'DeprecationWarning', 'EOFError', 'EnvironmentError',
'Exception', 'False', 'FloatingPointError', 'GeneratorExit',
'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
'MemoryError', 'NameError', 'None',
'NotImplementedError', 'OSError', 'OverflowError',
'ReferenceError', 'RuntimeError', 'RuntimeWarning',
'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError',
'SystemExit', 'True', 'TypeError', 'UnboundLocalError',
'UnicodeDecodeError', 'UnicodeEncodeError', 'UnicodeError',
'UnicodeTranslateError', 'UnicodeWarning', 'ValueError',
'Warning', 'ZeroDivisionError', 'abs', 'all', 'any', 'bin',
'bool', 'bytearray', 'bytes', 'chr', 'complex', 'dict', 'dir',
'divmod', 'enumerate', 'filter', 'float', 'format', 'frozenset',
'hash', 'hex', 'id', 'int', 'isinstance', 'len', 'list', 'map',
'max', 'min', 'oct', 'ord', 'pow', 'range', 'repr',
'reversed', 'round', 'set', 'slice', 'sorted', 'str', 'sum',
'tuple', 'type', 'zip')
# inherit these from python's math
FROM_MATH = ('acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'exp',
'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum',
'hypot', 'isinf', 'isnan', 'ldexp', 'log', 'log10', 'log1p',
'modf', 'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan',
'tanh', 'trunc')
FROM_NUMPY = ('Inf', 'NAN', 'abs', 'add', 'alen', 'all', 'amax', 'amin',
'angle', 'any', 'append', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
'argmax', 'argmin', 'argsort', 'argwhere', 'around', 'array',
'array2string', 'asanyarray', 'asarray', 'asarray_chkfinite',
'ascontiguousarray', 'asfarray', 'asfortranarray',
'asmatrix', 'asscalar', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average', 'bartlett', 'base_repr',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor',
'blackman', 'bool', 'broadcast', 'broadcast_arrays', 'byte',
'c_', 'cdouble', 'ceil', 'cfloat', 'chararray', 'choose',
'clip', 'clongdouble', 'clongfloat', 'column_stack',
'common_type', 'complex', 'complex128', 'complex64',
'complex_', 'complexfloating', 'compress', 'concatenate',
'conjugate', 'convolve', 'copy', 'copysign', 'corrcoef',
'correlate', 'cos', 'cosh', 'cov', 'cross', 'csingle',
'cumprod', 'cumsum', 'datetime_data', 'deg2rad', 'degrees',
'delete', 'diag', 'diag_indices', 'diag_indices_from',
'diagflat', 'diagonal', 'diff', 'digitize', 'divide', 'dot',
'double', 'dsplit', 'dstack', 'dtype', 'e', 'ediff1d',
'empty', 'empty_like', 'equal', 'exp', 'exp2', 'expand_dims',
'expm1', 'extract', 'eye', 'fabs', 'fill_diagonal', 'finfo',
'fix', 'flatiter', 'flatnonzero', 'fliplr', 'flipud',
'float', 'float32', 'float64', 'float_', 'floating', 'floor',
'floor_divide', 'fmax', 'fmin', 'fmod', 'format_parser',
'frexp', 'frombuffer', 'fromfile', 'fromfunction',
'fromiter', 'frompyfunc', 'fromregex', 'fromstring', 'fv',
'genfromtxt', 'getbufsize', 'geterr', 'gradient', 'greater',
'greater_equal', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot',
'i0', 'identity', 'iinfo', 'imag', 'in1d', 'index_exp',
'indices', 'inexact', 'inf', 'info', 'infty', 'inner',
'insert', 'int', 'int0', 'int16', 'int32', 'int64', 'int8',
'int_', 'int_asbuffer', 'intc', 'integer', 'interp',
'intersect1d', 'intp', 'invert', 'ipmt', 'irr', 'iscomplex',
'iscomplexobj', 'isfinite', 'isfortran', 'isinf', 'isnan',
'isneginf', 'isposinf', 'isreal', 'isrealobj', 'isscalar',
'issctype', 'iterable', 'ix_', 'kaiser', 'kron', 'ldexp',
'left_shift', 'less', 'less_equal', 'linspace',
'little_endian', 'load', 'loads', 'loadtxt', 'log', 'log10',
'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and',
'logical_not', 'logical_or', 'logical_xor', 'logspace',
'long', 'longcomplex', 'longdouble', 'longfloat', 'longlong',
'mafromtxt', 'mask_indices', 'mat', 'matrix',
'maximum', 'maximum_sctype', 'may_share_memory', 'mean',
'median', 'memmap', 'meshgrid', 'mgrid', 'minimum',
'mintypecode', 'mirr', 'mod', 'modf', 'msort', 'multiply',
'nan', 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax',
'nanmin', 'nansum', 'ndarray', 'ndenumerate', 'ndfromtxt',
'ndim', 'ndindex', 'negative', 'newaxis', 'nextafter',
'nonzero', 'not_equal', 'nper', 'npv', 'number',
'obj2sctype', 'ogrid', 'ones', 'ones_like', 'outer',
'packbits', 'percentile', 'pi', 'piecewise', 'place', 'pmt',
'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv', 'polyfit',
'polyint', 'polymul', 'polysub', 'polyval', 'power', 'ppmt',
'prod', 'product', 'ptp', 'put', 'putmask', 'pv', 'r_',
'rad2deg', 'radians', 'rank', 'rate', 'ravel', 'real',
'real_if_close', 'reciprocal', 'record', 'remainder',
'repeat', 'reshape', 'resize', 'restoredot', 'right_shift',
'rint', 'roll', 'rollaxis', 'roots', 'rot90', 'round',
'round_', 'row_stack', 's_', 'sctype2char', 'searchsorted',
'select', 'setbufsize', 'setdiff1d', 'seterr', 'setxor1d',
'shape', 'short', 'sign', 'signbit', 'signedinteger', 'sin',
'sinc', 'single', 'singlecomplex', 'sinh', 'size',
'sometrue', 'sort', 'sort_complex', 'spacing', 'split',
'sqrt', 'square', 'squeeze', 'std', 'str', 'str_',
'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh',
'tensordot', 'tile', 'trace', 'transpose', 'trapz', 'tri',
'tril', 'tril_indices', 'tril_indices_from', 'trim_zeros',
'triu', 'triu_indices', 'triu_indices_from', 'true_divide',
'trunc', 'ubyte', 'uint', 'uint0', 'uint16', 'uint32',
'uint64', 'uint8', 'uintc', 'uintp', 'ulonglong', 'union1d',
'unique', 'unravel_index', 'unsignedinteger', 'unwrap',
'ushort', 'vander', 'var', 'vdot', 'vectorize', 'vsplit',
'vstack', 'where', 'who', 'zeros', 'zeros_like')
NUMPY_RENAMES = {'ln': 'log', 'asin': 'arcsin', 'acos': 'arccos',
'atan': 'arctan', 'atan2': 'arctan2', 'atanh':
'arctanh', 'acosh': 'arccosh', 'asinh': 'arcsinh'}
def _open(filename, mode='r', buffering=0):
"""read only version of open()"""
umode = 'r'
if mode == 'rb':
umode = 'rb'
return open(filename, umode, buffering)
LOCALFUNCS = {'open': _open}
OPERATORS = {ast.Is: lambda a, b: a is b,
ast.IsNot: lambda a, b: a is not b,
ast.In: lambda a, b: a in b,
ast.NotIn: lambda a, b: a not in b,
ast.Add: lambda a, b: a + b,
ast.BitAnd: lambda a, b: a & b,
ast.BitOr: lambda a, b: a | b,
ast.BitXor: lambda a, b: a ^ b,
ast.Div: lambda a, b: a / b,
ast.FloorDiv: lambda a, b: a // b,
ast.LShift: lambda a, b: a << b,
ast.RShift: lambda a, b: a >> b,
ast.Mult: lambda a, b: a * b,
ast.Pow: lambda a, b: a ** b,
ast.Sub: lambda a, b: a - b,
ast.Mod: lambda a, b: a % b,
ast.And: lambda a, b: a and b,
ast.Or: lambda a, b: a or b,
ast.Eq: lambda a, b: a == b,
ast.Gt: lambda a, b: a > b,
ast.GtE: lambda a, b: a >= b,
ast.Lt: lambda a, b: a < b,
ast.LtE: lambda a, b: a <= b,
ast.NotEq: lambda a, b: a != b,
ast.Invert: lambda a: ~a,
ast.Not: lambda a: not a,
ast.UAdd: lambda a: +a,
ast.USub: lambda a: -a}
def valid_symbol_name(name):
"""determines whether the input symbol name is a valid name
This checks for reserved words, and that the name matches the
regular expression ``[a-zA-Z_][a-zA-Z0-9_]``
"""
if name in RESERVED_WORDS:
return False
return NAME_MATCH(name) is not None
def op2func(op):
"return function for operator nodes"
return OPERATORS[op.__class__]
class Empty:
"""empty class"""
def __init__(self):
pass
def __nonzero__(self):
return False
ReturnedNone = Empty()
class ExceptionHolder(object):
"basic exception handler"
def __init__(self, node, exc=None, msg='', expr=None, lineno=None):
self.node = node
self.expr = expr
self.msg = msg
self.exc = exc
self.lineno = lineno
self.exc_info = exc_info()
if self.exc is None and self.exc_info[0] is not None:
self.exc = self.exc_info[0]
        if self.msg == '' and self.exc_info[1] is not None:
self.msg = self.exc_info[1]
def get_error(self):
"retrieve error data"
col_offset = -1
if self.node is not None:
try:
col_offset = self.node.col_offset
except AttributeError:
pass
try:
exc_name = self.exc.__name__
except AttributeError:
exc_name = str(self.exc)
if exc_name in (None, 'None'):
exc_name = 'UnknownError'
out = [" %s" % self.expr]
if col_offset > 0:
out.append(" %s^^^" % ((col_offset)*' '))
out.append(str(self.msg))
return (exc_name, '\n'.join(out))
class NameFinder(ast.NodeVisitor):
"""find all symbol names used by a parsed node"""
def __init__(self):
self.names = []
ast.NodeVisitor.__init__(self)
def generic_visit(self, node):
if node.__class__.__name__ == 'Name':
if node.ctx.__class__ == ast.Load and node.id not in self.names:
self.names.append(node.id)
ast.NodeVisitor.generic_visit(self, node)
def get_ast_names(astnode):
"returns symbol Names from an AST node"
finder = NameFinder()
finder.generic_visit(astnode)
return finder.names
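# Illustrative usage sketch (added for clarity; not part of the original module).
# It exercises the helpers above on a tiny parsed expression; the guard keeps it
# from running on import.
if __name__ == "__main__":
    _demo_expr = ast.parse("a + b * 2", mode="eval")
    print(get_ast_names(_demo_expr))       # ['a', 'b']
    print(valid_symbol_name("result_1"))   # True: matches NAME_MATCH and is not reserved
    print(valid_symbol_name("lambda"))     # False: reserved word
    _add_node = ast.parse("1 + 2", mode="eval").body.op   # an ast.Add node
    print(op2func(_add_node)(1, 2))        # 3: OPERATORS maps ast.Add to a + b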
| 46.972973 | 76 | 0.527043 |
5d65221d4139b0a80adacc02765e17878d1975fa | 1,643 | gyp | Python | deps/libgdal/gyp-formats/ogr_tiger.gyp | blairdgeo/node-gdal | a9bb3c082b30605ed1668dd9fe49afd25a7bb9d6 | [
"Apache-2.0"
] | 1 | 2015-07-04T20:09:20.000Z | 2015-07-04T20:09:20.000Z | deps/libgdal/gyp-formats/ogr_tiger.gyp | blairdgeo/node-gdal | a9bb3c082b30605ed1668dd9fe49afd25a7bb9d6 | [
"Apache-2.0"
] | null | null | null | deps/libgdal/gyp-formats/ogr_tiger.gyp | blairdgeo/node-gdal | a9bb3c082b30605ed1668dd9fe49afd25a7bb9d6 | [
"Apache-2.0"
] | null | null | null | {
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_ogr_tiger_frmt",
"type": "static_library",
"sources": [
"../gdal/ogr/ogrsf_frmts/tiger/ogrtigerdatasource.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/ogrtigerdriver.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/ogrtigerlayer.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigeraltname.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerarealandmarks.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigercompletechain.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerentitynames.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerfeatureids.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerfilebase.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigeridhistory.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerinfo.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerkeyfeatures.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerlandmarks.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigeroverunder.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerpip.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerpoint.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerpolychainlink.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerpolygon.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerpolygoncorrections.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerpolygoneconomic.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerspatialmetadata.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigertlidrange.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerzerocellid.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerzipcodes.cpp",
"../gdal/ogr/ogrsf_frmts/tiger/tigerzipplus4.cpp"
],
"include_dirs": [
"../gdal/ogr/ogrsf_frmts/tiger"
]
}
]
}
| 39.119048 | 64 | 0.692635 |
536d44484759fcb32b6ac551eb7a4fc9bc8c72df | 17,813 | py | Python | neurokit2/signal/signal_psd.py | gutierrezps/NeuroKit | a30f76e64b4108abdc652a20391dc0288c62501d | [
"MIT"
] | 1 | 2022-03-20T21:09:34.000Z | 2022-03-20T21:09:34.000Z | neurokit2/signal/signal_psd.py | Lei-I-Zhang/NeuroKit | a30f76e64b4108abdc652a20391dc0288c62501d | [
"MIT"
] | null | null | null | neurokit2/signal/signal_psd.py | Lei-I-Zhang/NeuroKit | a30f76e64b4108abdc652a20391dc0288c62501d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.signal
from ..misc import NeuroKitWarning
def signal_psd(
signal,
sampling_rate=1000,
method="welch",
show=False,
normalize=True,
min_frequency="default",
max_frequency=np.inf,
window=None,
window_type="hann",
order=16,
order_criteria="KIC",
order_corrected=True,
silent=True,
**kwargs,
):
"""Compute the Power Spectral Density (PSD).
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second).
method : str
Either 'welch' (default), 'fft', 'multitapers' (requires the 'mne' package), 'lombscargle'
(requires the 'astropy' package) or 'burg'.
show : bool
If True, will return a plot. If False, will return the density values that can be plotted externally.
normalize : bool
Normalization of power by maximum PSD value. Default to True.
Normalization allows comparison between different PSD methods.
min_frequency : str, float
The minimum frequency. If "default", min_frequency is chosen based on the sampling rate and length of signal to
optimize the frequency resolution.
max_frequency : float
The maximum frequency.
window : int
Length of each window in seconds (for Welch method). If None (default), window will be automatically
        calculated to capture at least 2 cycles of min_frequency. If the length of the recording does not
        allow the former, the window will default to half of the length of the recording.
window_type : str
Desired window to use. Defaults to 'hann'. See ``scipy.signal.get_window()`` for list of windows.
order : int
The order of autoregression (only used for autoregressive (AR) methods such as 'burg').
order_criteria : str
The criteria to automatically select order in parametric PSD (only used for autoregressive
(AR) methods such as 'burg').
order_corrected : bool
Should the order criteria (AIC or KIC) be corrected? If unsure which method to use to choose
the order, rely on the default (i.e., the corrected KIC).
silent : bool
If False, warnings will be printed. Default to True.
**kwargs : optional
Keyword arguments to be passed to `scipy.signal.welch()`.
See Also
--------
signal_filter, mne.time_frequency.psd_array_multitaper, scipy.signal.welch
Returns
-------
data : pd.DataFrame
A DataFrame containing the Power Spectrum values and a plot if
`show` is True.
Examples
--------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=2, frequency=[5, 6, 50, 52, 80], noise=0.5)
>>>
>>> # FFT method (based on numpy)
    >>> psd_fft = nk.signal_psd(signal, method="fft", show=True)
>>> # Welch method (based on scipy)
>>> psd_welch = nk.signal_psd(signal, method="welch", min_frequency=1, show=True)
>>> # Multitapers method (requires MNE)
>>> psd_multitapers = nk.signal_psd(signal, method="multitapers", show=True)
>>> # Burg method
>>> psd_burg = nk.signal_psd(signal, method="burg", min_frequency=1, show=True)
>>> # Lomb method (requires AstroPy)
>>> psd_lomb = nk.signal_psd(signal, method="lomb", min_frequency=1, show=True)
"""
# Constant Detrend
signal = signal - np.mean(signal)
# Sanitize method name
method = method.lower()
# Sanitize min_frequency
N = len(signal)
if isinstance(min_frequency, str):
min_frequency = (2 * sampling_rate) / (N / 2) # for high frequency resolution
# MNE
if method in ["multitaper", "multitapers", "mne"]:
frequency, power = _signal_psd_multitaper(
signal,
sampling_rate=sampling_rate,
min_frequency=min_frequency,
max_frequency=max_frequency,
)
# FFT (Numpy)
elif method in ["fft"]:
frequency, power = _signal_psd_fft(signal, sampling_rate=sampling_rate)
    # Lombscargle (AstroPy)
elif method.lower() in ["lombscargle", "lomb"]:
frequency, power = _signal_psd_lomb(
signal,
sampling_rate=sampling_rate,
min_frequency=min_frequency,
max_frequency=max_frequency,
)
    # Methods that use a window
else:
# Define window length
if min_frequency == 0:
min_frequency = 0.001 # sanitize min_frequency
if window is not None:
nperseg = int(window * sampling_rate)
else:
# to capture at least 2 cycles of min_frequency
nperseg = int((2 / min_frequency) * sampling_rate)
# in case duration of recording is not sufficient
if nperseg > N / 2:
if silent is False:
warn(
"The duration of recording is too short to support a"
" sufficiently long window for high frequency resolution."
" Consider using a longer recording or increasing the `min_frequency`",
category=NeuroKitWarning,
)
nperseg = int(N / 2)
# Welch (Scipy)
if method.lower() in ["welch"]:
frequency, power = _signal_psd_welch(
signal,
sampling_rate=sampling_rate,
nperseg=nperseg,
window_type=window_type,
**kwargs,
)
# BURG
elif method.lower() in ["burg", "pburg", "spectrum"]:
frequency, power = _signal_psd_burg(
signal,
sampling_rate=sampling_rate,
order=order,
criteria=order_criteria,
corrected=order_corrected,
side="one-sided",
nperseg=nperseg,
)
# Normalize
if normalize is True:
power /= np.max(power)
# Store results
data = pd.DataFrame({"Frequency": frequency, "Power": power})
# Filter
data = data.loc[
np.logical_and(data["Frequency"] >= min_frequency, data["Frequency"] <= max_frequency)
]
# data["Power"] = 10 * np.log(data["Power"])
if show is True:
ax = data.plot(
x="Frequency", y="Power", title="Power Spectral Density (" + str(method) + " method)"
)
ax.set(xlabel="Frequency (Hz)", ylabel="Spectrum")
return data
# =============================================================================
# Multitaper method
# =============================================================================
def _signal_psd_fft(signal, sampling_rate=1000):
# Power-spectrum density (PSD)
power = np.abs(np.fft.rfft(signal)) ** 2
frequency = np.linspace(0, sampling_rate / 2, len(power))
return frequency, power
# =============================================================================
# Multitaper method
# =============================================================================
def _signal_psd_multitaper(signal, sampling_rate=1000, min_frequency=0, max_frequency=np.inf):
try:
import mne
except ImportError as e:
raise ImportError(
"NeuroKit error: signal_psd(): the 'mne'",
" module is required for the 'mne' method to run.",
" Please install it first (`pip install mne`).",
) from e
power, frequency = mne.time_frequency.psd_array_multitaper(
signal,
sfreq=sampling_rate,
fmin=min_frequency,
fmax=max_frequency,
adaptive=True,
normalization="full",
verbose=False,
)
return frequency, power
# =============================================================================
# Welch method
# =============================================================================
def _signal_psd_welch(signal, sampling_rate=1000, nperseg=None, window_type="hann", **kwargs):
if nperseg is not None:
nfft = int(nperseg * 2)
else:
nfft = None
frequency, power = scipy.signal.welch(
signal,
fs=sampling_rate,
scaling="density",
detrend=False,
nfft=nfft,
average="mean",
nperseg=nperseg,
window=window_type,
**kwargs,
)
return frequency, power
# =============================================================================
# Lomb method
# =============================================================================
def _signal_psd_lomb(signal, sampling_rate=1000, min_frequency=0, max_frequency=np.inf):
try:
import astropy.timeseries
if max_frequency == np.inf:
max_frequency = sampling_rate / 2 # sanitize highest frequency
t = np.arange(len(signal)) / sampling_rate
frequency, power = astropy.timeseries.LombScargle(t, signal, normalization="psd").autopower(
minimum_frequency=min_frequency, maximum_frequency=max_frequency
)
except ImportError as e:
raise ImportError(
"NeuroKit error: signal_psd(): the 'astropy'",
" module is required for the 'lomb' method to run.",
" Please install it first (`pip install astropy`).",
) from e
return frequency, power
# =============================================================================
# Burg method
# =============================================================================
def _signal_psd_burg(
signal,
sampling_rate=1000,
order=16,
criteria="KIC",
corrected=True,
side="one-sided",
nperseg=None,
):
nfft = int(nperseg * 2)
ar, rho, _ = _signal_arma_burg(signal, order=order, criteria=criteria, corrected=corrected)
psd = _signal_psd_from_arma(ar=ar, rho=rho, sampling_rate=sampling_rate, nfft=nfft, side=side)
# signal is real, not complex
if nfft % 2 == 0:
power = psd[0 : int(nfft / 2 + 1)] * 2
else:
power = psd[0 : int((nfft + 1) / 2)] * 2
# angular frequencies, w
# for one-sided psd, w spans [0, pi]
    # for two-sided psd, w spans [0, 2pi)
    # for dc-centered psd, w spans (-pi, pi] for even nfft, (-pi, pi) for odd nfft
if side == "one-sided":
w = np.pi * np.linspace(0, 1, len(power))
# elif side == "two-sided":
# w = np.pi * np.linspace(0, 2, len(power), endpoint=False) #exclude last point
# elif side == "centerdc":
# if nfft % 2 == 0:
# w = np.pi * np.linspace(-1, 1, len(power))
# else:
# w = np.pi * np.linspace(-1, 1, len(power) + 1, endpoint=False) # exclude last point
# w = w[1:] # exclude first point (extra)
frequency = (w * sampling_rate) / (2 * np.pi)
return frequency, power
def _signal_arma_burg(signal, order=16, criteria="KIC", corrected=True):
# Sanitize order and signal
N = len(signal)
if order <= 0.0:
raise ValueError("Order must be > 0")
if order > N:
raise ValueError("Order must be less than length signal minus 2")
if not isinstance(signal, np.ndarray):
signal = np.array(signal)
# Initialisation
# rho is variance of driving white noise process (prediction error)
rho = sum(abs(signal) ** 2.0) / float(N)
denominator = rho * 2.0 * N
ar = np.zeros(0, dtype=complex) # AR parametric signal model estimate
ref = np.zeros(0, dtype=complex) # vector K of reflection coefficients (parcor coefficients)
ef = signal.astype(complex) # forward prediction error
eb = signal.astype(complex) # backward prediction error
temp = 1.0
# Main recursion
for k in range(0, order):
# calculate the next order reflection coefficient
numerator = sum([ef[j] * eb[j - 1].conjugate() for j in range(k + 1, N)])
denominator = temp * denominator - abs(ef[k]) ** 2 - abs(eb[N - 1]) ** 2
kp = -2.0 * numerator / denominator
# Update the prediction error
temp = 1.0 - abs(kp) ** 2.0
new_rho = temp * rho
if criteria is not None:
# k=k+1 because order goes from 1 to P whereas k starts at 0.
residual_new = _criteria(
criteria=criteria, N=N, k=k + 1, rho=new_rho, corrected=corrected
)
if k == 0:
residual_old = 2.0 * abs(residual_new)
# Stop as criteria has reached
if residual_new > residual_old:
break
# This should be after the criteria
residual_old = residual_new
rho = new_rho
if rho <= 0:
raise ValueError(
f"Found a negative value (expected positive strictly) {rho}. Decrease the order."
)
ar = np.resize(ar, ar.size + 1)
ar[k] = kp
if k == 0:
for j in range(N - 1, k, -1):
ef_previous = ef[j] # previous value
ef[j] = ef_previous + kp * eb[j - 1] # Eq. (8.7)
eb[j] = eb[j - 1] + kp.conjugate() * ef_previous
else:
# Update the AR coeff
khalf = (k + 1) // 2 # khalf must be an integer
for j in range(0, khalf):
ar_previous = ar[j] # previous value
ar[j] = ar_previous + kp * ar[k - j - 1].conjugate() # Eq. (8.2)
if j != k - j - 1:
ar[k - j - 1] = ar[k - j - 1] + kp * ar_previous.conjugate() # Eq. (8.2)
# Update the forward and backward prediction errors
for j in range(N - 1, k, -1):
ef_previous = ef[j] # previous value
ef[j] = ef_previous + kp * eb[j - 1] # Eq. (8.7)
eb[j] = eb[j - 1] + kp.conjugate() * ef_previous
# save the reflection coefficient
ref = np.resize(ref, ref.size + 1)
ref[k] = kp
return ar, rho, ref
# =============================================================================
# Utilities
# =============================================================================
def _criteria(criteria=None, N=None, k=None, rho=None, corrected=True):
"""Criteria to automatically select order in parametric PSD.
AIC, AICc, KIC and AKICc are based on information theory. They attempt to balance the complexity
(or length) of the model against how well the model fits the data.
AIC and KIC are biased estimates of the asymmetric and the symmetric Kullback-Leibler divergence
respectively. AICc and AKICc attempt to correct the bias.
Parameters
----------
criteria : str
        The criteria to be used. The criteria can be one of the following: AIC (Akaike Information Criterion),
        KIC (Kullback Information Criterion), FPE (Final Prediction Error Criterion), MDL (Minimum
Description Length), CAT (Criterion Autoregressive Transfer Function), AIC order-selection using
eigen values, MDL order-selection using eigen values.
N : int
The sample size of the signal.
k : int
The AR order.
rho : int
The rho at order k.
corrected : bool
Specify for AIC and KIC methods.
Returns
-------
residual : Union[int, float]
Residuals to select the optimal order.
"""
if criteria == "AIC":
if corrected is True:
residual = np.log(rho) + 2.0 * (k + 1) / (N - k - 2)
else:
residual = N * np.log(np.array(rho)) + 2.0 * (np.array(k) + 1)
elif criteria == "KIC":
if corrected is True:
residual = (
np.log(rho) + k / N / (N - k) + (3.0 - (k + 2.0) / N) * (k + 1.0) / (N - k - 2.0)
)
else:
residual = np.log(rho) + 3.0 * (k + 1.0) / float(N)
elif criteria == "FPE":
fpe = rho * (N + k + 1.0) / (N - k - 1)
return fpe
elif criteria == "MDL":
mdl = N * np.log(rho) + k * np.log(N)
return mdl
return residual
def _signal_psd_from_arma(
ar=None, ma=None, rho=1.0, sampling_rate=1000, nfft=None, side="one-sided"
):
if ar is None and ma is None:
raise ValueError("Either AR or MA model must be provided")
psd = np.zeros(nfft, dtype=complex)
if ar is not None:
ip = len(ar)
den = np.zeros(nfft, dtype=complex)
den[0] = 1.0 + 0j
for k in range(0, ip):
den[k + 1] = ar[k]
denf = np.fft.fft(den, nfft)
if ma is not None:
iq = len(ma)
num = np.zeros(nfft, dtype=complex)
num[0] = 1.0 + 0j
for k in range(0, iq):
num[k + 1] = ma[k]
numf = np.fft.fft(num, nfft)
if ar is not None and ma is not None:
psd = rho / sampling_rate * abs(numf) ** 2.0 / abs(denf) ** 2.0
elif ar is not None:
psd = rho / sampling_rate / abs(denf) ** 2.0
elif ma is not None:
psd = rho / sampling_rate * abs(numf) ** 2.0
psd = np.real(psd) # The PSD is a twosided PSD.
# convert to one-sided
if side == "one-sided":
assert len(psd) % 2 == 0
one_side_psd = np.array(psd[0 : len(psd) // 2 + 1]) * 2.0
one_side_psd[0] /= 2.0
# one_side_psd[-1] = psd[-1]
psd = one_side_psd
# convert to centerdc
elif side == "centerdc":
first_half = psd[0 : len(psd) // 2]
second_half = psd[len(psd) // 2 :]
rotate_second_half = second_half[-1:] + second_half[:-1]
center_psd = np.concatenate((rotate_second_half, first_half))
center_psd[0] = psd[-1]
psd = center_psd
return psd
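# Illustrative usage sketch (added for clarity; not part of the original module).
# It shows how the private Burg helpers above fit together: estimate an AR model
# and convert it into a one-sided PSD. The signal below is a made-up example.
if __name__ == "__main__":
    _fs = 1000
    _t = np.arange(2 * _fs) / _fs
    _demo_signal = np.sin(2 * np.pi * 10 * _t) + 0.1 * np.random.randn(len(_t))
    _ar, _rho, _ = _signal_arma_burg(_demo_signal, order=16, criteria="KIC")
    _psd = _signal_psd_from_arma(ar=_ar, rho=_rho, sampling_rate=_fs, nfft=512, side="one-sided")
    print(len(_psd))  # 257 bins covering 0 to the Nyquist frequency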
| 33.609434 | 119 | 0.552237 |
d66d14c08c2c55f6f07587636a24b0eb8571e57b | 4,892 | py | Python | TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/metadata/base.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/metadata/base.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/metadata/base.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | 1 | 2021-06-20T19:28:37.000Z | 2021-06-20T19:28:37.000Z | import logging
import re
from typing import Container, Iterator, List, Optional, Union
from pip._vendor.packaging.version import LegacyVersion, Version
from pip._internal.utils.misc import stdlib_pkgs # TODO: Move definition here.
DistributionVersion = Union[LegacyVersion, Version]
logger = logging.getLogger(__name__)
class BaseDistribution:
@property
def location(self):
# type: () -> Optional[str]
"""Where the distribution is loaded from.
A string value is not necessarily a filesystem path, since distributions
can be loaded from other sources, e.g. arbitrary zip archives. ``None``
means the distribution is created in-memory.
"""
raise NotImplementedError()
@property
def metadata_version(self):
# type: () -> Optional[str]
"""Value of "Metadata-Version:" in the distribution, if available."""
raise NotImplementedError()
@property
def canonical_name(self):
# type: () -> str
raise NotImplementedError()
@property
def version(self):
# type: () -> DistributionVersion
raise NotImplementedError()
@property
def installer(self):
# type: () -> str
raise NotImplementedError()
@property
def editable(self):
# type: () -> bool
raise NotImplementedError()
@property
def local(self):
# type: () -> bool
raise NotImplementedError()
@property
def in_usersite(self):
# type: () -> bool
raise NotImplementedError()
class BaseEnvironment:
"""An environment containing distributions to introspect."""
@classmethod
def default(cls):
# type: () -> BaseEnvironment
raise NotImplementedError()
@classmethod
def from_paths(cls, paths):
# type: (Optional[List[str]]) -> BaseEnvironment
raise NotImplementedError()
def get_distribution(self, name):
# type: (str) -> Optional[BaseDistribution]
"""Given a requirement name, return the installed distributions."""
raise NotImplementedError()
def _iter_distributions(self):
# type: () -> Iterator[BaseDistribution]
"""Iterate through installed distributions.
This function should be implemented by subclass, but never called
directly. Use the public ``iter_distribution()`` instead, which
implements additional logic to make sure the distributions are valid.
"""
raise NotImplementedError()
def iter_distributions(self):
# type: () -> Iterator[BaseDistribution]
"""Iterate through installed distributions."""
for dist in self._iter_distributions():
# Make sure the distribution actually comes from a valid Python
# packaging distribution. Pip's AdjacentTempDirectory leaves folders
# e.g. ``~atplotlib.dist-info`` if cleanup was interrupted. The
# valid project name pattern is taken from PEP 508.
project_name_valid = re.match(
r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$",
dist.canonical_name,
flags=re.IGNORECASE,
)
if not project_name_valid:
logger.warning(
"Ignoring invalid distribution %s (%s)",
dist.canonical_name,
dist.location,
)
continue
yield dist
def iter_installed_distributions(
self,
local_only=True, # type: bool
skip=stdlib_pkgs, # type: Container[str]
include_editables=True, # type: bool
editables_only=False, # type: bool
user_only=False, # type: bool
):
# type: (...) -> Iterator[BaseDistribution]
"""Return a list of installed distributions.
:param local_only: If True (default), only return installations
local to the current virtualenv, if in a virtualenv.
:param skip: An iterable of canonicalized project names to ignore;
defaults to ``stdlib_pkgs``.
:param include_editables: If False, don't report editables.
:param editables_only: If True, only report editables.
:param user_only: If True, only report installations in the user
site directory.
"""
it = self.iter_distributions()
if local_only:
it = (d for d in it if d.local)
if not include_editables:
it = (d for d in it if not d.editable)
if editables_only:
it = (d for d in it if d.editable)
if user_only:
it = (d for d in it if d.in_usersite)
return (d for d in it if d.canonical_name not in skip)
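# Illustrative sketch (added for clarity; not part of pip's codebase): a minimal,
# hypothetical in-memory pair of subclasses showing how the abstract interface is
# meant to be filled in and how iter_installed_distributions() builds on it.
class _FakeDistribution(BaseDistribution):
    """Hypothetical distribution exposing just enough for the filtering above."""

    def __init__(self, name, version="1.0"):
        # type: (str, str) -> None
        self._name = name
        self._version = version

    @property
    def location(self):
        # type: () -> Optional[str]
        return None

    @property
    def canonical_name(self):
        # type: () -> str
        return self._name

    @property
    def version(self):
        # type: () -> DistributionVersion
        return Version(self._version)

    @property
    def editable(self):
        # type: () -> bool
        return False

    @property
    def local(self):
        # type: () -> bool
        return True

    @property
    def in_usersite(self):
        # type: () -> bool
        return False


class _FakeEnvironment(BaseEnvironment):
    """Hypothetical environment over a fixed list of distributions."""

    def __init__(self, dists):
        # type: (List[BaseDistribution]) -> None
        self._dists = dists

    def _iter_distributions(self):
        # type: () -> Iterator[BaseDistribution]
        return iter(self._dists)


# Example: [d.canonical_name for d in
#           _FakeEnvironment([_FakeDistribution("requests")]).iter_installed_distributions()]
# evaluates to ["requests"].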
| 34.20979 | 81 | 0.595666 |
920b70716857fb9de4925b90dbef4e4df46c7401 | 4,363 | py | Python | epa_graphs.py | tcq1/epa | a54d5142d510609709de61646cc4fd790238ae07 | [
"MIT"
] | null | null | null | epa_graphs.py | tcq1/epa | a54d5142d510609709de61646cc4fd790238ae07 | [
"MIT"
] | null | null | null | epa_graphs.py | tcq1/epa | a54d5142d510609709de61646cc4fd790238ae07 | [
"MIT"
] | null | null | null | import random
import copy
import collections
def choose_nodes(choosen, target_nodes, edges):
"""
    choose `edges` nodes from `target_nodes`, without the nodes already in `choosen` and without duplicates
    Function is side effect free
    @param choosen list of already chosen elements
    @param target_nodes list to choose elements from
    @param edges amount of edges to pick
    @return yields the picked nodes
"""
target_nodes = copy.deepcopy(target_nodes)
for elem in choosen:
target_nodes.remove(elem)
for _ in range(edges):
node = random.choice(target_nodes)
target_nodes.remove(node)
yield node
def gen_directed_graph(nodes = 1000, edge_factor = 2, costs = (1,1)):
"""
    generates a directed graph with `nodes` nodes and `edge_factor` edges per node
    @param nodes amount of nodes
    @param edge_factor edges per node
    @param costs expects a tuple with (mu, sigma) for the normal distribution which generates the cost
    @return dict in the form dict[node] = [(node_1, cost), (node_2, cost), (node_3, cost)]
"""
graph = {}
node_list = list(range(nodes))
for node_1 in range(nodes):
graph[node_1] = []
for node_2 in choose_nodes([node_1], node_list, edge_factor):
cost = random.gauss(costs[0],costs[1])
graph[node_1].append((node_2,cost))
return graph
def gen_directed_graph_rand(nodes = 1000, edge_factor = 3, costs = (1,1)):
"""
    generates a directed graph with `nodes` nodes and up to `edge_factor` edges per node
    @param nodes amount of nodes
    @param edge_factor up to `edge_factor` edges per node
    @param costs expects a tuple with (mu, sigma) for the normal distribution which generates the cost
    @return dict in the form dict[node] = [(node_1, cost), (node_2, cost), (node_3, cost)]
"""
graph = {}
node_list = list(range(nodes))
for node_1 in range(nodes):
graph[node_1] = []
edges = random.randint(1,edge_factor)
for node_2 in choose_nodes([node_1], node_list, edges):
cost = random.gauss(costs[0],costs[1])
graph[node_1].append((node_2,cost))
return graph
def gen_undirected_graph(nodes = 1000, edge_factor = 2, costs = (1,1)):
"""
    generates an undirected graph with `nodes` nodes and around `edge_factor` edges per node
    @param nodes amount of nodes
    @param edge_factor approximate edges per node, might happen that some nodes have more edges
    @param costs expects a tuple with (mu, sigma) for the normal distribution which generates the cost
    @return dict in the form dict[node] = [(node_1, cost), (node_2, cost), (node_3, cost)]
"""
graph = collections.defaultdict(list)
node_list = list(range(nodes))
for node_1 in range(nodes):
if node_1 not in graph:
graph[node_1] = []
edges = edge_factor - len(graph[node_1])
choosen = list(map(lambda x: x[0], graph[node_1]))
choosen.append(node_1)
for node_2 in choose_nodes(choosen, node_list, edges):
cost = random.gauss(costs[0],costs[1])
graph[node_1].append((node_2,cost))
graph[node_2].append((node_1,cost))
return graph
def gen_undirected_graph_rand(nodes = 1000, edge_factor = 3, costs = (1,1)):
"""
    generates an undirected graph with `nodes` nodes and up to `edge_factor` edges per node
    @param nodes amount of nodes
    @param edge_factor up to `edge_factor` edges per node
    @param costs expects a tuple with (mu, sigma) for the normal distribution which generates the cost
    @return dict in the form dict[node] = [(node_1, cost), (node_2, cost), (node_3, cost)]
"""
graph = collections.defaultdict(list)
node_list = list(range(nodes))
for node_1 in range(nodes):
if node_1 not in graph:
graph[node_1] = []
edges = random.randint(1,edge_factor)
edges = edges - len(graph[node_1])
choosen = list(map(lambda x: x[0], graph[node_1]))
choosen.append(node_1)
for node_2 in choose_nodes(choosen, node_list, edges):
cost = random.gauss(costs[0],costs[1])
graph[node_1].append((node_2,cost))
graph[node_2].append((node_1,cost))
return graph
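# Illustrative usage sketch (added for clarity; not part of the original module).
# Generates a small directed and undirected graph and prints the adjacency list
# of node 0; the sizes below are arbitrary example values.
if __name__ == "__main__":
    directed = gen_directed_graph(nodes=10, edge_factor=2, costs=(1, 0.1))
    undirected = gen_undirected_graph_rand(nodes=10, edge_factor=3, costs=(1, 0.1))
    print(directed[0])    # exactly two outgoing edges, e.g. [(3, 1.05...), (7, 0.92...)]
    print(undirected[0])  # edges are mirrored: each neighbour also lists (0, cost)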
| 36.974576 | 100 | 0.635113 |
0c8fc76f29eb2ed9a9436f0eccf85a4c93828ac9 | 8,357 | py | Python | main_eval_saved_model.py | azagsam/jiant_slovene | cd7230319ebcd99d845c44baf4caa410bd9acc49 | [
"MIT"
] | null | null | null | main_eval_saved_model.py | azagsam/jiant_slovene | cd7230319ebcd99d845c44baf4caa410bd9acc49 | [
"MIT"
] | null | null | null | main_eval_saved_model.py | azagsam/jiant_slovene | cd7230319ebcd99d845c44baf4caa410bd9acc49 | [
"MIT"
] | null | null | null | import jiant.proj.main.tokenize_and_cache as tokenize_and_cache
import jiant.proj.main.export_model as export_model
import jiant.proj.main.scripts.configurator as configurator
import jiant.proj.main.runscript as main_runscript
import jiant.utils.python.io as py_io
import jiant.utils.display as display
import os
import benchmark_submission_formatter
from distutils.dir_util import copy_tree
import shutil
import json
from make_plots import create
# Dict mapping model nicknames to the huggingface name of the model or a local directory containing the model (sloberta)
pretrained_models = {
"sloberta": "./models/pretrained/sloberta",
"crosloengual": "EMBEDDIA/crosloengual-bert",
"multilingual": "bert-base-multilingual-cased",
"roberta": "roberta-base",
}
pretrained = pretrained_models["crosloengual"] # huggingface name or local path of the transformers model
path_to_pretrained_weights = "/home/azagar/myfiles/jiant_slovene_v2/trained_models/crosloengual_1/crosloengual__slobench__cb__epochs_10__train_batch_1__eval_batch_1__num_eval_steps_0/crosloengual/slobench/best_model.p"
output_name = list(pretrained_models.keys())[list(pretrained_models.values()).index(pretrained)] # name of the model
name = "slobench" # name of the directory containing datasets and config files
tasks = ["cb"] # list of tasks - can also be ["boolq", "cb", "copa", "multirc", "rte", "wsc"] for multitask
# name of output directory
if len(tasks) == 1:
task_name = tasks[0]
else:
task_name = "multitask_"
for i in tasks:
task_name = f"{task_name}{i}_"
# Here is where we set the batch sizes and the number of epochs, and declare whether we want to use a GPU or not. In
# the phases list we declare which phases the tokenizer should tokenize.
train_batch_size = 4
eval_batch_size = 8
epochs = 1
num_gpus = 1
phases = ["train", "val", "test", "test_with_answers"]
# Here we set whether we want to plot a graph based on validation results and loss, and the number of steps after
# which we want to evaluate. If graph_per_epoch is True then this script will calculate the number of steps in one
# epoch and run a validation on every epoch.
eval_every_epoch = True
graph_steps = 1
graph_per_epoch = True
save_every_epoch = True # set true if you want to save model after epoch eval
epochs_to_save = [] # if we do eval on every epoch, we can choose after which epochs we want to save the model during training
# if we want to save after all epochs, set the list to [i+1 for i in range(epochs)]. Note that one save equals roughly 0.5GB
# Some tokenization parameters
max_seq_length = 256
smart_truncate = True
do_iter = True
# Runscript parameters.
learning_rate = 1e-5
optimizer_type = "adam"
adam_epsilon = 1e-8
max_grad_norm = 1.0
eval_every_steps = 0
no_improvements_for_n_evals = 0
eval_subset_num = None
model_load_mode = "partial" # If we wish to load saved model from jiant we have to set model_load_mode to "partial"
do_train = False
do_val = False
validate_test = True
force_overwrite = True
write_test_preds = True
write_val_preds = False
write_test_with_answers_preds = False
do_save = False
do_save_best = True
do_save_last = False
load_best_model = True
# This will export the hugging face model to directory "./models/name_of_model"
export_model.export_model(
hf_pretrained_model_name_or_path=pretrained,
output_base_path=f"./models/{output_name}",
)
# Tokenize and cache each task
for task_name in tasks:
tokenize_and_cache.main(tokenize_and_cache.RunConfiguration(
task_config_path=f"./tasks/configs/{name}/{task_name}_config.json",
hf_pretrained_model_name_or_path=pretrained,
output_dir=f"./cache/{task_name}",
phases=phases,
do_iter=do_iter,
smart_truncate=smart_truncate,
max_seq_length=max_seq_length,
))
# Make configuration
jiant_run_config = configurator.SimpleAPIMultiTaskConfigurator(
task_config_base_path=f"./tasks/configs/{name}",
task_cache_base_path="./cache",
train_task_name_list=tasks,
val_task_name_list=tasks,
test_task_name_list=tasks,
test_with_answers_task_name_list=tasks,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
epochs=epochs,
num_gpus=num_gpus,
eval_subset_num=eval_subset_num,
).create_config()
# Make directories and place configuration .json file
os.makedirs("./run_configs/", exist_ok=True)
os.makedirs(f"./runs/{output_name}/{name}", exist_ok=True)
py_io.write_json(jiant_run_config, "./run_configs/jiant_run_config.json")
display.show_json(jiant_run_config)
# If we want to make evaluation after each epoch we overwrite graph_steps to be equal as the number of steps in one epoch
if graph_per_epoch:
with open("./run_configs/jiant_run_config.json", "r") as json_file:
json_f = json.load(json_file)
max_steps = json_f["global_train_config"]["max_steps"]
graph_steps = max_steps // epochs
with open("./run_configs/jiant_run_config.json", "r") as json_file:
json_f = json.load(json_file)
max_steps = json_f["global_train_config"]["max_steps"]
epoch_steps = max_steps // epochs
# Run configuration
run_args = main_runscript.RunConfiguration(
jiant_task_container_config_path="./run_configs/jiant_run_config.json",
output_dir=f"./runs/{output_name}/{name}",
model_load_mode=model_load_mode,
hf_pretrained_model_name_or_path=pretrained,
model_path=path_to_pretrained_weights,
model_config_path=f"./models/{output_name}/model/config.json",
learning_rate=learning_rate,
eval_every_steps=eval_every_steps,
do_train=do_train,
do_val=do_val,
do_test_with_answers=validate_test,
force_overwrite=force_overwrite,
write_test_preds=write_test_preds,
write_val_preds=write_val_preds,
write_test_with_answers_preds=write_test_with_answers_preds,
do_save_best=do_save_best,
do_save_last=do_save_last,
do_save=do_save,
no_cuda=True if not num_gpus else False,
no_improvements_for_n_evals=no_improvements_for_n_evals,
adam_epsilon=adam_epsilon,
max_grad_norm=max_grad_norm,
optimizer_type=optimizer_type,
load_best_model=load_best_model,
graph_steps=graph_steps,
graph_per_epoch=graph_per_epoch,
epoch_steps=epoch_steps,
epochs_to_save=epochs_to_save,
save_every_epoch=save_every_epoch
)
main_runscript.run_loop(run_args)
if do_train and eval_every_epoch:
create(tasks=tasks, path_to_look="./runs", num_epochs=epochs, model_name=output_name, epoch_length=epoch_steps)
# if we want to write predictions to file on test dataset
if "test" in phases and write_test_preds:
benchmark_submission_formatter.results(
benchmark="SUPERGLUE",
input_base_path=f"./runs/{output_name}/{name}",
output_path=f"./runs/{output_name}/{name}",
task_names=tasks,
preds="test_preds.p",
regime="test",
)
# if we want to write predictions to file on test_with_answers dataset
if "test_with_answers" in phases and write_val_preds:
benchmark_submission_formatter.results(
benchmark="SUPERGLUE",
input_base_path=f"./runs/{output_name}/{name}",
output_path=f"./runs/{output_name}/{name}",
task_names=tasks,
preds="test_with_answers_preds.p",
regime="test_with_answers",
)
# if we want to write predictions to file on val dataset
if "val" in phases and do_val:
benchmark_submission_formatter.results(
benchmark="SUPERGLUE",
input_base_path=f"./runs/{output_name}/{name}",
output_path=f"./runs/{output_name}/{name}",
task_names=tasks,
preds="val_preds.p",
regime="val",
)
# The script writes its output to the ./runs directory and will overwrite it when run multiple times. The output can
# be copied to a new backup directory by setting do_backup to True.
do_backup = True
if do_backup:
bak_folder = f"./evaluated_models/{output_name}__{name}__{task_name}__epochs_{epochs}__train_batch_{train_batch_size}__eval_batch_{eval_batch_size}__num_eval_steps_{eval_every_steps}"
if os.path.isdir(bak_folder):
shutil.rmtree(bak_folder)
os.makedirs(bak_folder)
os.makedirs(f"{bak_folder}/run_configs")
copy_tree("./runs", bak_folder)
copy_tree("./run_configs", f"{bak_folder}/run_configs")
shutil.rmtree("./runs")
shutil.rmtree("./run_configs")
shutil.rmtree(f"./models/{output_name}")
shutil.rmtree("./cache")
| 36.177489 | 218 | 0.754816 |
22730a479f3a0c7330d12cfdfe1f0186b2f22470 | 467 | py | Python | plotly/validators/carpet/aaxis/_tickangle.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/carpet/aaxis/_tickangle.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/carpet/aaxis/_tickangle.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
def __init__(
self, plotly_name='tickangle', parent_name='carpet.aaxis', **kwargs
):
super(TickangleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| 29.1875 | 75 | 0.638116 |
9ecb660de867c1ec9aa20a79ff9ef86602d09dca | 1,737 | py | Python | hebcal/parshios.py | TheBestMoshe/hebcal | 7d77d2137684342b91fa7402cc357316f23dcfa1 | [
"MIT"
] | 12 | 2018-10-03T22:49:15.000Z | 2021-08-13T03:52:05.000Z | hebcal/parshios.py | TheBestMoshe/hebcal | 7d77d2137684342b91fa7402cc357316f23dcfa1 | [
"MIT"
] | 3 | 2019-06-27T19:22:03.000Z | 2020-04-26T13:15:14.000Z | hebcal/parshios.py | TheBestMoshe/hebcal | 7d77d2137684342b91fa7402cc357316f23dcfa1 | [
"MIT"
] | 5 | 2019-02-15T02:26:19.000Z | 2021-11-07T21:57:52.000Z | import pyluach
from pyluach import parshios
american_ashkinazik_parshios = [
'Beraishis', 'Noach', "Lech L'cha", 'Vayera', 'Chayei Sarah',
'Toldos', 'Vayetzei', 'Vayishlach', 'Vayeshev', 'Miketz',
'Vayigash', 'Vayechi', 'Shemos', "Va'era", 'Bo', 'Beshalach',
'Yisro', 'Mishpatim', 'Teruma', 'Tetzave', 'Ki Sisa', 'Vayakhel',
'Pekudei', 'Vayikra', 'Tzav', 'Shemini', 'Tazria', 'Metzora',
'Acharei Mos', 'Kedoshim', 'Emor', 'Behar', 'Bechukosai',
'Bamidbar', 'Naso', "Beha'aloscha", "Shelach", 'Korach', 'Chukas',
'Balak', 'Pinchas', 'Matos', "Ma'sei", 'Devarim', "Va'eschanan",
'Eikev', "R'ey", 'Shoftim', 'Ki Setzei', 'Ki Savo', 'Netzavim',
'Vayelech', 'Haazinu', "V'zos Habrocha"
]
class Parshios:
def __init__(self, info):
self.info = info
def _pyluach_hebrew_date(self):
return pyluach.dates.HebrewDate(self.info.hebrew_year(),
self.info.hebrew_month(),
self.info.hebrew_day())
def _parsha_list(self):
# Set the pronunciation
# If an invalid pronunciation is provided, default to
# "american_ashkinzik".
if self.info.pronunciation == 'american_ashkinazik':
return american_ashkinazik_parshios
else:
return american_ashkinazik_parshios
def parsha(self):
parsha_number = parshios.getparsha(self._pyluach_hebrew_date())
if parsha_number is not None:
return parsha_number[0]
else:
return None
def parsha_string(self):
return self._parsha_list()[self.parsha()]
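# Illustrative usage sketch (added for clarity; not part of the original module).
# Parshios expects an ``info`` object exposing hebrew_year()/hebrew_month()/
# hebrew_day() and a ``pronunciation`` attribute; the stand-in below is
# hypothetical and only meant to show that interface.
if __name__ == "__main__":
    class _FakeInfo:
        pronunciation = 'american_ashkinazik'

        def hebrew_year(self):
            return 5781

        def hebrew_month(self):
            return 7  # Tishrei in pyluach's numbering

        def hebrew_day(self):
            return 8

    demo = Parshios(_FakeInfo())
    print(demo.parsha())         # index of that week's reading in the list above, or None
    print(demo.parsha_string())  # its transliterated name, e.g. "Haazinu"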
| 37.76087 | 78 | 0.568221 |
2b6fa7b23e2746bff5d99f105e8be57c232700eb | 125 | py | Python | conda-build/run_test.py | jakirkham/anaconda-recipes | 74fb2280662a022f2d12a7744b4823cfa5e236df | [
"BSD-3-Clause"
] | 130 | 2015-07-28T03:41:21.000Z | 2022-03-16T03:07:41.000Z | conda-build/run_test.py | jakirkham/anaconda-recipes | 74fb2280662a022f2d12a7744b4823cfa5e236df | [
"BSD-3-Clause"
] | 119 | 2015-08-01T00:54:06.000Z | 2021-01-05T13:00:46.000Z | conda-build/run_test.py | jakirkham/anaconda-recipes | 74fb2280662a022f2d12a7744b4823cfa5e236df | [
"BSD-3-Clause"
] | 72 | 2015-07-29T02:35:56.000Z | 2022-02-26T14:31:15.000Z | import conda_build
print('conda_build.__version__: %s' % conda_build.__version__)
assert conda_build.__version__ == '3.0.9'
| 25 | 62 | 0.784 |
f753645e8a4281453a74dfac5eb98935c2eac341 | 1,768 | py | Python | mcrouter/test/test_service_info.py | kiaplayer/mcrouter | c54233f1cd57dc9f541bdbff7ff485775ff289fc | [
"MIT"
] | 2,205 | 2015-01-03T02:56:53.000Z | 2022-03-31T09:24:08.000Z | mcrouter/test/test_service_info.py | kiaplayer/mcrouter | c54233f1cd57dc9f541bdbff7ff485775ff289fc | [
"MIT"
] | 327 | 2015-01-07T00:59:57.000Z | 2022-03-31T16:03:58.000Z | mcrouter/test/test_service_info.py | kiaplayer/mcrouter | c54233f1cd57dc9f541bdbff7ff485775ff289fc | [
"MIT"
] | 479 | 2015-01-07T02:06:42.000Z | 2022-03-24T11:44:31.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestServiceInfo(McrouterTestCase):
config = './mcrouter/test/test_service_info.json'
def setUp(self):
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
self.mcrouter = self.add_mcrouter(self.config)
def test_route_format(self):
ports = [self.mc1.port, self.mc2.port]
# Test a key 'abc,def' with a comma
route = self.mcrouter.get("__mcrouter__.route(set,abc,def)")
parts = route.split("\r\n")
self.assertEqual(len(parts), 2)
for i, part in enumerate(parts):
host, port = part.split(":")
self.assertEqual(host, "127.0.0.1")
self.assertEqual(port, str(ports[i]))
def test_hostid(self):
hostid = self.mcrouter.get("__mcrouter__.hostid")
self.assertEqual(str(int(hostid)), hostid)
self.assertEqual(hostid, self.mcrouter.get("__mcrouter__.hostid"))
def _check_route_handles(self, op):
# Test a key 'abc,def' with a comma
cmd = "__mcrouter__.route_handles({},abc,def)".format(op)
rh = self.mcrouter.get(cmd)
self.assertTrue("root" in rh)
self.assertTrue("127.0.0.1" in rh)
self.assertTrue(str(self.mc1.port) in rh)
self.assertTrue(str(self.mc2.port) in rh)
def test_route_handles(self):
self._check_route_handles("get")
self._check_route_handles("set")
self._check_route_handles("delete")
| 36.833333 | 74 | 0.657805 |
e658e3cc176d7bfc3fc1bcec815f6ab2a8f8face | 11,520 | py | Python | panic/gui/phonebook.py | kapot65/PANIC | 3550af7a33190be9b0dfc65e5d7544910481b001 | [
"CC-BY-3.0"
] | 1 | 2019-09-20T08:10:32.000Z | 2019-09-20T08:10:32.000Z | panic/gui/phonebook.py | MaxIV-KitsControls/app-cells-panicgui | 58c5610970135e6fbb7b715094514b33bc1bc9c1 | [
"CC-BY-3.0"
] | null | null | null | panic/gui/phonebook.py | MaxIV-KitsControls/app-cells-panicgui | 58c5610970135e6fbb7b715094514b33bc1bc9c1 | [
"CC-BY-3.0"
] | null | null | null | """
This file belongs to the PANIC Alarm Suite,
developed by ALBA Synchrotron for Tango Control System
GPL Licensed
"""
import sys, panic, traceback
from utils import Qt, QtCore, QtGui
from utils import getThemeIcon
from utils import iValidatedWidget
class PhoneBook(QtGui.QWidget):
def __init__(self,parent=None,container=None):
QtGui.QWidget.__init__(self,parent)
self._bwi = PhoneBookUi()
self._bwi.phonebookSetupUi(self)
self._kontainer = container
self.buildList()
def buildList(self):
self._bwi.buildList()
def show(self):
QtGui.QWidget.show(self)
class PhoneBookEntry(iValidatedWidget,object):
def __init__(self, phoneBook):
self.pB = phoneBook
self.api = self.pB.api
def addSetupUi(self, addForm):
self._Form=addForm
addForm.setObjectName("addForm")
self.addGridLayout = QtGui.QGridLayout(addForm)
self.addGridLayout.setObjectName("addGridLayout")
self.sectionLabel = QtGui.QLabel()
self.sectionLabel.setObjectName("sectionLabel")
self.sectionLabel.setText("section:")
#self.addGridLayout.addWidget(self.sectionLabel, 0, 0, 1, 1)
self.sectionCombo = QtGui.QComboBox()
self.sectionCombo.setObjectName("sectionCombo")
sections=['','CONTROLS', 'VACUUM', 'FRONTENDS', 'BEAMLINES', 'On Call']
for s in sections:
self.sectionCombo.addItem(s)
#self.addGridLayout.addWidget(self.sectionCombo, 0, 1, 1, 1)
self.nameLabel = QtGui.QLabel(addForm)
self.nameLabel.setObjectName("emailLabel")
self.nameLabel.setText("name:")
self.addGridLayout.addWidget(self.nameLabel, 1, 0, 1, 1)
self.nameLine = QtGui.QLineEdit(addForm)
self.nameLine.setObjectName("nameLine")
self.addGridLayout.addWidget(self.nameLine, 1, 1, 1, 1)
self.emailLabel = QtGui.QLabel(addForm)
self.emailLabel.setObjectName("emailLabel")
self.emailLabel.setText("email:")
self.addGridLayout.addWidget(self.emailLabel, 2, 0, 1, 1)
self.emailLine = QtGui.QLineEdit(addForm)
self.emailLine.setObjectName("emailLine")
self.addGridLayout.addWidget(self.emailLine, 2, 1, 1, 1)
self.smsHorizontalLayout = QtGui.QHBoxLayout()
self.smsHorizontalLayout.setObjectName("smsHorizontalLayout")
self.smsCheckBox = QtGui.QCheckBox()
self.smsCheckBox.setObjectName("smsCheckBox")
self.smsHorizontalLayout.addWidget(self.smsCheckBox)
self.smsLabel = QtGui.QLabel(addForm)
self.smsLabel.setObjectName("smsLabel")
self.smsLabel.setText("sms ?")
self.smsLabel.setEnabled(False)
self.smsHorizontalLayout.addWidget(self.smsLabel)
self.smsLine = QtGui.QLineEdit(addForm)
self.smsLine.setObjectName("emailLine")
self.smsHorizontalLayout.addWidget(self.smsLine)
self.smsLine.setEnabled(False)
self.addGridLayout.addLayout(self.smsHorizontalLayout, 3, 0, 1, 2)
self.addHorizontalLayout = QtGui.QHBoxLayout()
self.addHorizontalLayout.setObjectName("addHorizontalLayout")
self.addButton = QtGui.QPushButton(addForm)
self.addButton.setObjectName("addButton")
self.addHorizontalLayout.addWidget(self.addButton)
self.cancelButton = QtGui.QPushButton(addForm)
self.cancelButton.setObjectName("cancelButton")
self.addHorizontalLayout.addWidget(self.cancelButton)
self.addGridLayout.addLayout(self.addHorizontalLayout, 4, 0, 1, 2)
self.addRetranslateUi(addForm)
QtCore.QMetaObject.connectSlotsByName(addForm)
def addRetranslateUi(self, addForm):
addForm.setWindowTitle(QtGui.QApplication.translate("addForm", "Add Recipient", None, QtGui.QApplication.UnicodeUTF8))
self.addButton.setText(QtGui.QApplication.translate("addForm", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.addButton.setIcon(getThemeIcon("list-add"))
self.addButton.setToolTip("Add person to the list")
self.cancelButton.setText(QtGui.QApplication.translate("addForm", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
self.cancelButton.setIcon(getThemeIcon("process-stop"))
self.cancelButton.setToolTip("Cancel")
addForm.resize(250, 150)
QtCore.QObject.connect(self.addButton,QtCore.SIGNAL("clicked()"), self.onAdd)
QtCore.QObject.connect(self.cancelButton,QtCore.SIGNAL("clicked()"), self.onCancel)
QtCore.QObject.connect(self.smsCheckBox,QtCore.SIGNAL("stateChanged(int)"), self.onCheckStateChanged)
def onAdd(self):
try:
section = str(self.sectionCombo.currentText())
name = str(self.nameLine.text()).upper()
name = '%'+name if not name.startswith('%') else name
email = str(self.emailLine.text())
if not self.validate('Phonebook.onAdd(%s,%s,%s)'%(section,name,email)):
return
            number = '' # ensure defined even when the SMS box is unchecked
            if self.smsCheckBox.isChecked():
number = str(self.smsLine.text())
number = 'SMS:'+number if not number.startswith('SMS:') else number
if number: email+=','+number
if (not name):
message='Type name\n'
raise Exception(message)
elif (not email and not number):
message='Type email address\n'
raise Exception(message)
else:
print 'onAdd.edit_phoneBook(%s,%s,(%s))'%(name,email,section)
self.api.edit_phonebook(name,email,section)
except Exception:
Qt.QMessageBox.critical(None,"Error", traceback.format_exc())
return
self.onCancel()
self.pB.onRefresh()
Qt.QMessageBox.information(None,"Phonebook","%s added succesfully!"%name)
def onCancel(self):
self._Form.close()
def onCheckStateChanged(self, state):
if state==0:
self.smsLabel.setEnabled(False)
self.smsLine.setEnabled(False)
else:
self.smsLabel.setEnabled(True)
self.smsLine.setEnabled(True)
class PhoneBookUi(object):
api=None
def __init__(self,api=None):
type(self).api = api or self.api or panic.current()
object.__init__(self)
def phonebookSetupUi(self, Form):
Form.setObjectName("Form")
self.diffGridLayout = QtGui.QGridLayout(Form)
self.diffGridLayout.setObjectName("diffGridLayout")
self.tableWidget = QtGui.QTableWidget(Form)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.diffGridLayout.addWidget(self.tableWidget, 0, 0, 1, 1)
self.lowerHorizontalLayout = QtGui.QHBoxLayout()
self.lowerHorizontalLayout.setObjectName("lowerHorizontalLayout")
self.addButton = QtGui.QPushButton(Form)
self.addButton.setObjectName("addButton")
self.lowerHorizontalLayout.addWidget(self.addButton)
self.removeButton = QtGui.QPushButton(Form)
self.removeButton.setObjectName("removeButton")
self.lowerHorizontalLayout.addWidget(self.removeButton)
self.refreshButton = QtGui.QPushButton(Form)
self.refreshButton.setObjectName("refreshButton")
self.lowerHorizontalLayout.addWidget(self.refreshButton)
self.diffGridLayout.addLayout(self.lowerHorizontalLayout, 1, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Phonebook", None, QtGui.QApplication.UnicodeUTF8))
self.addButton.setText(QtGui.QApplication.translate("Form", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.addButton.setIcon(getThemeIcon("list-add"))
self.addButton.setToolTip("Add person to the list")
self.removeButton.setText(QtGui.QApplication.translate("Form", "Remove", None, QtGui.QApplication.UnicodeUTF8))
self.removeButton.setIcon(getThemeIcon("list-remove"))
self.removeButton.setToolTip("Remove person from list")
self.refreshButton.setText(QtGui.QApplication.translate("Form", "Refresh", None, QtGui.QApplication.UnicodeUTF8))
self.refreshButton.setIcon(getThemeIcon("view-refresh"))
self.refreshButton.setToolTip("Refresh list")
QtCore.QObject.connect(self.tableWidget, QtCore.SIGNAL("itemDoubleClicked(QTableWidgetItem *)"), self.onEdit)
QtCore.QObject.connect(self.addButton,QtCore.SIGNAL("clicked()"), self.onAdd)
QtCore.QObject.connect(self.removeButton,QtCore.SIGNAL("clicked()"), self.onRemove)
QtCore.QObject.connect(self.refreshButton,QtCore.SIGNAL("clicked()"), self.onRefresh)
Form.resize(430, 800)
def buildList(self):
data=self.api.get_phonebook()
self.tableWidget.setColumnCount(2)
self.tableWidget.setRowCount(len(data))
#self.tableWidget.setHorizontalHeaderLabels(["",""])
#print data
i = 0
for name,value in sorted(data.items()):
for k in (name,value):
item = QtGui.QTableWidgetItem(k)
#if k.split('#')[0].strip():
#item.setFlags(QtCore.Qt.ItemIsEnabled)
                # Shade alternate rows (each row holds a name/value pair) light grey.
                if not (i/2)%2:
                    item.setBackgroundColor(QtGui.QColor(225,225,225))
                self.tableWidget.setItem(int(i/2),i%2,item)
i+=1
self.tableWidget.resizeColumnsToContents()
def onEdit(self):
##pid=self.tableWidget.currentRow()-1
##alter=(str(self.tableWidget.item(pid,1).text()))
##self.api.edit_phonebook(alter)
#self.buildList()
name,value = map(str,(self.tableWidget.item(self.tableWidget.currentRow(),0).text(),self.tableWidget.item(self.tableWidget.currentRow(),1).text()))
print 'PhoneBook.onEdit(%s,%s)'%(name,value)
try:
self.prompt = Qt.QWidget()
self.promptUi = PhoneBookEntry(self)
self.promptUi.addSetupUi(self.prompt)
self.promptUi.nameLine.setText(name)
self.promptUi.emailLine.setText(value)
self.prompt.show()
except:
print traceback.format_exc()
def onAdd(self):
try:
self.prompt = Qt.QWidget()
self.promptUi = PhoneBookEntry(self)
self.promptUi.addSetupUi(self.prompt)
self.prompt.show()
except:
print traceback.format_exc()
def onRemove(self):
name,value = map(str,(self.tableWidget.item(self.tableWidget.currentRow(),0).text(),self.tableWidget.item(self.tableWidget.currentRow(),1).text()))
        reply=Qt.QMessageBox.question(None,"Remove","Do you want to remove %s?"%name, Qt.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes)
if reply == QtGui.QMessageBox.Yes:
try:
self.api.remove_phonebook(name)
self.onRefresh()
Qt.QMessageBox.information(None,"Remove","%s Removed"%name)
except:
print traceback.format_exc()
Qt.QMessageBox.critical(None,"Problem", "Could not remove selected person<br>")
def onRefresh(self):
self.buildList()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
form = PhoneBook()
form.show()
sys.exit(app.exec_())
| 44.478764 | 155 | 0.657899 |
5d5435bbf68401c66d16b6b50f2d53b885c30db4 | 11,615 | py | Python | sdk/python/pulumi_azure_native/web/v20210115/list_web_app_backup_status_secrets.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20210115/list_web_app_backup_status_secrets.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20210115/list_web_app_backup_status_secrets.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = [
'ListWebAppBackupStatusSecretsResult',
'AwaitableListWebAppBackupStatusSecretsResult',
'list_web_app_backup_status_secrets',
]
@pulumi.output_type
class ListWebAppBackupStatusSecretsResult:
"""
Backup description.
"""
def __init__(__self__, backup_id=None, blob_name=None, correlation_id=None, created=None, databases=None, finished_time_stamp=None, id=None, kind=None, last_restore_time_stamp=None, log=None, name=None, scheduled=None, size_in_bytes=None, status=None, storage_account_url=None, type=None, website_size_in_bytes=None):
if backup_id and not isinstance(backup_id, int):
raise TypeError("Expected argument 'backup_id' to be a int")
pulumi.set(__self__, "backup_id", backup_id)
if blob_name and not isinstance(blob_name, str):
raise TypeError("Expected argument 'blob_name' to be a str")
pulumi.set(__self__, "blob_name", blob_name)
if correlation_id and not isinstance(correlation_id, str):
raise TypeError("Expected argument 'correlation_id' to be a str")
pulumi.set(__self__, "correlation_id", correlation_id)
if created and not isinstance(created, str):
raise TypeError("Expected argument 'created' to be a str")
pulumi.set(__self__, "created", created)
if databases and not isinstance(databases, list):
raise TypeError("Expected argument 'databases' to be a list")
pulumi.set(__self__, "databases", databases)
if finished_time_stamp and not isinstance(finished_time_stamp, str):
raise TypeError("Expected argument 'finished_time_stamp' to be a str")
pulumi.set(__self__, "finished_time_stamp", finished_time_stamp)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if last_restore_time_stamp and not isinstance(last_restore_time_stamp, str):
raise TypeError("Expected argument 'last_restore_time_stamp' to be a str")
pulumi.set(__self__, "last_restore_time_stamp", last_restore_time_stamp)
if log and not isinstance(log, str):
raise TypeError("Expected argument 'log' to be a str")
pulumi.set(__self__, "log", log)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if scheduled and not isinstance(scheduled, bool):
raise TypeError("Expected argument 'scheduled' to be a bool")
pulumi.set(__self__, "scheduled", scheduled)
if size_in_bytes and not isinstance(size_in_bytes, float):
raise TypeError("Expected argument 'size_in_bytes' to be a float")
pulumi.set(__self__, "size_in_bytes", size_in_bytes)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if storage_account_url and not isinstance(storage_account_url, str):
raise TypeError("Expected argument 'storage_account_url' to be a str")
pulumi.set(__self__, "storage_account_url", storage_account_url)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if website_size_in_bytes and not isinstance(website_size_in_bytes, float):
raise TypeError("Expected argument 'website_size_in_bytes' to be a float")
pulumi.set(__self__, "website_size_in_bytes", website_size_in_bytes)
@property
@pulumi.getter(name="backupId")
def backup_id(self) -> int:
"""
Id of the backup.
"""
return pulumi.get(self, "backup_id")
@property
@pulumi.getter(name="blobName")
def blob_name(self) -> str:
"""
Name of the blob which contains data for this backup.
"""
return pulumi.get(self, "blob_name")
@property
@pulumi.getter(name="correlationId")
def correlation_id(self) -> str:
"""
Unique correlation identifier. Please use this along with the timestamp while communicating with Azure support.
"""
return pulumi.get(self, "correlation_id")
@property
@pulumi.getter
def created(self) -> str:
"""
Timestamp of the backup creation.
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def databases(self) -> Sequence['outputs.DatabaseBackupSettingResponse']:
"""
List of databases included in the backup.
"""
return pulumi.get(self, "databases")
@property
@pulumi.getter(name="finishedTimeStamp")
def finished_time_stamp(self) -> str:
"""
Timestamp when this backup finished.
"""
return pulumi.get(self, "finished_time_stamp")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="lastRestoreTimeStamp")
def last_restore_time_stamp(self) -> str:
"""
Timestamp of a last restore operation which used this backup.
"""
return pulumi.get(self, "last_restore_time_stamp")
@property
@pulumi.getter
def log(self) -> str:
"""
Details regarding this backup. Might contain an error message.
"""
return pulumi.get(self, "log")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def scheduled(self) -> bool:
"""
True if this backup has been created due to a schedule being triggered.
"""
return pulumi.get(self, "scheduled")
@property
@pulumi.getter(name="sizeInBytes")
def size_in_bytes(self) -> float:
"""
Size of the backup in bytes.
"""
return pulumi.get(self, "size_in_bytes")
@property
@pulumi.getter
def status(self) -> str:
"""
Backup status.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="storageAccountUrl")
def storage_account_url(self) -> str:
"""
SAS URL for the storage account container which contains this backup.
"""
return pulumi.get(self, "storage_account_url")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="websiteSizeInBytes")
def website_size_in_bytes(self) -> float:
"""
Size of the original web app which has been backed up.
"""
return pulumi.get(self, "website_size_in_bytes")
class AwaitableListWebAppBackupStatusSecretsResult(ListWebAppBackupStatusSecretsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppBackupStatusSecretsResult(
backup_id=self.backup_id,
blob_name=self.blob_name,
correlation_id=self.correlation_id,
created=self.created,
databases=self.databases,
finished_time_stamp=self.finished_time_stamp,
id=self.id,
kind=self.kind,
last_restore_time_stamp=self.last_restore_time_stamp,
log=self.log,
name=self.name,
scheduled=self.scheduled,
size_in_bytes=self.size_in_bytes,
status=self.status,
storage_account_url=self.storage_account_url,
type=self.type,
website_size_in_bytes=self.website_size_in_bytes)
def list_web_app_backup_status_secrets(backup_id: Optional[str] = None,
backup_name: Optional[str] = None,
backup_schedule: Optional[pulumi.InputType['BackupSchedule']] = None,
databases: Optional[Sequence[pulumi.InputType['DatabaseBackupSetting']]] = None,
enabled: Optional[bool] = None,
kind: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
storage_account_url: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppBackupStatusSecretsResult:
"""
Backup description.
:param str backup_id: ID of backup.
:param str backup_name: Name of the backup.
:param pulumi.InputType['BackupSchedule'] backup_schedule: Schedule for the backup if it is executed periodically.
:param Sequence[pulumi.InputType['DatabaseBackupSetting']] databases: Databases included in the backup.
:param bool enabled: True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
:param str kind: Kind of resource.
:param str name: Name of web app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str storage_account_url: SAS URL to the container.
"""
__args__ = dict()
__args__['backupId'] = backup_id
__args__['backupName'] = backup_name
__args__['backupSchedule'] = backup_schedule
__args__['databases'] = databases
__args__['enabled'] = enabled
__args__['kind'] = kind
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['storageAccountUrl'] = storage_account_url
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20210115:listWebAppBackupStatusSecrets', __args__, opts=opts, typ=ListWebAppBackupStatusSecretsResult).value
return AwaitableListWebAppBackupStatusSecretsResult(
backup_id=__ret__.backup_id,
blob_name=__ret__.blob_name,
correlation_id=__ret__.correlation_id,
created=__ret__.created,
databases=__ret__.databases,
finished_time_stamp=__ret__.finished_time_stamp,
id=__ret__.id,
kind=__ret__.kind,
last_restore_time_stamp=__ret__.last_restore_time_stamp,
log=__ret__.log,
name=__ret__.name,
scheduled=__ret__.scheduled,
size_in_bytes=__ret__.size_in_bytes,
status=__ret__.status,
storage_account_url=__ret__.storage_account_url,
type=__ret__.type,
website_size_in_bytes=__ret__.website_size_in_bytes)
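# Illustrative usage sketch (not part of the generated SDK): the resource
# group, app name and backup id below are placeholders, not values defined in
# this module.
def _example_list_backup_status_secrets():
    result = list_web_app_backup_status_secrets(
        backup_id="12345",             # placeholder backup id
        name="my-web-app",             # placeholder site name
        resource_group_name="my-rg")   # placeholder resource group
    # The invoke result exposes the backup metadata as plain attributes.
    return result.status, result.storage_account_url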
| 38.97651 | 321 | 0.642703 |
c6c0d23c1f147aecd3e41c44519f1728fafa2a1d | 364 | py | Python | backend/api/views/tasks/text.py | 12xiaoni/text-label | 7456c5e73d32bcfc81a02be7e0d748f162934d35 | [
"MIT"
] | 1 | 2022-01-06T01:40:52.000Z | 2022-01-06T01:40:52.000Z | backend/api/views/tasks/text.py | 12xiaoni/text-label | 7456c5e73d32bcfc81a02be7e0d748f162934d35 | [
"MIT"
] | null | null | null | backend/api/views/tasks/text.py | 12xiaoni/text-label | 7456c5e73d32bcfc81a02be7e0d748f162934d35 | [
"MIT"
] | 1 | 2021-12-22T22:02:14.000Z | 2021-12-22T22:02:14.000Z | from ...models import TextLabel
from ...serializers import TextLabelSerializer
from .base import BaseDetailAPI, BaseListAPI
class TextLabelListAPI(BaseListAPI):
annotation_class = TextLabel
serializer_class = TextLabelSerializer
class TextLabelDetailAPI(BaseDetailAPI):
queryset = TextLabel.objects.all()
serializer_class = TextLabelSerializer
| 26 | 46 | 0.804945 |
3cec4ad1841344e469d11d5f74233b707af1bd26 | 1,982 | py | Python | Pymoe/Kitsu/anime.py | bagley2014/PyMoe | 83185a4d30e617f72a670ca6fb39fdf2ba84524a | [
"MIT"
] | 243 | 2016-10-15T14:51:15.000Z | 2022-01-27T06:39:06.000Z | Pymoe/Kitsu/anime.py | bagley2014/PyMoe | 83185a4d30e617f72a670ca6fb39fdf2ba84524a | [
"MIT"
] | 32 | 2016-10-17T00:00:29.000Z | 2022-03-20T02:08:48.000Z | Pymoe/Kitsu/anime.py | bagley2014/PyMoe | 83185a4d30e617f72a670ca6fb39fdf2ba84524a | [
"MIT"
] | 25 | 2016-10-16T00:31:57.000Z | 2021-11-11T03:38:55.000Z | import requests
from ..errors import *
from .helpers import SearchWrapper
class KitsuAnime:
def __init__(self, api, header):
self.apiurl = api
self.header = header
def get(self, aid):
"""
Get anime information by id.
:param int aid: ID of the anime.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError`
"""
r = requests.get(self.apiurl + "/anime/{}".format(aid), headers=self.header)
if r.status_code != 200:
if r.status_code == 404:
return None
else:
raise ServerError
return r.json()
def search(self, term):
"""
Search for anime by term.
:param str term: What to search for.
:return: The results as a SearchWrapper iterator.
:rtype: SearchWrapper
"""
r = requests.get(
self.apiurl + "/anime", params={"filter[text]": term}, headers=self.header
)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd["meta"]["count"]:
return SearchWrapper(
jsd["data"],
jsd["links"]["next"] if "next" in jsd["links"] else None,
self.header,
)
else:
return None
def streaming_links(self, aid):
"""
Get anime streaming-links by id.
:param int aid: ID of the anime.
:return: Dictionary or None (for not found)
:rtype: Dictionary or None
:raises: :class:`Pymoe.errors.ServerError`
"""
r = requests.get(
self.apiurl + f"/anime/{aid}/streaming-links",
headers=self.header,
)
if r.status_code != 200:
if r.status_code == 404:
return None
else:
raise ServerError
return r.json()
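# Illustrative usage sketch: KitsuAnime is normally constructed by the
# package's top-level Kitsu client, which supplies the API url and headers;
# the values below are placeholders for a stand-alone example.
def _example_kitsu_anime_usage():
    anime = KitsuAnime("https://kitsu.io/api/edge",
                       {"Accept": "application/vnd.api+json"})
    one = anime.get(1)                       # full JSON payload, or None if not found
    results = anime.search("cowboy bebop")   # SearchWrapper iterator, or None
    return one, results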
| 26.078947 | 86 | 0.518163 |
98ca71239d01dc1063a17d7a2cbdf89c26282f34 | 3,361 | py | Python | clipper-parm/examples/image_query/example_client.py | mukkachaitanya/parity-models | 9f336a67798934d29592aca471dff6ad047473f6 | [
"Apache-2.0"
] | 32 | 2019-09-11T16:49:58.000Z | 2022-01-26T15:40:40.000Z | clipper-parm/examples/image_query/example_client.py | mukkachaitanya/parity-models | 9f336a67798934d29592aca471dff6ad047473f6 | [
"Apache-2.0"
] | 5 | 2019-11-10T16:13:40.000Z | 2022-01-13T01:31:51.000Z | clipper-parm/examples/image_query/example_client.py | mukkachaitanya/parity-models | 9f336a67798934d29592aca471dff6ad047473f6 | [
"Apache-2.0"
] | 9 | 2019-09-03T14:05:26.000Z | 2021-12-22T07:17:27.000Z | from __future__ import print_function
import base64
from clipper_admin import ClipperConnection, DockerContainerManager
from clipper_admin.deployers import python as python_deployer
import json
import requests
from datetime import datetime
import time
import numpy as np
import signal
import sys
import argparse
"""def predict(addr, filename):
url = "http://%s/image-example/predict" % addr
req_json = json.dumps({
"input":
base64.b64encode(open(filename, "rb").read()).decode()
})
headers = {'Content-type': 'application/json'}
start = datetime.now()
r = requests.post(url, headers=headers, data=req_json)
end = datetime.now()
latency = (end - start).total_seconds() * 1000.0
print("'%s', %f ms" % (r.text, latency))
def image_size(img):
import base64, io, os, PIL.Image, tempfile
tmp = tempfile.NamedTemporaryFile('wb', delete=False, suffix='.jpg')
tmp.write(io.BytesIO(img).getvalue())
tmp.close()
size = PIL.Image.open(tmp.name, 'r').size
os.unlink(tmp.name)
return [size]
def image_size_v2(img):
return [(2, 2)]"""
def query(addr, filename):
url = "http://%s/image-example/predict" % addr
req_json = json.dumps({
"input":
base64.b64encode(open(filename, "rb").read()).decode() # bytes to unicode
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
print(r.json())
def image_size(imgs):
"""
Input:
        - imgs : (np.ndarray) of shape (n, d). n is the number of images in this batch,
                 d is the length of each encoded image given as a numpy int8 byte array.
Output:
- sizes : List[Tuple(int, int),...]
"""
import base64
import io
import os
import PIL.Image
import tempfile
num_imgs = len(imgs)
sizes = []
for i in range(num_imgs):
# Create a temp file to write to
tmp = tempfile.NamedTemporaryFile('wb', delete=False, suffix='.png')
tmp.write(io.BytesIO(imgs[i]).getvalue())
tmp.close()
# Use PIL to read in the file and compute size
size = PIL.Image.open(tmp.name, 'r').size
# Remove the temp file
os.unlink(tmp.name)
sizes.append(size)
return sizes
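# Illustrative local check for image_size: builds a tiny PNG in memory and
# passes it through as a one-element batch. Assumes Pillow is installed (it is
# listed in pkgs_to_install below).
def _example_image_size_usage():
    import io
    import PIL.Image
    buf = io.BytesIO()
    PIL.Image.fromarray(np.zeros((2, 2, 3), dtype=np.uint8)).save(buf, format='PNG')
    # image_size expects a batch of encoded image byte strings.
    return image_size([buf.getvalue()])  # expected result: [(2, 2)]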
# Stop Clipper on Ctrl-C
def signal_handler(signal, frame):
print("Stopping Clipper...")
clipper_conn = ClipperConnection(DockerContainerManager())
clipper_conn.stop_all()
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(
description='Use Clipper to Query Images.')
parser.add_argument('image', nargs='+', help='Path to an image')
imgs = parser.parse_args().image
clipper_conn = ClipperConnection(DockerContainerManager())
clipper_conn.start_clipper()
python_deployer.create_endpoint(
clipper_conn=clipper_conn,
name="image-example",
input_type="bytes",
func=image_size,
pkgs_to_install=['pillow']
)
time.sleep(2)
try:
for f in imgs:
if f.endswith('.jpg') or f.endswith('.png'):
query(clipper_conn.get_query_addr(), f)
except Exception as e:
print("exception")
clipper_conn.get_clipper_logs()
clipper_conn.stop_all()
| 28.483051 | 81 | 0.63374 |
721c1eb3cce12c73ba811a61decbade7c5514b5f | 38,768 | py | Python | holoviews/tests/core/testdynamic.py | jonmmease/holoviews | 27407e1a5d8020c39c135fa3f8c4fdeb11fea5c0 | [
"BSD-3-Clause"
] | 1 | 2019-01-02T20:20:09.000Z | 2019-01-02T20:20:09.000Z | holoviews/tests/core/testdynamic.py | jonmmease/holoviews | 27407e1a5d8020c39c135fa3f8c4fdeb11fea5c0 | [
"BSD-3-Clause"
] | null | null | null | holoviews/tests/core/testdynamic.py | jonmmease/holoviews | 27407e1a5d8020c39c135fa3f8c4fdeb11fea5c0 | [
"BSD-3-Clause"
] | null | null | null | import uuid
from collections import deque
import time
import numpy as np
from holoviews import Dimension, NdLayout, GridSpace, Layout, NdOverlay
from holoviews.core.spaces import DynamicMap, HoloMap, Callable
from holoviews.core.options import Store
from holoviews.element import Image, Scatter, Curve, Text, Points
from holoviews.operation import histogram
from holoviews.plotting.util import initialize_dynamic
from holoviews.streams import Stream, PointerXY, PointerX, PointerY, RangeX, Buffer
from holoviews.util import Dynamic
from holoviews.element.comparison import ComparisonTestCase
from .testdimensioned import CustomBackendTestCase, TestObj
XY = Stream.define('XY', x=0,y=0)
frequencies = np.linspace(0.5,2.0,5)
phases = np.linspace(0, np.pi*2, 5)
x,y = np.mgrid[-5:6, -5:6] * 0.1
def sine_array(phase, freq):
return np.sin(phase + (freq*x**2+freq*y**2))
class DynamicMapConstructor(ComparisonTestCase):
def test_simple_constructor_kdims(self):
DynamicMap(lambda x: x, kdims=['test'])
def test_simple_constructor_invalid_no_kdims(self):
regexp = ("Callable '<lambda>' accepts more positional arguments than there are "
"kdims and stream parameters")
with self.assertRaisesRegexp(KeyError, regexp):
DynamicMap(lambda x: x)
def test_simple_constructor_invalid(self):
regexp = ("Callback '<lambda>' signature over \['x'\] does not accommodate "
"required kdims \['x', 'y'\]")
with self.assertRaisesRegexp(KeyError, regexp):
DynamicMap(lambda x: x, kdims=['x','y'])
def test_simple_constructor_streams(self):
DynamicMap(lambda x: x, streams=[PointerX()])
def test_simple_constructor_streams_invalid_uninstantiated(self):
regexp = ("The supplied streams list contains objects "
"that are not Stream instances:(.+?)")
with self.assertRaisesRegexp(TypeError, regexp):
DynamicMap(lambda x: x, streams=[PointerX])
def test_simple_constructor_streams_invalid_type(self):
regexp = ("The supplied streams list contains objects "
"that are not Stream instances:(.+?)")
with self.assertRaisesRegexp(TypeError, regexp):
DynamicMap(lambda x: x, streams=[3])
def test_simple_constructor_streams_invalid_mismatch(self):
regexp = "Callable '<lambda>' missing keywords to accept stream parameters: y"
with self.assertRaisesRegexp(KeyError, regexp):
DynamicMap(lambda x: x, streams=[PointerXY()])
def test_simple_constructor_streams_invalid_mismatch_named(self):
def foo(x): return x
regexp = "Callable 'foo' missing keywords to accept stream parameters: y"
with self.assertRaisesRegexp(KeyError, regexp):
DynamicMap(foo, streams=[PointerXY()])
class DynamicMapMethods(ComparisonTestCase):
def test_deep_relabel_label(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i']).relabel(label='Test')
self.assertEqual(dmap[0].label, 'Test')
def test_deep_relabel_group(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i']).relabel(group='Test')
self.assertEqual(dmap[0].group, 'Test')
def test_redim_dimension_name(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i']).redim(i='New')
self.assertEqual(dmap.kdims[0].name, 'New')
def test_redim_dimension_range_aux(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i']).redim.range(i=(0,1))
self.assertEqual(dmap.kdims[0].range, (0,1))
def test_redim_dimension_values_cache_reset_1D(self):
# Setting the values should drop mismatching keys from the cache
fn = lambda i: Curve([i,i])
dmap = DynamicMap(fn, kdims=['i'])[{0,1,2,3,4,5}]
self.assertEqual(dmap.keys(), [0,1,2,3,4,5])
redimmed = dmap.redim.values(i=[2,3,5,6,8])
self.assertEqual(redimmed.keys(), [2,3,5])
def test_redim_dimension_values_cache_reset_2D_single(self):
# Setting the values should drop mismatching keys from the cache
fn = lambda i,j: Curve([i,j])
keys = [(0,1),(1,0),(2,2),(2,5), (3,3)]
dmap = DynamicMap(fn, kdims=['i','j'])[keys]
self.assertEqual(dmap.keys(), keys)
redimmed = dmap.redim.values(i=[2,10,50])
self.assertEqual(redimmed.keys(), [(2,2),(2,5)])
def test_redim_dimension_values_cache_reset_2D_multi(self):
# Setting the values should drop mismatching keys from the cache
fn = lambda i,j: Curve([i,j])
keys = [(0,1),(1,0),(2,2),(2,5), (3,3)]
dmap = DynamicMap(fn, kdims=['i','j'])[keys]
self.assertEqual(dmap.keys(), keys)
redimmed = dmap.redim.values(i=[2,10,50], j=[5,50,100])
self.assertEqual(redimmed.keys(), [(2,5)])
def test_redim_dimension_unit_aux(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i']).redim.unit(i='m/s')
self.assertEqual(dmap.kdims[0].unit, 'm/s')
def test_redim_dimension_type_aux(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i']).redim.type(i=int)
self.assertEqual(dmap.kdims[0].type, int)
def test_deep_redim_dimension_name(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i']).redim(x='X')
self.assertEqual(dmap[0].kdims[0].name, 'X')
def test_deep_redim_dimension_name_with_spec(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i']).redim(Image, x='X')
self.assertEqual(dmap[0].kdims[0].name, 'X')
def test_deep_getitem_bounded_kdims(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
self.assertEqual(dmap[:, 5:10][10], fn(10)[5:10])
def test_deep_getitem_bounded_kdims_and_vdims(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
self.assertEqual(dmap[:, 5:10, 0:5][10], fn(10)[5:10, 0:5])
def test_deep_getitem_cross_product_and_slice(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
self.assertEqual(dmap[[10, 11, 12], 5:10],
dmap.clone([(i, fn(i)[5:10]) for i in range(10, 13)]))
def test_deep_getitem_index_and_slice(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
self.assertEqual(dmap[10, 5:10], fn(10)[5:10])
def test_deep_getitem_cache_sliced(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
dmap[10] # Add item to cache
self.assertEqual(dmap[:, 5:10][10], fn(10)[5:10])
def test_deep_select_slice_kdim(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
self.assertEqual(dmap.select(x=(5, 10))[10], fn(10)[5:10])
def test_deep_select_slice_kdim_and_vdims(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
self.assertEqual(dmap.select(x=(5, 10), y=(0, 5))[10], fn(10)[5:10, 0:5])
def test_deep_select_slice_kdim_no_match(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
self.assertEqual(dmap.select(DynamicMap, x=(5, 10))[10], fn(10))
def test_deep_map_apply_element_function(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
mapped = dmap.map(lambda x: x.clone(x.data*2), Curve)
curve = fn(10)
self.assertEqual(mapped[10], curve.clone(curve.data*2))
def test_deep_map_transform_element_type(self):
fn = lambda i: Curve(np.arange(i))
dmap = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
dmap[10]
mapped = dmap.map(lambda x: Scatter(x), Curve)
area = mapped[11]
self.assertEqual(area, Scatter(fn(11)))
def test_deep_map_apply_dmap_function(self):
fn = lambda i: Curve(np.arange(i))
dmap1 = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
dmap2 = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
mapped = (dmap1 + dmap2).map(lambda x: x[10], DynamicMap)
self.assertEqual(mapped, Layout([('DynamicMap.I', fn(10)),
('DynamicMap.II', fn(10))]))
def test_deep_map_apply_dmap_function_no_clone(self):
fn = lambda i: Curve(np.arange(i))
dmap1 = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
dmap2 = DynamicMap(fn, kdims=[Dimension('Test', range=(10, 20))])
layout = (dmap1 + dmap2)
mapped = layout.map(lambda x: x[10], DynamicMap, clone=False)
self.assertIs(mapped, layout)
def test_dynamic_reindex_reorder(self):
def history_callback(x, y, history=deque(maxlen=10)):
history.append((x, y))
return Points(list(history))
dmap = DynamicMap(history_callback, kdims=['x', 'y'])
reindexed = dmap.reindex(['y', 'x'])
points = reindexed[2, 1]
self.assertEqual(points, Points([(1, 2)]))
def test_dynamic_reindex_drop_raises_exception(self):
def history_callback(x, y, history=deque(maxlen=10)):
history.append((x, y))
return Points(list(history))
dmap = DynamicMap(history_callback, kdims=['x', 'y'])
exception = ("DynamicMap does not allow dropping dimensions, "
"reindex may only be used to reorder dimensions.")
with self.assertRaisesRegexp(ValueError, exception):
dmap.reindex(['x'])
def test_dynamic_groupby_kdims_and_streams(self):
def plot_function(mydim, data):
return Scatter(data[data[:, 2]==mydim])
buff = Buffer(data=np.empty((0, 3)))
dmap = DynamicMap(plot_function, streams=[buff], kdims='mydim').redim.values(mydim=[0, 1, 2])
ndlayout = dmap.groupby('mydim', container_type=NdLayout)
self.assertIsInstance(ndlayout[0], DynamicMap)
data = np.array([(0, 0, 0), (1, 1, 1), (2, 2, 2)])
buff.send(data)
self.assertEqual(ndlayout[0][()], Scatter([(0, 0)]))
self.assertEqual(ndlayout[1][()], Scatter([(1, 1)]))
self.assertEqual(ndlayout[2][()], Scatter([(2, 2)]))
def test_dynamic_split_overlays_on_ndoverlay(self):
dmap = DynamicMap(lambda: NdOverlay({i: Points([i]) for i in range(3)}))
initialize_dynamic(dmap)
keys, dmaps = dmap._split_overlays()
self.assertEqual(keys, [(0,), (1,), (2,)])
self.assertEqual(dmaps[0][()], Points([0]))
self.assertEqual(dmaps[1][()], Points([1]))
self.assertEqual(dmaps[2][()], Points([2]))
def test_dynamic_split_overlays_on_overlay(self):
dmap1 = DynamicMap(lambda: Points([]))
dmap2 = DynamicMap(lambda: Curve([]))
dmap = dmap1 * dmap2
initialize_dynamic(dmap)
keys, dmaps = dmap._split_overlays()
self.assertEqual(keys, [('Points', 'I'), ('Curve', 'I')])
self.assertEqual(dmaps[0][()], Points([]))
self.assertEqual(dmaps[1][()], Curve([]))
def test_dynamic_split_overlays_on_varying_order_overlay(self):
def cb(i):
if i%2 == 0:
return Curve([]) * Points([])
else:
return Points([]) * Curve([])
dmap = DynamicMap(cb, kdims='i').redim.range(i=(0, 4))
initialize_dynamic(dmap)
keys, dmaps = dmap._split_overlays()
self.assertEqual(keys, [('Curve', 'I'), ('Points', 'I')])
self.assertEqual(dmaps[0][0], Curve([]))
self.assertEqual(dmaps[0][1], Curve([]))
self.assertEqual(dmaps[1][0], Points([]))
self.assertEqual(dmaps[1][1], Points([]))
def test_dynamic_split_overlays_on_missing_item_in_overlay(self):
def cb(i):
if i%2 == 0:
return Curve([]) * Points([])
else:
return Scatter([]) * Curve([])
dmap = DynamicMap(cb, kdims='i').redim.range(i=(0, 4))
initialize_dynamic(dmap)
keys, dmaps = dmap._split_overlays()
self.assertEqual(keys, [('Curve', 'I'), ('Points', 'I')])
self.assertEqual(dmaps[0][0], Curve([]))
self.assertEqual(dmaps[0][1], Curve([]))
self.assertEqual(dmaps[1][0], Points([]))
with self.assertRaises(KeyError):
dmaps[1][1]
class DynamicMapOptionsTests(CustomBackendTestCase):
def test_dynamic_options(self):
dmap = DynamicMap(lambda X: TestObj(None), kdims=['X']).redim.range(X=(0,10))
dmap = dmap.options(plot_opt1='red')
opts = Store.lookup_options('backend_1', dmap[0], 'plot')
self.assertEqual(opts.options, {'plot_opt1': 'red'})
def test_dynamic_options_no_clone(self):
dmap = DynamicMap(lambda X: TestObj(None), kdims=['X']).redim.range(X=(0,10))
dmap.options(plot_opt1='red', clone=False)
opts = Store.lookup_options('backend_1', dmap[0], 'plot')
self.assertEqual(opts.options, {'plot_opt1': 'red'})
class DynamicMapUnboundedProperty(ComparisonTestCase):
def test_callable_bounded_init(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=[Dimension('dim', range=(0,10))])
self.assertEqual(dmap.unbounded, [])
def test_callable_bounded_clone(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=[Dimension('dim', range=(0,10))])
self.assertEqual(dmap, dmap.clone())
self.assertEqual(dmap.unbounded, [])
def test_sampled_unbounded_init(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=['i'])
self.assertEqual(dmap.unbounded, ['i'])
def test_sampled_unbounded_resample(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=['i'])
self.assertEqual(dmap[{0, 1, 2}].keys(), [0, 1, 2])
self.assertEqual(dmap.unbounded, ['i'])
def test_mixed_kdim_streams_unbounded(self):
dmap=DynamicMap(lambda x,y,z: x+y, kdims=['z'], streams=[XY()])
self.assertEqual(dmap.unbounded, ['z'])
def test_mixed_kdim_streams_bounded_redim(self):
dmap=DynamicMap(lambda x,y,z: x+y, kdims=['z'], streams=[XY()])
self.assertEqual(dmap.redim.range(z=(-0.5,0.5)).unbounded, [])
class DynamicTransferStreams(ComparisonTestCase):
def setUp(self):
self.dimstream = PointerX(x=0)
self.stream = PointerY(y=0)
self.dmap = DynamicMap(lambda x, y, z: Curve([x, y, z]),
kdims=['x', 'z'], streams=[self.stream, self.dimstream])
def test_dynamic_redim_inherits_streams(self):
redimmed = self.dmap.redim.range(z=(0, 5))
self.assertEqual(redimmed.streams, self.dmap.streams)
def test_dynamic_relabel_inherits_streams(self):
relabelled = self.dmap.relabel(label='Test')
self.assertEqual(relabelled.streams, self.dmap.streams)
def test_dynamic_map_inherits_streams(self):
mapped = self.dmap.map(lambda x: x, Curve)
self.assertEqual(mapped.streams, self.dmap.streams)
def test_dynamic_select_inherits_streams(self):
selected = self.dmap.select(Curve, x=(0, 5))
self.assertEqual(selected.streams, self.dmap.streams)
def test_dynamic_hist_inherits_streams(self):
hist = self.dmap.hist(adjoin=False)
self.assertEqual(hist.streams, self.dmap.streams)
def test_dynamic_mul_inherits_dim_streams(self):
hist = self.dmap * self.dmap
self.assertEqual(hist.streams, self.dmap.streams[1:])
def test_dynamic_util_inherits_dim_streams(self):
hist = Dynamic(self.dmap)
self.assertEqual(hist.streams, self.dmap.streams[1:])
def test_dynamic_util_inherits_dim_streams_clash(self):
exception = ("The supplied stream objects PointerX\(x=None\) and "
"PointerX\(x=0\) clash on the following parameters: \['x'\]")
with self.assertRaisesRegexp(Exception, exception):
Dynamic(self.dmap, streams=[PointerX])
class DynamicTestOperation(ComparisonTestCase):
def test_dynamic_operation(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=['i'])
dmap_with_fn = Dynamic(dmap, operation=lambda x: x.clone(x.data*2))
self.assertEqual(dmap_with_fn[5], Image(sine_array(0,5)*2))
def test_dynamic_operation_on_hmap(self):
hmap = HoloMap({i: Image(sine_array(0,i)) for i in range(10)})
dmap = Dynamic(hmap, operation=lambda x: x)
self.assertEqual(dmap.kdims[0].name, hmap.kdims[0].name)
self.assertEqual(dmap.kdims[0].values, hmap.keys())
def test_dynamic_operation_link_inputs_not_transferred_on_clone(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=['i'])
dmap_with_fn = Dynamic(dmap, link_inputs=False, operation=lambda x: x.clone(x.data*2))
self.assertTrue(dmap_with_fn.clone().callback.link_inputs)
def test_dynamic_operation_on_element(self):
img = Image(sine_array(0,5))
posxy = PointerXY(x=2, y=1)
dmap_with_fn = Dynamic(img, operation=lambda obj, x, y: obj.clone(obj.data*x+y),
streams=[posxy])
element = dmap_with_fn[()]
self.assertEqual(element, Image(sine_array(0,5)*2+1))
self.assertEqual(dmap_with_fn.streams, [posxy])
def test_dynamic_operation_with_kwargs(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=['i'])
def fn(x, multiplier=2):
return x.clone(x.data*multiplier)
dmap_with_fn = Dynamic(dmap, operation=fn, kwargs=dict(multiplier=3))
self.assertEqual(dmap_with_fn[5], Image(sine_array(0,5)*3))
def test_dynamic_operation_init_renamed_stream_params(self):
img = Image(sine_array(0,5))
stream = RangeX(rename={'x_range': 'bin_range'})
histogram(img, bin_range=(0, 1), streams=[stream], dynamic=True)
self.assertEqual(stream.x_range, (0, 1))
def test_dynamic_operation_init_stream_params(self):
img = Image(sine_array(0,5))
stream = Stream.define('TestStream', bin_range=None)()
histogram(img, bin_range=(0, 1), streams=[stream], dynamic=True)
self.assertEqual(stream.bin_range, (0, 1))
class DynamicTestOverlay(ComparisonTestCase):
def test_dynamic_element_overlay(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=['i'])
dynamic_overlay = dmap * Image(sine_array(0,10))
overlaid = Image(sine_array(0,5)) * Image(sine_array(0,10))
self.assertEqual(dynamic_overlay[5], overlaid)
def test_dynamic_element_underlay(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=['i'])
dynamic_overlay = Image(sine_array(0,10)) * dmap
overlaid = Image(sine_array(0,10)) * Image(sine_array(0,5))
self.assertEqual(dynamic_overlay[5], overlaid)
def test_dynamic_dynamicmap_overlay(self):
fn = lambda i: Image(sine_array(0,i))
dmap=DynamicMap(fn, kdims=['i'])
fn2 = lambda i: Image(sine_array(0,i*2))
dmap2=DynamicMap(fn2, kdims=['i'])
dynamic_overlay = dmap * dmap2
overlaid = Image(sine_array(0,5)) * Image(sine_array(0,10))
self.assertEqual(dynamic_overlay[5], overlaid)
def test_dynamic_holomap_overlay(self):
fn = lambda i: Image(sine_array(0,i))
dmap = DynamicMap(fn, kdims=['i'])
hmap = HoloMap({i: Image(sine_array(0,i*2)) for i in range(10)}, kdims=['i'])
dynamic_overlay = dmap * hmap
overlaid = Image(sine_array(0,5)) * Image(sine_array(0,10))
self.assertEqual(dynamic_overlay[5], overlaid)
def test_dynamic_overlay_memoization(self):
"""Tests that Callable memoizes unchanged callbacks"""
def fn(x, y):
return Scatter([(x, y)])
dmap = DynamicMap(fn, kdims=[], streams=[PointerXY()])
counter = [0]
def fn2(x, y):
counter[0] += 1
return Image(np.random.rand(10, 10))
dmap2 = DynamicMap(fn2, kdims=[], streams=[PointerXY()])
overlaid = dmap * dmap2
overlay = overlaid[()]
self.assertEqual(overlay.Scatter.I, fn(0, 0))
dmap.event(x=1, y=2)
overlay = overlaid[()]
# Ensure dmap return value was updated
self.assertEqual(overlay.Scatter.I, fn(1, 2))
# Ensure dmap2 callback was called only once
self.assertEqual(counter[0], 1)
def test_dynamic_event_renaming_valid(self):
def fn(x1, y1):
return Scatter([(x1, y1)])
xy = PointerXY(rename={'x':'x1','y':'y1'})
dmap = DynamicMap(fn, kdims=[], streams=[xy])
dmap.event(x1=1, y1=2)
def test_dynamic_event_renaming_invalid(self):
def fn(x1, y1):
return Scatter([(x1, y1)])
xy = PointerXY(rename={'x':'x1','y':'y1'})
dmap = DynamicMap(fn, kdims=[], streams=[xy])
regexp = '(.+?)do not correspond to stream parameters'
with self.assertRaisesRegexp(KeyError, regexp):
dmap.event(x=1, y=2)
class DynamicCallableMemoize(ComparisonTestCase):
def test_dynamic_keydim_not_memoize(self):
dmap = DynamicMap(lambda x: Curve([(0, x)]), kdims=['x'])
self.assertEqual(dmap[0], Curve([(0, 0)]))
self.assertEqual(dmap[1], Curve([(0, 1)]))
def test_dynamic_keydim_memoize(self):
dmap = DynamicMap(lambda x: Curve([(0, x)]), kdims=['x'])
self.assertIs(dmap[0], dmap[0])
def test_dynamic_keydim_memoize_disable(self):
dmap = DynamicMap(Callable(lambda x: Curve([(0, x)]), memoize=False), kdims=['x'])
first = dmap[0]
del dmap.data[(0,)]
second = dmap[0]
self.assertIsNot(first, second)
def test_dynamic_callable_memoize(self):
        # Fully memoized: repeated events with the same value add only one sample each
def history_callback(x, history=deque(maxlen=10)):
history.append(x)
return Curve(list(history))
x = PointerX()
dmap = DynamicMap(history_callback, kdims=[], streams=[x])
# Add stream subscriber mocking plot
x.add_subscriber(lambda **kwargs: dmap[()])
for i in range(2):
x.event(x=1)
self.assertEqual(dmap[()], Curve([1]))
for i in range(2):
x.event(x=2)
self.assertEqual(dmap[()], Curve([1, 2]))
def test_dynamic_callable_disable_callable_memoize(self):
        # Disabling Callable.memoize means no memoization is applied, so
        # every access to the DynamicMap calls the callback and adds a sample
def history_callback(x, history=deque(maxlen=10)):
history.append(x)
return Curve(list(history))
x = PointerX()
callable_obj = Callable(history_callback, memoize=False)
dmap = DynamicMap(callable_obj, kdims=[], streams=[x])
# Add stream subscriber mocking plot
x.add_subscriber(lambda **kwargs: dmap[()])
for i in range(2):
x.event(x=1)
self.assertEqual(dmap[()], Curve([1, 1, 1]))
for i in range(2):
x.event(x=2)
self.assertEqual(dmap[()], Curve([1, 1, 1, 2, 2, 2]))
class StreamSubscribersAddandClear(ComparisonTestCase):
def setUp(self):
self.fn1 = lambda x: x
self.fn2 = lambda x: x**2
self.fn3 = lambda x: x**3
self.fn4 = lambda x: x**4
def test_subscriber_clear_all(self):
pointerx = PointerX(x=2)
pointerx.add_subscriber(self.fn1, precedence=0)
pointerx.add_subscriber(self.fn2, precedence=1)
pointerx.add_subscriber(self.fn3, precedence=1.5)
pointerx.add_subscriber(self.fn4, precedence=10)
self.assertEqual(pointerx.subscribers, [self.fn1,self.fn2,self.fn3,self.fn4])
pointerx.clear('all')
self.assertEqual(pointerx.subscribers, [])
def test_subscriber_clear_user(self):
pointerx = PointerX(x=2)
pointerx.add_subscriber(self.fn1, precedence=0)
pointerx.add_subscriber(self.fn2, precedence=1)
pointerx.add_subscriber(self.fn3, precedence=1.5)
pointerx.add_subscriber(self.fn4, precedence=10)
self.assertEqual(pointerx.subscribers, [self.fn1,self.fn2,self.fn3,self.fn4])
pointerx.clear('user')
self.assertEqual(pointerx.subscribers, [self.fn3,self.fn4])
def test_subscriber_clear_internal(self):
pointerx = PointerX(x=2)
pointerx.add_subscriber(self.fn1, precedence=0)
pointerx.add_subscriber(self.fn2, precedence=1)
pointerx.add_subscriber(self.fn3, precedence=1.5)
pointerx.add_subscriber(self.fn4, precedence=10)
self.assertEqual(pointerx.subscribers, [self.fn1,self.fn2,self.fn3,self.fn4])
pointerx.clear('internal')
self.assertEqual(pointerx.subscribers, [self.fn1,self.fn2])
class DynamicStreamReset(ComparisonTestCase):
def test_dynamic_callable_stream_transient(self):
        # With a transient stream, memoization only applies while the stream
        # is inactive, so a new sample is added for each call to
        # stream.update
def history_callback(x, history=deque(maxlen=10)):
if x is not None:
history.append(x)
return Curve(list(history))
x = PointerX(transient=True)
dmap = DynamicMap(history_callback, kdims=[], streams=[x])
# Add stream subscriber mocking plot
x.add_subscriber(lambda **kwargs: dmap[()])
for i in range(2):
x.event(x=1)
self.assertEqual(dmap[()], Curve([1, 1]))
for i in range(2):
x.event(x=2)
self.assertEqual(dmap[()], Curve([1, 1, 2, 2]))
def test_dynamic_stream_transients(self):
# Ensure Stream reset option resets streams to default value
# when not triggering
global xresets, yresets
xresets, yresets = 0, 0
def history_callback(x, y, history=deque(maxlen=10)):
global xresets, yresets
if x is None:
xresets += 1
else:
history.append(x)
if y is None:
yresets += 1
return Curve(list(history))
x = PointerX(transient=True)
y = PointerY(transient=True)
dmap = DynamicMap(history_callback, kdims=[], streams=[x, y])
# Add stream subscriber mocking plot
x.add_subscriber(lambda **kwargs: dmap[()])
y.add_subscriber(lambda **kwargs: dmap[()])
# Update each stream and count when None default appears
for i in range(2):
x.event(x=i)
y.event(y=i)
self.assertEqual(xresets, 2)
self.assertEqual(yresets, 2)
def test_dynamic_callable_stream_hashkey(self):
        # Overriding hashkey to return a fresh value on every access disables
        # memoization entirely, so each access to the DynamicMap re-invokes
        # the callback and appends another sample
def history_callback(x, history=deque(maxlen=10)):
if x is not None:
history.append(x)
return Curve(list(history))
class NoMemoize(PointerX):
@property
def hashkey(self): return {'hash': uuid.uuid4().hex}
x = NoMemoize()
dmap = DynamicMap(history_callback, kdims=[], streams=[x])
# Add stream subscriber mocking plot
x.add_subscriber(lambda **kwargs: dmap[()])
for i in range(2):
x.event(x=1)
self.assertEqual(dmap[()], Curve([1, 1, 1]))
for i in range(2):
x.event(x=2)
self.assertEqual(dmap[()], Curve([1, 1, 1, 2, 2, 2]))
class TestPeriodicStreamUpdate(ComparisonTestCase):
def test_periodic_counter_blocking(self):
class Counter(object):
def __init__(self):
self.count = 0
def __call__(self):
self.count += 1
return Curve([1,2,3])
next_stream = Stream.define('Next')()
counter = Counter()
dmap = DynamicMap(counter, streams=[next_stream])
# Add stream subscriber mocking plot
next_stream.add_subscriber(lambda **kwargs: dmap[()])
dmap.periodic(0.01, 100)
self.assertEqual(counter.count, 100)
def test_periodic_param_fn_blocking(self):
def callback(x): return Curve([1,2,3])
xval = Stream.define('x',x=0)()
dmap = DynamicMap(callback, streams=[xval])
# Add stream subscriber mocking plot
xval.add_subscriber(lambda **kwargs: dmap[()])
dmap.periodic(0.01, 100, param_fn=lambda i: {'x':i})
self.assertEqual(xval.x, 100)
def test_periodic_param_fn_non_blocking(self):
def callback(x): return Curve([1,2,3])
xval = Stream.define('x',x=0)()
dmap = DynamicMap(callback, streams=[xval])
# Add stream subscriber mocking plot
xval.add_subscriber(lambda **kwargs: dmap[()])
dmap.periodic(0.0001, 1000, param_fn=lambda i: {'x':i}, block=False)
self.assertNotEqual(xval.x, 1000)
for i in range(1000):
time.sleep(0.01)
if dmap.periodic.instance.completed:
break
dmap.periodic.stop()
self.assertEqual(xval.x, 1000)
def test_periodic_param_fn_blocking_period(self):
def callback(x):
return Curve([1,2,3])
xval = Stream.define('x',x=0)()
dmap = DynamicMap(callback, streams=[xval])
# Add stream subscriber mocking plot
xval.add_subscriber(lambda **kwargs: dmap[()])
start = time.time()
dmap.periodic(0.5, 10, param_fn=lambda i: {'x':i}, block=True)
end = time.time()
self.assertEqual((end - start) > 5, True)
def test_periodic_param_fn_blocking_timeout(self):
def callback(x):
return Curve([1,2,3])
xval = Stream.define('x',x=0)()
dmap = DynamicMap(callback, streams=[xval])
# Add stream subscriber mocking plot
xval.add_subscriber(lambda **kwargs: dmap[()])
start = time.time()
dmap.periodic(0.5, 100, param_fn=lambda i: {'x':i}, timeout=3)
end = time.time()
self.assertEqual((end - start) < 5, True)
class DynamicCollate(ComparisonTestCase):
def test_dynamic_collate_layout(self):
def callback():
return Image(np.array([[0, 1], [2, 3]])) + Text(0, 0, 'Test')
dmap = DynamicMap(callback, kdims=[])
layout = dmap.collate()
self.assertEqual(list(layout.keys()), [('Image', 'I'), ('Text', 'I')])
self.assertEqual(layout.Image.I[()], Image(np.array([[0, 1], [2, 3]])))
def test_dynamic_collate_layout_raise_no_remapping_error(self):
def callback(x, y):
return Image(np.array([[0, 1], [2, 3]])) + Text(0, 0, 'Test')
stream = PointerXY()
cb_callable = Callable(callback)
dmap = DynamicMap(cb_callable, kdims=[], streams=[stream])
with self.assertRaisesRegexp(ValueError, 'The following streams are set to be automatically linked'):
dmap.collate()
def test_dynamic_collate_layout_raise_ambiguous_remapping_error(self):
def callback(x, y):
return Image(np.array([[0, 1], [2, 3]])) + Image(np.array([[0, 1], [2, 3]]))
stream = PointerXY()
cb_callable = Callable(callback, stream_mapping={'Image': [stream]})
dmap = DynamicMap(cb_callable, kdims=[], streams=[stream])
with self.assertRaisesRegexp(ValueError, 'The stream_mapping supplied on the Callable is ambiguous'):
dmap.collate()
def test_dynamic_collate_layout_with_integer_stream_mapping(self):
def callback(x, y):
return Image(np.array([[0, 1], [2, 3]])) + Text(0, 0, 'Test')
stream = PointerXY()
cb_callable = Callable(callback, stream_mapping={0: [stream]})
dmap = DynamicMap(cb_callable, kdims=[], streams=[stream])
layout = dmap.collate()
self.assertEqual(list(layout.keys()), [('Image', 'I'), ('Text', 'I')])
self.assertIs(stream.source, layout.Image.I)
def test_dynamic_collate_layout_with_spec_stream_mapping(self):
def callback(x, y):
return Image(np.array([[0, 1], [2, 3]])) + Text(0, 0, 'Test')
stream = PointerXY()
cb_callable = Callable(callback, stream_mapping={'Image': [stream]})
dmap = DynamicMap(cb_callable, kdims=[], streams=[stream])
layout = dmap.collate()
self.assertEqual(list(layout.keys()), [('Image', 'I'), ('Text', 'I')])
self.assertIs(stream.source, layout.Image.I)
def test_dynamic_collate_ndlayout(self):
def callback():
return NdLayout({i: Image(np.array([[i, 1], [2, 3]])) for i in range(1, 3)})
dmap = DynamicMap(callback, kdims=[])
layout = dmap.collate()
self.assertEqual(list(layout.keys()), [1, 2])
self.assertEqual(layout[1][()], Image(np.array([[1, 1], [2, 3]])))
def test_dynamic_collate_ndlayout_with_integer_stream_mapping(self):
def callback(x, y):
return NdLayout({i: Image(np.array([[i, 1], [2, 3]])) for i in range(1, 3)})
stream = PointerXY()
cb_callable = Callable(callback, stream_mapping={0: [stream]})
dmap = DynamicMap(cb_callable, kdims=[], streams=[stream])
layout = dmap.collate()
self.assertEqual(list(layout.keys()), [1, 2])
self.assertIs(stream.source, layout[1])
def test_dynamic_collate_ndlayout_with_key_stream_mapping(self):
def callback(x, y):
return NdLayout({i: Image(np.array([[i, 1], [2, 3]])) for i in range(1, 3)})
stream = PointerXY()
cb_callable = Callable(callback, stream_mapping={(1,): [stream]})
dmap = DynamicMap(cb_callable, kdims=[], streams=[stream])
layout = dmap.collate()
self.assertEqual(list(layout.keys()), [1, 2])
self.assertIs(stream.source, layout[1])
def test_dynamic_collate_grid(self):
def callback():
return GridSpace({(i, j): Image(np.array([[i, j], [2, 3]]))
for i in range(1, 3) for j in range(1, 3)})
dmap = DynamicMap(callback, kdims=[])
grid = dmap.collate()
self.assertEqual(list(grid.keys()), [(i, j) for i in range(1, 3)
for j in range(1, 3)])
self.assertEqual(grid[(0, 1)][()], Image(np.array([[1, 1], [2, 3]])))
def test_dynamic_collate_grid_with_integer_stream_mapping(self):
def callback():
return GridSpace({(i, j): Image(np.array([[i, j], [2, 3]]))
for i in range(1, 3) for j in range(1, 3)})
stream = PointerXY()
cb_callable = Callable(callback, stream_mapping={1: [stream]})
dmap = DynamicMap(cb_callable, kdims=[])
grid = dmap.collate()
self.assertEqual(list(grid.keys()), [(i, j) for i in range(1, 3)
for j in range(1, 3)])
self.assertEqual(stream.source, grid[(1, 2)])
def test_dynamic_collate_grid_with_key_stream_mapping(self):
def callback():
return GridSpace({(i, j): Image(np.array([[i, j], [2, 3]]))
for i in range(1, 3) for j in range(1, 3)})
stream = PointerXY()
cb_callable = Callable(callback, stream_mapping={(1, 2): [stream]})
dmap = DynamicMap(cb_callable, kdims=[])
grid = dmap.collate()
self.assertEqual(list(grid.keys()), [(i, j) for i in range(1, 3)
for j in range(1, 3)])
self.assertEqual(stream.source, grid[(1, 2)])
def test_dynamic_collate_layout_with_changing_label(self):
def callback(i):
return Layout([Curve([], label=str(j)) for j in range(i, i+2)])
dmap = DynamicMap(callback, kdims=['i']).redim.range(i=(0, 10))
layout = dmap.collate()
dmap1, dmap2 = layout.values()
el1, el2 = dmap1[2], dmap2[2]
self.assertEqual(el1.label, '2')
self.assertEqual(el2.label, '3')
def test_dynamic_collate_ndlayout_with_changing_keys(self):
def callback(i):
return NdLayout({j: Curve([], label=str(j)) for j in range(i, i+2)})
dmap = DynamicMap(callback, kdims=['i']).redim.range(i=(0, 10))
layout = dmap.collate()
dmap1, dmap2 = layout.values()
el1, el2 = dmap1[2], dmap2[2]
self.assertEqual(el1.label, '2')
self.assertEqual(el2.label, '3')
def test_dynamic_collate_gridspace_with_changing_keys(self):
def callback(i):
return GridSpace({j: Curve([], label=str(j)) for j in range(i, i+2)}, 'X')
dmap = DynamicMap(callback, kdims=['i']).redim.range(i=(0, 10))
layout = dmap.collate()
dmap1, dmap2 = layout.values()
el1, el2 = dmap1[2], dmap2[2]
self.assertEqual(el1.label, '2')
self.assertEqual(el2.label, '3')
def test_dynamic_collate_gridspace_with_changing_items_raises(self):
def callback(i):
return GridSpace({j: Curve([], label=str(j)) for j in range(i)}, 'X')
dmap = DynamicMap(callback, kdims=['i']).redim.range(i=(2, 10))
layout = dmap.collate()
dmap1, dmap2 = layout.values()
err = 'Collated DynamicMaps must return GridSpace with consistent number of items.'
with self.assertRaisesRegexp(ValueError, err):
dmap1[4]
def test_dynamic_collate_gridspace_with_changing_item_types_raises(self):
def callback(i):
eltype = Image if i%2 else Curve
return GridSpace({j: eltype([], label=str(j)) for j in range(i, i+2)}, 'X')
dmap = DynamicMap(callback, kdims=['i']).redim.range(i=(2, 10))
layout = dmap.collate()
dmap1, dmap2 = layout.values()
err = ('The objects in a GridSpace returned by a DynamicMap must '
'consistently return the same number of items of the same type.')
with self.assertRaisesRegexp(ValueError, err):
dmap1[3]
| 41.067797 | 109 | 0.61347 |
ca6d740338325034763345e4a4af85efb3957365 | 3,682 | py | Python | user/views.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
] | null | null | null | user/views.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
] | null | null | null | user/views.py | amid-africa/photoorder | 407cf58b3dbd3e2144a8533f489889295f946776 | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.contrib.auth import login, get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.views import PasswordChangeView, PasswordResetView
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_decode
from django.views import View
from django.views.generic import TemplateView
from django.views.generic.edit import UpdateView
from .forms import (CustomUserCreationForm, CustomPasswordResetForm,
CustomUserChangeForm)
User = get_user_model()
@method_decorator(login_required, name='dispatch')
class UserView(TemplateView):
template_name = "user/user_profile.html"
def get_context_data(self, *args, **kwargs):
# The current user is already in the context
context = super(UserView, self).get_context_data(*args, **kwargs)
context['data'] = 'somedata'
return context
@method_decorator(login_required, name='dispatch')
class UserUpdate(UpdateView):
model = User
template_name = 'user/user_update_form.html'
form_class = CustomUserChangeForm
success_url = reverse_lazy('profile')
def get_object(self):
return self.request.user
class SignUp(View):
"""Create a new user"""
success_url = reverse_lazy('profile')
template_name = 'registration/signup.html'
def __init__(self, **kwargs):
if 'success_url' in kwargs:
self.success_url = kwargs['success_url']
def get(self, request):
form = CustomUserCreationForm()
return render(request, self.template_name, {'form': form})
def post(self, request):
form = CustomUserCreationForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect(self.success_url)
return render(request, self.template_name, {'form': form})
class ConfirmEmail(View):
"""Confirm the users email address"""
success_url = reverse_lazy('profile')
def __init__(self, **kwargs):
if 'success_url' in kwargs:
self.success_url = kwargs['success_url']
def get(self, request, uidb64, token):
try:
# Get the user
uid = urlsafe_base64_decode(uidb64).decode()
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and default_token_generator.check_token(user, token):
# Check the token is valid
messages.success(request, 'Your email address has been confirmed successfully.')
if not user.is_confirmed:
user.is_confirmed = True
user.save()
return redirect(self.success_url)
# If error, display it.
messages.error(request, 'Your email could not be confirmed. Please generate a new code to try again.')
return render(request, 'user/confirm_email_failed.html', {'user': user})
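# Illustrative wiring sketch: one way these views might be hooked up in the
# project's urls.py; the route paths below are assumptions, not taken from
# this repository.
#
#   from django.urls import path
#   from user import views
#
#   urlpatterns = [
#       path('signup/', views.SignUp.as_view(), name='signup'),
#       path('confirm/<uidb64>/<token>/', views.ConfirmEmail.as_view(),
#            name='confirm_email'),
#       path('profile/', views.UserView.as_view(), name='profile'),
#   ]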
class CustomPasswordResetView(PasswordResetView):
"""Extend builtin PasswordResetView to use our custom form and template"""
form_class = CustomPasswordResetForm
template_name = 'user/password_reset_form.html'
class CustomPasswordChangeView(PasswordChangeView):
"""Extend builtin PasswordChangeView to use our template"""
template_name = 'user/password_change_form.html'
| 35.403846 | 110 | 0.704508 |
218cadce2ea2fa2891c3b58c8966e623e2fdd5fb | 6,265 | py | Python | sw/example/demo_freeRTOS/FreeRTOS-main/.github/scripts/core_checker.py | matejpolj/neorv32 | 369415dac1111cb9b1faba732476d759dc7405c1 | [
"BSD-3-Clause"
] | 1 | 2022-03-26T09:58:05.000Z | 2022-03-26T09:58:05.000Z | .github/scripts/core_checker.py | kingtiger999/FreeRTOS | fc9396f576860663369828fbe3f11d84ea4b30b5 | [
"MIT"
] | null | null | null | .github/scripts/core_checker.py | kingtiger999/FreeRTOS | fc9396f576860663369828fbe3f11d84ea4b30b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# python >= 3.4
import os
from common.header_checker import HeaderChecker
#--------------------------------------------------------------------------------------------------
# CONFIG
#--------------------------------------------------------------------------------------------------
FREERTOS_IGNORED_EXTENSIONS = [
'.1',
'.ASM',
'.C',
'.DSW',
'.G_C',
'.H',
'.Hbp',
'.IDE',
'.LIB',
'.Opt',
'.PC',
'.PRM',
'.TXT',
'.URL',
'.UVL',
'.Uv2',
'.a',
'.ac',
'.am',
'.atsln',
'.atstart',
'.atsuo',
'.bash',
'.bat',
'.bbl',
'.bit',
'.board',
'.bsb',
'.bsdl',
'.bts',
'.ccxml',
'.cdkproj',
'.cdkws',
'.cfg',
'.cgp',
'.checksum',
'.cmake',
'.cmd',
'.config',
'.cpp',
'.cproj',
'.cproject',
'.crun',
'.css',
'.csv',
'.custom_argvars',
'.cxx',
'.cydwr',
'.cyprj',
'.cysch',
'.dat',
'.datas',
'.db',
'.dbgdt',
'.dep',
'.dni',
'.dnx',
'.doc',
'.dox',
'.doxygen',
'.ds',
'.dsk',
'.dtd',
'.dts',
'.elf',
'.emProject',
'.env_conf',
'.ewd',
'.ewp',
'.ewt',
'.eww',
'.exe',
'.filters',
'.flash',
'.fmt',
'.ftl',
'.gdb',
'.gif',
'.gise',
'.gld',
'.gpdsc',
'.gui',
'.h_from_toolchain',
'.hdf',
'.hdp',
'.hex',
'.hist',
'.history',
'.hsf',
'.htm',
'.html',
'.hwc',
'.hwl',
'.hwp',
'.hws',
'.hzp',
'.hzs',
'.i',
'.icf',
'.ide',
'.idx',
'.in',
'.inc',
'.include',
'.index',
'.inf',
'.ini',
'.init',
'.ipcf',
'.ise',
'.jlink',
'.json',
'.la',
'.launch',
'.lcf',
'.ld',
'.lds',
'.lib',
'.lk1',
'.lkr',
'.lm',
'.lo',
'.lock',
'.lsl',
'.lst',
'.m4',
'.mac',
'.make',
'.map',
'.mbt',
'.mcp',
'.mcpar',
'.mcs',
'.mcw',
'.md',
'.mdm',
'.mem',
'.mhs',
'.mk',
'.mk1',
'.mmi',
'.mrt',
'.mss',
'.mtpj',
'.nav',
'.ntrc_log',
'.opa',
'.opb',
'.opc',
'.opl',
'.opt',
'.opv',
'.out',
'.pack',
'.par',
'.patch',
'.pbd',
'.pdsc',
'.pe',
'.pem',
'.pgs',
'.pl',
'.plg',
'.png',
'.prc',
'.pref',
'.prefs',
'.prj',
'.project',
'.properties',
'.ps1',
'.ptf',
'.py',
'.r79',
'.rapp',
'.rc',
'.reggroups',
'.reglist',
'.resc',
'.resources',
'.rom',
'.rprj',
'.s79',
'.s82',
'.s90',
'.sc',
'.scf',
'.scfg',
'.script',
'.sct',
'.scvd',
'.session',
'.sfr',
'.sh',
'.shtml',
'.sig',
'.sln',
'.spec',
'.sprj',
'.stf',
'.stg',
'.suo',
'.sup',
'.svg',
'.tags',
'.tcl',
'.tdt',
'.template',
'.tgt',
'.tps',
'.tra',
'.tree',
'.tws',
'.txt',
'.ucf',
'.url',
'.user',
'.ut',
'.uvmpw',
'.uvopt',
'.uvoptx',
'.uvproj',
'.uvprojx',
'.vcproj',
'.vcxproj',
'.version',
'.webserver',
'.wpj',
'.wsdt',
'.wsp',
'.wspos',
'.wsx',
'.x',
'.xbcd',
'.xcl',
'.xise',
'.xml',
'.xmp',
'.xmsgs',
'.xsl',
'.yml',
'.zip'
]
FREERTOS_IGNORED_PATTERNS = [
r'.*\.git.*',
    r'.*mbedtls_config\.h.*',
r'.*CMSIS.*',
r'.*/Nordic_Code/*',
r'.*/makefile',
r'.*/Makefile',
r'.*/printf-stdarg\.c.*',
r'.*/startup.*',
r'.*/trcConfig\.h.*',
r'.*/trcConfig\.c.*',
r'.*/trcSnapshotConfig\.h.*',
r'.*/MicroZed_hw_platform.*'
]
FREERTOS_IGNORED_FILES = [
'.cproject',
'.project',
'fyi-another-way-to-ignore-file.txt',
'mbedtls_config.h',
'requirements.txt',
'run-cbmc-proofs.py',
'.editorconfig',
'lcovrc',
'htif.c', 'htif.h',
'ethernetif.c',
'platform.c',
'platform.h',
'platform_config.h',
'FreeRTOS_asm_vectors.S',
'interrupt_vector.s',
'gdbinit'
]
FREERTOS_HEADER = [
'/*\n',
' * FreeRTOS V202112.00\n',
' * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n',
' *\n',
' * Permission is hereby granted, free of charge, to any person obtaining a copy of\n',
' * this software and associated documentation files (the "Software"), to deal in\n',
' * the Software without restriction, including without limitation the rights to\n',
' * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n',
' * the Software, and to permit persons to whom the Software is furnished to do so,\n',
' * subject to the following conditions:\n',
' *\n',
' * The above copyright notice and this permission notice shall be included in all\n',
' * copies or substantial portions of the Software.\n',
' *\n',
' * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n',
' * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n',
' * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n',
' * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n',
' * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n',
' * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n',
' *\n',
' * https://www.FreeRTOS.org\n',
' * https://github.com/FreeRTOS\n',
' *\n',
' */\n',
]
def main():
parser = HeaderChecker.configArgParser()
args = parser.parse_args()
# Configure the checks then run
checker = HeaderChecker(FREERTOS_HEADER)
checker.ignoreExtension(*FREERTOS_IGNORED_EXTENSIONS)
checker.ignorePattern(*FREERTOS_IGNORED_PATTERNS)
checker.ignoreFile(*FREERTOS_IGNORED_FILES)
checker.ignoreFile(os.path.split(__file__)[-1])
rc = checker.processArgs(args)
if rc:
checker.showHelp(__file__)
return rc
if __name__ == '__main__':
exit(main())
| 18.480826 | 99 | 0.44581 |
f7709afeb38fbca74ac9f6d9054ead7bc7db0771 | 2,833 | py | Python | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_materializations.py | hebo-yang/dagster | 553ad7fbb209e690907a477ca18e03ae519199f9 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_materializations.py | hebo-yang/dagster | 553ad7fbb209e690907a477ca18e03ae519199f9 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_materializations.py | hebo-yang/dagster | 553ad7fbb209e690907a477ca18e03ae519199f9 | [
"Apache-2.0"
] | null | null | null | from dagster_graphql.test.utils import infer_pipeline_selector
from dagster_graphql_tests.graphql.setup import LONG_INT
from .graphql_context_test_suite import ExecutingGraphQLContextTestMatrix
from .utils import sync_execute_get_events
class TestMaterializations(ExecutingGraphQLContextTestMatrix):
def test_materializations(self, graphql_context, snapshot):
selector = infer_pipeline_selector(graphql_context, "materialization_pipeline")
logs = sync_execute_get_events(
context=graphql_context,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
}
},
)
materializations = [log for log in logs if log["__typename"] == "StepMaterializationEvent"]
assert len(materializations) == 1
mat = materializations[0]["materialization"]
assert mat["label"] == "all_types"
text_entry = mat["metadataEntries"][0]
assert text_entry["__typename"] == "EventTextMetadataEntry"
assert text_entry["text"]
text_entry = mat["metadataEntries"][1]
assert text_entry["__typename"] == "EventUrlMetadataEntry"
assert text_entry["url"]
text_entry = mat["metadataEntries"][2]
assert text_entry["__typename"] == "EventPathMetadataEntry"
assert text_entry["path"]
text_entry = mat["metadataEntries"][3]
assert text_entry["__typename"] == "EventJsonMetadataEntry"
assert text_entry["jsonString"]
text_entry = mat["metadataEntries"][4]
assert text_entry["__typename"] == "EventPythonArtifactMetadataEntry"
assert text_entry["module"]
assert text_entry["name"]
text_entry = mat["metadataEntries"][5]
assert text_entry["__typename"] == "EventPythonArtifactMetadataEntry"
assert text_entry["module"]
assert text_entry["name"]
text_entry = mat["metadataEntries"][6]
assert text_entry["__typename"] == "EventFloatMetadataEntry"
assert text_entry["floatValue"]
text_entry = mat["metadataEntries"][7]
assert text_entry["__typename"] == "EventIntMetadataEntry"
assert text_entry["intRepr"]
text_entry = mat["metadataEntries"][8]
assert text_entry["__typename"] == "EventFloatMetadataEntry"
assert text_entry["floatValue"] is None # float NaN test
text_entry = mat["metadataEntries"][9]
assert text_entry["__typename"] == "EventIntMetadataEntry"
assert int(text_entry["intRepr"]) == LONG_INT
non_engine_event_logs = [
message for message in logs if message["__typename"] != "EngineEvent"
]
snapshot.assert_match([message["__typename"] for message in non_engine_event_logs])
| 38.808219 | 99 | 0.662549 |
1ebb98bd228d153f2dd1df489c2b56dee69971cc | 6,452 | py | Python | models/multiTask/MLF_DNN.py | iyuge2/MMSA | e17a012b07609662a4bdfac8cb8e1f92a9297b41 | [
"Apache-2.0"
] | 3 | 2020-07-06T06:32:16.000Z | 2021-12-13T12:59:34.000Z | models/multiTask/MLF_DNN.py | iyuge2/MMSA | e17a012b07609662a4bdfac8cb8e1f92a9297b41 | [
"Apache-2.0"
] | null | null | null | models/multiTask/MLF_DNN.py | iyuge2/MMSA | e17a012b07609662a4bdfac8cb8e1f92a9297b41 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['MLF_DNN']
class SubNet(nn.Module):
'''
The subnetwork that is used in TFN for video and audio in the pre-fusion stage
'''
def __init__(self, in_size, hidden_size, dropout):
'''
Args:
in_size: input dimension
hidden_size: hidden layer dimension
dropout: dropout probability
Output:
(return value in forward) a tensor of shape (batch_size, hidden_size)
'''
super(SubNet, self).__init__()
self.norm = nn.BatchNorm1d(in_size)
self.drop = nn.Dropout(p=dropout)
self.linear_1 = nn.Linear(in_size, hidden_size)
self.linear_2 = nn.Linear(hidden_size, hidden_size)
self.linear_3 = nn.Linear(hidden_size, hidden_size)
def forward(self, x):
'''
Args:
x: tensor of shape (batch_size, in_size)
'''
normed = self.norm(x)
dropped = self.drop(normed)
y_1 = F.relu(self.linear_1(dropped))
y_2 = F.relu(self.linear_2(y_1))
y_3 = F.relu(self.linear_3(y_2))
return y_3
class TextSubNet(nn.Module):
'''
The LSTM-based subnetwork that is used in TFN for text
'''
def __init__(self, in_size, hidden_size, out_size, num_layers=1, dropout=0.2, bidirectional=False):
'''
Args:
in_size: input dimension
hidden_size: hidden layer dimension
num_layers: specify the number of layers of LSTMs.
dropout: dropout probability
bidirectional: specify usage of bidirectional LSTM
Output:
(return value in forward) a tensor of shape (batch_size, out_size)
'''
super(TextSubNet, self).__init__()
if num_layers == 1:
dropout = 0.0
self.rnn = nn.LSTM(in_size, hidden_size, num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True)
self.dropout = nn.Dropout(dropout)
self.linear_1 = nn.Linear(hidden_size, out_size)
def forward(self, x):
'''
Args:
x: tensor of shape (batch_size, sequence_len, in_size)
'''
_, final_states = self.rnn(x)
h = self.dropout(final_states[0].squeeze())
y_1 = self.linear_1(h)
return y_1
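# A minimal shape sketch for the two pre-fusion subnetworks above (dimensions
# are illustrative, not the configured feature sizes):
#
#   audio_net = SubNet(in_size=5, hidden_size=32, dropout=0.2)
#   audio_h = audio_net(torch.randn(8, 5))        # -> (8, 32)
#
#   text_net = TextSubNet(in_size=300, hidden_size=64, out_size=128)
#   text_h = text_net(torch.randn(8, 50, 300))    # -> (8, 128)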
class MLF_DNN(nn.Module):
"""
late fusion using DNN
"""
def __init__(self, args):
super(MLF_DNN, self).__init__()
self.text_in, self.audio_in, self.video_in = args.feature_dims
self.text_hidden, self.audio_hidden, self.video_hidden = args.hidden_dims
self.text_out = args.text_out
self.audio_prob, self.video_prob, self.text_prob = args.dropouts
self.post_text_prob, self.post_audio_prob, self.post_video_prob, self.post_fusion_prob = args.post_dropouts
self.post_fusion_dim = args.post_fusion_dim
self.post_text_dim = args.post_text_dim
self.post_audio_dim = args.post_audio_dim
self.post_video_dim = args.post_video_dim
# define the pre-fusion subnetworks
self.audio_subnet = SubNet(self.audio_in, self.audio_hidden, self.audio_prob)
self.video_subnet = SubNet(self.video_in, self.video_hidden, self.video_prob)
self.text_subnet = TextSubNet(self.text_in, self.text_hidden, self.text_out, dropout=self.text_prob)
# define the post_fusion layers
self.post_fusion_dropout = nn.Dropout(p=self.post_fusion_prob)
self.post_fusion_layer_1 = nn.Linear(self.text_out + self.video_hidden + self.audio_hidden, self.post_fusion_dim)
self.post_fusion_layer_2 = nn.Linear(self.post_fusion_dim, self.post_fusion_dim)
self.post_fusion_layer_3 = nn.Linear(self.post_fusion_dim, 1)
# define the classify layer for text
self.post_text_dropout = nn.Dropout(p=self.post_text_prob)
self.post_text_layer_1 = nn.Linear(self.text_out, self.post_text_dim)
self.post_text_layer_2 = nn.Linear(self.post_text_dim, self.post_text_dim)
self.post_text_layer_3 = nn.Linear(self.post_text_dim, 1)
# define the classify layer for audio
self.post_audio_dropout = nn.Dropout(p=self.post_audio_prob)
self.post_audio_layer_1 = nn.Linear(self.audio_hidden, self.post_audio_dim)
self.post_audio_layer_2 = nn.Linear(self.post_audio_dim, self.post_audio_dim)
self.post_audio_layer_3 = nn.Linear(self.post_audio_dim, 1)
# define the classify layer for video
self.post_video_dropout = nn.Dropout(p=self.post_video_prob)
self.post_video_layer_1 = nn.Linear(self.video_hidden, self.post_video_dim)
self.post_video_layer_2 = nn.Linear(self.post_video_dim, self.post_video_dim)
self.post_video_layer_3 = nn.Linear(self.post_video_dim, 1)
def forward(self, text_x, audio_x, video_x):
audio_x = audio_x.squeeze(1)
video_x = video_x.squeeze(1)
audio_h = self.audio_subnet(audio_x)
video_h = self.video_subnet(video_x)
text_h = self.text_subnet(text_x)
# text
x_t = self.post_text_dropout(text_h)
x_t = F.relu(self.post_text_layer_1(x_t), inplace=True)
x_t = F.relu(self.post_text_layer_2(x_t), inplace=True)
output_text = self.post_text_layer_3(x_t)
# audio
x_a = self.post_audio_dropout(audio_h)
x_a = F.relu(self.post_audio_layer_1(x_a), inplace=True)
x_a = F.relu(self.post_audio_layer_2(x_a), inplace=True)
output_audio = self.post_audio_layer_3(x_a)
# video
x_v = self.post_video_dropout(video_h)
x_v = F.relu(self.post_video_layer_1(x_v), inplace=True)
x_v = F.relu(self.post_video_layer_2(x_v), inplace=True)
output_video = self.post_video_layer_3(x_v)
# fusion
fusion_h = torch.cat([audio_h, video_h, text_h], dim=-1)
x = self.post_fusion_dropout(fusion_h)
x = F.relu(self.post_fusion_layer_1(x), inplace=True)
x = F.relu(self.post_fusion_layer_2(x), inplace=True)
output_fusion = self.post_fusion_layer_3(x)
res = {
'Feature_t': text_h,
'Feature_a': audio_h,
'Feature_v': video_h,
'Feature_f': fusion_h,
'M': output_fusion,
'T': output_text,
'A': output_audio,
'V': output_video
}
return res | 39.10303 | 135 | 0.651736 |
00cf94e2bcc7090ce730ddcddd16f1f3db6ef6fc | 387 | py | Python | manage.py | justinbuiMITRE/geoq | 46efe290ec99ff29f4fc11db94b849ede5eba757 | [
"MIT"
] | 3 | 2015-06-08T14:12:33.000Z | 2018-07-12T16:25:19.000Z | manage.py | justinbuiMITRE/geoq | 46efe290ec99ff29f4fc11db94b849ede5eba757 | [
"MIT"
] | 8 | 2015-06-29T17:42:41.000Z | 2020-09-24T02:29:41.000Z | manage.py | justinbuiMITRE/geoq | 46efe290ec99ff29f4fc11db94b849ede5eba757 | [
"MIT"
] | 13 | 2015-06-02T19:19:34.000Z | 2020-07-16T18:10:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geoq.settings")
manage_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(manage_dir, 'geoq'))
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 25.8 | 68 | 0.728682 |
643f903cf3dd0e7b273e9b8b4d0498992dd0683f | 793 | py | Python | Collect/MOD17/NPP_yearly.py | trngb/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 11 | 2018-09-25T08:58:26.000Z | 2021-02-13T18:58:05.000Z | Collect/MOD17/NPP_yearly.py | trngbich/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 1 | 2020-07-03T02:36:41.000Z | 2021-03-21T22:20:47.000Z | Collect/MOD17/NPP_yearly.py | trngbich/watools | 57b9074d59d856886675aa26014bfd6673d5da76 | [
"Apache-2.0"
] | 16 | 2018-09-28T22:55:11.000Z | 2021-02-22T13:03:56.000Z | import sys
from watools.Collect.MOD17.DataAccessNPP import DownloadData
def main(Dir, Startdate, Enddate, latlim, lonlim, cores=False, Waitbar = 1, hdf_library = None, remove_hdf = 1):
"""
This function downloads MOD17 yearly NPP data for the specified time
interval, and spatial extent.
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax]
lonlim -- [xmin, xmax]
cores -- amount of cores used
Waitbar -- 1 (Default) will print a waitbar
"""
print('\nDownload yearly MODIS NPP data for period %s till %s' %(Startdate, Enddate))
DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores, hdf_library, remove_hdf)
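# Example call with illustrative values (directory, dates and bounding box are
# not taken from the original documentation):
#   main(r'C:/WA_data/', '2010-01-01', '2012-12-31', latlim=[-10, 30], lonlim=[-20, 55], cores=4)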
if __name__ == '__main__':
    # Unpack command-line arguments positionally; passing the whole sys.argv list
    # would not match main()'s (Dir, Startdate, Enddate, latlim, lonlim, ...) signature.
    main(*sys.argv[1:]) | 34.478261 | 112 | 0.679697 |
338b39b414a31871d17c7accef36623d941adaa1 | 15,942 | py | Python | MCP2221_1.py | FlippieCoetser/Python.MCP23018 | 06f8dba8fd9dbbdbd65a0383b8945dc44a28c9e3 | [
"MIT"
] | 1 | 2020-01-05T15:58:42.000Z | 2020-01-05T15:58:42.000Z | MCP2221_1.py | FlippieCoetser/Python.MCP23018 | 06f8dba8fd9dbbdbd65a0383b8945dc44a28c9e3 | [
"MIT"
] | null | null | null | MCP2221_1.py | FlippieCoetser/Python.MCP23018 | 06f8dba8fd9dbbdbd65a0383b8945dc44a28c9e3 | [
"MIT"
] | null | null | null | import hid
"""
Library to interface with USB HID-Class devices on Windows, Linux, FreeBSD, and macOS.
"""
from enum import Enum, auto
import time
from ENUMS import DIRECTION, STATE
class I2C_SPEED(Enum):
STANDARD = 100000
FAST = 400000
HIGH = 3400000
class PARAMETER(Enum):
I2C_SPEED = 'I2C_SPEED'
class MODE(Enum):
GPIO = 0b000
SSPND = 0b001
CLK = 0b001
USBCFG = 0b001
LED_I2C = 0b001
ADC = 0b010
LED_RX = 0b010
LED_TX = 0b011
DAC = 0b011
INTRP = 0b100
class DRIVE_MODE(Enum):
PUSH_PULL = 'PUSH_PULL'
OPEN_DRAIN = 'OPEN_DRAIN'
MCP2221_MAX_I2C_DATA_LEN = 60
MCP2221_RETRY_MAX = 50
RESP_I2C_START_TOUT = 0x12
RESP_I2C_WRADDRL_TOUT = 0x23
RESP_ADDR_NACK = 0x25
RESP_I2C_WRADDRL_NACK = 0x25
RESP_I2C_PARTIALDATA = 0x41
RESP_I2C_WRDATA_TOUT = 0x44
RESP_I2C_WRITINGNOSTOP = 0x45
RESP_I2C_STOP_TOUT = 0x62
MASK_ADDR_NACK = 0x40
RESP_READ_ERR = 0x7F
RESP_READ_COMPL = 0x55
RESP_READ_PARTIAL = 0x54
class MCP2221:
GP_GPIO = 0b000
GP_DEDICATED = 0b001
GP_ALT0 = 0b010
GP_ALT1 = 0b011
GP_ALT2 = 0b100
HID_ReportSize = 64
"""
The USB HID Protocol uses 64-byte reports
"""
    INTERNAL_CLOCK = 12000000
"""
MCP2221 Internal Clock Frequency 12 MHz
"""
GET_STATUS = bytearray([0x10, 0x00])
"""
GET DEVICE STATUS
Response on Page 24
"""
SET_PARAMETER = bytearray([0x10, 0x00])
"""
SET DEVICE STATUS
Page 24
"""
GET_I2C_DATA = bytearray([0x40])
"""
GET I2C DATA
Page 44
"""
SET_GPIO_OUTPUT_VALUES = bytearray([0x50])
"""
SET GPIO OUTPUT VALUES
Page 45
"""
GET_GPIO_VALUES = bytearray([0x51])
"""
GET GPIO VALUES
Page 48
"""
SET_SRAM_SETTINGS = bytearray([0x60])
"""
SET SRAM SETTINGS
Page 49
"""
GET_SRAM_SETTINGS = bytearray([0x61])
"""
GET SRAM SETTINGS
Page 53
"""
RESET_CHIP = bytearray([0x70,
0xAB,
0xCD,
0xEF])
"""
RESET CHIP command is used to force a Reset of the MCP2221 device.
This command is useful when the Flash memory is updated with new data.
The MCP2221 would need to be re-enumerated to see the new data
This is the only command that does not expect a response.
Page 57:
Index | Functiona Descriptions | Value |
0 | Reset Chip | 0x70 |
1 | | 0xAB |
2 | | 0xCD |
3 | | 0xEF |
4 - 63| | 0x00 |
"""
WRITE_I2C_DATA = bytearray([0x90])
"""
I2C WRITE DATA
Page 39
"""
READ_I2C_DATA = bytearray([0x91])
"""
READ I2C DATA
Page 40
"""
WRITE_I2C_REPEATED_START = bytearray([0x92])
"""
I2C WRITE DATA REPEATED START
Page 40
"""
READ_I2C_REPEATED_START = bytearray([0x93])
"""
I2C WRITE DATA REPEATED START
Page 43
"""
WRITE_I2C_NO_STOP = bytearray([0x94])
"""
I2C WRITE DATA REPEATED START
Page 41
"""
READ_FLASH_DATA = bytearray([0xB0])
"""
READ FLASH DATA
Page 26
"""
WRITE_FLASH_DATA = bytearray([0xB1])
"""
WRITE FLASH DATA
Page 32
"""
SEND_FLASH_ACCESS_PASSWORD = bytearray([0xB2])
"""
SEND FLASH ACCESS PASSWORD
Page 32
"""
EMPTY_REPORT = bytearray([0x00]) * 63
CANCEL_TRANSFER = bytearray([0x10])
"""
CANCEL I2C TRANSFER
Page 23
"""
    SET_I2C_SPEED = lambda speed: bytearray([0x00,
                                             0x20,
                                             MCP2221.INTERNAL_CLOCK // speed.value - 3])
"""
SET I2C SPEED
Typical Values
Standard mode = 100 Kbps
Fast mode = 400 Kbps
High Speed mode = 3.4 Mbps
Page 23
"""
VID = 0x04D8
"""
Vendor ID factory default = 0x04D8
Using Microchip provided utility the Vendor ID can be customized
"""
PID = 0x00DD
"""
Product ID factory default = 0x00DD
Using Microchip provided utility the Product ID can be customized
"""
SET_PIN_MODE = bytearray([0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0xff]) + bytearray([0x00]) * 56
SET_VALUE = lambda pin: 2 + 4 * pin
GET_VALUE = lambda pin: 2 + 2 * pin
SET_DIRECTION = lambda pin: 4 * (pin + 1)
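    # Worked offsets for GP2 (illustrative): SET_VALUE(2) == 10, GET_VALUE(2) == 6,
    # SET_DIRECTION(2) == 12 -- byte positions used inside the 64-byte GPIO reports.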
def __init__(self):
self._hid = hid.device()
self._hid.open(MCP2221.VID, MCP2221.PID)
self._reset()
time.sleep(0.5) #Required to stop Freezing
def _hid_xfer(self, report, response=True):
"""
used to transfer a 64 byte data report
first byte is the report ID and set to 0 for MCP2221
"""
self._hid.write(b'\0' + report + b'\0'*(MCP2221.HID_ReportSize-len(report)))
if response:
return self._hid.read(MCP2221.HID_ReportSize)
def _reset(self):
"""
Reset device by sending Reset Chip command.
After reset a new connection is openend.
"""
print('MCP2221: RESET')
self._hid_xfer(MCP2221.RESET_CHIP, response=False)
start = time.monotonic()
while time.monotonic() - start < 5:
try:
self._hid.open(MCP2221.VID, MCP2221.PID)
except OSError:
time.sleep(0.1)
continue
return
raise OSError("open failed")
    def reset(self):
        """Placeholder for a public reset; the actual chip reset is performed by _reset() during __init__."""
        pass
def wait(self, duration):
"""
Use external library to Wait for a specified number of seconds
"""
time.sleep(duration)
def i2c_configure(self, speed):
if speed in I2C_SPEED:
print('MCP2221 CONFIG - Type: set Parameter: ', PARAMETER.I2C_SPEED, 'Value: ', speed)
self._hid_xfer(MCP2221.SET_PARAMETER + MCP2221.SET_I2C_SPEED(speed))
else:
raise Exception("Invalid configuration")
def _i2c_status(self):
device_status = self._hid_xfer(MCP2221.GET_STATUS)
successfull = 0
if device_status[1] != successfull:
raise RuntimeError("Get Device Status Failed")
return device_status
def _i2c_state(self):
I2C_state = self._i2c_status()[8]
return I2C_state
def _i2c_cancel(self):
device_status = self._hid_xfer(MCP2221.SET_PARAMETER + MCP2221.CANCEL_TRANSFER)
successfull = 0
if device_status[1] != successfull:
raise RuntimeError("Get Device Status Failed")
        request_status = device_status[2]
        if request_status == 0x10:
            # 0x10 indicates the cancel request was accepted and is still being processed
            print('MCP2221 I2C Transfer Cancel Request In-Progress')
            time.sleep(0.001)
        elif request_status != successfull:
            raise RuntimeError("I2C Transfer Cancel Request Failed")
def _i2c_write(self, address, buffer, start=0, end=None):
"""
cmd: 0x90 (I2C Write Data Command)
address: I2C Slave Address
buffer: bytes([register_address, data])
"""
# Check Status of Interernal I2C Module
I2C_state = self._i2c_state()
successfull = 0x00
if I2C_state != successfull:
self._i2c_cancel()
# Calculate end position
end = end if end else len(buffer)
length = end - start
retries = 0
while (end - start) > 0:
chunk = min(end - start, MCP2221_MAX_I2C_DATA_LEN)
# write out current chunk
response = self._hid_xfer(MCP2221.WRITE_I2C_DATA + bytes([length & 0xFF,
(length >> 8) & 0xFF,
address << 1]) +
buffer[start:(start+chunk)])
# check for success
transfer_status = response[1]
I2C_enginge_status = response[2]
successfull = 0x00
if transfer_status != successfull:
# TODO: Check the meaning of each and raise correct error
if I2C_enginge_status in (RESP_I2C_START_TOUT,
RESP_I2C_WRADDRL_TOUT,
RESP_I2C_WRADDRL_NACK,
RESP_I2C_WRDATA_TOUT,
RESP_I2C_STOP_TOUT):
raise RuntimeError("I2C write failure")
# Retry if failed
retries += 1
if retries >= MCP2221_RETRY_MAX:
raise RuntimeError("I2C write error, max retries reached.")
time.sleep(0.001)
continue
# Wait until I2C Ready for next write
while self._i2c_state() == RESP_I2C_PARTIALDATA:
time.sleep(0.001)
# Change start position of next chunk and reset retries
start += chunk
retries = 0
# check status in another loop
for _ in range(MCP2221_RETRY_MAX):
status = self._i2c_status()
if status[20] & MASK_ADDR_NACK:
raise RuntimeError("I2C slave address was NACK'd")
I2C_state = status[8]
if I2C_state == 0:
break
            # This helper always issues the plain I2C Write command (0x90), so the
            # "writing without stop" state (only acceptable after a 0x94 Write-No-Stop
            # command) is not treated as success here.
if I2C_state in (RESP_I2C_START_TOUT,
RESP_I2C_WRADDRL_TOUT,
RESP_I2C_WRADDRL_NACK,
RESP_I2C_WRDATA_TOUT,
RESP_I2C_STOP_TOUT):
raise RuntimeError("Unrecoverable I2C state failure")
time.sleep(0.001)
else:
raise RuntimeError("I2C write error: max retries reached.")
def _i2c_read(self, address, buffer, start=0, end=None):
if self._i2c_state() not in (RESP_I2C_WRITINGNOSTOP, 0):
self._i2c_cancel()
end = end if end else len(buffer)
length = end - start
# tell it we want to read
resp = self._hid_xfer(MCP2221.READ_I2C_DATA + bytes([length & 0xFF,
(length >> 8) & 0xFF,
(address << 1) | 0x01]))
# check for success
if resp[1] != 0x00:
raise RuntimeError("Unrecoverable I2C read failure")
# and now the read part
while (end - start) > 0:
for retry in range(MCP2221_RETRY_MAX):
# the actual read
resp = self._hid_xfer(MCP2221.GET_I2C_DATA)
# check for success
if resp[1] == RESP_I2C_PARTIALDATA:
time.sleep(0.001)
continue
if resp[1] != 0x00:
raise RuntimeError("Unrecoverable I2C read failure")
if resp[2] == RESP_ADDR_NACK:
raise RuntimeError("I2C NACK")
if resp[3] == 0x00 and resp[2] == 0x00:
break
if resp[3] == RESP_READ_ERR:
time.sleep(0.001)
continue
if resp[2] in (RESP_READ_COMPL, RESP_READ_PARTIAL):
break
# move data into buffer
chunk = min(end - start, 60)
for i, k in enumerate(range(start, start+chunk)):
buffer[k] = resp[4 + i]
start += chunk
def i2c_writeto(self, address, buffer, *, start=0, end=None):
"""
address: I2C Slave Address
buffer: bytes([register_address, data])
"""
self._i2c_write(address, buffer, start, end)
def i2c_readfrom_into(self, address, buffer, *, start=0, end=None):
"""
address: I2C Slave Address
buffer: bytesarray(x) used to load response into
"""
self._i2c_read(address, buffer, start, end)
def gp_get_mode(self, pin):
report = self._hid_xfer(MCP2221.GET_SRAM_SETTINGS)
return report[22+pin] & 0x07
def load_current_pin_mode(self, report):
current = self._hid_xfer(MCP2221.GET_SRAM_SETTINGS)
report[8] = current[22] # GP0
report[9] = current[23] # GP1
report[10] = current[24] # GP2
report[11] = current[25] # GP3
return report
def gp_set_mode(self, pin, mode):
report = MCP2221.SET_SRAM_SETTINGS + MCP2221.SET_PIN_MODE
report = self.load_current_pin_mode(report)
# set pin mode
parameter = 8 + pin
# TODO: rather use << 0x07
mask = lambda value: value & 0x07
report[parameter] = mask(mode.value)
# and make it so
self._hid_xfer(report)
def update_report(self, report, offset, value):
report[offset] = 0x01
report[offset + 1] = value
return report
def gpio_set_direction(self, pin, direction):
# report is adjusted based on pin number and expected pin state
report = MCP2221.SET_GPIO_OUTPUT_VALUES + MCP2221.EMPTY_REPORT
offset = MCP2221.SET_DIRECTION(pin)
report = self.update_report(report, offset, direction.value)
self._hid_xfer(report)
def gpio_get_pin(self, pin):
report = self._hid_xfer(MCP2221.GET_GPIO_VALUES)
# Based on pin number read specific position in report
offset = MCP2221.GET_VALUE(pin)
if report[offset] == 0xEE:
raise RuntimeError("Pin is not set for GPIO operation.")
else:
state = STATE(report[offset])
return state
def gpio_set_pin(self, pin, state):
# report is adjusted based on pin number and expected pin state
report = MCP2221.SET_GPIO_OUTPUT_VALUES + MCP2221.EMPTY_REPORT
offset = MCP2221.SET_VALUE(pin)
report = self.update_report(report, offset, state.value)
self._hid_xfer(report)
mcp2221 = MCP2221()
class I2C():
def __init__(self, *, frequency=I2C_SPEED.FAST):
self._mcp2221 = mcp2221
self._mcp2221.i2c_configure(frequency)
def writeto(self, address, buffer):
"""
address: I2C Slave Address
buffer: bytes([register_address, data])
"""
self._mcp2221.i2c_writeto(address, buffer, start=0, end=None)
def readfrom_into(self, address, buffer):
"""
address: I2C Slave Address
buffer: bytesarray(x) used to load response into
"""
self._mcp2221.i2c_readfrom_into(address, buffer, start=0, end=None)
class Pin:
def __init__(self, pin_id=None):
self.id = pin_id
self._direction = None
def init(self, direction):
mcp2221.gp_set_mode(self.id, MODE.GPIO)
mcp2221.gpio_set_direction(self.id, direction)
self._direction = direction
@property
def value(self):
state = mcp2221.gpio_get_pin(self.id)
return state
@value.setter
def value(self, state):
mcp2221.gpio_set_pin(self.id, state)
@property
def direction(self):
        return self._direction
@direction.setter
def direction(self, direction):
        self._direction = direction
if direction is DIRECTION.OUT:
self.init(direction=DIRECTION.OUT)
self.value = STATE.LOW
elif direction is DIRECTION.IN:
self.init(direction=DIRECTION.IN)
else:
raise AttributeError("Not a Direction")
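# A minimal usage sketch for the wrappers above (requires a connected MCP2221;
# the 0x20 slave address and the payload bytes are illustrative only):
#
#   i2c = I2C(frequency=I2C_SPEED.FAST)
#   i2c.writeto(0x20, bytes([0x00, 0xFF]))   # write 0xFF to register 0x00
#   response = bytearray(1)
#   i2c.readfrom_into(0x20, response)        # read one byte back
#
#   pin = Pin(0)                             # use GP0 as a GPIO output
#   pin.direction = DIRECTION.OUT
#   pin.value = STATE.HIGH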
| 31.505929 | 98 | 0.55382 |
475091e28eb5d8b9d5512496f82de88b79ed99f4 | 2,408 | py | Python | dvc/repo/fetch.py | gmrukwa/dvc | 81962d8aa73ff4318089aff95f57169effa5d77a | [
"Apache-2.0"
] | null | null | null | dvc/repo/fetch.py | gmrukwa/dvc | 81962d8aa73ff4318089aff95f57169effa5d77a | [
"Apache-2.0"
] | null | null | null | dvc/repo/fetch.py | gmrukwa/dvc | 81962d8aa73ff4318089aff95f57169effa5d77a | [
"Apache-2.0"
] | null | null | null | import logging
from dvc.config import NoRemoteError
from dvc.exceptions import DownloadError
from . import locked
logger = logging.getLogger(__name__)
@locked
def fetch(
self,
targets=None,
jobs=None,
remote=None,
all_branches=False,
show_checksums=False,
with_deps=False,
all_tags=False,
recursive=False,
all_commits=False,
run_cache=False,
revs=None,
):
"""Download data items from a cloud and imported repositories
Returns:
int: number of successfully downloaded files
Raises:
DownloadError: thrown when there are failed downloads, either
during `cloud.pull` or trying to fetch imported files
config.NoRemoteError: thrown when downloading only local files and no
remote is configured
"""
if isinstance(targets, str):
targets = [targets]
used = self.used_cache(
targets,
all_branches=all_branches,
all_tags=all_tags,
all_commits=all_commits,
with_deps=with_deps,
force=True,
remote=remote,
jobs=jobs,
recursive=recursive,
revs=revs,
)
downloaded = 0
failed = 0
try:
if run_cache:
self.stage_cache.pull(remote)
downloaded += self.cloud.pull(
used, jobs, remote=remote, show_checksums=show_checksums,
)
except NoRemoteError:
if not used.external and used["local"]:
raise
except DownloadError as exc:
failed += exc.amount
for (repo_url, repo_rev), files in used.external.items():
d, f = _fetch_external(self, repo_url, repo_rev, files, jobs)
downloaded += d
failed += f
if failed:
raise DownloadError(failed)
return downloaded
def _fetch_external(self, repo_url, repo_rev, files, jobs):
from dvc.external_repo import external_repo
from dvc.scm.base import CloneError
failed, downloaded = 0, 0
cache = self.cache.local
try:
with external_repo(
repo_url, repo_rev, cache_dir=cache.cache_dir
) as repo:
d, f, _ = repo.fetch_external(files, jobs=jobs)
downloaded += d
failed += f
except CloneError:
failed += 1
logger.exception(
"failed to fetch data for '{}'".format(", ".join(files))
)
return downloaded, failed
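# A minimal usage sketch (hypothetical; assumes an existing DVC repository in
# the current directory and an illustrative target):
#
#   from dvc.repo import Repo
#
#   repo = Repo(".")
#   downloaded = repo.fetch(targets=["data/data.xml.dvc"], jobs=4)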
| 23.841584 | 77 | 0.622093 |
d7534a3cf75ffc5fd7d7ce3706da2a333435ecea | 1,548 | py | Python | ec2/elb/listelement.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | [
"MIT"
] | null | null | null | ec2/elb/listelement.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | [
"MIT"
] | null | null | null | ec2/elb/listelement.py | Rome84/AWS | 32f5b6a83e37e62b0e33658bdab03ea493c905cb | [
"MIT"
] | null | null | null | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class ListElement(list):
"""
A :py:class:`list` subclass that has some additional methods
for interacting with Amazon's XML API.
"""
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'member':
self.append(value)
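# Illustrative only: boto's SAX handler calls endElement() once per parsed
# element, so successive <member> values accumulate in the list:
#
#   items = ListElement()
#   items.endElement('member', 'i-0123', None)
#   items.endElement('member', 'i-4567', None)
#   # items == ['i-0123', 'i-4567']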
| 41.837838 | 75 | 0.718992 |