Dataset schema (value ranges as given in the original header; ⌀ marks nullable columns):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string (length) | 40 | 40 |
| size | int64 | 3 | 1.03M |
| ext | string (10 classes) | | |
| lang | string (1 class) | | |
| max_stars_repo_path | string (length) | 3 | 972 |
| max_stars_repo_name | string (length) | 6 | 130 |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 |
| max_stars_repo_licenses | sequence (length) | 1 | 10 |
| max_stars_count | int64 ⌀ | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | string (length) ⌀ | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | string (length) ⌀ | 24 | 24 |
| max_issues_repo_path | string (length) | 3 | 972 |
| max_issues_repo_name | string (length) | 6 | 130 |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 |
| max_issues_repo_licenses | sequence (length) | 1 | 10 |
| max_issues_count | int64 ⌀ | 1 | 116k |
| max_issues_repo_issues_event_min_datetime | string (length) ⌀ | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | string (length) ⌀ | 24 | 24 |
| max_forks_repo_path | string (length) | 3 | 972 |
| max_forks_repo_name | string (length) | 6 | 130 |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 |
| max_forks_repo_licenses | sequence (length) | 1 | 10 |
| max_forks_count | int64 ⌀ | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | string (length) ⌀ | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | string (length) ⌀ | 24 | 24 |
| content | string (length) | 3 | 1.03M |
| avg_line_length | float64 | 1.13 | 941k |
| max_line_length | int64 | 2 | 941k |
| alphanum_fraction | float64 | 0 | 1 |
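Each record below repeats the fields above (hexsha, size, repo/path/license metadata, star/issue/fork counts with their event dates), then the raw `content` of the source file, then its line statistics. A minimal sketch of how one of these records could be inspected, assuming the rows are available as a local parquet file readable with pandas (the file name below is a placeholder, not part of this dump):

```python
import pandas as pd

# Placeholder path; point this at wherever the dump is stored locally.
df = pd.read_parquet("code_dump.parquet")

record = df.iloc[0]
print(record["max_stars_repo_name"])   # repository that holds the file
print(record["max_stars_count"])       # star count; may be null (shown as NaN)
print(record["content"][:200])         # first 200 characters of the stored source file
print(record["alphanum_fraction"])     # per-file statistic kept alongside the text
```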
hexsha: fa6303a3822f7cb9e59443140aabf094fe45f975 | size: 1,704 | ext: py | lang: Python
repo: waynegm/OpendTect-External-Attributes | path: Python_3/Filtering/ex_gradient3.py | head_hexsha: 7caaf69ee71cc3375fcd9e4d656c669ca01dc469 | licenses: ["MIT"]
stars: 22 (2015-09-24T10:41:05.000Z to 2022-01-02T21:43:48.000Z) | issues: 5 (2016-11-02T10:30:56.000Z to 2018-07-02T12:54:28.000Z) | forks: 8 (2017-03-18T17:17:44.000Z to 2021-04-02T09:43:35.000Z)

# Compute Gradients
#
# Prewitt External Attribute
#
import sys,os
import numpy as np
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
import extlib as xl
xa.params = {
'Inputs': ['Input'],
'Output' : ['Average Gradient', 'In-line gradient', 'Cross-line gradient', 'Z gradient'],
'ZSampMargin' : {'Value': [-1,1], 'Hidden': True},
'StepOut' : {'Value': [1,1], 'Hidden': True},
'Select': {'Name': 'Operator', 'Values': ['Scharr','Kroon'], 'Selection': 0},
'Parallel' : False,
'Help' : 'http://waynegm.github.io/OpendTect-Plugin-Docs/Attributes/ExternalAttrib/'
}
def doCompute():
inlpos = xa.SI['nrinl']//2
crlpos = xa.SI['nrcrl']//2
filt = xa.params['Select']['Selection']
while True:
xa.doInput()
indata = xa.Input['Input']
if filt==0:
xa.Output['In-line gradient'] = xl.scharr3_dx(indata, full=False)
xa.Output['Cross-line gradient'] = xl.scharr3_dy(indata, full=False)
xa.Output['Z gradient'] = xl.scharr3_dz(indata, full=False)
else:
xa.Output['In-line gradient'] = xl.kroon3(indata, axis=0)[inlpos,crlpos,:]
xa.Output['Cross-line gradient'] = xl.kroon3(indata, axis=1)[inlpos,crlpos,:]
xa.Output['Z gradient'] = xl.kroon3(indata, axis=-1)[inlpos,crlpos,:]
xa.Output['Average Gradient'] = ( xa.Output['In-line gradient']
+ xa.Output['Cross-line gradient']
+ xa.Output['Z gradient'] )/3
xa.doOutput()
xa.doCompute = doCompute
xa.run(sys.argv[1:])
avg_line_length: 36.255319 | max_line_length: 93 | alphanum_fraction: 0.551643

hexsha: 709d808baa543aedaf6ab8731e147601eada74ac | size: 9,636 | ext: py | lang: Python
repo: Demonofthe2ndkind/new-yorker | path: newyorker/cartoons.py | head_hexsha: 9ed280adefbf2fd86d0aa90016e9a641e5138ea2 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

"""
Created on Jun 10, 2018
Updated on Aug 26, 2020
@author: Demon of the Second Kind
"""
import ast
import bs4
import errno
import itertools
import json
import os
import requests
import sys
import uuid
from optparse import OptionParser
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.parse import urljoin
from urllib.parse import urlparse
class NYCartoonRetriever:
"Script/API to retrieve the New Yorker magazine cartoons by keyword(s) and save them in the specified directory."
__all__ = []
__version__ = 1.1
__date__ = "2018-06-10"
__updated__ = "2020-08-28"
def __init__(self, outdir = None, verbose = False, headers = None):
self.__chunk_size = 100000
self.__image_page_base_url = "https://condenaststore.com"
self.__search_page_base_url = self.__image_page_base_url + "/collections/new+yorker+cartoons/"
self.__base_dir_name = outdir if outdir \
else os.path.join(os.path.curdir, "cartoons")
self.__verbose = verbose
self.__headers = headers if headers \
else {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36"}
def main(self):
"""When run as a script, process the command line arguments, then download the cartoons."""
# Handle command line arguments and options
program_name = os.path.basename(sys.argv[0])
program_version = "v{0:.2f}".format(NYCartoonRetriever.__version__)
program_build_date = "{0}".format(NYCartoonRetriever.__updated__)
program_version_string = "%prog {0} ({1})".format(program_version, program_build_date)
program_usage = """Usage: %prog search-keyword(s) [options]"""
program_desc = """Search for and download cartoons from The New Yorker magazine cartoon gallery."""
# Do not format the next statement
program_footer = """Examples:
newyorker email --verbose
newyorker design desk -v -o ~/example/cartoons
newyorker santa -v --headers \\
'{"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36"}'\n
Copyright 2018-2020, Demon of the Second Kind. Apache License 2.0\n"""
argv = sys.argv[1:]
try:
# Setup option parser
OptionParser.format_epilog = lambda self, formatter: self.epilog
parser = OptionParser(program_usage, version = program_version_string, epilog = program_footer, description = program_desc)
parser.add_option("-o", "--outdir", dest = "outdir", help = "set the base download directory [default: %default]", metavar = "DIR")
parser.add_option("-v", "--verbose", dest = "verbose", action = "store_true", help = "show progress trace [default: %default]")
parser.add_option("--headers", dest = "headers", help = "set HTTP request headers \n [default: %default]")
# Set defaults
parser.set_defaults(outdir = self.__base_dir_name, verbose = self.__verbose, headers = self.__headers)
# Process args and options
(opts, args) = parser.parse_args(argv)
if (len(args) == 0):
parser.print_help()
return 1
self.__base_dir_name = opts.outdir
self.__verbose = opts.verbose
self.__headers = ast.literal_eval(opts.headers) if type(opts.headers) is str else opts.headers
if (self.__verbose):
print("Keywords = {0}".format(", ".join(args)))
print("Base directory = {0}".format(self.__base_dir_name))
print("Verbose = {0}".format(self.__verbose))
print("HTTP headers = {0}".format(self.__headers))
keywords = list(args)
keywords.sort()
# Search for and download the cartoon files
images_downloaded = self.retrieve(keywords)
# Print some stats
if (self.__verbose):
print("Images downloaded: {0}".format(images_downloaded))
return 0
except Exception as e:
indent = len(program_name) * " "
print(f"{program_name}: {e!r}", file=sys.stderr)
print(f"{indent} for help use --help", file=sys.stderr)
return 2
def retrieve(self, keywords):
"""Retrieve the cartoons and save them as files."""
if not keywords:
raise ValueError("At least one search keyword has to be specified")
# Make sure directory exists; if not, create one
dir_name = os.path.join(self.__base_dir_name, "-".join(keywords))
self._ensure_dir_exists(dir_name)
# Search for and download the cartoon files
image_count = 0
for page_no in itertools.count(1):
search_page_url = self._get_search_page_url(keywords, page_no)
(image_page_urls, next_page_exists) = self._get_search_results(search_page_url)
# Extract the image URLs from the search results and download the images
for image_page_url in image_page_urls:
image_url = self._get_image_url(image_page_url)
image_filename = self._download_image(dir_name, image_url)
image_count += 1
if (self.__verbose):
print("Saving image", image_url)
print(" as", image_filename)
if (not next_page_exists):
break
return image_count
def _ensure_dir_exists(self, dir_name):
"""Check if the directory exists. If not, create one"""
if (not os.path.isdir(dir_name)):
try:
os.makedirs(dir_name)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _get_search_page_url(self, keywords, page_no):
"""Construct the URL of the search page based on the keywords and the page number"""
fragment = quote(" ".join(keywords))
url = urljoin(self.__search_page_base_url, fragment)
if (page_no > 1):
next_page_query = "?" + urlencode({"page" : str(page_no)})
url += next_page_query
return url
def _get_search_results(self, search_url):
"""Get the search result page and extract the image URLs and Next Page indicator from it"""
response = requests.get(search_url, headers = self.__headers)
response.raise_for_status()
search_page_parser = bs4.BeautifulSoup(response.text, "html.parser")
image_tags = search_page_parser.find_all("img", attrs={"class" : "flowImage lazy"})
# Yeah, I should have raised an exception if <img> tag's parent is not <a href="...">
# but it is fun to use map/filter/lambda, so I am just filtering out <img> tags I cannot handle :-)
image_page_urls = map(lambda image_tag : urljoin(self.__image_page_base_url, image_tag.parent["href"]),
filter(lambda image_tag : image_tag.parent["href"] is not None, image_tags))
next_page_exists = (search_page_parser.find("a", attrs={"class" : "buttonbottomnext"}) is not None)
return (image_page_urls, next_page_exists)
def _get_image_url(self, image_page_url):
"""Get the image page and extract the image URL from it"""
response = requests.get(image_page_url, headers = self.__headers)
response.raise_for_status()
image_page_parser = bs4.BeautifulSoup(response.text, "html.parser")
image_tag = image_page_parser.find("img", attrs={"id" : "mainimage"})
if (image_tag is None):
raise ValueError("Unexpected image page: missing link to the image")
return image_tag["src"]
def _download_image(self, dir_name, image_url):
"""Download the specified image and save it on disk"""
response = requests.get(image_url, headers = self.__headers)
response.raise_for_status()
image_name = urlparse(image_url).path.split("/")[-1]
if not image_name:
raise ValueError("Unexpected image URL: no file name provided")
full_filename = self._get_safe_filename(dir_name, image_name)
with open(full_filename, "wb") as image_file:
for chunk in response.iter_content(self.__chunk_size):
image_file.write(chunk)
return full_filename
def _get_safe_filename(self, dir_name, proposed_name):
"""Check if the file already exists; if so, add a unique suffix to it"""
full_filename = os.path.join(dir_name, proposed_name)
if (os.path.isfile(full_filename)):
filename, extension = os.path.splitext(full_filename)
if (not extension):
extension = ".jpg"
full_filename = filename + "_" + uuid.uuid4().hex + extension
return full_filename
def main():
"""An entry point for console script"""
retriever = NYCartoonRetriever()
sys.exit(retriever.main())
if __name__ == "__main__":
main()
avg_line_length: 39.818182 | max_line_length: 153 | alphanum_fraction: 0.602325

hexsha: ac2d3006eeb638473b8c0a1fbba3a42d11bfc3bd | size: 3,602 | ext: py | lang: Python
repo: JamesTheZ/BladeDISC | path: tao/setup.py | head_hexsha: e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34 | licenses: ["Apache-2.0"]
stars: 1 (2022-03-31T12:35:53.000Z to 2022-03-31T12:35:53.000Z) | issues: null | forks: null

#!/usr/bin/env python
# Copyright 2021 The BladeDISC Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import io
import os
import re
import pkg_resources
import subprocess
from setuptools import find_packages, setup
import tensorflow as tf
# Package meta-data.
NAME_PREFIX = 'blade-disc'
DESCRIPTION = 'TensorFlow wrapper for Blade DISC compiler.'
URL = 'https://https://github.com/alibaba/BladeDISC'
EMAIL = 'tashuang.zk@alibaba-inc.com'
AUTHOR = 'Zhu Kai'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = None
def detect_host_tf_version():
for pkg in pkg_resources.working_set:
if 'tensorflow-io' in pkg.project_name:
continue
if 'tensorflow' in pkg.project_name:
return f"{pkg.project_name}=={pkg.version}"
# Format the Blade-DISC package name prefix on GPU or CPU:
# blade-disc-gpu-tf24 for tensorflow==2.4
# blade-disc-tf115 for tensorflow-gpu==1.15
def format_package_name():
tf_short = "-tf{}".format("".join(tf.__version__.split(".")))
gpu = "-gpu" if tf.test.is_gpu_available() else ""
return "{}{}{}".format(NAME_PREFIX, gpu, tf_short)
# What packages are required for this module to be executed?
REQUIRED = [
detect_host_tf_version()
]
SETUP_REQUIRED = [
'pytest-runner'
]
TEST_REQUIRED = [
'pytest',
]
# What packages are optional?
EXTRAS = {
}
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, 'python', 'blade_disc_tf', '_version.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
# Where the magic happens:
setup(
name=format_package_name(),
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages('python', exclude=('tests',)),
install_requires=REQUIRED,
setup_requires=SETUP_REQUIRED,
extras_require=EXTRAS,
license='Apache License 2.0',
package_dir={'blade_disc_tf': 'python/blade_disc_tf'},
package_data={'blade_disc_tf': ['libtao_ops.so', 'tao_compiler_main']},
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
zip_safe=False,
)
avg_line_length: 31.051724 | max_line_length: 81 | alphanum_fraction: 0.700999

hexsha: f219cf676f80550d75b7d9c285270b2f72c1d3aa | size: 837 | ext: py | lang: Python
repo: Iskander1b/python-astm | path: astm/compat.py | head_hexsha: 606a77407e59c2f2dd12d65a7b2d2e3c141ad8d9 | licenses: ["BSD-3-Clause"]
stars: 38 (2015-06-11T06:43:02.000Z to 2022-03-01T18:21:07.000Z) | issues: 7 (2016-08-12T10:16:34.000Z to 2021-02-11T15:43:34.000Z) | forks: 39 (2015-08-10T16:49:33.000Z to 2021-12-26T10:27:07.000Z)

# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import sys
version = '.'.join(map(str, sys.version_info[:2]))
if version >= '3.0':
basestring = (str, bytes)
unicode = str
bytes = bytes
long = int
def buffer(obj, start=None, stop=None):
memoryview(obj)
if start == None:
start = 0
if stop == None:
stop = len(obj)
x = obj[start:stop]
return x
else:
basestring = basestring
unicode = unicode
b = bytes = str
long = long
buffer = buffer
b = lambda s: isinstance(s, unicode) and s.encode('latin1') or s
u = lambda s: isinstance(s, bytes) and s.decode('utf-8') or s
avg_line_length: 23.25 | max_line_length: 67 | alphanum_fraction: 0.603345

hexsha: 4e1552fd85b3ec742b7ffec5f08009449b1dc113 | size: 4,102 | ext: py | lang: Python
repo: Lanselott/mmdetection | path: configs/fcos/ddb_v4_plus_r50_caffe_fpn_gn_1x_2gpu_nvidia.py | head_hexsha: 03ce0a87f4d52f4adf4f78fd39ad30b2da394376 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

# model settings
model = dict(
type='FCOS',
pretrained='open-mmlab://resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='DDBV4PHead',
num_classes=81,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0),
loss_sorted_bbox=dict(type='GIoULoss', loss_weight=1.0),
loss_dist_scores=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/coco/data/2017/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'images/train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'images/val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'images/val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.005,
momentum=0.9,
weight_decay=0.0001,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/fcos_r50_caffe_fpn_gn_1x_4gpu'
load_from = None
resume_from = None
workflow = [('train', 1)]
avg_line_length: 29.724638 | max_line_length: 75 | alphanum_fraction: 0.61921

hexsha: 678d50621a4270b10c4733f00147f0beb450649d | size: 675 | ext: py | lang: Python
repo: TylerYep/ml-toolkit | path: ai_toolkit/metrics/accuracy.py | head_hexsha: 095bdce961133acc720f90b6d1bbb0a7becbfc9f | licenses: ["MIT"]
stars: 7 (2020-04-07T06:10:29.000Z to 2021-10-30T06:31:46.000Z) | issues: 2 (2021-09-08T01:48:49.000Z to 2022-01-16T19:31:35.000Z) | forks: 2 (2021-04-27T22:45:14.000Z to 2021-07-16T06:40:30.000Z)

from types import SimpleNamespace
import torch
from .metric import Metric
class Accuracy(Metric):
def __repr__(self) -> str:
return f"{self.name}: {100. * self.value:.2f}%"
@staticmethod
def calculate_accuracy(output: torch.Tensor, target: torch.Tensor) -> float:
return (output.argmax(1) == target).float().sum().item()
def update(self, val_dict: SimpleNamespace) -> float:
output, target = val_dict.output, val_dict.target
accuracy = self.calculate_accuracy(output, target)
self.epoch_avg += accuracy
self.running_avg += accuracy
self.num_examples += val_dict.batch_size
return accuracy
avg_line_length: 29.347826 | max_line_length: 80 | alphanum_fraction: 0.671111

hexsha: cac32a262c0ead70242f1a38c8719de4b92ce580 | size: 2,311 | ext: py | lang: Python
repo: cboyer2016/CSE4081TeamProject | path: TeamProject/src/BabyStepGiantStep.py | head_hexsha: 357b182d9cb1be62e03600211b75f36b88dbd964 | licenses: ["MIT"]
stars: 1 (2021-09-25T20:42:03.000Z to 2021-09-25T20:42:03.000Z) | issues: null | forks: null

########################################################################################################################################
# Author: Chad Boyer (cboyer2016@my.fit.edu), Connor Roth (croth2016@my.fit.edu), & Alan Guzman (aguzman2015@my.fit.edu)
# Course: CSE 4081
# Team Project: Discrete Logarithm Analysis
# Implemented Algorithm: Baby-Step, Giant-Step
########################################################################################################################################
from math import ceil, sqrt
########################################################################################################################################
# This function calculates the discrete logarithm using the Baby-Step, Giant-Step algorithm. The worst case time complexity of this
# algorithm is O(square root(m)). The algorithm begins on line 25-30. Lines 17 - 22 are part of the precomputation process.
########################################################################################################################################
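# Worked example (illustration only, not part of the original file): to find x with
# 2**x % 11 == 9, m = ceil(sqrt(10)) = 4 and the baby steps are {1: 0, 2: 1, 4: 2, 8: 3};
# c = pow(2, 4 * 9, 11) = 9, and the giant step x = 1 gives (9 * 9) % 11 = 4, which sits
# in the baby-step table at 2, so the answer is 1 * 4 + 2 = 6 (and indeed 2**6 % 11 == 9).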
def babyStepGiantStep(base, value, modulus):
# Calculate the Baby Step
m = int(ceil(sqrt(modulus-1))) # Take the ceiling square root of the Modulus -1
babyStep = {} # Store the baby step values in an array
for x in range(m): # Iterate over modulus - 1 square root
babyStep[pow(base, x, modulus)] = x # Store (base ^ x) % modulus and x
c = pow(base, m * (modulus - 2), modulus) # Calculate inverse log with Fermat's Little Theorem
# Take the Giant Step now
for x in range(m): # Iterate over modulus -1 square root
y = (value * pow(c, x, modulus)) % modulus # Calculate the giant step: (value * (c ^ x) % modulus) % modulus
if y in babyStep: # Check if the giant step is in the baby step list
return x * m + babyStep[y] # If so, multiply m by x and add the baby step
return None # If not found, return nothing
if __name__ == "__main__":
    print babyStepGiantStep(2, 1234567, 389151817)

avg_line_length: 67.970588 | max_line_length: 137 | alphanum_fraction: 0.442666

hexsha: 58c8222ec52dcdbff7ddda04911f6703a2bdedc7 | size: 1,859 | ext: py | lang: Python
repo: ninetailskim/PaddleOCR | path: PPOCRLabel/libs/constants.py | head_hexsha: 5586dbf42511517d00dc541070e0c5e094587c72 | licenses: ["Apache-2.0"]
stars: 20,401 (2020-05-08T10:56:13.000Z to 2022-03-31T23:34:38.000Z) | issues: 4,988 (2020-05-10T08:19:41.000Z to 2022-03-31T17:57:11.000Z) | forks: 4,479 (2020-05-08T11:12:13.000Z to 2022-03-31T11:55:28.000Z)

# Copyright (c) <2015-Present> Tzutalin
# Copyright (C) 2013 MIT, Computer Science and Artificial Intelligence Laboratory. Bryan Russell, Antonio Torralba,
# William T. Freeman. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
SETTING_FILENAME = 'filename'
SETTING_RECENT_FILES = 'recentFiles'
SETTING_WIN_SIZE = 'window/size'
SETTING_WIN_POSE = 'window/position'
SETTING_WIN_GEOMETRY = 'window/geometry'
SETTING_LINE_COLOR = 'line/color'
SETTING_FILL_COLOR = 'fill/color'
SETTING_ADVANCE_MODE = 'advanced'
SETTING_WIN_STATE = 'window/state'
SETTING_SAVE_DIR = 'savedir'
SETTING_PAINT_LABEL = 'paintlabel'
SETTING_LAST_OPEN_DIR = 'lastOpenDir'
SETTING_AUTO_SAVE = 'autosave'
SETTING_SINGLE_CLASS = 'singleclass'
FORMAT_PASCALVOC='PascalVOC'
FORMAT_YOLO='YOLO'
SETTING_DRAW_SQUARE = 'draw/square'
SETTING_LABEL_FILE_FORMAT= 'labelFileFormat'
DEFAULT_ENCODING = 'utf-8'
avg_line_length: 58.09375 | max_line_length: 119 | alphanum_fraction: 0.80043

hexsha: f8d689923722b57ce9eac98eddaaed00bfb0f209 | size: 1,630 | ext: py | lang: Python
repo: MathisLeRoyNivot/stock-price-prediction | path: main.py | head_hexsha: 359dc5a7cd42ced0739fe76a8ae90a4d63f46cf7 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null

import streamlit as st
from datetime import date
import yfinance as yf
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly import graph_objs as go
START_DATE = "2011-01-01"
TODAY_DATE = date.today().strftime("%Y-%m-%d")
st.title("Stock Price Prediction App")
stocks = ("BTC-USD", "AAPL", "FB", "MSFT", "GOOG", "TSLA")
selected_stocks = st.selectbox("Select stock for prediction", stocks)
n_year = st.slider("Years of prediction", 1, 5)
period = n_year * 365
@st.cache
def load_stock_data(ticker):
data = yf.download(ticker, START_DATE, TODAY_DATE)
data.reset_index(inplace=True)
return data
data_load_state = st.text("Load stock data...")
data = load_stock_data(selected_stocks)
data_load_state.text("Stock data loaded!")
st.subheader("Raw stock data")
st.write(data.tail())
def plot_raw_stock_data():
fig = go.Figure()
fig.add_trace(go.Scatter(x=data['Date'], y=data['Open'], name='stock_open'))
fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], name='stock_close'))
fig.layout.update(title_text="Time Series Data", xaxis_rangeslider_visible=True)
st.plotly_chart(fig)
plot_raw_stock_data()
# Forecast
df_train = data[['Date', 'Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.subheader("Forecast stock data")
st.write(forecast.tail())
st.write("Forecast stock data")
fig1 = plot_plotly(m, forecast)
st.plotly_chart(fig1)
st.write("Forecast component")
fig2 = m.plot_components(forecast)
st.write(fig2)
avg_line_length: 24.328358 | max_line_length: 84 | alphanum_fraction: 0.725153

hexsha: 56baf471dc14838ab9f891c0a84085587c4b8e35 | size: 1,692 | ext: py | lang: Python
repo: endepointe/life_generator | path: python_scripts/server.py | head_hexsha: 387e791ec2b1cd7d7ac3db982e70b7c0fffb6df5 | licenses: ["MIT"]
stars: null | issues: null | forks: null

import sys
import http.server
import socketserver
import json
# import lab1
import connect_socket
# written with assistance for python server creation (completely new to it)
# https://wiki.python.org/moin/BaseHttpServer
# https://docs.python.org/3/library/socketserver.html#socketserver-tcpserver-example
HOST = ''
PORT = int(sys.argv[1])
class Base_Handler(socketserver.BaseRequestHandler):
# general request handler
def handle(self):
self.data = bytes(connect_socket.lab1(), encoding="utf-8")
self.request.sendall(self.data)
# https://docs.python.org/3/library/http.server.html?highlight=do_get#http.server.SimpleHTTPRequestHandler.do_HEAD
def do_HEAD(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# https://docs.python.org/3/library/http.server.html?highlight=do_get#http.server.SimpleHTTPRequestHandler.do_GET
def do_GET(self):
''' not ready to handle this yet
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write("<html><head><title>lab1</title></head>")
self.wfile.write("<body>")
self.wfile.write("<h3>")
self.wfile.write(bytes(connect_socket.lab1(), encoding="utf-8"))
self.wfile.write("</h3>")
self.wfile.write("</body><html>")
'''
if __name__ == "__main__":
# https://docs.python.org/3/library/socketserver.html
with socketserver.TCPServer((HOST, PORT), Base_Handler) as httpd:
print("\n\nserving at port", PORT, "\n\n")
# run lab1 to get the resource
print(connect_socket.lab1())
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
        httpd.server_close()

avg_line_length: 31.333333 | max_line_length: 116 | alphanum_fraction: 0.710402

hexsha: e1be081350f3b1edfa77259c82124580e534d776 | size: 675 | ext: py | lang: Python
repo: Deepayan137/spell-check | path: clean.py | head_hexsha: 31026a094fdd2713ae93eda4e7df09cc83e32f75 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-07-28T21:36:39.000Z to 2021-07-28T21:36:39.000Z)

import re
def clean_text(text):
'''Remove unwanted characters and extra spaces from the text'''
text = re.sub(r'\n', ' ', text)
text = re.sub(r'[{}@_*>()\\#%+=\[\]]','', text)
text = re.sub('a0','', text)
text = re.sub('\'92t','\'t', text)
text = re.sub('\'92s','\'s', text)
text = re.sub('\'92m','\'m', text)
text = re.sub('\'92ll','\'ll', text)
text = re.sub('\'91','', text)
text = re.sub('\'92','', text)
text = re.sub('\'93','', text)
text = re.sub('\'94','', text)
text = re.sub('\.','. ', text)
text = re.sub('\!','! ', text)
text = re.sub('\?','? ', text)
text = re.sub(' +',' ', text)
    return text

avg_line_length: 33.75 | max_line_length: 67 | alphanum_fraction: 0.460741

hexsha: 2bdd275ce8229355e21678b997c47d63b5395936 | size: 284 | ext: py | lang: Python
repo: JASTYN/pythonmaster | path: design_patterns/oop/vehicles/car.py | head_hexsha: 46638ab09d28b65ce5431cd0759fe6df272fb85d | licenses: ["Apache-2.0", "MIT"]
stars: 3 (2017-05-02T10:28:13.000Z to 2019-02-06T09:10:11.000Z) | issues: 2 (2017-06-21T20:39:14.000Z to 2020-02-25T10:28:57.000Z) | forks: 2 (2016-07-29T04:35:22.000Z to 2017-01-18T17:05:36.000Z)

from ObjectOriented.Vehicles import Vehicle
class Car(Vehicle):
"""
Car class which is a subclass of *Vehicle*
"""
wheels = 4
no_of_seats = 4
def vehicle_type(self):
# noinspection PyCompatibility
super().vehicle_type()
return "Car"
avg_line_length: 18.933333 | max_line_length: 46 | alphanum_fraction: 0.626761

hexsha: 286862451b07f2620d975a35f03ec578d81d5d8b | size: 4,069 | ext: py | lang: Python
repo: quepop/fetchcode | path: src/fetchcode/vcs/pip/_internal/vcs/bazaar.py | head_hexsha: ac2461bdf7a249d8815987b4d421dbc615c043b9 | licenses: ["Apache-2.0"]
stars: 7 (2019-10-04T07:27:41.000Z to 2021-06-07T04:39:18.000Z) | issues: 64 (2019-10-07T12:40:56.000Z to 2022-02-17T18:44:37.000Z) | forks: 16 (2019-10-04T08:48:12.000Z to 2021-06-11T01:22:56.000Z)

# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
from fetchcode.vcs.pip._vendor.six.moves.urllib import parse as urllib_parse
from fetchcode.vcs.pip._internal.utils.misc import display_path, rmtree
from fetchcode.vcs.pip._internal.utils.subprocess import make_command
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
from fetchcode.vcs.pip._internal.utils.urls import path_to_url
from fetchcode.vcs.pip._internal.vcs.versioncontrol import VersionControl, vcs
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from fetchcode.vcs.pip._internal.utils.misc import HiddenText
from fetchcode.vcs.pip._internal.vcs.versioncontrol import AuthInfo, RevOptions
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
name = 'bzr'
dirname = '.bzr'
repo_name = 'branch'
schemes = (
'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
'bzr+lp',
)
def __init__(self, *args, **kwargs):
super(Bazaar, self).__init__(*args, **kwargs)
# This is only needed for python <2.7.5
# Register lp but do not expose as a scheme to support bzr+lp.
if getattr(urllib_parse, 'uses_fragment', None):
urllib_parse.uses_fragment.extend(['lp'])
@staticmethod
def get_base_rev_args(rev):
return ['-r', rev]
def export(self, location, url):
# type: (str, HiddenText) -> None
"""
Export the Bazaar repository at the url to the destination location
"""
# Remove the location to make sure Bazaar can export it correctly
if os.path.exists(location):
rmtree(location)
url, rev_options = self.get_url_rev_options(url)
self.run_command(
make_command('export', location, url, rev_options.to_args()),
show_stdout=False,
)
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
cmd_args = (
make_command('branch', '-q', rev_options.to_args(), url, dest)
)
self.run_command(cmd_args)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
self.run_command(make_command('switch', url), cwd=dest)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
cmd_args = make_command('pull', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
def get_url_rev_and_auth(cls, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
# hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
url, rev, user_pass = super(Bazaar, cls).get_url_rev_and_auth(url)
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev, user_pass
@classmethod
def get_remote_url(cls, location):
urls = cls.run_command(['info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if cls._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
@classmethod
def get_revision(cls, location):
revision = cls.run_command(
['revno'], show_stdout=False, cwd=location,
)
return revision.splitlines()[-1]
@classmethod
def is_commit_id_equal(cls, dest, name):
"""Always assume the versions don't match"""
return False
vcs.register(Bazaar)
avg_line_length: 33.628099 | max_line_length: 83 | alphanum_fraction: 0.626444

hexsha: c9304b4b1baa5b11b7777617c565659fe9b445b8 | size: 22 | ext: py | lang: Python
repo: On0n0k1/2020ASSEditor | path: Dados/Events/Formato/__init__.py | head_hexsha: fd61595696683b440eb8a030163020100e070ed5 | licenses: ["MIT"]
stars: null | issues: null | forks: null

__all__ = ["Formato"]
avg_line_length: 11 | max_line_length: 21 | alphanum_fraction: 0.636364

hexsha: fd4347511670dd75c5896cb381bfe7f800a0146f | size: 8,808 | ext: py | lang: Python
repo: wwfxuk/shotgunEvents | path: src/examplePlugins/init_entity.py | head_hexsha: 91c11841db6dc3fcf5d5142b93e32dcdcebb2e8d | licenses: ["MIT"]
stars: null | issues: null | forks: null

# Copyright 2018 Autodesk, Inc. All rights reserved.
#
# Use of this software is subject to the terms of the Autodesk license agreement
# provided at the time of installation or download, or which otherwise accompanies
# this software in either electronic or hard copy form.
#
# See docs folder for detailed usage info.
import os
import shotgun_api3
def registerCallbacks(reg):
"""
Register our callbacks.
:param reg: A Registrar instance provided by the event loop handler.
"""
# Grab authentication env vars for this plugin. Install these into the env
# if they don't already exist.
server = os.environ["SG_SERVER"]
script_name = os.environ["SGDAEMON_INITENTITY_NAME"]
script_key = os.environ["SGDAEMON_INITENTITY_KEY"]
# User-defined plugin args, change at will.
args = {
"entity_type": "Asset",
"initial_data": {"description": "Brand new Asset."},
"force": False,
"filters": [],
}
# Grab an sg connection for the validator and bail if it fails.
sg = shotgun_api3.Shotgun(server, script_name=script_name, api_key=script_key)
if not is_valid(sg, reg.logger, args):
reg.logger.warning("Plugin is not valid, will not register callback.")
return
# Register our callback with the Shotgun_%s_Change event and tell the logger
# about it.
reg.registerCallback(
script_name,
script_key,
init_entity,
{"Shotgun_%s_New" % args["entity_type"]: None},
args,
)
reg.logger.debug("Registered callback.")
def is_valid(sg, logger, args):
"""
Validate our args.
:param sg: Shotgun API handle.
:param logger: Logger instance.
:param args: Any additional misc arguments passed through this plugin.
:returns: True if plugin is valid, None if not.
"""
args_to_check = {
"entity_type": {"type": ["str"], "allow_empty": False},
"initial_data": {"type": ["dict"], "allow_empty": False},
"force": {"type": ["bool"], "allow_empty": False},
}
# Make sure we can read the entity_type's schema.
try:
entity_schema = sg.schema_field_read(args["entity_type"])
except Exception as e:
logger.warning(
'Can\'t read Shotgun schema for "entity_type" args\'s value ("%s"): %s'
% (args["entity_type"], e)
)
return
for name, checks in args_to_check.iteritems():
# Grab the arg's value type.
value_type = type(args[name]).__name__
# We assume unicode and str to be equivalent for these checks because
# args come back from Django as unicode but are first set by the
# Registrar as str.
if value_type == "unicode":
value_type = "str"
# Make sure the arg value is the correct Python type.
if checks.get("type") and value_type not in checks["type"]:
logger.warning(
'"%s" arg\'s value is type "%s" but should be type "%s," please fix.'
% (name, value_type, checks["type"])
)
return
# Make sure the arg has a non-empty value if allow_empty is
# False.
if checks.get("allow_empty") is False:
if "bool" in checks.get("type"):
if args[name] not in [True, False]:
logger.warning(
'"%s" arg\'s value is empty but requires a value, please fix.'
% (name,)
)
return
elif not args[name]:
logger.warning(
'"%s" arg\'s value is empty but requires a value, please fix.'
% (name,)
)
return
# Now make sure each field and value referenced in initial_data
# is valid.
if name == "initial_data":
# Map our SG field types to valid python object types
valid_field_types = {
"entity": ["dict"],
"multi_entity": ["list"],
"number": ["int"],
"float": ["float"],
}
for field_name, field_value in args[name].iteritems():
field_value_type = type(field_value).__name__
# We assume unicode and str to be equivalent for these checks because
# args come back from Django as unicode but are first set by the
# Registrar as str.
if field_value_type == "unicode":
field_value_type = "str"
# First, let's make sure the field is valid for our
# entity type.
if field_name not in entity_schema.keys():
logger.warning(
'%s entity field "%s" does not exist in Shotgun, please fix.'
% (args["entity_type"], field_name,)
)
return
# Since we've already verified that the field exists
# for the entity type, just grab the Shotgun field data
# type to do additional validation.
sg_type = entity_schema[field_name]["data_type"]["value"]
# If there isn't a mapping from the SG field type to a
# python object type defined above, assume that the
# field will accept a string value
valid_value_types = valid_field_types.get(sg_type, ["str"])
# make sure the initial value provided will work for this field
if field_value_type not in valid_value_types:
logger.warning(
'Initial value for Shotgun field "%s" is type "%s" but should be of type(s) "%s," please fix.'
% (field_name, field_value_type, valid_value_types,)
)
return
# if we have a list field, make sure the initial value is valid
if sg_type in ["list", "status_list"]:
valid_values = (
entity_schema[field_name]
.get("properties", {})
.get("valid_values", {})
.get("value")
)
if valid_values and field_value not in valid_values:
logger.warning(
'Initial value for Shotgun field "%s" is "%s" but must be one of the following: "%s".'
% (field_name, str(field_value), ", ".join(valid_values),)
)
return
return True
def init_entity(sg, logger, event, args):
"""
Updates an entity with some initial values on creation.
:param sg: Shotgun API handle.
:param logger: Logger instance.
:param event: A Shotgun EventLogEntry entity dictionary.
:param args: Any additional misc arguments passed through this plugin.
"""
# Return if we don't have all the field values we need; we're intentionally
# excluding event["meta"]["new_value"] because None is a valid value.
if not event.get("meta", {}).get("entity_id"):
return
# Make some vars for convenience.
entity_id = event["meta"]["entity_id"]
entity_type = args["entity_type"]
# Re-query the entity so we don't clobber a value that may have
# been populated by a user
fields_to_update = args["initial_data"].keys()
entity = sg.find_one(
entity_type, args["filters"] + [["id", "is", entity_id]], fields_to_update,
)
# Bail if we don't have an entity. This would happen if user-specified
# filters don't match the event entity. This is a "feature," so folks can
# target entities w/certain field values. E.g., the trigger could only init
# values on an Asset entity with its sg_asset_type field value set to
# "character".
if not entity:
return
# Are we supposed to clobber existing values?
force_update = args["force"]
update_data = {}
# Convert anything that's currently unicode to a string.
for key, value in args["initial_data"].iteritems():
key = str(key)
if isinstance(value, unicode):
value = str(value)
# If the field is already populated, don't clobber it unless
# we've been told to.
if entity[key]:
if force_update:
update_data[key] = value
else:
update_data[key] = value
if update_data:
sg.update(entity_type, entity_id, update_data)
# Tell the logger what happened.
logger.info(
"%s with id %s updated with new data: %s"
% (entity_type, entity_id, update_data)
)
| 36.853556 | 118 | 0.568574 |
9a72885203bcfdbf3a9586c55055783968ddb9bd | 12,301 | py | Python | src/mounts_plot.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | [
"Intel",
"Unlicense"
] | 4 | 2021-10-17T11:17:59.000Z | 2022-02-28T16:58:40.000Z | src/mounts_plot.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | [
"Intel",
"Unlicense"
] | 17 | 2021-10-05T21:44:06.000Z | 2022-03-31T16:58:40.000Z | src/mounts_plot.py | moibenko/enstore | 6f2ff5b67ff73872a9e68f2a68b0bdaa70cef9b9 | [
"Intel",
"Unlicense"
] | 8 | 2021-09-02T18:55:49.000Z | 2022-03-09T21:05:28.000Z | #!/usr/bin/env python
###############################################################################
#
# $Id$
#
###############################################################################
# system imports
import pg
import sys
import os
import time
# enstore imports
import enstore_plotter_module
import enstore_constants
WEB_SUB_DIRECTORY = enstore_constants.MOUNT_PLOTS_SUBDIR
class MountsPlot(enstore_plotter_module.EnstorePlotterModule):
def __init__(self,name,isActive=True):
enstore_plotter_module.EnstorePlotterModule.__init__(
self, name, isActive)
self.low_water_mark=2000
self.high_water_mark=5000
self.step=100
self.yoffset=200
self.ystep=500
self.tol=0
self.toh=0
self.mts=[]
self.mtsg={}
self.mtsh={}
self.hist_keys=[]
self.libraries=None
def hist_key(self,n):
if n >= self.high_water_mark:
return 'Over '+str(self.high_water_mark)
elif n >= self.low_water_mark:
return str(self.low_water_mark)+'-'+str(self.high_water_mark-1)
else:
nb = n/self.step*self.step
return str(nb)+'-'+str(nb+self.step-1)
#Write out the file that gnuplot will use to plot the data.
# plot_filename = The file that will be read in by gnuplot containing
# the gnuplot commands.
# data_filename = The data file that will be read in by gnuplot
# containing the data to be plotted.
# ps_filename = The postscript file that will be created by gnuplot.
# ps_filename_logy = The logy postscript file that will be created.
# library = Name of the Library Manager these tapes belong to.
# count = ???
def write_plot_files(self, plot_filename, pts_filename,
ps_filename, ps_filename_logy, library, count):
outf = open(plot_filename, "w")
#Write out the commands for both files.
outf.write("set grid\n")
outf.write("set ylabel 'Mounts'\n")
outf.write("set terminal postscript color solid\n")
outf.write("set title '%s Tape Mounts per Volume (plotted at %s)'\n"
% (library, time.ctime(time.time())))
if self.toh > 0:
outf.write("set arrow 1 from %d,%d to %d,%d head\n"
% (count-self.toh-500,
self.high_water_mark-500,
count-self.toh, self.high_water_mark))
outf.write("set label 1 '%d' at %d,%d right\n"
%(self.toh, count-self.toh-500,
self.high_water_mark-500))
if self.tol > 0:
outf.write("set arrow 2 from %d,%d to %d,%d head\n"
% (count-self.tol-500,
self.low_water_mark+500, count-self.tol,
self.low_water_mark))
outf.write("set label 2 '%d' at %d,%d right\n"
% (self.tol, count-self.tol-500,
self.low_water_mark+500))
outf.write("set label 3 '%d' at 500,%d left\n"
% (self.high_water_mark, self.high_water_mark))
outf.write("set label 4 '%d' at 500,%d left\n"
% (self.low_water_mark, self.low_water_mark))
#Write out regular plot creating commands.
outf.write("set output '" + ps_filename + "'\n")
outf.write("plot '%s' notitle with impulses, %d notitle, %d notitle\n"
% (pts_filename, self.low_water_mark,
self.high_water_mark))
#Write out logy plot creating commands.
outf.write("set logscale y\n")
outf.write("set output '" + ps_filename_logy + "'\n")
if self.toh > 0:
outf.write("set arrow 1 from %d,%d to %d,%d head\n"
% (count-self.toh-500,
self.high_water_mark-2000,
count-self.toh, self.high_water_mark))
outf.write("set label 1 '%d' at %d,%d right\n"
%(self.toh, count-self.toh-500,
self.high_water_mark-2000))
outf.write("plot '%s' notitle with impulses, %d notitle, %d notitle\n"
% (pts_filename, self.low_water_mark,
self.high_water_mark))
outf.close()
def write_hist_plot_file(self, plot_filename, pts_filename,
ps_filename, library, count, set_xtics,
maxy, set_label):
outf = open(plot_filename, "w")
outf.write("set grid\n")
outf.write("set ylabel 'Volumes'\n")
outf.write("set xlabel 'Mounts'\n")
outf.write("set xrange [0:%d]\n" % (count+1))
outf.write("set yrange [0:%d]\n"
% (((maxy+self.yoffset*2)/self.ystep+1)*self.ystep))
outf.write(set_label)
outf.write(set_xtics)
outf.write("set tics out\n")
if os.uname()[1] == 'cdfensrv2.fnal.gov':
outf.write("set arrow from 21,2000 to 21,500\n")
outf.write("set label \"Bakken's Tape\" at 21,2250 center\n")
outf.write("set terminal postscript color solid\n")
outf.write("set output '" + ps_filename + "'\n")
outf.write("set title '%s Tape Mounts (plotted at %s)'\n"
% (library, time.ctime(time.time())))
outf.write("plot '%s' notitle with impulse lw 20\n"
% (pts_filename,))
outf.close()
#######################################################################
# The following functions must be defined by all plotting modules.
#######################################################################
def book(self, frame):
if self.get_parameter("low_water_mark"):
self.low_water_mark=self.get_parameter("low_water_mark")
if self.get_parameter("high_water_mark"):
self.high_water_mark=self.get_parameter("high_water_mark")
if self.get_parameter("step"):
self.step=self.get_parameter("step")
if self.get_parameter("yoffset"):
self.yoffset=self.get_parameter("yoffset")
if self.get_parameter("ystep"):
self.ystep=self.get_parameter("ystep")
#if self.get_parameter("library"):
# self.library=self.get_parameter("library")
#Get cron directory information.
cron_dict = frame.get_configuration_client().get("crons", {})
#Pull out just the information we want.
self.temp_dir = cron_dict.get("tmp_dir", "/tmp")
html_dir = cron_dict.get("html_dir", "")
#Handle the case were we don't know where to put the output.
if not html_dir:
sys.stderr.write("Unable to determine html_dir.\n")
sys.exit(1)
self.web_dir = os.path.join(html_dir, WEB_SUB_DIRECTORY)
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
def fill(self, frame):
#Specify the x ranges.
for i in range(0, self.low_water_mark, self.step):
k = self.hist_key(i)
self.mtsh[k] = 0
self.hist_keys.append(k)
k = self.hist_key(self.low_water_mark)
self.hist_keys.append(k)
self.mtsh[k] = 0
k = self.hist_key(self.high_water_mark)
self.hist_keys.append(k)
self.mtsh[k] = 0
edb = frame.get_configuration_client().get('database', {})
db = pg.DB(host = edb.get('db_host', "localhost"),
dbname= edb.get('dbname', "enstoredb"),
port = edb.get('db_port', 5432),
user = edb.get('dbuser', "enstore"))
res_lib = db.query("select distinct library from volume where media_type!='null'").getresult()
self.libraries = {}
for row in res_lib:
if not row:
continue
#Get the data for the plot.
pts_filename = os.path.join(self.temp_dir,
"mounts_%s.pts" % row[0])
count = self.__fill(row[0], pts_filename, db)
self.libraries[row[0]] = {"fname" : pts_filename,
"count" : count,
}
#One last one for the entire system.
pts_filename = os.path.join(self.temp_dir, "mounts.pts")
count = self.__fill(None, pts_filename, db)
self.libraries["All"] = {"fname" : pts_filename,
"count" : count,
}
db.close()
def __fill(self, library, pts_filename, db):
sql_stm = "declare volume_cursor cursor for " \
"select label,sum_mounts,storage_group,file_family,wrapper,library " \
"from volume " \
"where system_inhibit_0!='DELETED' " \
"and media_type!='null'"
if library:
sql_stm = "%s and library = '%s'" \
% (sql_stm, library)
db.query("begin")
db.query(sql_stm)
while True:
res = db.query("fetch 10000 from volume_cursor").getresult()
for row in res:
sg=row[2]
#ff=row[3]
#wp=row[4]
m=row[1]
#lib=row[5]
if m == -1:
m = 0
self.mts.append(m)
if self.mtsg.has_key(sg):
self.mtsg[sg].append(m)
else:
self.mtsg[sg] = [m]
if m >= self.high_water_mark:
self.toh = self.toh + 1
if m >= self.low_water_mark:
self.tol = self.tol + 1
k = self.hist_key(m)
self.mtsh[k] = self.mtsh[k]+1
l=len(res)
if (l < 10000):
break
db.query("rollback") #Close off the "begin".
self.mts.sort()
#Write out the data files.
for i in self.mtsg.keys():
self.mtsg[i].sort()
count = 0
outf = open(pts_filename, "w")
for i in self.mts:
count = count + 1
outf.write("%d %d\n"%(count, i))
outf.close()
if count == 0 :
return count
def plot(self):
for library, lib_dict in self.libraries.items():
pts_filename = lib_dict['fname']
count = lib_dict['count']
#
### The mount plots by library (and one total).
#
#Some preliminary variables to be used in filenames.
output_file="mounts"
if library:
output_file = output_file + '-' + library
output_file_logy = output_file + '_logy'
#The final output
ps_filename = os.path.join(self.web_dir, output_file + '.ps')
ps_filename_logy = os.path.join(self.web_dir,
output_file_logy + '.ps')
#The jpeg output.
jpeg_filename = os.path.join(self.web_dir,
output_file + '.jpg')
jpeg_filename_logy = os.path.join(self.web_dir,
output_file_logy + '.jpg')
jpeg_filename_stamp = os.path.join(self.web_dir,
output_file + '_stamp.jpg')
jpeg_filename_logy_stamp = os.path.join(self.web_dir,
output_file_logy + '_stamp.jpg')
#The commands that gnuplot will execute will go here.
plot_filename = os.path.join(self.temp_dir,
output_file + ".plot")
self.write_plot_files(plot_filename, pts_filename,
ps_filename, ps_filename_logy,
library, count)
os.system("gnuplot %s" % (plot_filename))
# convert to jpeg
os.system("convert -flatten -background lightgray -rotate 90 -modulate 80 %s %s"
% (ps_filename, jpeg_filename))
os.system("convert -flatten -background lightgray -rotate 90 -modulate 80 %s %s"
% (ps_filename_logy, jpeg_filename_logy))
os.system("convert -flatten -background lightgray -rotate 90 -geometry 120x120 -modulate 80 %s %s"
% (ps_filename, jpeg_filename_stamp))
os.system("convert -flatten -background lightgray -rotate 90 -geometry 120x120 -modulate 80 %s %s"
% (ps_filename_logy, jpeg_filename_logy_stamp))
#Cleanup the temporary files.
try:
os.remove(plot_filename)
pass
except:
pass
try:
os.remove(pts_filename)
pass
except:
pass
#
### The histogram.
#
#Filenames for various values.
hist_out = output_file + '_hist'
ps_hist_filename = os.path.join(self.web_dir,
hist_out + '.ps')
jpeg_hist_filename = os.path.join(self.web_dir,
hist_out + '.jpg')
jpeg_hist_filename_stamp = os.path.join(self.web_dir,
hist_out + '_stamp.jpg')
if library:
pts_filename = os.path.join(self.temp_dir,
"%s.pts" % hist_out)
else:
pts_filename = os.path.join(self.temp_dir,
"%s.pts" % hist_out)
count = 0
set_label = ""
set_xtics = "set xtics rotate ("
outf = open(pts_filename, "w")
maxy = 0
for i in self.hist_keys:
count = count + 1
###This does some filling to. Oh well.
outf.write("%d %d\n" % (count, self.mtsh[i]))
if self.mtsh[i] > maxy:
maxy = self.mtsh[i]
set_xtics = set_xtics + '"%s" %d,'%(i, count)
set_label = set_label + \
'set label %d "%d" at %d,%d center\n' \
% (count, self.mtsh[i], count,
self.mtsh[i] + self.yoffset)
outf.close()
set_xtics = set_xtics[:-1]+')'
set_xtics = set_xtics+'\n'
#The commands that gnuplot will execute will go here.
plot_filename = os.path.join(self.temp_dir,
hist_out + ".plot")
self.write_hist_plot_file(plot_filename, pts_filename,
ps_hist_filename, library,
count, set_xtics, maxy, set_label)
#Make the plot and convert it to jpg.
os.system("gnuplot %s" % (plot_filename,))
os.system("convert -flatten -background lightgray -rotate 90 -modulate 80 %s %s"
% (ps_hist_filename, jpeg_hist_filename))
os.system("convert -flatten -background lightgray -rotate 90 -geometry 120x120 -modulate 80 %s %s"
% (ps_hist_filename, jpeg_hist_filename_stamp))
#Cleanup the temporary files.
try:
os.remove(plot_filename)
pass
except:
pass
try:
os.remove(pts_filename)
pass
except:
pass
avg_line_length: 30.7525 | max_line_length: 100 | alphanum_fraction: 0.643362

hexsha: 4aca8a889722553469134bcaf43593ac29154994 | size: 2,362 | ext: py | lang: Python
repo: rinceyuan/WeFe | path: common/python/protobuf/pyproto/default_empty_fill_pb2.py | head_hexsha: 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | licenses: ["Apache-2.0"]
stars: 39 (2021-10-12T01:43:27.000Z to 2022-03-28T04:46:35.000Z) | issues: 6 (2021-10-14T02:11:47.000Z to 2022-03-23T02:41:50.000Z) | forks: 10 (2021-10-14T09:36:03.000Z to 2022-02-10T11:05:12.000Z)

# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: default-empty-fill.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='default-empty-fill.proto',
package='com.welab.wefe.core.mlmodel.buffer',
syntax='proto3',
serialized_options=b'B\025DefaultEmptyFillProto',
serialized_pb=b'\n\x18\x64\x65\x66\x61ult-empty-fill.proto\x12\"com.welab.wefe.core.mlmodel.buffer\"\'\n\x17\x44\x65\x66\x61ultEmptyFillMessage\x12\x0c\n\x04\x66lag\x18\x01 \x01(\tB\x17\x42\x15\x44\x65\x66\x61ultEmptyFillProtob\x06proto3'
)
_DEFAULTEMPTYFILLMESSAGE = _descriptor.Descriptor(
name='DefaultEmptyFillMessage',
full_name='com.welab.wefe.core.mlmodel.buffer.DefaultEmptyFillMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='flag', full_name='com.welab.wefe.core.mlmodel.buffer.DefaultEmptyFillMessage.flag', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=103,
)
DESCRIPTOR.message_types_by_name['DefaultEmptyFillMessage'] = _DEFAULTEMPTYFILLMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DefaultEmptyFillMessage = _reflection.GeneratedProtocolMessageType('DefaultEmptyFillMessage', (_message.Message,), {
'DESCRIPTOR': _DEFAULTEMPTYFILLMESSAGE,
'__module__': 'default_empty_fill_pb2'
# @@protoc_insertion_point(class_scope:com.welab.wefe.core.mlmodel.buffer.DefaultEmptyFillMessage)
})
_sym_db.RegisterMessage(DefaultEmptyFillMessage)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
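# --- Illustrative usage sketch (not part of the generated module) -----------
# A minimal round-trip with the generated message class, assuming this file is
# importable as `default_empty_fill_pb2`:
#
#     from common.python.protobuf.pyproto import default_empty_fill_pb2
#
#     msg = default_empty_fill_pb2.DefaultEmptyFillMessage()
#     msg.flag = 'empty'
#     raw = msg.SerializeToString()          # bytes ready for the wire
#     parsed = default_empty_fill_pb2.DefaultEmptyFillMessage()
#     parsed.ParseFromString(raw)
#     assert parsed.flag == 'empty'
# -----------------------------------------------------------------------------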
| 36.90625 | 242 | 0.747671 |
13112c318e9a69cf242a9faecbdb2cc2ed806489 | 7,004 | py | Python | gluoncv/data/kitti/detection.py | cxiang26/mygluon_cv | 1dfebd27afc7d788424b21dfd69007c7576dcbbe | ["Apache-2.0"] | 11 | 2019-12-11T12:47:30.000Z | 2021-01-21T16:36:56.000Z | gluoncv/data/kitti/detection.py | liyongsheng-tech/mygluon_cv | 1dfebd27afc7d788424b21dfd69007c7576dcbbe | ["Apache-2.0"] | 3 | 2019-09-03T01:35:15.000Z | 2019-11-13T06:28:00.000Z | gluoncv/data/kitti/detection.py | liyongsheng-tech/mygluon_cv | 1dfebd27afc7d788424b21dfd69007c7576dcbbe | ["Apache-2.0"] | 3 | 2020-01-10T16:50:08.000Z | 2020-11-13T06:59:13.000Z |
import os
import numpy as np
from . import utils
from ..base import VisionDataset
__all__ = ['KITTIDetection']
class KITTIDetection(VisionDataset):
"""MS COCO detection dataset.
Parameters
----------
root : str, default '~/.mxnet/datasets/voc'
Path to folder storing the dataset.
splits : list of str, default ['instances_val2017']
Json annotations name.
Candidates can be: instances_val2017, instances_train2017.
transform : callable, default None
A function that takes data and label and transforms them. Refer to
:doc:`./transforms` for examples.
A transform function for object detection should take label into consideration,
because any geometric modification will require label to be modified.
min_object_area : float
Minimum accepted ground-truth area, if an object's area is smaller than this value,
it will be ignored.
    skip_empty : bool, default is True
        Whether to skip images with no valid objects. This should be `True` in training, otherwise
        it will cause undefined behavior.
    use_crowd : bool, default is True
        Whether to use boxes labeled as crowd instances.
"""
# CLASSES = ['Car', 'Truck', 'Van', 'Tram', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Misc']
CLASSES = ['Car', 'Pedestrian', 'Cyclist']
def __init__(self, root=os.path.join('d:/', 'Kitti/object'),
splits='training', transform=None, index_map=None,
min_object_area=0, skip_empty=True, use_crowd=True):
super(KITTIDetection, self).__init__(root)
self._root = os.path.expanduser(root)
self._transform = transform
self._min_object_area = min_object_area
self._skip_empty = skip_empty
self._use_crowd = use_crowd
self._splits = splits
self._items = self._load_items()
self.index_map = index_map or dict(zip(self.classes, range(self.num_class)))
if self._splits == 'training':
self.num_samples = 3712
elif self._splits == 'testing':
self.num_samples = 3769
else:
print('Unknown split: %s' % (self._splits))
exit(-1)
self.image_dir = os.path.join(self._root, self._splits, 'image_2')
self.calib_dir = os.path.join(self._root, self._splits, 'calib')
self.label_dir = os.path.join(self._root, 'training', 'label_2')
self.lidar_dir = os.path.join(self._root, self._splits, 'velodyne')
# self.pred_dir = os.path.join(self._root, self._splits, 'pred')
@property
def num_class(self):
"""Number of categories."""
return len(self.classes)
def _load_items(self):
ids = []
if self._splits == 'training':
lf = os.path.join(self._root, 'train'+'.txt')
elif self._splits == 'testing':
lf = os.path.join(self._root, 'val' + '.txt')
else:
raise TypeError('Getting error type splits {}'.format(self._splits))
with open(lf, 'r') as f:
ids += [line.strip() for line in f.readlines()]
return ids
def _get_image(self, idx):
assert (idx < self.num_samples)
name = self._items[idx]
img_filename = os.path.join(self.image_dir, name+'.png')
return utils.load_image(img_filename)
def _get_lidar(self, idx):
assert (idx < self.num_samples)
name = self._items[idx]
lidar_filename = os.path.join(self.lidar_dir, name+'.bin')
return utils.load_velo_scan(lidar_filename)
def _get_calibration(self, idx):
assert (idx < self.num_samples)
name = self._items[idx]
calib_filename = os.path.join(self.calib_dir, name + '.txt')
return utils.Calibration(calib_filename)
def _get_label_objects(self, idx):
assert (idx < self.num_samples)
name = self._items[idx]
label_filename = os.path.join(self.label_dir, name + '.txt')
return utils.read_label(label_filename)
def _get_label2D(self, idx):
labels = self._get_label_objects(idx)
label = []
for la in labels:
if la.type in ['Car', 'Truck', 'Van', 'Tram']:
la.type = 'Car'
elif la.type in ['Pedestrian', 'Person_sitting']:
la.type = 'Pedestrian'
elif la.type in ['Cyclist']:
la.type = 'Cyclist'
else:
continue
diffcult = int(la.occlusion >= 2)
cls_id = self.index_map[la.type]
label.append([la.xmin, la.ymin, la.xmax, la.ymax, cls_id, diffcult])
return np.array(label)
def _get_depth_map(self, idx):
pass
def _get_top_down(self, idx):
pass
def _isexist_pre_object(self, idx):
assert (idx < self.num_samples and self._splits == 'training')
pred_filename = os.path.join(self.pred_dir, '%06d.txt' % (idx))
return os.path.exists(pred_filename)
@property
def classes(self):
"""Category names."""
return type(self).CLASSES
def __len__(self):
return self.num_samples
def __getitem__(self, idx):
img = self._get_image(idx)
#lidar = self._get_lidar(idx)
#cal = self._get_calibration(idx)
label2d = self._get_label2D(idx)
# pred = self._get_pred_objects(idx)
if self._transform is not None:
return self._transform(img, label2d)
return img, label2d
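# Illustrative usage sketch (not part of the original file). It assumes the
# KITTI object-detection data is unpacked under the `root` directory expected
# by the class above:
#
#     dataset = KITTIDetection(root='d:/Kitti/object', splits='training')
#     print(len(dataset), 'samples,', dataset.num_class, 'classes')
#     img, label2d = dataset[0]
#     # each row of label2d is [xmin, ymin, xmax, ymax, class_id, difficult]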
# def draw_projected_box3d(image, qs, color=(0,255,0), thickness=2):
# ''' Draw 3d bounding box in image
# qs: (8,3) array of vertices for the 3d box in following order:
# 1 -------- 0
# /| /|
# 2 -------- 3 .
# | | | |
# . 5 -------- 4
# |/ |/
# 6 -------- 7
# '''
# qs = qs.astype(np.int32)
# for k in range(0,4):
# # Ref: http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
# i,j=k,(k+1)%4
# # use LINE_AA for opencv3
# #cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
# cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness)
# i,j=k+4,(k+1)%4 + 4
# cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness)
#
# i,j=k,k+4
# cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness)
# return image
# if __name__ == '__main__':
# import cv2
# data = KITTIDetection()
# img, label, lidar, cal = data.__getitem__(1000)
# print(label)
# for la in label:
# if la.type == 'DontCare':
# continue
# else:
# box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(la, cal.P)
# img = draw_projected_box3d(img, box3d_pts_2d)
# cv2.namedWindow('results')
# cv2.imshow('results',img)
# cv2.waitKey()
# print(img.dtype, label.dtype)
| 36.103093 | 98 | 0.587236 |
1aeedd89473a612360ef272e40f1a3514f5ff0ac | 344 | py | Python | bin/mcstats/mcstats/stats/trigger_raid.py | smashedbotatos/icarey_whitelist_system | cf93d3983f15838a22859781d6a36e8f44cb458f | ["Apache-2.0"] | 1 | 2019-08-08T10:36:54.000Z | 2019-08-08T10:36:54.000Z | bin/mcstats/mcstats/stats/trigger_raid.py | smashedbotatos/icarey_whitelist_system | cf93d3983f15838a22859781d6a36e8f44cb458f | ["Apache-2.0"] | 2 | 2019-08-06T05:50:04.000Z | 2021-09-01T20:06:15.000Z | bin/mcstats/mcstats/stats/trigger_raid.py | smashedbotatos/iCarey_Whitelist_System | cf93d3983f15838a22859781d6a36e8f44cb458f | ["Apache-2.0"] | null | null | null |
from mcstats import mcstats
mcstats.registry.append(
mcstats.MinecraftStat(
'trigger_raid',
{
'title': 'Bad Omen',
'desc': 'Raids triggered',
'unit': 'int',
},
mcstats.StatReader(['minecraft:custom','minecraft:raid_trigger']),
1912 # raids added in 18w47a
))
| 24.571429 | 74 | 0.549419 |
6be47581a5084391bcbe8f34d33d170f939b5d57 | 22,104 | py | Python | homeassistant/components/mqtt/__init__.py | afanucchi/home-assistant | 6d838b9a5efeaf3e7a23857251ad6fe6548da651 | ["Apache-2.0"] | null | null | null | homeassistant/components/mqtt/__init__.py | afanucchi/home-assistant | 6d838b9a5efeaf3e7a23857251ad6fe6548da651 | ["Apache-2.0"] | null | null | null | homeassistant/components/mqtt/__init__.py | afanucchi/home-assistant | 6d838b9a5efeaf3e7a23857251ad6fe6548da651 | ["Apache-2.0"] | null | null | null |
"""
Support for MQTT message handling.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/
"""
import asyncio
import logging
import os
import socket
import time
import ssl
import re
import requests.certs
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.config import load_yaml_config_file
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import template, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, dispatcher_send)
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, CONF_VALUE_TEMPLATE, CONF_USERNAME,
CONF_PASSWORD, CONF_PORT, CONF_PROTOCOL, CONF_PAYLOAD)
from homeassistant.components.mqtt.server import HBMQTT_CONFIG_SCHEMA
REQUIREMENTS = ['paho-mqtt==1.3.0']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'mqtt'
DATA_MQTT = 'mqtt'
SERVICE_PUBLISH = 'publish'
SIGNAL_MQTT_MESSAGE_RECEIVED = 'mqtt_message_received'
CONF_EMBEDDED = 'embedded'
CONF_BROKER = 'broker'
CONF_CLIENT_ID = 'client_id'
CONF_DISCOVERY = 'discovery'
CONF_DISCOVERY_PREFIX = 'discovery_prefix'
CONF_KEEPALIVE = 'keepalive'
CONF_CERTIFICATE = 'certificate'
CONF_CLIENT_KEY = 'client_key'
CONF_CLIENT_CERT = 'client_cert'
CONF_TLS_INSECURE = 'tls_insecure'
CONF_TLS_VERSION = 'tls_version'
CONF_BIRTH_MESSAGE = 'birth_message'
CONF_WILL_MESSAGE = 'will_message'
CONF_STATE_TOPIC = 'state_topic'
CONF_COMMAND_TOPIC = 'command_topic'
CONF_QOS = 'qos'
CONF_RETAIN = 'retain'
PROTOCOL_31 = '3.1'
PROTOCOL_311 = '3.1.1'
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_QOS = 0
DEFAULT_RETAIN = False
DEFAULT_PROTOCOL = PROTOCOL_311
DEFAULT_DISCOVERY = False
DEFAULT_DISCOVERY_PREFIX = 'homeassistant'
DEFAULT_TLS_PROTOCOL = 'auto'
ATTR_TOPIC = 'topic'
ATTR_PAYLOAD = 'payload'
ATTR_PAYLOAD_TEMPLATE = 'payload_template'
ATTR_QOS = CONF_QOS
ATTR_RETAIN = CONF_RETAIN
MAX_RECONNECT_WAIT = 300 # seconds
def valid_subscribe_topic(value, invalid_chars='\0'):
"""Validate that we can subscribe using this MQTT topic."""
value = cv.string(value)
if all(c not in value for c in invalid_chars):
return vol.Length(min=1, max=65535)(value)
raise vol.Invalid('Invalid MQTT topic name')
def valid_publish_topic(value):
"""Validate that we can publish using this MQTT topic."""
return valid_subscribe_topic(value, invalid_chars='#+\0')
def valid_discovery_topic(value):
"""Validate a discovery topic."""
return valid_subscribe_topic(value, invalid_chars='#+\0/')
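# Illustrative examples (not part of the original module) of how the three
# validators above behave when called directly:
#
#     valid_subscribe_topic('sensor/+/state')   # ok: wildcards allowed
#     valid_publish_topic('sensor/kitchen')     # ok: no wildcards present
#     valid_publish_topic('sensor/#')           # raises vol.Invalid ('#' forbidden)
#     valid_discovery_topic('homeassistant')    # ok: single level, no '/'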
_VALID_QOS_SCHEMA = vol.All(vol.Coerce(int), vol.In([0, 1, 2]))
CLIENT_KEY_AUTH_MSG = 'client_key and client_cert must both be present in ' \
'the MQTT broker configuration'
MQTT_WILL_BIRTH_SCHEMA = vol.Schema({
vol.Required(ATTR_TOPIC): valid_publish_topic,
vol.Required(ATTR_PAYLOAD, CONF_PAYLOAD): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}, required=True)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_CLIENT_ID): cv.string,
vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE):
vol.All(vol.Coerce(int), vol.Range(min=15)),
vol.Optional(CONF_BROKER): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_CERTIFICATE): vol.Any('auto', cv.isfile),
vol.Inclusive(CONF_CLIENT_KEY, 'client_key_auth',
msg=CLIENT_KEY_AUTH_MSG): cv.isfile,
vol.Inclusive(CONF_CLIENT_CERT, 'client_key_auth',
msg=CLIENT_KEY_AUTH_MSG): cv.isfile,
vol.Optional(CONF_TLS_INSECURE): cv.boolean,
vol.Optional(CONF_TLS_VERSION, default=DEFAULT_TLS_PROTOCOL):
vol.Any('auto', '1.0', '1.1', '1.2'),
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL):
vol.All(cv.string, vol.In([PROTOCOL_31, PROTOCOL_311])),
vol.Optional(CONF_EMBEDDED): HBMQTT_CONFIG_SCHEMA,
vol.Optional(CONF_WILL_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_BIRTH_MESSAGE): MQTT_WILL_BIRTH_SCHEMA,
vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
vol.Optional(CONF_DISCOVERY_PREFIX,
default=DEFAULT_DISCOVERY_PREFIX): valid_discovery_topic,
}),
}, extra=vol.ALLOW_EXTRA)
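# Illustrative sketch (not part of the original module): a configuration dict
# that CONFIG_SCHEMA accepts; the host and credentials below are made up for
# the example, and omitted keys fall back to the schema defaults.
#
#     CONFIG_SCHEMA({
#         'mqtt': {
#             'broker': '192.168.1.10',
#             'port': 1883,
#             'username': 'homeassistant',
#             'password': 'secret',
#             'discovery': True,
#         }
#     })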
SCHEMA_BASE = {
vol.Optional(CONF_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
}
MQTT_BASE_PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(SCHEMA_BASE)
# Sensor type platforms subscribe to MQTT events
MQTT_RO_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend({
vol.Required(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
# Switch type platforms publish to MQTT and may subscribe
MQTT_RW_PLATFORM_SCHEMA = MQTT_BASE_PLATFORM_SCHEMA.extend({
vol.Required(CONF_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
# Service call validation schema
MQTT_PUBLISH_SCHEMA = vol.Schema({
vol.Required(ATTR_TOPIC): valid_publish_topic,
vol.Exclusive(ATTR_PAYLOAD, CONF_PAYLOAD): object,
vol.Exclusive(ATTR_PAYLOAD_TEMPLATE, CONF_PAYLOAD): cv.string,
vol.Optional(ATTR_QOS, default=DEFAULT_QOS): _VALID_QOS_SCHEMA,
vol.Optional(ATTR_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}, required=True)
def _build_publish_data(topic, qos, retain):
"""Build the arguments for the publish service without the payload."""
data = {ATTR_TOPIC: topic}
if qos is not None:
data[ATTR_QOS] = qos
if retain is not None:
data[ATTR_RETAIN] = retain
return data
def publish(hass, topic, payload, qos=None, retain=None):
"""Publish message to an MQTT topic."""
hass.add_job(async_publish, hass, topic, payload, qos, retain)
@callback
def async_publish(hass, topic, payload, qos=None, retain=None):
"""Publish message to an MQTT topic."""
data = _build_publish_data(topic, qos, retain)
data[ATTR_PAYLOAD] = payload
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_PUBLISH, data))
def publish_template(hass, topic, payload_template, qos=None, retain=None):
"""Publish message to an MQTT topic using a template payload."""
data = _build_publish_data(topic, qos, retain)
data[ATTR_PAYLOAD_TEMPLATE] = payload_template
hass.services.call(DOMAIN, SERVICE_PUBLISH, data)
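# Illustrative sketch (not part of the original module): publishing from
# synchronous code, assuming `hass` is a running Home Assistant instance and
# the topic/entity names are placeholders.
#
#     publish(hass, 'home/livingroom/light', 'ON', qos=1, retain=True)
#     publish_template(hass, 'home/outside/temp',
#                      '{{ states.sensor.outdoor_temp.state }}')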
@asyncio.coroutine
def async_subscribe(hass, topic, msg_callback, qos=DEFAULT_QOS,
encoding='utf-8'):
"""Subscribe to an MQTT topic."""
@callback
def async_mqtt_topic_subscriber(dp_topic, dp_payload, dp_qos):
"""Match subscribed MQTT topic."""
if not _match_topic(topic, dp_topic):
return
if encoding is not None:
try:
payload = dp_payload.decode(encoding)
_LOGGER.debug("Received message on %s: %s", dp_topic, payload)
except (AttributeError, UnicodeDecodeError):
_LOGGER.error("Illegal payload encoding %s from "
"MQTT topic: %s, Payload: %s",
encoding, dp_topic, dp_payload)
return
else:
_LOGGER.debug("Received binary message on %s", dp_topic)
payload = dp_payload
hass.async_run_job(msg_callback, dp_topic, payload, dp_qos)
async_remove = async_dispatcher_connect(
hass, SIGNAL_MQTT_MESSAGE_RECEIVED, async_mqtt_topic_subscriber)
yield from hass.data[DATA_MQTT].async_subscribe(topic, qos)
return async_remove
def subscribe(hass, topic, msg_callback, qos=DEFAULT_QOS,
encoding='utf-8'):
"""Subscribe to an MQTT topic."""
async_remove = run_coroutine_threadsafe(
async_subscribe(hass, topic, msg_callback, qos, encoding), hass.loop
).result()
def remove():
"""Remove listener convert."""
run_callback_threadsafe(hass.loop, async_remove).result()
return remove
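# Illustrative sketch (not part of the original module): subscribing from
# synchronous code; the returned callable removes the listener again. Topic
# and callback names are placeholders.
#
#     def message_received(topic, payload, qos):
#         print('Got %s on %s' % (payload, topic))
#
#     unsub = subscribe(hass, 'home/+/state', message_received)
#     ...
#     unsub()   # stop listening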
@asyncio.coroutine
def _async_setup_server(hass, config):
"""Try to start embedded MQTT broker.
This method is a coroutine.
"""
conf = config.get(DOMAIN, {})
server = yield from async_prepare_setup_platform(
hass, config, DOMAIN, 'server')
if server is None:
_LOGGER.error("Unable to load embedded server")
return None
success, broker_config = \
yield from server.async_start(hass, conf.get(CONF_EMBEDDED))
return success and broker_config
@asyncio.coroutine
def _async_setup_discovery(hass, config):
"""Try to start the discovery of MQTT devices.
This method is a coroutine.
"""
conf = config.get(DOMAIN, {})
discovery = yield from async_prepare_setup_platform(
hass, config, DOMAIN, 'discovery')
if discovery is None:
_LOGGER.error("Unable to load MQTT discovery")
return None
success = yield from discovery.async_start(
hass, conf[CONF_DISCOVERY_PREFIX], config)
return success
@asyncio.coroutine
def async_setup(hass, config):
"""Start the MQTT protocol service."""
conf = config.get(DOMAIN)
if conf is None:
conf = CONFIG_SCHEMA({DOMAIN: {}})[DOMAIN]
client_id = conf.get(CONF_CLIENT_ID)
keepalive = conf.get(CONF_KEEPALIVE)
# Only setup if embedded config passed in or no broker specified
if CONF_EMBEDDED not in conf and CONF_BROKER in conf:
broker_config = None
else:
broker_config = yield from _async_setup_server(hass, config)
if CONF_BROKER in conf:
broker = conf[CONF_BROKER]
port = conf[CONF_PORT]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
certificate = conf.get(CONF_CERTIFICATE)
client_key = conf.get(CONF_CLIENT_KEY)
client_cert = conf.get(CONF_CLIENT_CERT)
tls_insecure = conf.get(CONF_TLS_INSECURE)
protocol = conf[CONF_PROTOCOL]
elif broker_config:
# If no broker passed in, auto config to internal server
broker, port, username, password, certificate, protocol = broker_config
# Embedded broker doesn't have some ssl variables
client_key, client_cert, tls_insecure = None, None, None
else:
err = "Unable to start MQTT broker."
if conf.get(CONF_EMBEDDED) is not None:
# Explicit embedded config, requires explicit broker config
err += " (Broker configuration required.)"
_LOGGER.error(err)
return False
# For cloudmqtt.com, secured connection, auto fill in certificate
if certificate is None and 19999 < port < 30000 and \
broker.endswith('.cloudmqtt.com'):
certificate = os.path.join(os.path.dirname(__file__),
'addtrustexternalcaroot.crt')
# When the certificate is set to auto, use bundled certs from requests
if certificate == 'auto':
certificate = requests.certs.where()
will_message = conf.get(CONF_WILL_MESSAGE)
birth_message = conf.get(CONF_BIRTH_MESSAGE)
# Be able to override versions other than TLSv1.0 under Python3.6
conf_tls_version = conf.get(CONF_TLS_VERSION)
if conf_tls_version == '1.2':
tls_version = ssl.PROTOCOL_TLSv1_2
elif conf_tls_version == '1.1':
tls_version = ssl.PROTOCOL_TLSv1_1
elif conf_tls_version == '1.0':
tls_version = ssl.PROTOCOL_TLSv1
else:
import sys
# Python3.6 supports automatic negotiation of highest TLS version
if sys.hexversion >= 0x03060000:
tls_version = ssl.PROTOCOL_TLS # pylint: disable=no-member
else:
tls_version = ssl.PROTOCOL_TLSv1
try:
hass.data[DATA_MQTT] = MQTT(
hass, broker, port, client_id, keepalive, username, password,
certificate, client_key, client_cert, tls_insecure, protocol,
will_message, birth_message, tls_version)
except socket.error:
_LOGGER.exception("Can't connect to the broker. "
"Please check your settings and the broker itself")
return False
@asyncio.coroutine
def async_stop_mqtt(event):
"""Stop MQTT component."""
yield from hass.data[DATA_MQTT].async_disconnect()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_mqtt)
success = yield from hass.data[DATA_MQTT].async_connect()
if not success:
return False
@asyncio.coroutine
def async_publish_service(call):
"""Handle MQTT publish service calls."""
msg_topic = call.data[ATTR_TOPIC]
payload = call.data.get(ATTR_PAYLOAD)
payload_template = call.data.get(ATTR_PAYLOAD_TEMPLATE)
qos = call.data[ATTR_QOS]
retain = call.data[ATTR_RETAIN]
if payload_template is not None:
try:
payload = \
template.Template(payload_template, hass).async_render()
except template.jinja2.TemplateError as exc:
_LOGGER.error(
"Unable to publish to '%s': rendering payload template of "
"'%s' failed because %s",
msg_topic, payload_template, exc)
return
yield from hass.data[DATA_MQTT].async_publish(
msg_topic, payload, qos, retain)
descriptions = yield from hass.async_add_job(
load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
hass.services.async_register(
DOMAIN, SERVICE_PUBLISH, async_publish_service,
descriptions.get(SERVICE_PUBLISH), schema=MQTT_PUBLISH_SCHEMA)
if conf.get(CONF_DISCOVERY):
yield from _async_setup_discovery(hass, config)
return True
class MQTT(object):
"""Home Assistant MQTT client."""
def __init__(self, hass, broker, port, client_id, keepalive, username,
password, certificate, client_key, client_cert,
tls_insecure, protocol, will_message, birth_message,
tls_version):
"""Initialize Home Assistant MQTT client."""
import paho.mqtt.client as mqtt
self.hass = hass
self.broker = broker
self.port = port
self.keepalive = keepalive
self.topics = {}
self.progress = {}
self.birth_message = birth_message
self._mqttc = None
self._paho_lock = asyncio.Lock(loop=hass.loop)
if protocol == PROTOCOL_31:
proto = mqtt.MQTTv31
else:
proto = mqtt.MQTTv311
if client_id is None:
self._mqttc = mqtt.Client(protocol=proto)
else:
self._mqttc = mqtt.Client(client_id, protocol=proto)
if username is not None:
self._mqttc.username_pw_set(username, password)
if certificate is not None:
self._mqttc.tls_set(
certificate, certfile=client_cert,
keyfile=client_key, tls_version=tls_version)
if tls_insecure is not None:
self._mqttc.tls_insecure_set(tls_insecure)
self._mqttc.on_subscribe = self._mqtt_on_subscribe
self._mqttc.on_unsubscribe = self._mqtt_on_unsubscribe
self._mqttc.on_connect = self._mqtt_on_connect
self._mqttc.on_disconnect = self._mqtt_on_disconnect
self._mqttc.on_message = self._mqtt_on_message
if will_message:
self._mqttc.will_set(will_message.get(ATTR_TOPIC),
will_message.get(ATTR_PAYLOAD),
will_message.get(ATTR_QOS),
will_message.get(ATTR_RETAIN))
@asyncio.coroutine
def async_publish(self, topic, payload, qos, retain):
"""Publish a MQTT message.
This method must be run in the event loop and returns a coroutine.
"""
with (yield from self._paho_lock):
yield from self.hass.async_add_job(
self._mqttc.publish, topic, payload, qos, retain)
@asyncio.coroutine
def async_connect(self):
"""Connect to the host. Does process messages yet.
This method is a coroutine.
"""
result = yield from self.hass.async_add_job(
self._mqttc.connect, self.broker, self.port, self.keepalive)
if result != 0:
import paho.mqtt.client as mqtt
_LOGGER.error('Failed to connect: %s', mqtt.error_string(result))
else:
self._mqttc.loop_start()
return not result
def async_disconnect(self):
"""Stop the MQTT client.
This method must be run in the event loop and returns a coroutine.
"""
def stop():
"""Stop the MQTT client."""
self._mqttc.disconnect()
self._mqttc.loop_stop()
return self.hass.async_add_job(stop)
@asyncio.coroutine
def async_subscribe(self, topic, qos):
"""Subscribe to a topic.
This method is a coroutine.
"""
if not isinstance(topic, str):
raise HomeAssistantError("topic need to be a string!")
with (yield from self._paho_lock):
if topic in self.topics:
return
result, mid = yield from self.hass.async_add_job(
self._mqttc.subscribe, topic, qos)
_raise_on_error(result)
self.progress[mid] = topic
self.topics[topic] = None
@asyncio.coroutine
def async_unsubscribe(self, topic):
"""Unsubscribe from topic.
This method is a coroutine.
"""
result, mid = yield from self.hass.async_add_job(
self._mqttc.unsubscribe, topic)
_raise_on_error(result)
self.progress[mid] = topic
def _mqtt_on_connect(self, _mqttc, _userdata, _flags, result_code):
"""On connect callback.
Resubscribe to all topics we were subscribed to and publish birth
message.
"""
import paho.mqtt.client as mqtt
if result_code != mqtt.CONNACK_ACCEPTED:
_LOGGER.error('Unable to connect to the MQTT broker: %s',
mqtt.connack_string(result_code))
self._mqttc.disconnect()
return
old_topics = self.topics
self.topics = {key: value for key, value in self.topics.items()
if value is None}
for topic, qos in old_topics.items():
# qos is None if we were in process of subscribing
if qos is not None:
self.hass.add_job(self.async_subscribe, topic, qos)
if self.birth_message:
self.hass.add_job(self.async_publish(
self.birth_message.get(ATTR_TOPIC),
self.birth_message.get(ATTR_PAYLOAD),
self.birth_message.get(ATTR_QOS),
self.birth_message.get(ATTR_RETAIN)))
def _mqtt_on_subscribe(self, _mqttc, _userdata, mid, granted_qos):
"""Subscribe successful callback."""
topic = self.progress.pop(mid, None)
if topic is None:
return
self.topics[topic] = granted_qos[0]
def _mqtt_on_message(self, _mqttc, _userdata, msg):
"""Message received callback."""
dispatcher_send(
self.hass, SIGNAL_MQTT_MESSAGE_RECEIVED, msg.topic, msg.payload,
msg.qos
)
def _mqtt_on_unsubscribe(self, _mqttc, _userdata, mid, granted_qos):
"""Unsubscribe successful callback."""
topic = self.progress.pop(mid, None)
if topic is None:
return
self.topics.pop(topic, None)
def _mqtt_on_disconnect(self, _mqttc, _userdata, result_code):
"""Disconnected callback."""
self.progress = {}
self.topics = {key: value for key, value in self.topics.items()
if value is not None}
# Remove None values from topic list
for key in list(self.topics):
if self.topics[key] is None:
self.topics.pop(key)
# When disconnected because of calling disconnect()
if result_code == 0:
return
tries = 0
wait_time = 0
while True:
try:
if self._mqttc.reconnect() == 0:
_LOGGER.info("Successfully reconnected to the MQTT server")
break
except socket.error:
pass
wait_time = min(2**tries, MAX_RECONNECT_WAIT)
_LOGGER.warning(
"Disconnected from MQTT (%s). Trying to reconnect in %s s",
result_code, wait_time)
# It is ok to sleep here as we are in the MQTT thread.
time.sleep(wait_time)
tries += 1
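        # Worked example of the backoff above (explanatory comment, not in the
        # original source): successive waits are 1, 2, 4, ..., 256 seconds for
        # tries 0-8, after which min(2**tries, MAX_RECONNECT_WAIT) pins every
        # later wait at 300 seconds.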
def _raise_on_error(result):
"""Raise error if error result."""
if result != 0:
import paho.mqtt.client as mqtt
raise HomeAssistantError(
'Error talking to MQTT: {}'.format(mqtt.error_string(result)))
def _match_topic(subscription, topic):
"""Test if topic matches subscription."""
reg_ex_parts = []
suffix = ""
if subscription.endswith('#'):
subscription = subscription[:-2]
suffix = "(.*)"
sub_parts = subscription.split('/')
for sub_part in sub_parts:
if sub_part == "+":
reg_ex_parts.append(r"([^\/]+)")
else:
reg_ex_parts.append(re.escape(sub_part))
reg_ex = "^" + (r'\/'.join(reg_ex_parts)) + suffix + "$"
reg = re.compile(reg_ex)
return reg.match(topic) is not None
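# Illustrative checks (not part of the original module) of the wildcard
# matching implemented above:
#
#     _match_topic('sensor/+/state', 'sensor/kitchen/state')        # True
#     _match_topic('sensor/#', 'sensor/kitchen/temperature')        # True
#     _match_topic('sensor/+/state', 'sensor/kitchen/extra/state')  # False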
| 33.695122 | 79 | 0.65608 |
e0256cc9b5f97349f5f9230e08d77bd139ddd620 | 51,720 | py | Python | pydm/widgets/colormaps.py | KurtJacobson/pydm | 5a5cbc6cdb218b77335a3c4ad57a4a49e060e20d | ["BSD-3-Clause-LBNL"] | null | null | null | pydm/widgets/colormaps.py | KurtJacobson/pydm | 5a5cbc6cdb218b77335a3c4ad57a4a49e060e20d | ["BSD-3-Clause-LBNL"] | null | null | null | pydm/widgets/colormaps.py | KurtJacobson/pydm | 5a5cbc6cdb218b77335a3c4ad57a4a49e060e20d | ["BSD-3-Clause-LBNL"] | null | null | null |
# magma, inferno, plasma, and viridis colormaps by Nathaniel J. Smith, Stefan van der Walt,
# and (in the case of viridis) Eric Firing.
# Modified by Matt Gibbs for use with pyqtgraph.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
__all__ = ['magma', 'inferno', 'plasma', 'viridis', 'jet', 'monochrome', 'hot']
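# Illustrative sketch (not part of the original file): turning one of the
# 256-entry RGB lists below into a pyqtgraph ColorMap / lookup table. The
# variable name matches the data defined in this module; pyqtgraph and numpy
# are assumed to be available.
#
#     import numpy as np
#     import pyqtgraph as pg
#
#     positions = np.linspace(0.0, 1.0, len(_magma_data))
#     colors = (np.array(_magma_data) * 255).astype(np.ubyte)
#     cmap = pg.ColorMap(positions, colors)
#     lut = cmap.getLookupTable(0.0, 1.0, 256)   # e.g. for ImageItem.setLookupTable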
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
_jet_data = [[0,0,127,255],[0,0,255,255],[0,127,255,255],[0,255,255,255],[127,255,127,255],[255,255,0,255],[255,127,0,255],[255,0,0,255], [127,0,0,255]]
_monochrome_data = [[0,0,0,255],[255,255,255,255]]
_hot_data = [[0,0,0,255],[255,0,0,255],[255,255,0,255],[255,255,255,255]]
import numpy as np
cmaps = {}
class PyDMColorMap(object):
Magma = 0
Inferno = 1
Plasma = 2
Viridis = 3
Jet = 4
Monochrome = 5
Hot = 6
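# Build the colormap lookup: each PyDMColorMap constant above is mapped to its
# colour data as a NumPy array in the `cmaps` dictionary populated below.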
for (name, data) in ((PyDMColorMap.Magma, np.array(_magma_data)),
(PyDMColorMap.Inferno, np.array(_inferno_data)),
(PyDMColorMap.Plasma, np.array(_plasma_data)),
(PyDMColorMap.Viridis, np.array(_viridis_data)),
(PyDMColorMap.Jet, np.array(_jet_data)),
(PyDMColorMap.Monochrome, np.array(_monochrome_data)),
(PyDMColorMap.Hot, np.array(_hot_data))):
cmaps[name] = data
magma = cmaps[PyDMColorMap.Magma]
inferno = cmaps[PyDMColorMap.Inferno]
plasma = cmaps[PyDMColorMap.Plasma]
viridis = cmaps[PyDMColorMap.Viridis]
jet = cmaps[PyDMColorMap.Jet]
monochrome = cmaps[PyDMColorMap.Monochrome]
hot = cmaps[PyDMColorMap.Hot]
cmap_names = {}
cmap_names[PyDMColorMap.Magma] = "Magma"
cmap_names[PyDMColorMap.Inferno] = "Inferno"
cmap_names[PyDMColorMap.Plasma] = "Plasma"
cmap_names[PyDMColorMap.Viridis] = "Viridis"
cmap_names[PyDMColorMap.Jet] = "Jet"
cmap_names[PyDMColorMap.Monochrome] = "Monochrome"
cmap_names[PyDMColorMap.Hot] = "Hot"
| 47.668203 | 152 | 0.447525
727d9a4e4d68d2c02cc111af3c77916b420093b5 | 5,403 | py | Python | apps/useradmin/src/useradmin/middleware.py | zhedoubushishi/hue | fe86ec65f06eb891ae80d95daf15ea9ecb158c4f | [
"Apache-2.0"
] | 1 | 2021-01-23T07:45:29.000Z | 2021-01-23T07:45:29.000Z | apps/useradmin/src/useradmin/middleware.py | zhedoubushishi/hue | fe86ec65f06eb891ae80d95daf15ea9ecb158c4f | [
"Apache-2.0"
] | null | null | null | apps/useradmin/src/useradmin/middleware.py | zhedoubushishi/hue | fe86ec65f06eb891ae80d95daf15ea9ecb158c4f | [
"Apache-2.0"
] | 1 | 2021-02-01T19:55:11.000Z | 2021-02-01T19:55:11.000Z | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import absolute_import
from builtins import next
from builtins import object
import logging
import math
from datetime import datetime
from django.contrib import messages
from django.contrib.sessions.models import Session
from django.db import DatabaseError
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.utils.deprecation import MiddlewareMixin
from desktop.auth.views import dt_logout
from desktop.conf import AUTH, LDAP, SESSION
from desktop.lib.security_util import get_localhost_name
from useradmin import ldap_access
from useradmin.models import UserProfile, get_profile, User
from useradmin.views import import_ldap_users
LOG = logging.getLogger(__name__)
class LdapSynchronizationMiddleware(MiddlewareMixin):
"""
Synchronize against LDAP authority.
"""
USER_CACHE_NAME = 'ldap_use_group_sync_cache'
def process_request(self, request):
user = request.user
server = None
# Used by tests only
if request.method == "GET":
server = request.GET.get('server')
if not user or not user.is_authenticated:
return
if not User.objects.filter(username=user.username, userprofile__creation_method=UserProfile.CreationMethod.EXTERNAL.name).exists():
LOG.warn("User %s is not an Ldap user" % user.username)
return
# Cache should be cleared when user logs out.
if self.USER_CACHE_NAME not in request.session:
if LDAP.LDAP_SERVERS.get():
connection = ldap_access.get_connection_from_server(next(LDAP.LDAP_SERVERS.__iter__()))
else:
connection = ldap_access.get_connection_from_server()
import_ldap_users(connection, user.username, sync_groups=True, import_by_dn=False, server=server)
request.session[self.USER_CACHE_NAME] = True
request.session.modified = True
class LastActivityMiddleware(MiddlewareMixin):
"""
Middleware to track the last activity of a user and automatically log out the user after a specified period of inactivity
"""
def process_request(self, request):
user = request.user
if not user or not user.is_authenticated:
return
profile = get_profile(user)
expires_after = AUTH.IDLE_SESSION_TIMEOUT.get()
now = datetime.now()
logout = False
if profile.last_activity and expires_after > 0 and self._total_seconds(now - profile.last_activity) > expires_after:
logout = True
# Save last activity for user except when polling
if not (request.path.strip('/') == 'notebook/api/check_status') \
and not (request.path.strip('/').startswith('jobbrowser/api/job')) \
and not (request.path.strip('/') == 'jobbrowser/jobs' and request.POST.get('format') == 'json') \
and not (request.path.strip('/') == 'desktop/debug/is_idle') \
and not (request.path.strip('/').startswith('oozie/list_oozie_')):
try:
profile.last_activity = datetime.now()
profile.hostname = get_localhost_name()
profile.save()
except DatabaseError:
LOG.exception('Error saving profile information')
if logout:
dt_logout(request, next_page='/')
def _total_seconds(self, dt):
# Keep backward compatibility with Python 2.6 which doesn't support total_seconds()
if hasattr(dt, 'total_seconds'):
return dt.total_seconds()
else:
return math.floor((dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6)
class ConcurrentUserSessionMiddleware(MiddlewareMixin):
"""
Middleware that remove concurrent user sessions when configured
"""
def process_response(self, request, response):
try:
user = request.user
except AttributeError: # When the request does not store user. We care only about the login request which does store the user.
return response
# request.session.modified checks if a user just logged in
if request.user.is_authenticated and request.session.modified and request.user.id:
limit = SESSION.CONCURRENT_USER_SESSION_LIMIT.get()
if limit:
count = 1
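        # Walk this user's other live sessions newest-first; once `limit`
        # sessions are already kept, expire every older one.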
for session in Session.objects.filter(~Q(session_key=request.session.session_key),
expire_date__gte=datetime.now()).order_by('-expire_date'):
data = session.get_decoded()
if data.get('_auth_user_id') == str(request.user.id):
if count >= limit:
LOG.info('Expiring concurrent user session %s' % request.user.username)
session.expire_date = datetime.now()
session.save()
else:
count += 1
return response
| 36.506757 | 135 | 0.71923 |
19b4d647c3edde91f8211ca985b0cb717bab0d47 | 2,249 | py | Python | ssseg/cfgs/ce2p/cfgs_atr_resnet50os16.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | 2 | 2021-10-31T21:52:30.000Z | 2021-12-21T12:35:37.000Z | ssseg/cfgs/ce2p/cfgs_atr_resnet50os16.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | null | null | null | ssseg/cfgs/ce2p/cfgs_atr_resnet50os16.py | zhizhangxian/sssegmentation | 90613f6e0abf4cdd729cf382ab2a915e106d8649 | [
"MIT"
] | null | null | null | '''define the config file for atr and resnet50os16'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'atr',
'rootdir': os.path.join(os.getcwd(), 'ATR'),
})
DATASET_CFG['train']['aug_opts'] = [
('Resize', {'output_size': (520, 520), 'keep_ratio': False, 'scale_range': (0.75, 1.25)}),
('RandomCrop', {'crop_size': (473, 473), 'one_category_max_ratio': 0.75}),
('RandomFlip', {'flip_prob': 0.5, 'fix_ann_pairs': [(9, 10), (12, 13), (14, 15)]}),
('RandomRotation', {'angle_upper': 30, 'rotation_prob': 0.6}),
('PhotoMetricDistortion', {}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),
('Padding', {'output_size': (473, 473), 'data_type': 'tensor'}),
]
DATASET_CFG['test']['aug_opts'] = [
('Resize', {'output_size': (473, 473), 'keep_ratio': False, 'scale_range': None}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),
]
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
DATALOADER_CFG['train'].update(
{
'batch_size': 32,
}
)
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 150
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 18,
'backbone': {
'type': 'resnet50',
'series': 'resnet',
'pretrained': True,
'outstride': 16,
'use_stem': True,
'selected_indices': (0, 1, 2, 3),
}
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'ce2p_resnet50os16_atr_train',
'logfilepath': 'ce2p_resnet50os16_atr_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'ce2p_resnet50os16_atr_test',
'logfilepath': 'ce2p_resnet50os16_atr_test/test.log',
'resultsavepath': 'ce2p_resnet50os16_atr_test/ce2p_resnet50os16_atr_results.pkl'
}
)
| 30.391892 | 94 | 0.612717 |
f0a2e57719aca217e19714a673f48a214bbcc482 | 7,957 | py | Python | pycatia/navigator_interfaces/dmu_reviews.py | Tian-Jionglu/pycatia | b315aeb3a74846f134ff6b67b3a6334b9d3905fa | [
"MIT"
] | 1 | 2020-04-27T13:59:10.000Z | 2020-04-27T13:59:10.000Z | pycatia/navigator_interfaces/dmu_reviews.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | null | null | null | pycatia/navigator_interfaces/dmu_reviews.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | null | null | null | #! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.navigator_interfaces.dmu_review import DMUReview
from pycatia.product_structure_interfaces.product import Product
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.system_interfaces.collection import Collection
from pycatia.types import cat_variant
class DMUReviews(Collection):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.Collection
| DMUReviews
|
| A collection of all DMUReviews currently managed by the
| application.
|
| The method Product.GetTechnologicalObject ("DMUReviews") retrieves this
| collection.
"""
def __init__(self, com_object):
super().__init__(com_object, child_object=DMUReview)
self.dmu_reviews = com_object
@property
def current(self) -> AnyObject:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Current() As CATBaseDispatch (Read Only)
|
| Returns the current DMUReview.
|
| Returns:
| The current DMUReview (the collection is returned if there is no
| current review)
| Example:
|
| This example retrieves the current oDMUReview DMU
| Review
| from the cDMUReviews collection.
|
|
| Set oDMUReview = cDMUReviews.Current
:return: AnyObject
:rtype: AnyObject
"""
return AnyObject(self.dmu_reviews.Current)
def add(self) -> DMUReview:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func Add() As DMUReview
|
| Creates a DMUReview and adds it to the DMUReviews
| Collection.
|
| Returns:
| The created DMUReview
| Example:
|
| This example creates a new DMUReview in the cDMUReviews
| collection.
|
|
| Set oDMUReview = cDMUReviews.Add
:return: DMUReview
:rtype: DMUReview
"""
return DMUReview(self.dmu_reviews.Add())
def import_from(self, i_product: Product) -> DMUReview:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func ImportFrom(Product iProduct) As DMUReview
|
            | Imports Applicative data from a given product in a new DMU
| Review
|
| Parameters:
|
| iProduct
| The product to import applicative data from
|
| Returns:
| The created DMUReview
| Example:
|
| This example imports a new DMUReview from a product in the
| cDMUReviews collection.
|
|
| Set oDMUReview = cDMUReviews.ImportFrom(iExistingProduct)
:param Product i_product:
:return: DMUReview
:rtype: DMUReview
"""
return DMUReview(self.dmu_reviews.ImportFrom(i_product.com_object))
def item(self, i_index: cat_variant) -> DMUReview:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func Item(CATVariant iIndex) As DMUReview
|
| Returns a DMUReview using its index or its name from the DMUReviews
| collection.
|
| Parameters:
|
| iIndex
| The index or the name of the DMUReview to retrieve from the
| collection of DMUReviews. As a numerics, this index is the rank of the
| DMUReview in the collection. The index of the first DMUReview in the collection
| is 1, and the index of the last DMUReview is Count. As a string, it is the name
| you assigned to the DMUReview.
|
| Returns:
| The retrieved DMUReview
| Example:
|
| This example retrieves in oThisDMUReview the ninth
| DMUReview,
| and in oThatDMUReview the DMUReview named
| DMUReview3 from the cDMUReviews collection.
|
|
| Set oThisDMUReview = cDMUReviews.Item(9)
| Set oThatDMUReview = cDMUReviews.Item("DMUReview3")
:param cat_variant i_index:
:return: DMUReview
:rtype: DMUReview
"""
return DMUReview(self.dmu_reviews.Item(i_index))
def remove(self, i_index: cat_variant) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub Remove(CATVariant iIndex)
|
| Removes a DMUReview from the DMUReviews collection.
|
| Parameters:
|
| iIndex
| The index or the name of the DMUReview to retrieve from the
| collection of DMUReviews. As a numerics, this index is the rank of the
| DMUReview in the collection. The index of the first DMUReview in the collection
| is 1, and the index of the last DMUReview is Count. As a string, it is the name
| you assigned to the DMUReview.
|
| Example:
|
| The following example removes the tenth DMUReview and the
| DMUReview named
| DMUReview2 from the cDMUReviews collection.
|
|
| cDMUReviews.Remove(10)
| cDMUReviews.Remove("DMUReview2")
:param cat_variant i_index:
:return: None
:rtype: None
"""
return self.dmu_reviews.Remove(i_index)
def __getitem__(self, n: int) -> DMUReview:
if (n + 1) > self.count:
raise StopIteration
return DMUReview(self.dmu_reviews.item(n + 1))
def __repr__(self):
return f'DmuReviews(name="{self.name}")'
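# Usage sketch (not part of the original module; names are assumptions): the
# collection is normally reached through a Product, mirroring the VB help
# quoted above, e.g.
#   reviews = DMUReviews(product.get_technological_object("DMUReviews"))
#   review = reviews.add()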
| 37.356808 | 109 | 0.471283 |
8781119f015b31d48723dfd4e9c71989e5cd3494 | 99 | py | Python | akData/version.py | adamkerz/akData | 673884671da54b2b96480616a3f2633ba4b4710d | [
"BSD-3-Clause"
] | null | null | null | akData/version.py | adamkerz/akData | 673884671da54b2b96480616a3f2633ba4b4710d | [
"BSD-3-Clause"
] | null | null | null | akData/version.py | adamkerz/akData | 673884671da54b2b96480616a3f2633ba4b4710d | [
"BSD-3-Clause"
] | null | null | null | __version__ = "0.0.2"
__author__ = "Adam Kerz"
__copyright__ = "Copyright (C) 2015 Adam Kerz"
| 24.75 | 46 | 0.676768 |
fd25cb867d6cf8b4d3402a382307b52e6199de69 | 6,898 | py | Python | river_profiles.py | IntroQG-2017/Exercise-11 | ad2bffbbb23e94534c396d827f06e85965a4e174 | [
"MIT"
] | null | null | null | river_profiles.py | IntroQG-2017/Exercise-11 | ad2bffbbb23e94534c396d827f06e85965a4e174 | [
"MIT"
] | null | null | null | river_profiles.py | IntroQG-2017/Exercise-11 | ad2bffbbb23e94534c396d827f06e85965a4e174 | [
"MIT"
] | null | null | null | """Calculates bedrock river profiles.
Description:
This script calculates bedrock river profiles as a function of rock uplift
rate and various stream-power erosion law parameters.
User-defined variables are listed at the top of the script.
Source:
This code is based on a similar MATLAB code written by Brian Yanites and
Todd Ehlers.
Author:
Dave Whipp - 20.11.2017
"""
# Import NumPy, Matplotlib and system modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import sys
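# Governing relation used throughout this script: dh/dt = U - K * A**m * S**n,
# i.e. rock uplift minus stream-power erosion, where A is the upstream drainage
# area and S is the local channel slope.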
# Define function to update river profile elevations
def update_topo(topography, dhdt, dx, K, area, m, n, uplift, dt, dtPlot):
for step in range(int(dtPlot/dt)): # Loop over amount of time between plots
slope = abs(np.diff(topography))/dx # Calculate profile slopes; diff() calculates elevation difference between points
slope = np.append(slope, slope[-1]) # Append one extra slope value to be same size as area array
dhdt = -K * area**m * slope**n # Calculate stream-power erosion rate
topography += (dhdt + uplift) * dt # Change topography based on uplift and river erosion
tfix = np.where(topography < 0.0) # Find any values of topography less than 0.0
topography[tfix] = 0.0 # Set those values equal to 0.0
return topography, dhdt # Return updated elevations and erosion rates for river profile
# Define function to animate river profile elevations
def animate(i, topography, dhdt, dx, K, area, m, n, uplift, dt, dtPlot, plotTime):
# Update elevations on river profile
topography, dhdt = update_topo(topography, dhdt, dx, K, area, m, n, uplift, dt, dtPlot)
timeNow = plotTime[i] # Set timenow to current model time
elevMax = max(topography) # Calculate maximum elevation of river profile
plot1.set_ydata(topography) # Update plot topography
axis1.set_ylim([0.0, max(topography) * 1.1]) # Update y-axis range for new topography
timeText.set_y(max(topography) * 0.9) # Reposition time text
timeText.set_text(str(timeNow)+" years") # Update time text
elevText.set_y(max(topography) * 0.8) # Reposition time text
elevText.set_text("Max elevation: {0:.1f} m".format(elevMax)) # Update time text
# ADD STUFF TO UPDATE SECOND SUBPLOT HERE
#
#
return plot1, # Return updated plot
#--- USER-DEFINED VARIABLES BELOW -----------------------------------------#
maxElevation = 1500.0 # Maximum elevation for initial topography [m]
minElevation = 0.0 # Minimum elevation for initial topography [m]
xLength = 100000.0 # Lateral length of river profile [m]
dx = 1000.0 # Spacing of points along profile for elevation calculations [m]
topoOption = 2 # Flag for initial topography; 1 = Constant slope, 2 = Flat
simulationTime = 2000000.0 # Total simulation time [a]
dt = 2.0 # Time step size for profile calculations [a]
dtPlot = 2000.0 # Time step for displaying plots [a]
# EROSIONAL CONSTANTS
m = 1.0 # Area exponent
# m = 0.3 for bed shear stress representation
# m = 1.0 for stream power per unit channel length
# m = 0.5 for stream power per unit bed area
n = 2.0 # Slope exponent
# n = 0.7 for bed shear stress representation
# n = 1.0 for stream power per unit channel length
# n = 1.0 for stream power per unit bed area
h = 1.69 # Scaling exponent for distance-to-area conversion
ka = 6.69 # Distance/area scaling coefficient for distance-to-area conversion [m**0.33]
K = 1.6E-8 # Fluvial erosional efficiency factor [m**0.33 / a]
# This factor accounts for lithology, climate, channel geometry, and perhaps sediment supply (!)
upliftRate = 0.001 # Rock uplift rate [m/a]
#--- END USER-DEFINED VARIABLES -------------------------------------------#
# Define arrays needed for river profile calculations
xPoints = int(xLength/dx) # Number of points for x array
x = np.linspace(0.0, xLength, xPoints + 1) # Define x array from 0 to L by dx
uplift = np.zeros(len(x)) + upliftRate # Create rock uplift array with constant uplift rate
xkm = x / 1000.0 # Create xkm array with x values converted to km
dhdt = np.zeros(len(x)) # Erosion rate array, same size as x
# SELECT INPUT TOPOGRAPHY
# OPTION 1: Initial topography is a sloping surface
if topoOption == 1:
topography = np.linspace(maxElevation, minElevation, xPoints + 1)
# OPTION 2: Initial topography is a flat surface
elif topoOption == 2:
topography = np.zeros(len(x)) + maxElevation
topography[-1] = minElevation
# Stop with error message if given a bad value
else:
print("Bad value listed for topoOption. topoOption must be '1' or '2'.")
sys.exit()
# Calculate drainage basin area as a function of distance from divide x
area = ka * x**h
area[0] = area[1]
# Fill values for time and plottime arrays
time = np.linspace(0.0, simulationTime, int(simulationTime / dt + 1))
plotTime = np.linspace(0.0, simulationTime, int(simulationTime / dtPlot + 1))
# Create plotting window
fig = plt.figure()
# Format subplot 1
axis1 = plt.subplot(1, 1, 1) # Set axis1 as the first plot
axis1.set_xlim([0.0, max(xkm)]) # Set the x-axis limits for plot 1
axis1.set_ylim([0.0, maxElevation*1.1]) # Set the y-axis limits for plot 1
plot1, = plt.plot(xkm, topography) # Define plot1 as the first plot
# Add axis labels and title
plt.xlabel("Distance from drainage divide [km]")
plt.ylabel("River channel elevation [m]")
plt.title("River channel profile evolution model")
# Define starting time for display on plot (value updated in animate() function)
timeNow = 0.0
timeText = plt.text(60.,maxElevation*0.9, str(timeNow)+" years")
# Define starting max elevation for display on plot (value updated in animate() function)
elevMax = max(topography)
elevText = plt.text(60., maxElevation*0.8, "Max elevation: {0:.1f} m".format(elevMax))
# Format subplot 2
#
# FIGURE 2 STUFF GOES HERE
#
# Animate and display plot results
anim = animation.FuncAnimation(fig, animate, range(len(plotTime)),
fargs=(topography, dhdt, dx, K, area, m, n,
uplift, dt, dtPlot, plotTime), interval=20,
blit=False, repeat=False)
# Display plot
plt.show()
| 48.921986 | 130 | 0.624674 |
17e98ecd699e50235cefa2b50a1985ebc909482b | 6,296 | py | Python | drb/commands/srcrpm.py | vinted/docker-rpm-builder | 2af4d70f3c4da6589ab46e7d5fed46bcd5e5b21e | [
"Apache-2.0"
] | 355 | 2015-01-09T01:53:29.000Z | 2019-02-28T17:50:45.000Z | drb/commands/srcrpm.py | vinted/docker-rpm-builder | 2af4d70f3c4da6589ab46e7d5fed46bcd5e5b21e | [
"Apache-2.0"
] | 54 | 2015-02-06T17:16:56.000Z | 2019-04-02T16:25:59.000Z | drb/commands/srcrpm.py | vinted/docker-rpm-builder | 2af4d70f3c4da6589ab46e7d5fed46bcd5e5b21e | [
"Apache-2.0"
] | 59 | 2015-02-06T18:50:58.000Z | 2019-03-05T08:42:39.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import shutil
import logging
import click
from drb.configure_logging import configure_root_logger
from drb.docker import Docker
from drb.exception_transformer import UserExceptionTransformer
from drb.mkdir_p import mkdir_p
from drb.tempdir import TempDir
from drb.path import getpath
from drb.parse_ownership import parse_ownership
_HELP = """Builds a binary RPM from a .src.rpm file.
Uses `docker run` under the hood.
IMAGE should be a docker image id or a repository:tag,
e.g something like a682b68bbaba or alanfranz/drb-epel-6-x86-64:latest ;
anything that can be passed to `docker run` as an IMAGE parameter will do.
SRCRPM should be a .src.rpm file that contains the .spec and all the
references source files. It will be readonly inside the container.
TARGET_DIRECTORY is where the RPMS will be written. Anything inside
may be overwritten during the build phase.
ADDITIONAL_DOCKER_OPTIONS: whatever is passed will be forwarded
straight to the 'docker run' command. PLEASE REMEMBER to insert a double dash (--)
before the first additional option, otherwise it will be mistaken
for a docker-rpm-builder option.
Options:
--verify-signature: if enabled, the .src.rpm signature will be verified;
if the verification fails, the build will be aborted.
--bash-on-failure: if enabled, the tool will drop in an interactive
shell inside the container if the build fails.
--sign-with <PATH>: if passed, the chosen GPG key file is used to sign the package.
Currently, such file MUST be a readable, password-free, ascii-armored
GPG private key file.
--always-pull: if passed, a `docker pull` for the latest
image version from Docker Hub (or other configured endpoint) is performed. Please note that
any error that may arise from the operation is currently ignored.
--target-ownership: a string in NN:MM format, which let you choose the ownership of the files
in the output directory. Defaults to current user's uid:gid if not passed. Must be numeric.
Not supported on OSX.
--preserve-container: if passed, the build container(s) won't be removed after the build.
Still, you've got to dig out its id/name yourself. It's useful for debugging purposes,
by the way.
--verbose: display whatever happens during the build.
Examples:
- in this scenario we use no option of ours but we add an option to be forwarded to docker:
docker-rpm-builder srcrpm a682b68bbaba mypackage.src.rpm /tmp/rpms -- --dns=192.168.1.1
- in this scenario we use a repository:tag as an image, and we ask drb to sign the package
docker-rpm-builder srcrpm alanfranz/drb-epel-6-x86-64:latest mypackage.src.rpm /tmp/rpms --sign-with mykey.pgp
There's an additional feature which lets you pass further macros to the rpmbuild call inside the container (see
dockerscripts directory in the source if you want to know more) - if bind a /rpmmacros file inside the container,
it will be copied where it's meant to be used:
docker-rpm-builder dir a682b68bbaba . /tmp/rpms -- --volume=/home/user/my.macros:/rpmmacros:ro,Z
"""
_logger = logging.getLogger("drb.commands.srcrpm")
@click.command(help=_HELP)
@click.argument("image", type=click.STRING)
@click.argument("srcrpm", type=click.Path(exists=True, dir_okay=False, resolve_path=True))
@click.argument("target_directory", type=click.Path(file_okay=False, resolve_path=True))
@click.argument("additional_docker_options", type=click.STRING, nargs=-1)
@click.option("--verify-signature", is_flag=True, default=False)
@click.option("--bash-on-failure", is_flag=True, default=False)
@click.option("--sign-with", nargs=1, type=click.Path(exists=True, dir_okay=False, resolve_path=True), default=None)
@click.option("--always-pull", is_flag=True, default=False)
@click.option("--target-ownership", type=click.STRING, default="{0}:{1}".format(os.getuid(), os.getgid()))
@click.option('--verbose', is_flag=True, default=False)
@click.option('--preserve-container', is_flag=True, default=False)
def srcrpm(image, srcrpm, target_directory, additional_docker_options, verify_signature, bash_on_failure,
sign_with, always_pull, target_ownership, verbose, preserve_container):
configure_root_logger(verbose)
docker = Docker().image(image).init()
if not preserve_container:
docker.rm()
if always_pull:
_logger.info("Now pulling remote image; this could take a while.")
docker.do_pull(ignore_errors=True)
if not always_pull:
_logger.info("Will now perform some operations on target image; we may need to pull it and it could take a while")
srpms_inner_dir = docker.cmd_and_args("rpm", "--eval", "%{_srcrpmdir}").do_run()
rpms_inner_dir = docker.cmd_and_args("rpm", "--eval", "%{_rpmdir}").do_run()
uid, gid = parse_ownership(target_ownership)
rpmbuild_options = "" if verify_signature else "--nosignature"
dockerscripts = getpath("drb/dockerscripts")
docker.additional_options(*additional_docker_options)
if sign_with:
docker.bindmount_file(sign_with, "/private.key")
bashonfail = ""
if bash_on_failure:
bashonfail = "bashonfail"
docker.interactive_and_tty()
with TempDir.platformwise() as srpms_temp:
shutil.copy(srcrpm, srpms_temp.path)
srcrpm_basename = os.path.basename(srcrpm)
mkdir_p(target_directory)
docker.bindmount_dir(dockerscripts, "/dockerscripts").bindmount_dir(srpms_temp.path, srpms_inner_dir) \
.bindmount_dir(target_directory, rpms_inner_dir, read_only=False).workdir("/dockerscripts") \
.env("CALLING_UID", str(uid)).env("CALLING_GID", str(gid)).env("BASH_ON_FAIL", bashonfail) \
.env("RPMBUILD_OPTIONS", rpmbuild_options).env("SRCRPM", srcrpm_basename) \
.cmd_and_args("./rpmbuild-srcrpm-in-docker.sh")
_logger.info("Now building %(srcrpm)s on image %(image)s", locals())
with UserExceptionTransformer(Exception, "docker run error", append_original_message=True):
docker.do_launch_interactively()
_logger.info("Build completed successfully. Your results are in %s", target_directory)
| 47.338346 | 122 | 0.730623 |
ab2ab24f354c1fbdc8b5221061db56a8d8a48689 | 18,211 | py | Python | python/paddle/fluid/tests/unittests/test_layers.py | jerrywgz/Paddle | 85c4912755b783dd7554a9d6b9dae4a7e40371bc | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_layers.py | jerrywgz/Paddle | 85c4912755b783dd7554a9d6b9dae4a7e40371bc | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_layers.py | jerrywgz/Paddle | 85c4912755b783dd7554a9d6b9dae4a7e40371bc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid.layers as layers
from paddle.fluid.layers.device import get_places
import paddle.fluid.nets as nets
from paddle.fluid.framework import Program, program_guard, default_main_program
from paddle.fluid.param_attr import ParamAttr
import decorators
class TestBook(unittest.TestCase):
def test_fit_a_line(self):
program = Program()
with program_guard(program, startup_program=Program()):
x = layers.data(name='x', shape=[13], dtype='float32')
y_predict = layers.fc(input=x, size=1, act=None)
y = layers.data(name='y', shape=[1], dtype='float32')
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(cost)
self.assertIsNotNone(avg_cost)
print(str(program))
def test_recognize_digits_mlp(self):
program = Program()
with program_guard(program, startup_program=Program()):
# Change g_program, so the rest layers use `g_program`
images = layers.data(name='pixel', shape=[784], dtype='float32')
label = layers.data(name='label', shape=[1], dtype='int32')
hidden1 = layers.fc(input=images, size=128, act='relu')
hidden2 = layers.fc(input=hidden1, size=64, act='relu')
predict = layers.fc(input=[hidden2, hidden1],
size=10,
act='softmax',
param_attr=["sftmax.w1", "sftmax.w2"])
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(cost)
self.assertIsNotNone(avg_cost)
print(str(program))
def test_simple_conv2d(self):
program = Program()
with program_guard(program, startup_program=Program()):
images = layers.data(name='pixel', shape=[3, 48, 48], dtype='int32')
layers.conv2d(input=images, num_filters=3, filter_size=[4, 4])
print(str(program))
def test_conv2d_transpose(self):
program = Program()
with program_guard(program):
img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
layers.conv2d_transpose(input=img, num_filters=10, output_size=28)
print(str(program))
def test_recognize_digits_conv(self):
program = Program()
with program_guard(program, startup_program=Program()):
images = layers.data(
name='pixel', shape=[1, 28, 28], dtype='float32')
label = layers.data(name='label', shape=[1], dtype='int32')
conv_pool_1 = nets.simple_img_conv_pool(
input=images,
filter_size=5,
num_filters=2,
pool_size=2,
pool_stride=2,
act="relu")
conv_pool_2 = nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=4,
pool_size=2,
pool_stride=2,
act="relu")
predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(cost)
print(str(program))
def test_word_embedding(self):
program = Program()
with program_guard(program, startup_program=Program()):
dict_size = 10000
embed_size = 32
first_word = layers.data(name='firstw', shape=[1], dtype='int64')
second_word = layers.data(name='secondw', shape=[1], dtype='int64')
third_word = layers.data(name='thirdw', shape=[1], dtype='int64')
forth_word = layers.data(name='forthw', shape=[1], dtype='int64')
next_word = layers.data(name='nextw', shape=[1], dtype='int64')
embed_first = layers.embedding(
input=first_word,
size=[dict_size, embed_size],
dtype='float32',
param_attr='shared_w')
embed_second = layers.embedding(
input=second_word,
size=[dict_size, embed_size],
dtype='float32',
param_attr='shared_w')
embed_third = layers.embedding(
input=third_word,
size=[dict_size, embed_size],
dtype='float32',
param_attr='shared_w')
embed_forth = layers.embedding(
input=forth_word,
size=[dict_size, embed_size],
dtype='float32',
param_attr='shared_w')
concat_embed = layers.concat(
input=[embed_first, embed_second, embed_third, embed_forth],
axis=1)
hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid')
predict_word = layers.fc(input=hidden1,
size=dict_size,
act='softmax')
cost = layers.cross_entropy(input=predict_word, label=next_word)
avg_cost = layers.mean(cost)
self.assertIsNotNone(avg_cost)
print(str(program))
def test_linear_chain_crf(self):
program = Program()
with program_guard(program, startup_program=Program()):
label_dict_len = 10
images = layers.data(name='pixel', shape=[784], dtype='float32')
label = layers.data(name='label', shape=[1], dtype='int32')
hidden = layers.fc(input=images, size=128)
crf = layers.linear_chain_crf(
input=hidden, label=label, param_attr=ParamAttr(name="crfw"))
crf_decode = layers.crf_decoding(
input=hidden, param_attr=ParamAttr(name="crfw"))
layers.chunk_eval(
input=crf_decode,
label=label,
chunk_scheme="IOB",
num_chunk_types=(label_dict_len - 1) / 2)
self.assertFalse(crf is None)
self.assertFalse(crf_decode is None)
print(str(program))
def test_sigmoid_cross_entropy(self):
program = Program()
with program_guard(program):
dat = layers.data(name='data', shape=[10], dtype='float32')
lbl = layers.data(name='label', shape=[10], dtype='float32')
self.assertIsNotNone(
layers.sigmoid_cross_entropy_with_logits(
x=dat, label=lbl))
print(str(program))
def test_hsigmoid(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[2], dtype='float32')
y = layers.data(name='y', shape=[2], dtype='int64')
self.assertIsNotNone(
layers.hsigmoid(
input=x, label=y, num_classes=2))
print(str(program))
def test_sequence_expand(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[10], dtype='float32')
y = layers.data(
name='y', shape=[10, 20], dtype='float32', lod_level=2)
self.assertIsNotNone(layers.sequence_expand(x=x, y=y, ref_level=1))
print(str(program))
def test_lstm_unit(self):
program = Program()
with program_guard(program):
x_t_data = layers.data(
name='x_t_data', shape=[10, 10], dtype='float32')
x_t = layers.fc(input=x_t_data, size=10)
prev_hidden_data = layers.data(
name='prev_hidden_data', shape=[10, 30], dtype='float32')
prev_hidden = layers.fc(input=prev_hidden_data, size=30)
prev_cell_data = layers.data(
name='prev_cell', shape=[10, 30], dtype='float32')
prev_cell = layers.fc(input=prev_cell_data, size=30)
self.assertIsNotNone(
layers.lstm_unit(
x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell))
print(str(program))
def test_dynamic_lstmp(self):
program = Program()
with program_guard(program):
hidden_dim, proj_dim = 16, 8
seq_data = layers.data(
name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
self.assertIsNotNone(
layers.dynamic_lstmp(
input=fc_out, size=4 * hidden_dim, proj_size=proj_dim))
print(str(program))
def test_sequence_softmax(self):
program = Program()
with program_guard(program):
seq_data = layers.data(
name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
seq = layers.fc(input=seq_data, size=20)
self.assertIsNotNone(layers.sequence_softmax(seq))
print(str(program))
def test_softmax(self):
program = Program()
with program_guard(program):
data = layers.data(name='data', shape=[10], dtype='float32')
hid = layers.fc(input=data, size=20)
self.assertIsNotNone(layers.softmax(hid))
print(str(program))
def test_lrn(self):
program = Program()
with program_guard(program):
data = layers.data(name='data', shape=[6, 2, 2], dtype='float32')
self.assertIsNotNone(layers.lrn(data))
print(str(program))
def test_get_places(self):
program = Program()
with program_guard(program):
x = get_places(device_count=4)
self.assertIsNotNone(x)
print(str(program))
def test_sequence_reshape(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
out = layers.sequence_reshape(input=x, new_dim=16)
self.assertIsNotNone(out)
print(str(program))
def test_im2sequence(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
y = layers.data(name='y', shape=[], dtype='float32')
output = layers.im2sequence(
input=x,
input_image_size=y,
stride=[1, 1],
filter_size=[2, 2],
out_stride=[1, 1])
self.assertIsNotNone(output)
print(str(program))
@decorators.prog_scope()
def test_nce(self):
window_size = 5
words = []
for i in xrange(window_size):
words.append(
layers.data(
name='word_{0}'.format(i), shape=[1], dtype='int64'))
dict_size = 10000
label_word = int(window_size / 2) + 1
embs = []
for i in xrange(window_size):
if i == label_word:
continue
emb = layers.embedding(
input=words[i],
size=[dict_size, 32],
param_attr='emb.w',
is_sparse=True)
embs.append(emb)
embs = layers.concat(input=embs, axis=1)
loss = layers.nce(input=embs,
label=words[label_word],
num_total_classes=dict_size,
param_attr='nce.w',
bias_attr='nce.b')
avg_loss = layers.mean(loss)
self.assertIsNotNone(avg_loss)
print(str(default_main_program()))
def test_row_conv(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
out = layers.row_conv(input=x, future_context_size=2)
self.assertIsNotNone(out)
print(str(program))
def test_multiplex(self):
program = Program()
with program_guard(program):
x1 = layers.data(name='x1', shape=[4], dtype='float32')
x2 = layers.data(name='x2', shape=[4], dtype='float32')
index = layers.data(name='index', shape=[1], dtype='int32')
out = layers.multiplex(inputs=[x1, x2], index=index)
self.assertIsNotNone(out)
print(str(program))
def test_softmax_with_cross_entropy(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[16], dtype='float32')
y = layers.data(name='label', shape=[1], dtype='int64')
loss = layers.softmax_with_cross_entropy(x, y)
self.assertIsNotNone(loss)
print(str(program))
def test_smooth_l1(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[4], dtype='float32')
y = layers.data(name='label', shape=[4], dtype='float32')
loss = layers.smooth_l1(x, y)
self.assertIsNotNone(loss)
print(str(program))
def test_lod_reset(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[10], dtype='float32')
y = layers.data(
name='y', shape=[10, 20], dtype='float32', lod_level=2)
print(layers.lod_reset(x=x, y=y))
print(str(program))
def test_label_smooth(self):
program = Program()
with program_guard(program):
label = layers.data(name="label", shape=[1], dtype="float32")
one_hot_label = layers.one_hot(input=label, depth=10)
smooth_label = layers.label_smooth(
label=one_hot_label, epsilon=0.1, dtype="float32")
self.assertIsNotNone(smooth_label)
print(str(program))
def test_topk(self):
program = Program()
with program_guard(program):
data = layers.data(name="label", shape=[200], dtype="float32")
values, indices = layers.topk(data, k=5)
self.assertIsNotNone(values)
self.assertIsNotNone(indices)
print(str(program))
def test_roi_pool(self):
program = Program()
with program_guard(program):
x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
rois = layers.data(
name="rois", shape=[4], dtype="float32", lod_level=1)
output = layers.roi_pool(x, rois, 7, 7, 0.6)
self.assertIsNotNone(output)
print(str(program))
def test_resize_bilinear(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[3, 9, 6], dtype="float32")
output = layers.resize_bilinear(x, out_shape=[12, 12])
self.assertIsNotNone(output)
output = layers.resize_bilinear(x, scale=3)
self.assertIsNotNone(output)
print(str(program))
def test_polygon_box_transform(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[8, 4, 4], dtype="float32")
output = layers.polygon_box_transform(input=x)
self.assertIsNotNone(output)
print(str(program))
def test_l2_normalize(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[8, 7, 10], dtype="float32")
output = layers.l2_normalize(x, axis=1)
def test_maxout(self):
program = Program()
with program_guard(program):
data = layers.data(name='x', shape=[8, 6, 6], dtype="float32")
output = layers.maxout(x=data, groups=2)
self.assertIsNotNone(output)
print(str(program))
def test_crop(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[3, 5], dtype="float32")
y = layers.data(name='y', shape=[2, 3], dtype="float32")
output = layers.crop(x, shape=y)
self.assertIsNotNone(output)
print(str(program))
def test_mean_iou(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[16], dtype='float32')
y = layers.data(name='label', shape=[1], dtype='int64')
iou = layers.mean_iou(x, y, 2)
self.assertIsNotNone(iou)
print(str(program))
def test_argsort(self):
program = Program()
with program_guard(program):
data = layers.data(name='x', shape=[2, 3, 3], dtype="float32")
out, ids = layers.argsort(input=data, axis=1)
self.assertIsNotNone(out)
self.assertIsNotNone(ids)
print(str(program))
def test_rank_loss(self):
program = Program()
with program_guard(program):
label = layers.data(
name='label',
append_batch_size=False,
shape=[16, 1],
dtype="float32")
left = layers.data(
name='left',
append_batch_size=False,
shape=[16, 1],
dtype="float32")
right = layers.data(
name='right',
append_batch_size=False,
shape=[16, 1],
dtype="float32")
out = layers.rank_loss(label, left, right, name="rank_loss")
self.assertIsNotNone(out)
print(str(program))
if __name__ == '__main__':
unittest.main()
| 38.664544 | 80 | 0.565208 |
c9487ea275929491902eb32cd01dc1c4f268f5f4 | 1,351 | py | Python | fm/migrations/0005_default_colors.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | fm/migrations/0005_default_colors.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | fm/migrations/0005_default_colors.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# default colors
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.migration.base import BaseMigration
COLORS = {
"CRITICAL": ("#BB0000", "#FFDDCC"),
"MAJOR": ("#BB0000", "#FFEEDD"),
"MINOR": ("#BB0000", "#FFFFBB"),
"WARNING": ("#BB0000", "#FFFFDD"),
"NORMAL": ("#BB0000", "#A8C2C2"),
"INFO": ("#BB0000", "#DCF3F3"),
"DEFAULT": ("#FFFF00", "#643200"),
}
class Migration(BaseMigration):
def migrate(self):
for p, colors in COLORS.items():
font, bg = colors
r = self.db.execute(
"SELECT id,font_color,background_color FROM fm_eventpriority WHERE name=%s", [p]
)
if len(r) == 1:
pid, dbf, dbg = r[0]
if not dbf:
self.db.execute(
"UPDATE fm_eventpriority SET font_color=%s WHERE id=%s", [font, pid]
)
if not dbf:
self.db.execute(
"UPDATE fm_eventpriority SET background_color=%s WHERE id=%s", [bg, pid]
)
| 33.775 | 96 | 0.42191 |
41bba7c73a24faa25874d0b2c6eaed1939226cfb | 4,213 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_workload_container_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_workload_container_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/azure_workload_container_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .protection_container_py3 import ProtectionContainer
class AzureWorkloadContainer(ProtectionContainer):
"""Container for the workloads running inside Azure Compute or Classic
Compute.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureSQLAGWorkloadContainerProtectionContainer,
AzureVMAppContainerProtectionContainer
All required parameters must be populated in order to send to Azure.
:param friendly_name: Friendly name of the container.
:type friendly_name: str
:param backup_management_type: Type of backup managemenent for the
container. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB',
'DPM', 'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
'DefaultBackup'
:type backup_management_type: str or
~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
:param registration_status: Status of registration of the container with
the Recovery Services Vault.
:type registration_status: str
:param health_status: Status of health of the container.
:type health_status: str
:param container_type: Required. Constant filled by server.
:type container_type: str
:param source_resource_id: ARM ID of the virtual machine represented by
this Azure Workload Container
:type source_resource_id: str
:param last_updated_time: Time stamp when this container was updated.
:type last_updated_time: datetime
:param extended_info: Additional details of a workload container.
:type extended_info:
~azure.mgmt.recoveryservicesbackup.models.AzureWorkloadContainerExtendedInfo
:param workload_type: Workload type for which registration was sent.
Possible values include: 'Invalid', 'VM', 'FileFolder', 'AzureSqlDb',
'SQLDB', 'Exchange', 'Sharepoint', 'VMwareVM', 'SystemState', 'Client',
'GenericDataSource', 'SQLDataBase', 'AzureFileShare', 'SAPHanaDatabase'
:type workload_type: str or
~azure.mgmt.recoveryservicesbackup.models.WorkloadType
"""
_validation = {
'container_type': {'required': True},
}
_attribute_map = {
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'registration_status': {'key': 'registrationStatus', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
'container_type': {'key': 'containerType', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'last_updated_time': {'key': 'lastUpdatedTime', 'type': 'iso-8601'},
'extended_info': {'key': 'extendedInfo', 'type': 'AzureWorkloadContainerExtendedInfo'},
'workload_type': {'key': 'workloadType', 'type': 'str'},
}
_subtype_map = {
'container_type': {'SQLAGWorkLoadContainer': 'AzureSQLAGWorkloadContainerProtectionContainer', 'VMAppContainer': 'AzureVMAppContainerProtectionContainer'}
}
def __init__(self, *, friendly_name: str=None, backup_management_type=None, registration_status: str=None, health_status: str=None, source_resource_id: str=None, last_updated_time=None, extended_info=None, workload_type=None, **kwargs) -> None:
super(AzureWorkloadContainer, self).__init__(friendly_name=friendly_name, backup_management_type=backup_management_type, registration_status=registration_status, health_status=health_status, **kwargs)
self.source_resource_id = source_resource_id
self.last_updated_time = last_updated_time
self.extended_info = extended_info
self.workload_type = workload_type
self.container_type = 'AzureWorkloadContainer'
| 50.759036 | 248 | 0.704961 |
01a9d063165b03dddb20881199fd1d53c2d199a9 | 354 | py | Python | uBloxStream/archive/GPSmap.py | roblee357/JRone | f6c3080f260858f12da0be9f353cc9b62fd47c06 | [
"MIT"
] | null | null | null | uBloxStream/archive/GPSmap.py | roblee357/JRone | f6c3080f260858f12da0be9f353cc9b62fd47c06 | [
"MIT"
] | null | null | null | uBloxStream/archive/GPSmap.py | roblee357/JRone | f6c3080f260858f12da0be9f353cc9b62fd47c06 | [
"MIT"
] | null | null | null | import mplleaflet
import csv
from matplotlib import pyplot as plt
x,y = [],[]
with open('/home/ros/Documents/archive/GPS_Delta_log (copy).txt') as f:
reader = csv.reader(f)
for row in reader:
x.append(row[0])
y.append(row[1])
fig = plt.figure()
plt.plot(x[:],y[:])
plt.plot(x[:],y[:],'ro');
plt.show()
mplleaflet.display(fig=fig) | 23.6 | 71 | 0.638418 |
35546351c700ff3b058fc689362145b0d946b802 | 15,260 | py | Python | dataset/pan_pp/pan_pp_synth.py | rigvedsah000/PAN- | 16f8482886c5eccecf29fe072025ba54c64e4b9d | [
"Apache-2.0"
] | null | null | null | dataset/pan_pp/pan_pp_synth.py | rigvedsah000/PAN- | 16f8482886c5eccecf29fe072025ba54c64e4b9d | [
"Apache-2.0"
] | null | null | null | dataset/pan_pp/pan_pp_synth.py | rigvedsah000/PAN- | 16f8482886c5eccecf29fe072025ba54c64e4b9d | [
"Apache-2.0"
] | null | null | null | import math
import random
import string
import cv2
import mmcv
import numpy as np
import Polygon as plg
import pyclipper
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.utils import data
def get_img(img_path, read_type='pil'):
try:
if read_type == 'cv2':
img = cv2.imread(img_path)
img = img[:, :, [2, 1, 0]]
elif read_type == 'pil':
img = np.array(Image.open(img_path))
except Exception:
print('Cannot read image: %s.' % img_path)
raise
return img
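# Parse an ICDAR-style ground-truth file: each line holds 8 polygon
# coordinates followed by the transcription; words starting with '#' are
# treated as "do not care" ('###'), and the coordinates are normalised by the
# image width and height.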
def get_ann(img, gt_path):
h, w = img.shape[0:2]
lines = mmcv.list_from_file(gt_path)
bboxes = []
words = []
for line in lines:
line = line.encode('utf-8').decode('utf-8-sig')
line = line.replace('\xef\xbb\xbf', '')
gt = line.split(',')
word = gt[8].replace('\r', '').replace('\n', '')
if word[0] == '#':
words.append('###')
else:
words.append(word)
bbox = [int(gt[i]) for i in range(8)]
bbox = np.array(bbox) / ([w * 1.0, h * 1.0] * 4)
bboxes.append(bbox)
return np.array(bboxes), words
def random_horizontal_flip(imgs):
if random.random() < 0.5:
for i in range(len(imgs)):
imgs[i] = np.flip(imgs[i], axis=1).copy()
return imgs
def random_rotate(imgs):
max_angle = 10
angle = random.random() * 2 * max_angle - max_angle
for i in range(len(imgs)):
img = imgs[i]
w, h = img.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((h / 2, w / 2), angle, 1)
img_rotation = cv2.warpAffine(img,
rotation_matrix, (h, w),
flags=cv2.INTER_NEAREST)
imgs[i] = img_rotation
return imgs
def scale_aligned(img, h_scale, w_scale):
h, w = img.shape[0:2]
h = int(h * h_scale + 0.5)
w = int(w * w_scale + 0.5)
if h % 32 != 0:
h = h + (32 - h % 32)
if w % 32 != 0:
w = w + (32 - w % 32)
img = cv2.resize(img, dsize=(w, h))
return img
def scale_aligned_short(img, short_size=736):
h, w = img.shape[0:2]
scale = short_size * 1.0 / min(h, w)
h = int(h * scale + 0.5)
w = int(w * scale + 0.5)
if h % 32 != 0:
h = h + (32 - h % 32)
if w % 32 != 0:
w = w + (32 - w % 32)
img = cv2.resize(img, dsize=(w, h))
return img
def random_scale(img, short_size=736):
h, w = img.shape[0:2]
scale = np.random.choice(np.array([0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3]))
scale = (scale * short_size) / min(h, w)
aspect = np.random.choice(np.array([0.9, 0.95, 1.0, 1.05, 1.1]))
h_scale = scale * math.sqrt(aspect)
w_scale = scale / math.sqrt(aspect)
img = scale_aligned(img, h_scale, w_scale)
return img
def random_crop_padding(imgs, target_size):
h, w = imgs[0].shape[0:2]
t_w, t_h = target_size
p_w, p_h = target_size
if w == t_w and h == t_h:
return imgs
t_h = t_h if t_h < h else h
t_w = t_w if t_w < w else w
if random.random() > 3.0 / 8.0 and np.max(imgs[1]) > 0:
# make sure to crop the text region
tl = np.min(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)
tl[tl < 0] = 0
br = np.max(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)
br[br < 0] = 0
br[0] = min(br[0], h - t_h)
br[1] = min(br[1], w - t_w)
i = random.randint(tl[0], br[0]) if tl[0] < br[0] else 0
j = random.randint(tl[1], br[1]) if tl[1] < br[1] else 0
else:
i = random.randint(0, h - t_h) if h - t_h > 0 else 0
j = random.randint(0, w - t_w) if w - t_w > 0 else 0
n_imgs = []
for idx in range(len(imgs)):
if len(imgs[idx].shape) == 3:
s3_length = int(imgs[idx].shape[-1])
img = imgs[idx][i:i + t_h, j:j + t_w, :]
img_p = cv2.copyMakeBorder(img,
0,
p_h - t_h,
0,
p_w - t_w,
borderType=cv2.BORDER_CONSTANT,
value=tuple(0
for i in range(s3_length)))
else:
img = imgs[idx][i:i + t_h, j:j + t_w]
img_p = cv2.copyMakeBorder(img,
0,
p_h - t_h,
0,
p_w - t_w,
borderType=cv2.BORDER_CONSTANT,
value=(0, ))
n_imgs.append(img_p)
return n_imgs
def update_word_mask(instance, instance_before_crop, word_mask):
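    # Zero out word_mask for any text instance that was cropped away entirely or
    # that kept less than ~90% of its pre-crop pixels, so recognition is not
    # supervised on heavily truncated instances.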
labels = np.unique(instance)
for label in labels:
if label == 0:
continue
ind = instance == label
if np.sum(ind) == 0:
word_mask[label] = 0
continue
ind_before_crop = instance_before_crop == label
# print(np.sum(ind), np.sum(ind_before_crop))
if float(np.sum(ind)) / np.sum(ind_before_crop) > 0.9:
continue
word_mask[label] = 0
return word_mask
def dist(a, b):
return np.linalg.norm((a - b), ord=2, axis=0)
def perimeter(bbox):
peri = 0.0
for i in range(bbox.shape[0]):
peri += dist(bbox[i], bbox[(i + 1) % bbox.shape[0]])
return peri
def shrink(bboxes, rate, max_shr=20):
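    # Shrink each polygon inward with pyclipper by an offset proportional to
    # area / perimeter (capped at max_shr) to build the kernel maps; if clipping
    # fails or degenerates, fall back to the original bbox.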
rate = rate * rate
shrinked_bboxes = []
for bbox in bboxes:
area = plg.Polygon(bbox).area()
peri = perimeter(bbox)
try:
pco = pyclipper.PyclipperOffset()
pco.AddPath(bbox, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
offset = min(int(area * (1 - rate) / (peri + 0.001) + 0.5),
max_shr)
shrinked_bbox = pco.Execute(-offset)
if len(shrinked_bbox) == 0:
shrinked_bboxes.append(bbox)
continue
shrinked_bbox = np.array(shrinked_bbox)[0]
if shrinked_bbox.shape[0] <= 2:
shrinked_bboxes.append(bbox)
continue
shrinked_bboxes.append(shrinked_bbox)
except Exception:
print('area:', area, 'peri:', peri)
shrinked_bboxes.append(bbox)
return shrinked_bboxes
def get_vocabulary(voc_type, EOS='EOS', PADDING='PAD', UNKNOWN='UNK'):
if voc_type == 'LOWERCASE':
voc = list(string.digits + string.ascii_lowercase)
elif voc_type == 'ALLCASES':
voc = list(string.digits + string.ascii_letters)
elif voc_type == 'ALLCASES_SYMBOLS':
voc = list(string.printable[:-6])
else:
raise KeyError('voc_type must be one of "LOWERCASE", '
'"ALLCASES", "ALLCASES_SYMBOLS"')
# update the voc with specifical chars
voc.append(EOS)
voc.append(PADDING)
voc.append(UNKNOWN)
char2id = dict(zip(voc, range(len(voc))))
id2char = dict(zip(range(len(voc)), voc))
return voc, char2id, id2char
class PAN_PP_IC15(data.Dataset):
def __init__(self,
split='train',
is_transform=False,
img_size=None,
short_size=736,
kernel_scale=0.5,
with_rec=False,
read_type='pil',
report_speed=False,
batch_size=16,
num_workers=8,
shuffle=True,
drop_last=False,
img_dir='',
ann_dir=''):
self.split = split
self.is_transform = is_transform
self.img_size = img_size if (
img_size is None or isinstance(img_size, tuple)) else (img_size,
img_size)
self.kernel_scale = kernel_scale
self.short_size = short_size
self.with_rec = with_rec
self.read_type = read_type
if split == 'train':
data_dirs = [img_dir]
gt_dirs = [ann_dir]
elif split == 'test':
data_dirs = [img_dir]
gt_dirs = [ann_dir]
else:
print('Error: split must be train or test!')
raise
self.img_paths = []
self.gt_paths = []
for data_dir, gt_dir in zip(data_dirs, gt_dirs):
img_names = [
img_name for img_name in mmcv.utils.scandir(data_dir, '.jpg')
]
img_names.extend([
img_name for img_name in mmcv.utils.scandir(data_dir, '.png')
])
img_paths = []
gt_paths = []
for idx, img_name in enumerate(img_names):
img_path = data_dir + img_name
img_paths.append(img_path)
gt_name = img_name.split('.')[0] + '.txt'
gt_path = gt_dir + gt_name
gt_paths.append(gt_path)
self.img_paths.extend(img_paths)
self.gt_paths.extend(gt_paths)
if report_speed:
target_size = 3000
extend_scale = (target_size + len(self.img_paths) - 1) // len(
self.img_paths)
self.img_paths = (self.img_paths * extend_scale)[:target_size]
self.gt_paths = (self.gt_paths * extend_scale)[:target_size]
self.voc, self.char2id, self.id2char = get_vocabulary('LOWERCASE')
self.max_word_num = 200
self.max_word_len = 32
print('reading type: %s.' % self.read_type)
def __len__(self):
return len(self.img_paths)
def prepare_train_data(self, index):
img_path = self.img_paths[index]
gt_path = self.gt_paths[index]
img = get_img(img_path, self.read_type)
bboxes, words = get_ann(img, gt_path)
if bboxes.shape[0] > self.max_word_num:
bboxes = bboxes[:self.max_word_num]
words = words[:self.max_word_num]
gt_words = np.full((self.max_word_num + 1, self.max_word_len),
self.char2id['PAD'],
dtype=np.int32)
word_mask = np.zeros((self.max_word_num + 1, ), dtype=np.int32)
for i, word in enumerate(words):
if word == '###':
continue
word = word.lower()
gt_word = np.full((self.max_word_len, ),
self.char2id['PAD'],
                              dtype=np.int32)
for j, char in enumerate(word):
if j > self.max_word_len - 1:
break
if char in self.char2id:
gt_word[j] = self.char2id[char]
else:
gt_word[j] = self.char2id['UNK']
if len(word) > self.max_word_len - 1:
gt_word[-1] = self.char2id['EOS']
else:
gt_word[len(word)] = self.char2id['EOS']
gt_words[i + 1] = gt_word
word_mask[i + 1] = 1
if self.is_transform:
img = random_scale(img, self.short_size)
gt_instance = np.zeros(img.shape[0:2], dtype='uint8')
training_mask = np.ones(img.shape[0:2], dtype='uint8')
if bboxes.shape[0] > 0:
bboxes = np.reshape(bboxes * ([img.shape[1], img.shape[0]] * 4),
(bboxes.shape[0], -1, 2)).astype('int32')
for i in range(bboxes.shape[0]):
cv2.drawContours(gt_instance, [bboxes[i]], -1, i + 1, -1)
if words[i] == '###':
cv2.drawContours(training_mask, [bboxes[i]], -1, 0, -1)
gt_kernels = []
for rate in [self.kernel_scale]:
gt_kernel = np.zeros(img.shape[0:2], dtype='uint8')
kernel_bboxes = shrink(bboxes, rate)
for i in range(bboxes.shape[0]):
cv2.drawContours(gt_kernel, [kernel_bboxes[i]], -1, 1, -1)
gt_kernels.append(gt_kernel)
if self.is_transform:
imgs = [img, gt_instance, training_mask]
imgs.extend(gt_kernels)
if not self.with_rec:
imgs = random_horizontal_flip(imgs)
imgs = random_rotate(imgs)
gt_instance_before_crop = imgs[1].copy()
imgs = random_crop_padding(imgs, self.img_size)
img, gt_instance, training_mask, gt_kernels = imgs[0], imgs[
1], imgs[2], imgs[3:]
word_mask = update_word_mask(gt_instance, gt_instance_before_crop,
word_mask)
gt_text = gt_instance.copy()
gt_text[gt_text > 0] = 1
gt_kernels = np.array(gt_kernels)
max_instance = np.max(gt_instance)
gt_bboxes = np.zeros((self.max_word_num + 1, 4), dtype=np.int32)
for i in range(1, max_instance + 1):
ind = gt_instance == i
if np.sum(ind) == 0:
continue
points = np.array(np.where(ind)).transpose((1, 0))
tl = np.min(points, axis=0)
br = np.max(points, axis=0) + 1
gt_bboxes[i] = (tl[0], tl[1], br[0], br[1])
img = Image.fromarray(img)
img = img.convert('RGB')
if self.is_transform:
img = transforms.ColorJitter(brightness=32.0 / 255,
saturation=0.5)(img)
img = transforms.ToTensor()(img)
img = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])(img)
gt_text = torch.from_numpy(gt_text).long()
gt_kernels = torch.from_numpy(gt_kernels).long()
training_mask = torch.from_numpy(training_mask).long()
gt_instance = torch.from_numpy(gt_instance).long()
gt_bboxes = torch.from_numpy(gt_bboxes).long()
gt_words = torch.from_numpy(gt_words).long()
word_mask = torch.from_numpy(word_mask).long()
data = dict(
imgs=img,
gt_texts=gt_text,
gt_kernels=gt_kernels,
training_masks=training_mask,
gt_instances=gt_instance,
gt_bboxes=gt_bboxes,
)
if self.with_rec:
data.update(dict(gt_words=gt_words, word_masks=word_mask))
return data
def prepare_test_data(self, index):
img_path = self.img_paths[index]
img = get_img(img_path, self.read_type)
img_meta = dict(org_img_size=np.array(img.shape[:2]))
img = scale_aligned_short(img, self.short_size)
img_meta.update(dict(img_size=np.array(img.shape[:2])))
img = Image.fromarray(img)
img = img.convert('RGB')
img = transforms.ToTensor()(img)
img = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])(img)
data = dict(imgs=img, img_metas=img_meta)
return data
def __getitem__(self, index):
if self.split == 'train':
return self.prepare_train_data(index)
elif self.split == 'test':
return self.prepare_test_data(index)
| 33.173913 | 78 | 0.516972 |
26e8f5c91a52c44590ca035a10bbefa02a587b28 | 3,540 | py | Python | test/test_shell.py | DalavanCloud/hpc-container-maker | 555093c0a5c98bd2b0114831b8c676c0c3c50dd7 | [
"Apache-2.0"
] | 1 | 2019-02-25T22:54:31.000Z | 2019-02-25T22:54:31.000Z | test/test_shell.py | DalavanCloud/hpc-container-maker | 555093c0a5c98bd2b0114831b8c676c0c3c50dd7 | [
"Apache-2.0"
] | null | null | null | test/test_shell.py | DalavanCloud/hpc-container-maker | 555093c0a5c98bd2b0114831b8c676c0c3c50dd7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Test cases for the shell module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import docker, invalid_ctype, singularity
from hpccm.primitives.shell import shell
class Test_shell(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@docker
def test_empty(self):
"""No commands specified"""
s = shell()
self.assertEqual(str(s), '')
@invalid_ctype
def test_invalid_ctype(self):
"""Invalid container type specified"""
s = shell(commands=['a'])
with self.assertRaises(RuntimeError):
str(s)
@docker
def test_single_docker(self):
"""Single command specified"""
cmd = ['z']
s = shell(commands=cmd)
self.assertEqual(str(s), 'RUN z')
@singularity
def test_single_singularity(self):
"""Single command specified"""
cmd = ['z']
s = shell(commands=cmd)
self.assertEqual(str(s), '%post\n cd /\n z')
@docker
def test_nochdir_docker(self):
"""chdir disable"""
cmd = ['z']
s = shell(chdir=False, commands=cmd)
self.assertEqual(str(s), 'RUN z')
@singularity
def test_nochdir_singularity(self):
"""chdir disable"""
cmd = ['z']
s = shell(chdir=False, commands=cmd)
self.assertEqual(str(s), '%post\n z')
@docker
def test_multiple_docker(self):
"""List of commands specified"""
cmds = ['a', 'b', 'c']
s = shell(commands=cmds)
self.assertEqual(str(s), 'RUN a && \\\n b && \\\n c')
@singularity
def test_multiple_singularity(self):
"""List of commands specified"""
cmds = ['a', 'b', 'c']
s = shell(commands=cmds)
self.assertEqual(str(s), '%post\n cd /\n a\n b\n c')
@singularity
def test_appinstall_multiple_singularity(self):
"""Multiple app-specific install commands"""
cmds = ['a', 'b', 'c']
s = shell(commands=cmds, _app='foo')
self.assertEqual(str(s), '%appinstall foo\n a\n b\n c')
@docker
def test_appinstall_docker(self):
"""appinstall not implemented in Docker"""
cmds = ['a', 'b', 'c']
s = shell(commands=cmds, _app='foo')
self.assertEqual(str(s), 'RUN a && \\\n b && \\\n c')
@singularity
def test_appinstall_env_multiple_singularity(self):
"""Multiple app-specific install commands"""
cmds = ['a', 'b', 'c']
s = shell(commands=cmds, _app='foo', _appenv=True)
self.assertEqual(str(s), '%appinstall foo\n'
' for f in /.singularity.d/env/*; do . $f; done\n'
' a\n b\n c')
| 31.891892 | 74 | 0.608192 |
45e5ff7b71a0ed709cf7d288a16ed9eafd42a13d | 131 | py | Python | Code/ModTestCode.py | BoasWhip/Black | 7740ded8bc5a0645d363a3c53602af928a776f0d | [
"MIT"
] | null | null | null | Code/ModTestCode.py | BoasWhip/Black | 7740ded8bc5a0645d363a3c53602af928a776f0d | [
"MIT"
] | null | null | null | Code/ModTestCode.py | BoasWhip/Black | 7740ded8bc5a0645d363a3c53602af928a776f0d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 25 09:41:54 2017
@author: ozsanos
"""
from ModTest import Pi
print(Pi())
| 13.1 | 36 | 0.580153 |
652ad5d6b7ba64c942050885bc5d70c3b0df2cc7 | 312 | py | Python | generated-libraries/python/netapp/job/cron_day_of_month.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/job/cron_day_of_month.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/job/cron_day_of_month.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | class CronDayOfMonth(int):
"""
Job Manager cron scheduling day of month. -1 represents all days
of a month from 1 to 31, and only supported for cron schedule
create and modify.
Range : [-1..31].
"""
@staticmethod
def get_api_name():
return "cron-day-of-month"
| 24 | 68 | 0.615385 |
6bca0f9a4265d1fe8973e528b17f3771eafad2b6 | 386 | py | Python | codes_/0100_Same_Tree.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/0100_Same_Tree.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | codes_/0100_Same_Tree.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
] | null | null | null | # %% [100. *Same Tree](https://leetcode.com/problems/same-tree/)
# Problem: check whether two binary trees (TreeNode) are identical
# Approach: compare them recursively
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
if not p or not q:
return p is q
return (
p.val == q.val
and self.isSameTree(p.left, q.left)
and self.isSameTree(p.right, q.right)
)
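# Quick local check (TreeNode is provided by the judge; the stub below is only an
# assumed helper for running this outside LeetCode):
#   class TreeNode:
#       def __init__(self, val=0, left=None, right=None):
#           self.val, self.left, self.right = val, left, right
#   Solution().isSameTree(TreeNode(1, TreeNode(2)), TreeNode(1, TreeNode(2)))  # -> True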
| 29.692308 | 64 | 0.567358 |
291b91320130eaae8d43ec7e52e0c97012b9a114 | 2,869 | py | Python | openstack_dashboard/dashboards/admin/defaults/tables.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 1 | 2021-01-20T00:14:15.000Z | 2021-01-20T00:14:15.000Z | openstack_dashboard/dashboards/admin/defaults/tables.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 1 | 2019-10-27T15:57:25.000Z | 2019-10-27T15:57:25.000Z | openstack_dashboard/dashboards/admin/defaults/tables.py | timpricecatalyst/horizon | 8279ae0ed464e62e1c91e78341342160f8a07172 | [
"Apache-2.0"
] | 15 | 2017-01-12T10:40:00.000Z | 2019-04-19T08:28:05.000Z | # Copyright 2013 Kylin, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tables
class QuotaFilterAction(tables.FilterAction):
def filter(self, table, tenants, filter_string):
q = filter_string.lower()
def comp(tenant):
if q in tenant.name.lower():
return True
return False
return filter(comp, tenants)
class UpdateDefaultQuotas(tables.LinkAction):
name = "update_defaults"
verbose_name = _("Update Defaults")
url = "horizon:admin:defaults:update_defaults"
classes = ("ajax-modal",)
icon = "pencil"
def get_quota_name(quota):
QUOTA_NAMES = {
'injected_file_content_bytes': _('Injected File Content Bytes'),
'injected_file_path_bytes': _('Length of Injected File Path'),
'metadata_items': _('Metadata Items'),
'cores': _('VCPUs'),
'instances': _('Instances'),
'injected_files': _('Injected Files'),
'volumes': _('Volumes'),
'snapshots': _('Volume Snapshots'),
'gigabytes': _('Total Size of Volumes and Snapshots (GB)'),
'ram': _('RAM (MB)'),
'floating_ips': _('Floating IPs'),
'security_groups': _('Security Groups'),
'security_group_rules': _('Security Group Rules'),
'key_pairs': _('Key Pairs'),
'fixed_ips': _('Fixed IPs'),
'volumes_volume_luks': _('LUKS Volumes'),
'snapshots_volume_luks': _('LUKS Volume Snapshots'),
'gigabytes_volume_luks':
_('Total Size of LUKS Volumes and Snapshots (GB)'),
'dm-crypt': _('dm-crypt'),
'server_group_members': _('Server Group Members'),
'server_groups': _('Server Groups'),
'backup_gigabytes': _('Backup Gigabytes'),
'backups': _('Backups')
}
return QUOTA_NAMES.get(quota.name, quota.name.replace("_", " ").title())
class QuotasTable(tables.DataTable):
name = tables.Column(get_quota_name, verbose_name=_('Quota Name'))
limit = tables.Column("limit", verbose_name=_('Limit'))
def get_object_id(self, obj):
return obj.name
class Meta(object):
name = "quotas"
verbose_name = _("Quotas")
table_actions = (QuotaFilterAction, UpdateDefaultQuotas)
multi_select = False
| 34.987805 | 78 | 0.644475 |
a2347fdcc1113e6b5ce750f2a2684ae55566bbfa | 1,641 | py | Python | scripts/pdts/pdts_bbp_g0.py | georgelamb19/chempropBayes | 88d660398a772705804568b671b3614c636505aa | [
"MIT"
] | 16 | 2020-09-17T16:23:43.000Z | 2022-02-23T07:54:34.000Z | scripts/pdts/pdts_bbp_g0.py | georgelamb19/chempropBayes | 88d660398a772705804568b671b3614c636505aa | [
"MIT"
] | null | null | null | scripts/pdts/pdts_bbp_g0.py | georgelamb19/chempropBayes | 88d660398a772705804568b671b3614c636505aa | [
"MIT"
] | 2 | 2021-05-14T14:22:37.000Z | 2021-07-06T01:06:13.000Z | import os
import torch
# checks
print('working directory is:')
print(os.getcwd())
print('is CUDA available?')
print(torch.cuda.is_available())
# imports
from chemprop.args import TrainArgs
from chemprop.train.pdts import pdts
# instantiate args class and load from dict
args = TrainArgs()
args.from_dict({
'dataset_type': 'regression',
'data_path': '/home/willlamb/chempropBayes/data/qm9.csv'
})
##################### ARGS #####################
# architecture
args.hidden_size = 500
args.depth = 5
args.ffn_num_layers = 3
args.activation = 'ReLU'
args.ffn_hidden_size = args.hidden_size
args.features_path = None
args.features_generator = None
args.atom_messages = False
args.undirected = False
args.bias = False
# data
args.max_data_size = 100000
args.data_seeds = [0,1,2,3,4]
args.split_type = 'random'
args.split_sizes = (0.05, 0.95)
# metric
args.metric = 'mae'
# run seeds
args.pytorch_seeds = [0,1,2,3,4]
################################################
# names and directories
args.results_dir = '/home/willlamb/results_pdts/bbp_greedy'
args.save_dir = '/home/willlamb/checkpoints_pdts/bbp_greedy'
args.checkpoint_path = '/home/willlamb/checkpoints_pdts/map_greedy'
args.wandb_proj = 'lanterne_bbp'
args.wandb_name = 'bbp_greedy'
args.thompson = False
### bbp ###
args.bbp = True
args.samples = 50
args.pdts = True
args.pdts_batches = 30
args.epochs_init_map = 0
args.epochs_init = 1000
args.epochs = 100
args.lr = 1e-4
args.prior_sig_bbp = 0.15
args.rho_min_bbp = -5.5
args.rho_max_bbp = -5
args.samples_bbp = 5
################################################
# run
results = pdts(args, model_idx = 0)
| 19.535714 | 67 | 0.677636 |
e46fe0aaa6550c8ff395a9721eb344bdeeff8fa9 | 6,259 | py | Python | vnpy/app/data_manager/engine.py | hardywu/vnpy | 81ab73dc57d12a3ff7c74c73665513b46fc0f668 | [
"MIT"
] | 1 | 2021-05-14T12:57:08.000Z | 2021-05-14T12:57:08.000Z | vnpy/app/data_manager/engine.py | hardywu/vnpy | 81ab73dc57d12a3ff7c74c73665513b46fc0f668 | [
"MIT"
] | null | null | null | vnpy/app/data_manager/engine.py | hardywu/vnpy | 81ab73dc57d12a3ff7c74c73665513b46fc0f668 | [
"MIT"
] | 1 | 2021-06-14T13:26:41.000Z | 2021-06-14T13:26:41.000Z | import csv
from datetime import datetime
from typing import List, Tuple
from pytz import timezone
from vnpy.trader.database import BarOverview, DB_TZ
from vnpy.trader.engine import BaseEngine, MainEngine, EventEngine
from vnpy.trader.constant import Interval, Exchange
from vnpy.trader.object import BarData, HistoryRequest
from vnpy.trader.rqdata import rqdata_client
from vnpy.trader.database import database_manager
APP_NAME = "DataManager"
class ManagerEngine(BaseEngine):
""""""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
def import_data_from_csv(
self,
file_path: str,
symbol: str,
exchange: Exchange,
interval: Interval,
tz_name: str,
datetime_head: str,
open_head: str,
high_head: str,
low_head: str,
close_head: str,
volume_head: str,
open_interest_head: str,
datetime_format: str
) -> Tuple:
""""""
with open(file_path, "rt") as f:
buf = [line.replace("\0", "") for line in f]
reader = csv.DictReader(buf, delimiter=",")
bars = []
start = None
count = 0
tz = timezone(tz_name)
for item in reader:
if datetime_format:
dt = datetime.strptime(item[datetime_head], datetime_format)
else:
dt = datetime.fromisoformat(item[datetime_head])
dt = tz.localize(dt)
open_interest = item.get(open_interest_head, 0)
bar = BarData(
symbol=symbol,
exchange=exchange,
datetime=dt,
interval=interval,
volume=float(item[volume_head]),
open_price=float(item[open_head]),
high_price=float(item[high_head]),
low_price=float(item[low_head]),
close_price=float(item[close_head]),
open_interest=float(open_interest),
gateway_name="DB",
)
bars.append(bar)
# do some statistics
count += 1
if not start:
start = bar.datetime
# insert into database
database_manager.save_bar_data(bars)
end = bar.datetime
return start, end, count
def output_data_to_csv(
self,
file_path: str,
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
) -> bool:
""""""
bars = self.load_bar_data(symbol, exchange, interval, start, end)
fieldnames = [
"symbol",
"exchange",
"datetime",
"open",
"high",
"low",
"close",
"volume",
"open_interest"
]
try:
with open(file_path, "w") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator="\n")
writer.writeheader()
for bar in bars:
d = {
"symbol": bar.symbol,
"exchange": bar.exchange.value,
"datetime": bar.datetime.strftime("%Y-%m-%d %H:%M:%S"),
"open": bar.open_price,
"high": bar.high_price,
"low": bar.low_price,
"close": bar.close_price,
"volume": bar.volume,
"open_interest": bar.open_interest,
}
writer.writerow(d)
return True
except PermissionError:
return False
def get_bar_overview(self) -> List[BarOverview]:
""""""
return database_manager.get_bar_overview()
def load_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
) -> List[BarData]:
""""""
bars = database_manager.load_bar_data(
symbol,
exchange,
interval,
start,
end
)
return bars
def delete_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval
) -> int:
""""""
count = database_manager.delete_bar_data(
symbol,
exchange,
interval
)
return count
def download_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: str,
start: datetime
) -> int:
"""
Query bar data from RQData.
"""
req = HistoryRequest(
symbol=symbol,
exchange=exchange,
interval=Interval(interval),
start=start,
end=datetime.now(DB_TZ)
)
vt_symbol = f"{symbol}.{exchange.value}"
contract = self.main_engine.get_contract(vt_symbol)
# If history data provided in gateway, then query
if contract and contract.history_data:
data = self.main_engine.query_history(
req, contract.gateway_name
)
# Otherwise use RQData to query data
else:
if not rqdata_client.inited:
rqdata_client.init()
data = rqdata_client.query_history(req)
if data:
database_manager.save_bar_data(data)
return(len(data))
return 0
def download_tick_data(
self,
symbol: str,
exchange: Exchange,
start: datetime
) -> int:
"""
Query tick data from RQData.
"""
req = HistoryRequest(
symbol=symbol,
exchange=exchange,
start=start,
end=datetime.now(DB_TZ)
)
if not rqdata_client.inited:
rqdata_client.init()
data = rqdata_client.query_tick_history(req)
if data:
database_manager.save_tick_data(data)
return(len(data))
return 0
| 25.863636 | 86 | 0.514779 |
17d7c3311919bffd0acafd1fc92016a7a40e48c6 | 17,952 | py | Python | corehq/apps/reminders/models.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/apps/reminders/models.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/apps/reminders/models.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
import pytz
from datetime import timedelta, datetime, date, time
import re
from collections import namedtuple
from corehq.apps.casegroups.models import CommCareCaseGroup
from dimagi.ext.couchdbkit import *
from corehq.apps.sms.models import MessagingEvent
from corehq.apps.users.cases import get_owner_id, get_wrapped_owner
from corehq.apps.users.models import CouchUser, CommCareUser
from corehq.apps.groups.models import Group
from corehq.apps.locations.dbaccessors import get_all_users_by_location
from corehq.apps.locations.models import SQLLocation
from corehq.form_processor.abstract_models import DEFAULT_PARENT_IDENTIFIER
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.utils import is_commcarecase
from dimagi.utils.parsing import string_to_datetime, json_format_datetime
from dateutil.parser import parse
from couchdbkit.exceptions import ResourceConflict
from couchdbkit import ResourceNotFound
from corehq.apps.smsforms.models import SQLXFormsSession
from corehq.apps.smsforms.util import critical_section_for_smsforms_sessions
from corehq.util.quickcache import quickcache
from corehq.util.timezones.conversions import ServerTime, UserTime
from dimagi.utils.couch import LockableMixIn, CriticalSection
from dimagi.utils.couch.cache.cache_core import get_redis_client
from dimagi.utils.logging import notify_exception
from dimagi.utils.modules import to_function
from random import randint
from django.conf import settings
from dimagi.utils.couch.database import iter_docs
from django.db import models, transaction
from string import Formatter
import six
from six.moves import filter
class IllegalModelStateException(Exception):
pass
class UnexpectedConfigurationException(Exception):
pass
METHOD_SMS = "sms"
METHOD_SMS_CALLBACK = "callback"
METHOD_SMS_SURVEY = "survey"
METHOD_IVR_SURVEY = "ivr_survey"
METHOD_EMAIL = "email"
METHOD_STRUCTURED_SMS = "structured_sms"
METHOD_CHOICES = [
METHOD_SMS,
METHOD_SMS_CALLBACK,
METHOD_SMS_SURVEY,
METHOD_IVR_SURVEY,
METHOD_EMAIL,
]
# The Monday - Sunday constants are meant to match the result from
# date.weekday()
DAY_ANY = -1
DAY_MON = 0
DAY_TUE = 1
DAY_WED = 2
DAY_THU = 3
DAY_FRI = 4
DAY_SAT = 5
DAY_SUN = 6
DAY_OF_WEEK_CHOICES = [
DAY_ANY,
DAY_MON,
DAY_TUE,
DAY_WED,
DAY_THU,
DAY_FRI,
DAY_SAT,
DAY_SUN,
]
REPEAT_SCHEDULE_INDEFINITELY = -1
EVENT_AS_SCHEDULE = "SCHEDULE"
EVENT_AS_OFFSET = "OFFSET"
EVENT_INTERPRETATIONS = [EVENT_AS_SCHEDULE, EVENT_AS_OFFSET]
UI_SIMPLE_FIXED = "SIMPLE_FIXED"
UI_COMPLEX = "COMPLEX"
UI_CHOICES = [UI_SIMPLE_FIXED, UI_COMPLEX]
RECIPIENT_SENDER = "SENDER"
RECIPIENT_USER = "USER"
RECIPIENT_OWNER = "OWNER"
RECIPIENT_CASE = "CASE"
RECIPIENT_PARENT_CASE = "PARENT_CASE"
RECIPIENT_ALL_SUBCASES = "ALL_SUBCASES"
RECIPIENT_SUBCASE = "SUBCASE"
RECIPIENT_SURVEY_SAMPLE = "SURVEY_SAMPLE"
RECIPIENT_USER_GROUP = "USER_GROUP"
RECIPIENT_LOCATION = "LOCATION"
DEPRECATED_RECIPIENT_CHOICES = [
'TB_PERSON_CASE_FROM_VOUCHER_CASE',
'TB_AGENCY_USER_CASE_FROM_VOUCHER_FULFILLED_BY_ID',
'TB_BENEFICIARY_REGISTRATION_RECIPIENTS',
'TB_PRESCRIPTION_VOUCHER_ALERT_RECIPIENTS',
]
RECIPIENT_CHOICES = [
RECIPIENT_USER, RECIPIENT_OWNER, RECIPIENT_CASE, RECIPIENT_SURVEY_SAMPLE,
RECIPIENT_PARENT_CASE, RECIPIENT_SUBCASE, RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION
] + list(settings.AVAILABLE_CUSTOM_REMINDER_RECIPIENTS) + DEPRECATED_RECIPIENT_CHOICES
KEYWORD_RECIPIENT_CHOICES = [RECIPIENT_SENDER, RECIPIENT_OWNER, RECIPIENT_USER_GROUP]
KEYWORD_ACTION_CHOICES = [METHOD_SMS, METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS]
FIRE_TIME_DEFAULT = "DEFAULT"
FIRE_TIME_CASE_PROPERTY = "CASE_PROPERTY"
FIRE_TIME_RANDOM = "RANDOM"
FIRE_TIME_CHOICES = [FIRE_TIME_DEFAULT, FIRE_TIME_CASE_PROPERTY, FIRE_TIME_RANDOM]
MATCH_EXACT = "EXACT"
MATCH_REGEX = "REGEX"
MATCH_ANY_VALUE = "ANY_VALUE"
MATCH_TYPE_CHOICES = [MATCH_EXACT, MATCH_REGEX, MATCH_ANY_VALUE]
CASE_CRITERIA = "CASE_CRITERIA"
ON_DATETIME = "ON_DATETIME"
START_CONDITION_TYPES = [CASE_CRITERIA, ON_DATETIME]
SURVEY_METHOD_LIST = ["SMS", "CATI"]
UI_FREQUENCY_ADVANCED = "ADVANCED"
UI_FREQUENCY_CHOICES = [UI_FREQUENCY_ADVANCED]
QUESTION_RETRY_CHOICES = [1, 2, 3, 4, 5]
FORM_TYPE_ONE_BY_ONE = "ONE_BY_ONE" # Answer each question one at a time
FORM_TYPE_ALL_AT_ONCE = "ALL_AT_ONCE" # Complete the entire form with just one sms using the delimiter to separate answers
FORM_TYPE_CHOICES = [FORM_TYPE_ONE_BY_ONE, FORM_TYPE_ALL_AT_ONCE]
REMINDER_TYPE_ONE_TIME = "ONE_TIME"
REMINDER_TYPE_KEYWORD_INITIATED = "KEYWORD_INITIATED"
REMINDER_TYPE_DEFAULT = "DEFAULT"
REMINDER_TYPE_SURVEY_MANAGEMENT = "SURVEY_MANAGEMENT"
REMINDER_TYPE_CHOICES = [REMINDER_TYPE_DEFAULT, REMINDER_TYPE_ONE_TIME,
REMINDER_TYPE_KEYWORD_INITIATED, REMINDER_TYPE_SURVEY_MANAGEMENT]
SEND_NOW = "NOW"
SEND_LATER = "LATER"
class CaseReminderEvent(DocumentSchema):
day_num = IntegerProperty()
fire_time = TimeProperty()
fire_time_aux = StringProperty()
fire_time_type = StringProperty(choices=FIRE_TIME_CHOICES, default=FIRE_TIME_DEFAULT)
time_window_length = IntegerProperty()
subject = DictProperty()
message = DictProperty()
callback_timeout_intervals = ListProperty(IntegerProperty)
form_unique_id = StringProperty()
class CaseReminderHandler(Document):
domain = StringProperty()
last_modified = DateTimeProperty()
active = BooleanProperty(default=True)
case_type = StringProperty()
nickname = StringProperty()
default_lang = StringProperty()
method = StringProperty(choices=METHOD_CHOICES, default="sms")
ui_type = StringProperty(choices=UI_CHOICES, default=UI_SIMPLE_FIXED)
recipient = StringProperty(choices=RECIPIENT_CHOICES, default=RECIPIENT_USER)
ui_frequency = StringProperty(choices=UI_FREQUENCY_CHOICES, default=UI_FREQUENCY_ADVANCED) # This will be used to simplify the scheduling process in the ui
sample_id = StringProperty()
user_group_id = StringProperty()
user_id = StringProperty()
case_id = StringProperty()
reminder_type = StringProperty(choices=REMINDER_TYPE_CHOICES, default=REMINDER_TYPE_DEFAULT)
locked = BooleanProperty(default=False)
# Only used when recipient is RECIPIENT_LOCATION
# All users belonging to these locations will be recipients
# Should be a list of (Couch model) Location ids
location_ids = ListProperty()
# If True, all users belonging to any child locations of the above
# locations will also be recipients
include_child_locations = BooleanProperty(default=False)
# Only used when recipient is RECIPIENT_SUBCASE.
# All subcases matching the given criteria will be the recipients.
recipient_case_match_property = StringProperty()
recipient_case_match_type = StringProperty(choices=MATCH_TYPE_CHOICES)
recipient_case_match_value = StringProperty()
# Only applies when method is "survey".
# If this is True, on the last survey timeout, instead of resending the current question,
# it will submit the form for the recipient with whatever is completed up to that point.
submit_partial_forms = BooleanProperty(default=False)
# Only applies when submit_partial_forms is True.
# If this is True, partial form submissions will be allowed to create / update / close cases.
# If this is False, partial form submissions will just submit the form without case create / update / close.
include_case_side_effects = BooleanProperty(default=False)
# Only applies for method = "ivr_survey" right now.
# This is the maximum number of times that it will retry asking a question with an invalid response before hanging
# up. This is meant to prevent long running calls.
max_question_retries = IntegerProperty(choices=QUESTION_RETRY_CHOICES, default=QUESTION_RETRY_CHOICES[-1])
survey_incentive = StringProperty()
# start condition
start_condition_type = StringProperty(choices=START_CONDITION_TYPES, default=CASE_CRITERIA)
# used when start_condition_type == ON_DATETIME
start_datetime = DateTimeProperty()
# used when start_condition_type == CASE_CRITERIA
start_property = StringProperty()
start_value = StringProperty()
start_date = StringProperty()
start_offset = IntegerProperty()
start_match_type = StringProperty(choices=MATCH_TYPE_CHOICES)
start_day_of_week = IntegerProperty(choices=DAY_OF_WEEK_CHOICES,
default=DAY_ANY)
# reminder schedule
events = SchemaListProperty(CaseReminderEvent)
schedule_length = IntegerProperty()
event_interpretation = StringProperty(choices=EVENT_INTERPRETATIONS, default=EVENT_AS_OFFSET)
max_iteration_count = IntegerProperty()
# stop condition
until = StringProperty()
# If present, references an entry in settings.ALLOWED_CUSTOM_CONTENT_HANDLERS, which maps to a function
# that should be called to retrieve the sms content to send in an sms reminder.
# The signature of a custom content handler should be function(reminder, handler, recipient)
custom_content_handler = StringProperty()
# If a subcase triggers an SMS survey, but we're sending it to the parent case,
# we sometimes want the subcase to be the one on which we execute case actions
# during form submission. This option will allow for that.
# Note that this option only makes a difference if a case is filling out the SMS survey,
# and if a case other than that case triggered the reminder.
force_surveys_to_use_triggered_case = BooleanProperty(default=False)
# If this reminder definition is being created as a subevent of a
# MessagingEvent, this is the id of the MessagingEvent
messaging_event_id = IntegerProperty()
# Set this property to filter the recipient list using custom user data.
# Should be a dictionary where each key is the name of the custom user data
# field, and each value is a list of allowed values to filter on.
# For example, if set to:
# {'nickname': ['bob', 'jim'],
# 'phone_type': ['android']}
# then the recipient list would be filtered to only include users whose phone
# type is android and whose nickname is either bob or jim.
# If {}, then no filter is applied to the recipient list.
user_data_filter = DictProperty()
# When sending a case criteria reminder whose start date is defined in
# a case property, this option tells what to do if that start date case
# property is blank or unparseable. If set to True, we use today's date.
# If False, we don't schedule any reminder at all.
use_today_if_start_date_is_blank = BooleanProperty(default=True)
class CaseReminder(SafeSaveDocument, LockableMixIn):
"""
Where the CaseReminderHandler is the rule and schedule for sending out reminders,
a CaseReminder is an instance of that rule as it is being applied to a specific
CommCareCase. A CaseReminder only applies to a single CommCareCase/CaseReminderHandler
interaction and is just a representation of the state of the rule in the lifecycle
of the CaseReminderHandler.
"""
domain = StringProperty() # Domain
last_modified = DateTimeProperty()
case_id = StringProperty() # Reference to the CommCareCase
handler_id = StringProperty() # Reference to the CaseReminderHandler
user_id = StringProperty() # Reference to the CouchUser who will receive the SMS messages
method = StringProperty(choices=METHOD_CHOICES) # See CaseReminderHandler.method
next_fire = DateTimeProperty() # The date and time that the next message should go out
last_fired = DateTimeProperty() # The date and time that the last message went out
active = BooleanProperty(default=False) # True if active, False if deactivated
start_date = DateProperty() # For CaseReminderHandlers with event_interpretation=SCHEDULE, this is the date (in the recipient's time zone) from which all event times are calculated
schedule_iteration_num = IntegerProperty() # The current iteration through the cycle of self.handler.events
current_event_sequence_num = IntegerProperty() # The current event number (index to self.handler.events)
callback_try_count = IntegerProperty() # Keeps track of the number of times a callback has timed out
skip_remaining_timeouts = BooleanProperty() # An event handling method can set this to True to skip the remaining timeout intervals for the current event
start_condition_datetime = DateTimeProperty() # The date and time matching the case property specified by the CaseReminderHandler.start_condition
sample_id = StringProperty()
xforms_session_ids = ListProperty(StringProperty)
error_retry_count = IntegerProperty(default=0)
last_scheduled_fire_time = DateTimeProperty()
event_initiation_timestamp = DateTimeProperty() # The date and time that the event was started (which is the same throughout all timeouts)
error = BooleanProperty(default=False)
error_msg = StringProperty()
# This is the id of the MessagingEvent that was created the
# last time an event for this reminder fired.
last_messaging_event_id = IntegerProperty()
class SurveyKeywordAction(DocumentSchema):
recipient = StringProperty(choices=KEYWORD_RECIPIENT_CHOICES)
recipient_id = StringProperty()
action = StringProperty(choices=KEYWORD_ACTION_CHOICES)
# Only used for action == METHOD_SMS
message_content = StringProperty()
# Only used for action in [METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS]
form_unique_id = StringProperty()
# Only used for action == METHOD_STRUCTURED_SMS
use_named_args = BooleanProperty()
named_args = DictProperty() # Dictionary of {argument name in the sms (caps) : form question xpath}
named_args_separator = StringProperty() # Can be None in which case there is no separator (i.e., a100 b200)
class SurveyKeyword(Document):
domain = StringProperty()
keyword = StringProperty()
description = StringProperty()
actions = SchemaListProperty(SurveyKeywordAction)
delimiter = StringProperty() # Only matters if this is a structured SMS: default is None, in which case the delimiter is any consecutive white space
override_open_sessions = BooleanProperty()
initiator_doc_type_filter = ListProperty(StringProperty) # List of doc types representing the only types of contacts who should be able to invoke this keyword. Empty list means anyone can invoke.
# Properties needed for migration and then can be removed
form_type = StringProperty(choices=FORM_TYPE_CHOICES, default=FORM_TYPE_ONE_BY_ONE)
form_unique_id = StringProperty()
use_named_args = BooleanProperty()
named_args = DictProperty()
named_args_separator = StringProperty()
oct13_migration_timestamp = DateTimeProperty()
def is_structured_sms(self):
return METHOD_STRUCTURED_SMS in [a.action for a in self.actions]
@property
def get_id(self):
return self._id
@classmethod
def get_keyword(cls, domain, keyword):
return cls.view("reminders/survey_keywords",
key = [domain, keyword.upper()],
include_docs=True,
reduce=False,
).one()
@classmethod
def get_by_domain(cls, domain, limit=None, skip=None):
extra_kwargs = {}
if limit is not None:
extra_kwargs['limit'] = limit
if skip is not None:
extra_kwargs['skip'] = skip
return cls.view(
'reminders/survey_keywords',
startkey=[domain],
endkey=[domain, {}],
include_docs=True,
reduce=False,
**extra_kwargs
).all()
def save(self, *args, **kwargs):
self.clear_caches()
return super(SurveyKeyword, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.clear_caches()
return super(SurveyKeyword, self).delete(*args, **kwargs)
def clear_caches(self):
self.domain_has_keywords.clear(SurveyKeyword, self.domain)
@classmethod
@quickcache(['domain'], timeout=60 * 60)
def domain_has_keywords(cls, domain):
reduced = cls.view('reminders/survey_keywords',
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=True
).all()
count = reduced[0]['value'] if reduced else 0
return count > 0
class EmailUsage(models.Model):
domain = models.CharField(max_length=126, db_index=True)
year = models.IntegerField()
month = models.IntegerField()
count = models.IntegerField(default=0)
class Meta(object):
unique_together = ('domain', 'year', 'month')
app_label = "reminders"
@classmethod
def get_or_create_usage_record(cls, domain):
now = datetime.utcnow()
domain_year_month = '%s-%d-%d' % (
domain,
now.year,
now.month
)
email_usage_get_or_create_key = 'get-or-create-email-usage-%s' % domain_year_month
with CriticalSection([email_usage_get_or_create_key]):
return cls.objects.get_or_create(
domain=domain,
year=now.year,
month=now.month
)[0]
@classmethod
def get_total_count(cls, domain):
qs = cls.objects.filter(domain=domain)
result = qs.aggregate(total=models.Sum('count'))
return result['total'] or 0
def update_count(self, increase_by=1):
# This operation is thread safe, no need to use CriticalSection
EmailUsage.objects.filter(pk=self.pk).update(count=models.F('count') + increase_by)
| 41.174312 | 204 | 0.742981 |
6f709fc11b3c3ca911d87800c925fe3284bc9427 | 779 | py | Python | setup.py | TanmayArya-1p/TRH | a1aacf1ddb029b9f2aa83bc72e75a8c133aaded8 | [
"Unlicense"
] | null | null | null | setup.py | TanmayArya-1p/TRH | a1aacf1ddb029b9f2aa83bc72e75a8c133aaded8 | [
"Unlicense"
] | null | null | null | setup.py | TanmayArya-1p/TRH | a1aacf1ddb029b9f2aa83bc72e75a8c133aaded8 | [
"Unlicense"
] | null | null | null | from setuptools import setup, find_packages
import os
VERSION = '2.0'
DESCRIPTION = 'TypeRacer and MonkeyType hack using the Selenium Chromium WebDriver and BeautifulSoup web scraping.'
# Setting up
setup(
name="TRH",
version=VERSION,
author="TanmayArya1-p",
author_email="<tanmayarya2018@gmail.com>",
description=DESCRIPTION,
packages=find_packages(),
install_requires=['selenium', 'beautifulsoup4'],
keywords=['python', 'selenium', 'web-scraping'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
) | 28.851852 | 110 | 0.65982 |
d425f928620574ebd03fb5ea9bac9aab5450775e | 10,813 | py | Python | Kalman Filter/kalman_filter.py | dadiasahil94/Machine-Learning-Improvement | b7492755c01e2b16e7939672ee5de8a8326f4094 | [
"MIT"
] | null | null | null | Kalman Filter/kalman_filter.py | dadiasahil94/Machine-Learning-Improvement | b7492755c01e2b16e7939672ee5de8a8326f4094 | [
"MIT"
] | null | null | null | Kalman Filter/kalman_filter.py | dadiasahil94/Machine-Learning-Improvement | b7492755c01e2b16e7939672ee5de8a8326f4094 | [
"MIT"
] | null | null | null |
'''
=======================
KALMAN FILTER
=======================
ONLY NEED "F, Z ,H" TO CALCULATE/ confirm dimensions of all other parameters
X == state VECTOR --------|
F == Prediction Matrix --------|---- Fundamental requirement
H == Prediction to Measurement Transformation Matrix --------|
Z == Observed Measurements VECTOR --------|
P == Covariance matrix
B == Control Matrix
U == Control VECTOR
Q == Process Noise Covariance
K == Kalman Gain
R == Measurement Noise Covariance
NOTE : HERE F` indicates Transpose
Prediction Equation:
X = FX + BU
P = FPF` + Q <-------------|
|
Updation Equation: |
    K = PH`(HPH` + R)^-1                 |
    P = P - KHP                          |
    X = X + K(Z - HX)                    ^
|
| |
| |
|--------> -------------|
After the update equations, repeat the prediction equations until convergence
Dimensionality (Rows*Columns):
X = M*1
F = M*M
H = L*M
Z = L*1
----------------------------------------------------------------------
P = M*M
B = M*N
U = N*1
Q = M*M
K = M*L
R = L*L
'''
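# Worked 1-D example of a single predict/update cycle (illustrative numbers only,
# using the defaults that adjust_matrix fills in: Q = 0.05, R = 0.1, x0 = 0, P0 = 1000):
#   predict: x = F*x = 0.0                  P = F*P*F` + Q = 1000.05
#   gain:    K = P*H` / (H*P*H` + R) ~ 0.9999
#   update:  x = x + K*(z - H*x) = 0.49995  (for a measurement z = 0.5)
#            P = P - K*H*P ~ 0.09999
# With a very uncertain prior the estimate jumps almost entirely to the first
# measurement, then settles as P shrinks over later iterations.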
import numpy as np
from random import uniform
import sys
sys.path.insert(0,"/home/sahil/Documents/machine learning algorithm/common")
import helper_functions as hf
import plot_helper_functions as plhf
import matplotlib.pyplot as plt
class Kalman():
parameters = {'F': None, \
'B':None,\
'U':None,\
'Q':None,
'x_start':None,\
'p_start':None,\
'X_PREDICT_':None, \
'P_PREDICT_':None, \
'H':None,\
'R':None,\
'Z':None,\
'KALMAN_GAIN_':None,\
'X_UPDATED_':None,\
'P_UPDATED_':None\
}
data_points =[]
def __init__(self, no_states, no_iteration = 1000 ):
''' Initialization of class'''
self.no_states = no_states
self.no_iteration = no_iteration
def set_parameters(self , parametes_dictinary):
''' Set values for the parameters'''
assert isinstance(parametes_dictinary , dict)
for key in parametes_dictinary.keys():
self.parameters[key] = np.array(parametes_dictinary[key])
            # ensure the stored array is 2-dimensional
if self.parameters[key].ndim <2 and self.parameters[key].ndim >=1 :
self.parameters[key] = np.expand_dims(self.parameters[key] , 1)
elif self.parameters[key].ndim <1 :
self.parameters[key] = np.expand_dims(self.parameters[key] , 0)
self.parameters[key] = np.expand_dims(self.parameters[key] , 1)
print self.parameters[key].ndim , self.parameters[key].shape , key
def get_parameters(self):
''' Returns parameters'''
return self.parameters
def check_dimensionality(self):
''' Checks dimesnionality of parameters'''
M = self.parameters['F'].shape[0]
L = self.parameters['H'].shape[0]
N = self.parameters['B'].shape[1]
#check the dimensions
if (self.parameters['X_PREDICT_'].shape != (M,1)):
print "Dimesnion of X (state vector) is not correct. It should be "+str(M)+str("*")+str("1")+". Currently it is " + str(self.parameters['X_PREDICT_'].shape)
if (self.parameters['X_UPDATED_'].shape != (M,1)):
print "Dimesnion of X (state vector) is not correct. It should be "+str(M)+str("*")+str("1")+". Currently it is " + str(self.parameters['X_UPDATED_'].shape)
if (self.parameters['x_start'].shape != (M,1)):
print "Dimesnion of X0 (state vector) is not correct. It should be "+str(M)+str("*")+str("1")+". Currently it is " + str(self.parameters['x_start'].shape)
if (self.parameters['p_start'].shape != (M,M)):
print "Dimesnion of p_start is not correct. It should be "+str(M)+str("*")+str(M)+" Currently it is " + str(self.parameters['p_start'].shape)
if (self.parameters['P_UPDATED_'].shape != (M,M)):
print "Dimesnion of P is not correct. It should be "+str(M)+str("*")+str(M)+" Currently it is " + str(self.parameters['P_UPDATED_'].shape)
if (self.parameters['P_PREDICT_'].shape != (M,M)):
print "Dimesnion of P is not correct. It should be "+str(M)+str("*")+str(M)+" Currently it is " + str(self.parameters['P_PREDICT_'].shape)
if (self.parameters['F'].shape != (M,M)):
print "Dimesnion of F is not correct. It should be "+str(M)+str("*")+str(N)+". Currently it is " + str(self.parameters['F'].shape)
if (self.parameters['B'].shape != (M,N)):
print "Dimesnion of B is not correct. It should be "+str(M)+str("*")+str(N)+". Currently it is " + str(self.parameters['B'].shape)
if (self.parameters['U'].shape != (N,1)):
print "Dimesnion of U is not correct. It should be "+str(N)+str("*")+str("1")+". Currently it is " + str(self.parameters['U'].shape)
if (self.parameters['Q'].shape != (M,M)):
print "Dimesnion of Q is not correct. It should be "+str(M)+str("*")+str(M)+". Currently it is " + str(self.parameters['Q'].shape)
if (self.parameters['H'].shape != (L,M)):
print "Dimesnion of H is not correct. It should be "+str(L)+str("*")+str(M)+". Currently it is " + str(self.parameters['H'].shape)
if (self.parameters['KALMAN_GAIN_'].shape != (M,L)):
print "Dimesnion of Kalmanin Gain is not correct. It should be "+str(M)+str("*")+str(L)+". Currently it is " + str(self.parameters['KALMAN_GAIN_'].shape)
if (self.parameters['R'].shape != (L,L)):
print "Dimesnion of R is not correct. It should be "+str(L)+str("*")+str(L)+". Currently it is " + str(self.parameters['R'].shape)
if (self.parameters['Z'][0].shape != (self.no_states,1)):
print "Dimesnion of Z is not correct. It should be "+str(L)+str("*")+str(1)+". Currently it is " + str(self.parameters['Z'][0].shape)
def adjust_matrix(self):
''' Adjust the parameters before starting the calculations'''
        empty_keys = [key for key in self.parameters.keys() if self.parameters[key] is None]
if ('B' in empty_keys):
N = int(5)
else:
N = int(self.parameters['B'].shape[1])
M = int(self.parameters['F'].shape[0])
L = int(self.parameters['H'].shape[0])
for key in empty_keys:
# print empty_keys
if key == 'x_start':
self.parameters['x_start'] = np.zeros((M,1))
if key == 'X_UPDATED_':
self.parameters['X_UPDATED_'] = np.zeros((M,1))
if key == 'X_PREDICT_':
self.parameters['X_PREDICT_'] = np.zeros((M,1))
if key == 'p_start':
self.parameters['p_start'] = 1000 * np.identity((M)) #size is M*M
if key == 'P_PREDICT_':
self.parameters['P_PREDICT_'] = 1000 * np.identity((M)) #size is M*M
if key == 'P_UPDATED_':
self.parameters['P_UPDATED_'] = 1000 * np.identity((M)) #size is M*M
if key == 'B':
self.parameters['B'] = np.zeros((M,N))
if key == 'U':
self.parameters['U'] = np.zeros((N,1))
if key == 'Q':
self.parameters['Q'] = 0.05 * np.identity((M)) #size is M*M
if key == 'R':
self.parameters['R'] = 0.1 * np.identity((L)) #size is L*L
if key == 'KALMAN_GAIN_':
self.parameters['KALMAN_GAIN_'] = np.zeros((M,L))
# empty_keys.remove(key)
print "++++++++++++++++++++++++"
self.check_dimensionality()
def prediction_step(self):
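        # prediction step: X_pred = F X + B U ;  P_pred = F P F` + Q (see module docstring)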
part1 = np.dot(self.parameters['F'],self.parameters['X_UPDATED_'])
part2 = np.dot(self.parameters['B'] , self.parameters['U'])
self.parameters['X_PREDICT_'] = part1+part2
# print part1.shape,part2.shape
part3 = np.dot(self.parameters['F'],np.dot(self.parameters['P_UPDATED_'],self.parameters['F'].T))
part4 = self.parameters['Q']
self.parameters['P_PREDICT_'] = part3 + part4
# print part3.shape,part4.shape
def updation_step(self,count):
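        # update step: K = P_pred H`(H P_pred H` + R)^-1 ; P = P_pred - K H P_pred ;
        # X = X_pred + K (Z - H X_pred)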
part5 = np.dot(self.parameters['P_PREDICT_'],self.parameters['H'].T)
part6 = np.linalg.inv(np.dot(self.parameters['H'], part5) +self.parameters['R'])
self.parameters['KALMAN_GAIN_'] = np.dot(part5,part6)
# print part5.shape,part6.shape
part7 = self.parameters['P_PREDICT_']
part8 = np.dot(self.parameters['KALMAN_GAIN_'],np.dot(self.parameters['H'],self.parameters['P_PREDICT_']))
self.parameters['P_UPDATED_'] = part7 - part8
# print part7.shape,part8.shape
part9 = self.parameters['X_PREDICT_']
        part10 = self.parameters['Z'][count,0] - np.dot(self.parameters['H'],self.parameters['X_PREDICT_'])
# print part9.shape,part10.shape
part11 = np.dot(self.parameters['KALMAN_GAIN_'],part10)
# print "Done"
self.parameters['X_UPDATED_'] = part9 + part11
print "-------------------------------------"
def solve(self):
count = 0
while count<self.no_iteration-1:
print count
self.prediction_step()
self.updation_step(count)
print self.parameters['X_UPDATED_']
self.data_points.append(self.parameters['X_UPDATED_'])
count+=1
return self.data_points
if __name__ == '__main__':
mango = Kalman(1, no_iteration =30)
F = np.array([1])
H = np.array([1])
# Z = np.array([0.39,0.50,0.48,0.29,0.25,0.32,0.34,0.48,0.41,0.45])
Z1 = np.array([uniform(0, 1) for i in range(2500)])
# Z = np.array([1.1,1,0.95,1.05,1.2,0.9,0.84,1.15,0.41,0.45])
Z = [x*0.1 for x in range(30)]
# Z = Z[:]+Z1[0:10]
True_vals = [1 for i in range(len(Z))]
# Z = np.array(Z)
parameters = {'F':F, \
'H': H,\
'Z':Z,\
}
mango.set_parameters(parameters)
mango.adjust_matrix()
# mango.set_parameters({'x_start':10})
# mango.adjust_matrix()
print mango.parameters['x_start']
j = mango.solve()
j = [float(x) for x in j]
# print j
m ,figure_no = plhf.LINE_PLOT_2D(j,figure_no=1)
plhf.LINE_PLOT_2D(True_vals,figure_no=1,color='g')
plhf.SCATTER_PLOT_2D(Z,figure_no=1,color='r')
plt.show()
| 43.079681 | 168 | 0.535652 |
995b9d75705fe25d4c0258a58f2b3a3b9ede9d26 | 941 | py | Python | quick-sort/iterative.py | 9andresc/dena | 9315905c7e597344fa2c33773bf875cdaf38021f | [
"MIT"
] | null | null | null | quick-sort/iterative.py | 9andresc/dena | 9315905c7e597344fa2c33773bf875cdaf38021f | [
"MIT"
] | 18 | 2019-01-24T21:19:11.000Z | 2019-02-08T19:25:49.000Z | quick-sort/iterative.py | 9andresc/dsna | 9315905c7e597344fa2c33773bf875cdaf38021f | [
"MIT"
] | null | null | null | class Stack(list):
push = list.append
def qsi(items):
"""It sorts the items using Quick Sort with the Hoare partition algorithm"""
n = len(items)
if n < 2:
return
unsorted = Stack([(0, n - 1)])
while unsorted:
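        # Partition items[left_idx..right_idx] in place: advance elem_idx, moving any
        # element larger than the pivot into the free slot tracked by pivot_idx, until
        # the two indices meet and the pivot can be dropped into its final position.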
elem_idx, pivot_idx = left_idx, right_idx = unsorted.pop()
elem = items[elem_idx]
pivot = items[pivot_idx]
while pivot_idx > elem_idx:
if elem > pivot:
items[pivot_idx] = elem
pivot_idx -= 1
items[elem_idx] = elem = items[pivot_idx]
else:
elem_idx += 1
elem = items[elem_idx]
items[pivot_idx] = pivot
left_size = pivot_idx - left_idx
right_size = right_idx - pivot_idx
if left_size > 1:
unsorted.push((left_idx, pivot_idx - 1))
if right_size > 1:
unsorted.push((pivot_idx + 1, right_idx))
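# Minimal usage sketch (assumed entry point, not part of the original module):
if __name__ == "__main__":
    data = [5, 1, 4, 2, 8, 0, 3]
    qsi(data)    # sorts in place and returns None
    print(data)  # -> [0, 1, 2, 3, 4, 5, 8]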
| 24.763158 | 80 | 0.539851 |
8b1c4d42951f55666b149f185ef767e5feadcbe1 | 971 | py | Python | src/pretix/base/migrations/0099_auto_20180807_0841.py | pajowu/pretix | d6985123b4528f134ead71ce0a4613c9a309fd2c | [
"ECL-2.0",
"Apache-2.0"
] | 1,248 | 2015-04-24T13:32:06.000Z | 2022-03-29T07:01:36.000Z | src/pretix/base/migrations/0099_auto_20180807_0841.py | pajowu/pretix | d6985123b4528f134ead71ce0a4613c9a309fd2c | [
"ECL-2.0",
"Apache-2.0"
] | 2,113 | 2015-02-18T18:58:16.000Z | 2022-03-31T11:12:32.000Z | src/pretix/base/migrations/0099_auto_20180807_0841.py | pajowu/pretix | d6985123b4528f134ead71ce0a4613c9a309fd2c | [
"ECL-2.0",
"Apache-2.0"
] | 453 | 2015-05-13T09:29:06.000Z | 2022-03-24T13:39:16.000Z | # Generated by Django 2.1 on 2018-08-07 08:41
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0098_auto_20180731_1243'),
]
operations = [
migrations.AlterModelOptions(
name='waitinglistentry',
options={'ordering': ('-priority', 'created'), 'verbose_name': 'Waiting list entry', 'verbose_name_plural': 'Waiting list entries'},
),
migrations.AddField(
model_name='waitinglistentry',
name='priority',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='waitinglistentry',
name='voucher',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='waitinglistentries', to='pretixbase.Voucher', verbose_name='Assigned voucher'),
),
]
| 33.482759 | 197 | 0.639547 |
9c03d52619d1ddaee86b4fae18348769710bd14a | 16,747 | py | Python | scripts/evaluate_all_datasets_cross.py | rpo19/BLINK | a9b62d08aa3ce4958e9bec62888c67c4eee18533 | [
"MIT"
] | null | null | null | scripts/evaluate_all_datasets_cross.py | rpo19/BLINK | a9b62d08aa3ce4958e9bec62888c67c4eee18533 | [
"MIT"
] | null | null | null | scripts/evaluate_all_datasets_cross.py | rpo19/BLINK | a9b62d08aa3ce4958e9bec62888c67c4eee18533 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.metrics import f1_score
import pickle
from sklearn.metrics import classification_report, roc_curve
import os
import matplotlib.pyplot as plt
import seaborn as sns
import sys
only = None
if len(sys.argv) >= 2:
only = sys.argv[1].split(',')
print('Only', only)
sns.set(style='ticks', palette='Set2')
sns.despine()
def myplot(y, y_pred, y_pred_round, title, outpath):
plt.figure()
df = pd.DataFrame({
'y': y,
'y_pred': y_pred,
'y_pred_rnd': y_pred_round
})
# kde
df[['y', 'y_pred']].plot(kind='kde', title=title+'_kde')
plt.tight_layout(pad=0.5)
plt.savefig(os.path.join(outpath, title+'_kde.png'))
plt.close()
# roc curve
fpr, tpr, thresholds = roc_curve(df['y'], df['y_pred'])
plt.figure()
pd.DataFrame({
'tpr': tpr,
'fpr': fpr,
'thresholds': thresholds
}).plot(x='fpr', y='tpr', title=title+'_roc')
plt.tight_layout(pad=0.5)
plt.savefig(os.path.join(outpath, title+'_roc.png'))
plt.close()
# correct
correct = df.query('y == y_pred_rnd')['y_pred'].rename('correct')
plt.figure()
correct.plot(kind='kde', title=title+'_kde', legend=True)
plt.tight_layout(pad=0.5)
# plt.savefig(os.path.join(outpath, title+'_kde_correct.png'))
# plt.close()
# errors
errors = df.query('y != y_pred_rnd')['y_pred'].rename('errors')
# plt.figure()
# errors.plot(kind='density', title=title+'errors kde')
errors.plot(kind='density', title=title+'kde', legend=True)
plt.tight_layout(pad=0.5)
# plt.savefig(os.path.join(outpath, title+'_kde_errors.png'))
plt.savefig(os.path.join(outpath, title+'_kde_correct_errors.png'))
plt.close()
plt.close('all')
outpath = 'output/evaluate_all_datasets/cross'
os.makedirs(outpath, exist_ok=True)
print('loading dataset...')
dataset = pd.read_pickle('./data/nil_dataset.pickle')
print('loaded...')
tasks = [
# {
# 'name': 'aida_under_all10_max_stdev4_hamming',
# 'train': ['AIDA-YAGO2_train_ner'],
# 'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
# 'sampling': 'undersample',
# 'features': [
# 'cross_stats_10_max',
# 'bi_stats_10_max',
# 'cross_stats_4_stdev',
# 'bi_stats_4_stdev',
# 'cross_hamming',
# 'bi_hamming'
# ]
# },
# {
# 'name': 'aida_under_all10_max_stdev4_hamming_no_bi',
# 'train': ['AIDA-YAGO2_train_ner'],
# 'test': ['AIDA-YAGO2_testa_ner', 'AIDA-YAGO2_testb_ner'],
# 'sampling': 'undersample',
# 'features': [
# 'cross_stats_10_max',
# 'bi_stats_10_max',
# 'cross_stats_4_stdev',
# 'bi_stats_4_stdev',
# 'cross_hamming',
# ]
# },
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_AIDA-YAGO2_testa',
'sampling': 'pretrained',
'test': ['AIDA-YAGO2_testa_ner'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'},
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_AIDA-YAGO2_train',
'sampling': 'pretrained',
'test': ['AIDA-YAGO2_train_ner'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'},
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_clueweb_questions',
'sampling': 'pretrained',
'test': ['clueweb_questions'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'},
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_wnedwiki_questions',
'sampling': 'pretrained',
'test': ['wnedwiki_questions'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'},
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_AIDA-YAGO2_testb',
'sampling': 'pretrained',
'test': ['AIDA-YAGO2_testb_ner'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'},
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_msnbc_questions',
'sampling': 'pretrained',
'test': ['msnbc_questions'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'},
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_aquaint_questions',
'sampling': 'pretrained',
'test': ['aquaint_questions'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'},
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_ace2004_questions',
'sampling': 'pretrained',
'test': ['ace2004_questions'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'},
{'features': ['cross_stats_10_max',
'cross_stats_4_stdev',
'bi_stats_10_max',
'bi_stats_4_stdev'],
'name': 'aida_under_all10_max_stdev4_ner_wiki_tested_on_all',
'sampling': 'pretrained',
'test': ['AIDA-YAGO2_testa_ner',
'AIDA-YAGO2_train_ner',
'clueweb_questions',
'wnedwiki_questions',
'AIDA-YAGO2_testb_ner',
'msnbc_questions',
'aquaint_questions',
'ace2004_questions'],
'train': 'output/feature_ablation_study/aida_under_all_max_stats10_levenshtein_model.pickle'}]
# assert no duplicates
vc = pd.DataFrame([task['name'] for task in tasks]).value_counts()
if not (vc <= 1).all():
print('!' * 30)
print('Duplicates:')
print('!' * 30)
print(vc[vc > 1])
raise Exception('duplicate task!')
csv_report = pd.DataFrame()
if only is not None:
tasks = [t for t in tasks if t['name'] in only]
for task in tasks:
print('-'*30)
print(task['name'])
task['features'] = [
'cross_stats_10_max',
'cross_stats_10_mean',
'cross_stats_10_median',
'cross_stats_10_stdev',
'bi_stats_10_max',
'bi_stats_10_mean',
'bi_stats_10_median',
'bi_stats_10_stdev',
'cross_levenshtein'
]
train_value_counts = None
if isinstance(task['train'], str):
# load pre trained model
with open(task['train'], 'rb') as fd:
clf = pickle.load(fd)
assert isinstance(task['test'], list)
test_df = dataset[dataset['src'].isin(task['test'])]
test_df_shape_original = test_df.shape[0]
test_df = test_df[test_df[task['features']].notna().all(axis=1)]
test_df_shape_notna = test_df.shape[0]
test_df_shape_actual = test_df.shape[0]
df_size_report = pd.DataFrame({
'test': [test_df_shape_original, test_df_shape_notna, test_df_shape_actual]
}, index=['original', 'notna', 'actual']).to_markdown()
print(df_size_report)
X_test = test_df[task['features']].values
y_test = test_df['y_cross'].values
else:
train_df = dataset[dataset['src'].isin(task['train'])]
if isinstance(task['test'], list):
test_df = dataset[dataset['src'].isin(task['test'])]
elif isinstance(task['test'], float):
train_df, test_df = train_test_split(train_df, test_size = task['test'], random_state = 1234)
else:
raise Exception()
train_df_shape_original = train_df.shape[0]
test_df_shape_original = test_df.shape[0]
train_df = train_df[train_df[task['features']].notna().all(axis=1)]
test_df = test_df[test_df[task['features']].notna().all(axis=1)]
train_df_shape_notna = train_df.shape[0]
test_df_shape_notna = test_df.shape[0]
if task['sampling'] == 'undersample':
print('undersampling...')
train_df_0 = train_df.query('y == 0')
train_df_1 = train_df.query('y == 1')
train_df_1 = train_df_1.sample(frac=1).iloc[:train_df_0.shape[0]]
train_df = pd.concat([train_df_0, train_df_1]).sample(frac=1)
elif task['sampling'] == 'no':
pass
else:
raise Exception()
train_df_shape_actual = train_df.shape[0]
test_df_shape_actual = test_df.shape[0]
df_size_report = pd.DataFrame({
'train': [train_df_shape_original, train_df_shape_notna, train_df_shape_actual],
'test': [test_df_shape_original, test_df_shape_notna, test_df_shape_actual]
}, index=['original', 'notna', 'actual']).to_markdown()
print(df_size_report)
train_value_counts = pd.DataFrame(train_df['y_cross'].value_counts()).to_markdown()
X_train = train_df[task['features']].values
y_train = train_df['y_cross'].values
X_test = test_df[task['features']].values
y_test = test_df['y_cross'].values
# model
clf = make_pipeline(
StandardScaler(),
LogisticRegression(random_state=1234, max_iter=200)
)
clf.fit(X_train, y_train)
y_pred = np.array(list(map(lambda x: x[1], clf.predict_proba(X_test))))
y_pred_round = np.round(y_pred)
test_df['y_pred_round'] = y_pred_round
test_df['y_pred'] = y_pred
bi_baseline = test_df.query('bi_labels == bi_best_candidate or Wikipedia_title == bi_best_candidate_title').shape[0]
cross_baseline = test_df.query('cross_labels == cross_best_candidate or Wikipedia_title == cross_best_candidate_title').shape[0]
bi_acc = test_df.query('(y_pred_round == 1 and (bi_labels == bi_best_candidate or Wikipedia_title == bi_best_candidate_title)) or (bi_labels == -1 and y_pred_round == 0)').shape[0]
cross_acc = test_df.query('(y_pred_round == 1 and (cross_labels == cross_best_candidate or Wikipedia_title == cross_best_candidate_title)) or (cross_labels == -1 and y_pred_round == 0)').shape[0]
bi_acc_correcting_nel = test_df.query(
'(y_pred_round == 1 and (bi_labels == bi_best_candidate or Wikipedia_title == bi_best_candidate_title))'
' or (bi_labels != bi_best_candidate and y_pred_round == 0)').shape[0]
cross_acc_correcting_nel = test_df.query(
'(y_pred_round == 1 and '
'(cross_labels == cross_best_candidate or Wikipedia_title == cross_best_candidate_title))'
' or (cross_labels != cross_best_candidate and y_pred_round == 0)').shape[0]
_classification_report = classification_report(y_test, y_pred_round)
# oracle corrects in [0.25, 0.75]
# TODO maybe look for a better way to get them (e.g. correct-error kde intersections ?)
tl = 0.25
th = 0.75
oracle_df = pd.DataFrame({
'y_test': y_test,
'y_pred': y_pred,
'y_pred_round': y_pred_round
})
oracle_original_shape = oracle_df.shape[0]
oracle_df = oracle_df.query(f'y_pred <= {tl} or y_pred >= {th}')
_classification_report_oracle = classification_report(oracle_df['y_test'], oracle_df['y_pred_round'])
test_df_oracle = test_df.query(f'y_pred <= {tl} or y_pred >= {th}')
bi_acc_oracle = test_df_oracle.query('(y_pred_round == 1 and (bi_labels == bi_best_candidate or Wikipedia_title == bi_best_candidate_title)) or (bi_labels == -1 and y_pred_round == 0)').shape[0]
cross_acc_oracle = test_df_oracle.query('(y_pred_round == 1 and (cross_labels == cross_best_candidate or Wikipedia_title == cross_best_candidate_title)) or (cross_labels == -1 and y_pred_round == 0)').shape[0]
csv_report = csv_report.append({
'name': task['name'],
'bi_baseline': bi_baseline / test_df_shape_actual,
'cross_baseline': cross_baseline / test_df_shape_actual,
'bi_acc': bi_acc / test_df_shape_actual,
'cross_acc': cross_acc / test_df_shape_actual,
'bi_acc_adjusted': bi_acc / test_df_shape_original,
'cross_acc_adjusted': cross_acc / test_df_shape_original,
'bi_acc_correcting_nel': bi_acc_correcting_nel / test_df_shape_actual,
'cross_acc_correcting_nel': cross_acc_correcting_nel / test_df_shape_actual,
'0-f1': f1_score(y_test, y_pred_round, pos_label=0),
'1-f1': f1_score(y_test, y_pred_round, pos_label=1),
'oracle_ratio': 1 - (oracle_df.shape[0] / oracle_original_shape),
'bi_acc_oracle': bi_acc_oracle / test_df_oracle.shape[0],
'cross_acc_oracle': cross_acc_oracle / test_df_oracle.shape[0],
'0-f1-oracle': f1_score(oracle_df['y_test'], oracle_df['y_pred_round'], pos_label=0),
'1-f1-oracle': f1_score(oracle_df['y_test'], oracle_df['y_pred_round'], pos_label=1),
}, ignore_index=True)
if train_value_counts is not None:
print(train_value_counts)
print(_classification_report)
print('-- Performances over test set:', task['test'], '--')
print('Bi baseline:', bi_baseline / test_df_shape_actual)
print('Cross baseline:', cross_baseline / test_df_shape_actual)
print('Bi acc:', bi_acc / test_df_shape_actual)
print('Cross acc:', cross_acc / test_df_shape_actual)
print('Bi acc adjusted:', bi_acc / test_df_shape_original)
print('Cross acc adjusted:', cross_acc / test_df_shape_original)
print(f'-- Oracle HITL evaluation when y_pred in [{tl}, {th}]')
print('Ratio to human validator:', 1 - (oracle_df.shape[0] / oracle_original_shape))
print(_classification_report_oracle)
print('Bi acc oracle:', bi_acc_oracle / test_df_oracle.shape[0])
print('Cross acc oracle:', cross_acc_oracle / test_df_oracle.shape[0])
with open(os.path.join(outpath, task['name']+'_report.txt'), 'w') as fd:
if train_value_counts is not None:
print(train_value_counts, file=fd)
print(df_size_report, file=fd)
print(_classification_report, file=fd)
print('-- Performances over test set:', task['test'], '--', file=fd)
print('Bi baseline:', bi_baseline / test_df_shape_actual, file=fd)
print('Cross baseline:', cross_baseline / test_df_shape_actual, file=fd)
print('Bi acc:', bi_acc / test_df_shape_actual, file=fd)
print('Cross acc:', cross_acc / test_df_shape_actual, file=fd)
print('Bi acc adjusted:', bi_acc / test_df_shape_original, file=fd)
print('Cross acc adjusted:', cross_acc / test_df_shape_original, file=fd)
print(f'-- Oracle HITL evaluation when y_pred in [{tl}, {th}]', file=fd)
        print('Ratio to human validator:', 1 - (oracle_df.shape[0] / oracle_original_shape), file=fd)
print(_classification_report_oracle, file=fd)
print('Bi acc oracle:', bi_acc_oracle / test_df_oracle.shape[0], file=fd)
print('Cross acc oracle:', cross_acc_oracle / test_df_oracle.shape[0], file=fd)
if not isinstance(task['train'], str):
# if not pre-trained save it
with open(os.path.join(outpath, task['name']+'_model.pickle'), 'wb') as fd:
pickle.dump(clf, fd)
myplot(y_test, y_pred, y_pred_round, task['name'], outpath)
print('-'*30)
if only is not None and os.path.isfile(os.path.join(outpath, 'evaluation_summary.csv')):
csv_report_old = pd.read_csv(os.path.join(outpath, 'evaluation_summary.csv'), index_col=0)
csv_report_old = csv_report_old[~csv_report_old['name'].isin(csv_report['name'].unique())]
csv_report = pd.concat([csv_report_old, csv_report])
csv_report.to_csv(os.path.join(outpath, 'evaluation_summary.csv')) | 39.779097 | 213 | 0.654087 |
da07e59c592d16348593e59950b2bf4aade99af9 | 551 | py | Python | 2749/main.py | yeonghoey/baekjoon | a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e | [
"MIT"
] | 1 | 2018-09-20T05:15:30.000Z | 2018-09-20T05:15:30.000Z | 2749/main.py | yeonghoey/baekjoon | a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e | [
"MIT"
] | null | null | null | 2749/main.py | yeonghoey/baekjoon | a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e | [
"MIT"
] | null | null | null | from functools import lru_cache
base = [1, 1, 1, 0]
@lru_cache()
def fib(n):
if n == 0:
return [1, 0, 0, 1]
if n == 1:
return base
half = fib(n // 2)
half2 = mul(half, half)
if n % 2 == 0:
return half2
else:
return mul(half2, base)
def mul(mx, my):
xa, xb, xc, xd = mx
ya, yb, yc, yd = my
return [
(xa*ya + xb*yc) % 1000000,
(xa*yb + xb*yd) % 1000000,
(xc*ya + xd*yc) % 1000000,
(xc*yb + xd*yd) % 1000000,
]
n = int(input())
print(fib(n)[1])
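# --- Added explanatory note (not part of the original submission) ---
# The flat list [a, b, c, d] stands for the 2x2 matrix [[a, b], [c, d]].
# `base` is [[1, 1], [1, 0]], and [[1, 1], [1, 0]]^n = [[F(n+1), F(n)], [F(n), F(n-1)]],
# so fib(n)[1] is the n-th Fibonacci number. Squaring halves the exponent, which gives
# O(log n) matrix multiplications, and every entry is reduced mod 1,000,000 because the
# problem only asks for the last six digits of F(n).
# Quick sanity check (illustrative): fib(10)[1] == 55.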
| 17.774194 | 34 | 0.46824 |
320f4a5e551e7353ae62b704c6399bed69d9471a | 7,565 | py | Python | EXAMPLES/PYTAN_API/invalid_ask_manual_question_sensor_help.py | netsec/pytan | 29a3484d21cb90d8896275febd1c535e4f3cdc7e | [
"MIT"
] | 1 | 2019-01-29T21:22:06.000Z | 2019-01-29T21:22:06.000Z | EXAMPLES/PYTAN_API/invalid_ask_manual_question_sensor_help.py | c1rdan/pytan | 5e537a6dcf4136e3b9c3905a39f073396e7f044f | [
"MIT"
] | null | null | null | EXAMPLES/PYTAN_API/invalid_ask_manual_question_sensor_help.py | c1rdan/pytan | 5e537a6dcf4136e3b9c3905a39f073396e7f044f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Have ask_manual() return the help for sensors
"""
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["qtype"] = u'manual'
kwargs["sensors_help"] = True
print "...CALLING: handler.ask() with args: {}".format(kwargs)
try:
handler.ask(**kwargs)
except Exception as e:
print "...EXCEPTION: {}".format(e)
# this should throw an exception of type: pytan.exceptions.PytanHelp
# uncomment to see full exception
# traceback.print_exc(file=sys.stdout)
'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.ask() with args: {'qtype': u'manual', 'sensors_help': True}
...EXCEPTION:
Sensors Help
============
Supplying sensors controls what columns will be shown when you ask a
question.
A sensor string is a human string that describes, at a minimum, a sensor.
It can also optionally define a selector for the sensor, parameters for
the sensor, a filter for the sensor, and options for the filter for the
sensor. Sensors can be provided as a string or a list of strings.
Examples for basic sensors
---------------------------------
Supplying a single sensor:
'Computer Name'
Supplying two sensors in a list of strings:
['Computer Name', 'IP Route Details']
Supplying multiple sensors with selectors (name is the default
selector if none is supplied):
[
'Computer Name',
'name:Computer Name',
'id:1',
'hash:123456789',
]
Sensor Parameters
-----------------
Supplying parameters to a sensor can control the arguments that are
supplied to a sensor, if that sensor takes any arguments.
Sensor parameters must be surrounded with curly braces '{}',
and must have a key and value specified that is separated by
an equals '='. Multiple parameters must be separated by
a comma ','. The key should match up to a valid parameter key
for the sensor in question.
If a parameter is supplied and the sensor doesn't have a
corresponding key name, it will be ignored. If the sensor has
parameters and a parameter is NOT supplied then one of two
paths will be taken:
* if the parameter does not require a default value, the
parameter is left blank and not supplied.
* if the parameter does require a value (pulldowns, for
example), a default value is derived (for pulldowns,
the first value available as a pulldown entry is used).
Examples for sensors with parameters
------------------------------------
Supplying a single sensor with a single parameter 'dirname':
'Sensor With Params{dirname=Program Files}'
Supplying a single sensor with two parameters, 'param1' and
'param2':
'Sensor With Params{param1=value1,param2=value2}'
Sensor Filters
--------------
Supplying a filter to a sensor controls what data will be shown in
those columns (sensors) you've provided.
Sensor filters can be supplied by adding ', that FILTER:VALUE',
where FILTER is a valid filter string, and VALUE is the string
that you want FILTER to match on.
See filter help for a list of all possible FILTER strings.
See options help for a list of options that can control how
the filter works.
Examples for sensors with filters
---------------------------------
Supplying a sensor with a filter that limits the results to only
show column data that matches the regular expression
'.*Windows.*' (Tanium does a case insensitive match by default):
'Computer Name, that contains:Windows'
Supplying a sensor with a filter that limits the results to only
show column data that matches the regular expression
'Microsoft.*':
'Computer Name, that starts with:Microsoft'
Supply a sensor with a filter that limits the results to only
show column data that has a version greater or equal to
'39.0.0.0'. Since this sensor uses Version as its default result
type, there is no need to change the value type using filter
options.
'Installed Application Version' \
'{Application Name=Google Chrome}, that =>:39.0.0.0'
Sensor Options
--------------
Supplying options to a sensor can change how the filter for
that sensor works.
Sensor options can be supplied by adding ', opt:OPTION' or
', opt:OPTION:VALUE' for those options that require values,
where OPTION is a valid option string, and VALUE is the
appropriate value required by accordant OPTION.
See options help for a list of options that can control how
the filter works.
Examples for sensors with options
---------------------------------
Supplying a sensor with an option that forces tanium to
re-fetch any cached column data that is older than 1 minute:
'Computer Name, opt:max_data_age:60'
Supplying a sensor with filter and an option that causes
Tanium to match case for the filter value:
'Computer Name, that contains:Windows, opt:match_case'
Supplying a sensor with a filter and an option that causes
Tanium to match all values supplied:
'Computer Name, that contains:Windows, opt:match_all_values'
Supplying a sensor with a filter and a set of options that
causes Tanium to recognize the value type as String (which is
the default type for most sensors), re-fetch data older than
10 minutes, match any values, and match case:
'Computer Name', that contains:Windows, ' \
opt:value_type:string, opt:max_data_age:600, ' \
'opt:match_any_value, opt:match_case'
'''
'''STDERR from running this:
'''
| 32.467811 | 198 | 0.73232 |
d4378489d8ab2f59561021c693e22fdfcc2ca757 | 15,748 | py | Python | autogluon/scheduler/fifo.py | zhanghang1989/autogluon-legacy | 8638fb6a71947161bdd4f6876cc7ad035229c95b | [
"Apache-2.0"
] | 4 | 2019-12-11T02:44:22.000Z | 2020-02-22T06:44:43.000Z | autogluon/scheduler/fifo.py | zhanghang1989/autogluon-legacy | 8638fb6a71947161bdd4f6876cc7ad035229c95b | [
"Apache-2.0"
] | null | null | null | autogluon/scheduler/fifo.py | zhanghang1989/autogluon-legacy | 8638fb6a71947161bdd4f6876cc7ad035229c95b | [
"Apache-2.0"
] | null | null | null | import os
import time
import copy
import pickle
import json
import logging
import threading
import multiprocessing as mp
from collections import OrderedDict
from .resource import DistributedResource
from ..utils import save, load, mkdir, try_import_mxboard
from ..core import Task
from ..core.decorator import _autogluon_method
from .scheduler import TaskScheduler
from ..searcher import *
from .reporter import DistStatusReporter, FakeReporter
from ..utils import DeprecationHelper, in_ipynb
from tqdm.auto import tqdm
__all__ = ['FIFOScheduler', 'DistributedFIFOScheduler']
logger = logging.getLogger(__name__)
searchers = {
'random': RandomSearcher,
'skopt': SKoptSearcher,
'grid': GridSearcher,
}
class FIFOScheduler(TaskScheduler):
r"""Simple scheduler that just runs trials in submission order.
Parameters
----------
train_fn : callable
        A task launch function for training. Note: please add the `@autogluon_method` decorator to the original function.
args : object (optional)
Default arguments for launching train_fn.
resource : dict
Computation resources. For example, `{'num_cpus':2, 'num_gpus':1}`
searcher : str or object
Autogluon searcher. For example, autogluon.searcher.self.argsRandomSampling
time_attr : str
A training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_epoch` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
reward_attr : str
The training result objective value attribute. As with `time_attr`, this may refer to any objective value.
Stopping procedures will use this attribute.
dist_ip_addrs : list of str
IP addresses of remote machines.
Examples
--------
>>> import numpy as np
>>> import autogluon as ag
>>> @ag.args(
... lr=ag.space.Real(1e-3, 1e-2, log=True),
... wd=ag.space.Real(1e-3, 1e-2))
>>> def train_fn(args, reporter):
... print('lr: {}, wd: {}'.format(args.lr, args.wd))
... for e in range(10):
... dummy_accuracy = 1 - np.power(1.8, -np.random.uniform(e, 2*e))
... reporter(epoch=e, accuracy=dummy_accuracy, lr=args.lr, wd=args.wd)
>>> scheduler = ag.scheduler.FIFOScheduler(train_fn,
... resource={'num_cpus': 2, 'num_gpus': 0},
... num_trials=20,
... reward_attr='accuracy',
... time_attr='epoch')
>>> scheduler.run()
>>> scheduler.join_jobs()
>>> scheduler.get_training_curves(plot=True)
>>> ag.done()
"""
def __init__(self, train_fn, args=None, resource=None,
searcher='random', search_options=None,
checkpoint='./exp/checkpoint.ag',
resume=False, num_trials=None,
time_out=None, max_reward=1.0, time_attr='epoch',
reward_attr='accuracy',
visualizer='none', dist_ip_addrs=None):
super(FIFOScheduler,self).__init__(dist_ip_addrs)
if resource is None:
resource = {'num_cpus': 1, 'num_gpus': 0}
if search_options is None:
search_options = dict()
assert isinstance(train_fn, _autogluon_method)
self.train_fn = train_fn
self.args = args if args else train_fn.args
self.resource = resource
if isinstance(searcher, str):
self.searcher = searchers[searcher](train_fn.cs, **search_options)
else:
assert isinstance(searcher, BaseSearcher)
self.searcher = searcher
# meta data
self.metadata = {}
self.metadata['search_space'] = train_fn.kwspaces
keys = copy.deepcopy(list(self.metadata['search_space'].keys()))
#for k in keys:
# if '.' in k:
# v = self.metadata['search_space'].pop(k)
# new_k = k.split('.')[-1]
# self.metadata['search_space'][new_k] = v
self.metadata['search_strategy'] = searcher
self.metadata['stop_criterion'] = {'time_limits': time_out, 'max_reward': max_reward}
self.metadata['resources_per_trial'] = resource
self.num_trials = num_trials
self.time_out = time_out
self.max_reward = max_reward
self._checkpoint = checkpoint
self._time_attr = time_attr
self._reward_attr = reward_attr
self.visualizer = visualizer.lower()
if self.visualizer == 'tensorboard' or self.visualizer == 'mxboard':
try_import_mxboard()
from mxboard import SummaryWriter
self.mxboard = SummaryWriter(
logdir=os.path.join(os.path.splitext(checkpoint)[0], 'logs'),
flush_secs=3,
verbose=False)
self.log_lock = mp.Lock()
self.training_history = OrderedDict()
self.config_history = OrderedDict()
if resume:
if os.path.isfile(checkpoint):
self.load_state_dict(load(checkpoint))
else:
msg = 'checkpoint path {} is not available for resume.'.format(checkpoint)
logger.exception(msg)
                raise FileNotFoundError(msg)
def run(self, **kwargs):
"""Run multiple number of trials
"""
start_time = time.time()
self.num_trials = kwargs.get('num_trials', self.num_trials)
self.time_out = kwargs.get('time_out', self.time_out)
logger.info('Starting Experiments')
logger.info('Num of Finished Tasks is {}'.format(self.num_finished_tasks))
logger.info('Num of Pending Tasks is {}'.format(self.num_trials - self.num_finished_tasks))
tbar = tqdm(range(self.num_finished_tasks, self.num_trials))
for _ in tbar:
if self.time_out and time.time() - start_time >= self.time_out \
or self.max_reward and self.get_best_reward() >= self.max_reward:
break
self.schedule_next()
def save(self, checkpoint=None):
"""Save Checkpoint
"""
if checkpoint is None:
if self._checkpoint is None:
logger.warning("Checkpointing is disabled")
else:
checkpoint = self._checkpoint
if checkpoint is not None:
mkdir(os.path.dirname(checkpoint))
save(self.state_dict(), checkpoint)
def schedule_next(self):
"""Schedule next searcher suggested task
"""
# Allow for the promotion of a previously chosen config. Also,
# extra_kwargs contains extra info passed to both add_job and to
# get_config (if no config is promoted)
config, extra_kwargs = self._promote_config()
if config is None:
# No config to promote: Query next config to evaluate from searcher
config = self.searcher.get_config(**extra_kwargs)
extra_kwargs['new_config'] = True
else:
# This is not a new config, but a paused one which is now promoted
extra_kwargs['new_config'] = False
task = Task(self.train_fn, {'args': self.args, 'config': config},
DistributedResource(**self.resource))
self.add_job(task, **extra_kwargs)
def run_with_config(self, config):
"""Run with config for final fit.
It launches a single training trial under any fixed values of the hyperparameters.
For example, after HPO has identified the best hyperparameter values based on a hold-out dataset,
one can use this function to retrain a model with the same hyperparameters on all the available labeled data
        (including the hold-out set). It can also return other objects or states.
"""
task = Task(self.train_fn, {'args': self.args, 'config': config},
DistributedResource(**self.resource))
reporter = FakeReporter()
task.args['reporter'] = reporter
return self.run_job(task)
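    # Added usage sketch (illustrative, not part of the original API docs): after a
    # search has finished, one might retrain with the best configuration found, e.g.
    #     best_config = scheduler.get_best_config()
    #     result = scheduler.run_with_config(best_config)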
def _dict_from_task(self, task):
if isinstance(task, Task):
return {'TASK_ID': task.task_id, 'Config': task.args['config']}
else:
assert isinstance(task, dict)
return {'TASK_ID': task['TASK_ID'], 'Config': task['Config']}
def add_job(self, task, **kwargs):
"""Adding a training task to the scheduler.
Args:
            task (:class:`autogluon.scheduler.Task`): a new training task
Relevant entries in kwargs:
- bracket: HB bracket to be used. Has been sampled in _promote_config
- new_config: If True, task starts new config eval, otherwise it promotes
a config (only if type == 'promotion')
Only if new_config == False:
- config_key: Internal key for config
- resume_from: config promoted from this milestone
- milestone: config promoted to this milestone (next from resume_from)
"""
cls = FIFOScheduler
cls.RESOURCE_MANAGER._request(task.resources)
# reporter
reporter = DistStatusReporter()
task.args['reporter'] = reporter
# Register pending evaluation
self.searcher.register_pending(task.args['config'])
# main process
job = cls._start_distributed_job(task, cls.RESOURCE_MANAGER, self.env_sem)
# reporter thread
rp = threading.Thread(target=self._run_reporter, args=(task, job, reporter,
self.searcher), daemon=False)
rp.start()
task_dict = self._dict_from_task(task)
task_dict.update({'Task': task, 'Job': job, 'ReporterThread': rp})
# checkpoint thread
if self._checkpoint is not None:
def _save_checkpoint_callback(fut):
self._cleaning_tasks()
self.save()
job.add_done_callback(_save_checkpoint_callback)
with self.LOCK:
self.scheduled_tasks.append(task_dict)
def _clean_task_internal(self, task_dict):
task_dict['ReporterThread'].join()
def _run_reporter(self, task, task_job, reporter, searcher,
checkpoint_semaphore=None):
last_result = None
while not task_job.done():
reported_result = reporter.fetch()
if reported_result.get('done', False):
reporter.move_on()
if checkpoint_semaphore is not None:
checkpoint_semaphore.release()
break
self._add_training_result(
task.task_id, reported_result, config=task.args['config'])
reporter.move_on()
last_result = reported_result
if last_result is not None:
last_result['done'] = True
searcher.update(
config=task.args['config'],
reward=last_result[self._reward_attr], **last_result)
def _promote_config(self):
"""
        Provides a hook in schedule_next, which allows a config that has been
        selected and partially evaluated previously to be promoted.
:return: config, extra_args
"""
config = None
extra_args = dict()
return config, extra_args
def get_best_config(self):
"""Get the best configuration from the finished jobs.
"""
return self.searcher.get_best_config()
def get_best_reward(self):
"""Get the best reward from the finished jobs.
"""
return self.searcher.get_best_reward()
def _add_training_result(self, task_id, reported_result, config=None):
if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
if 'loss' in reported_result:
self.mxboard.add_scalar(tag='loss',
value=('task {task_id} valid_loss'.format(task_id=task_id),
reported_result['loss']),
global_step=reported_result[self._reward_attr])
self.mxboard.add_scalar(tag=self._reward_attr,
value=('task {task_id} {reward_attr}'.format(
task_id=task_id, reward_attr=self._reward_attr),
reported_result[self._reward_attr]),
global_step=reported_result[self._reward_attr])
with self.log_lock:
# Note: We store all of reported_result in training_history[task_id],
# not just the reward value.
if task_id in self.training_history:
self.training_history[task_id].append(reported_result)
else:
self.training_history[task_id] = [reported_result]
if config:
self.config_history[task_id] = config
def get_training_curves(self, filename=None, plot=False, use_legend=True):
"""Get Training Curves
Parameters
----------
filename : str
plot : bool
use_legend : bool
Examples
--------
>>> scheduler.run()
>>> scheduler.join_jobs()
>>> scheduler.get_training_curves(plot=True)
.. image:: https://github.com/zhanghang1989/AutoGluonWebdata/blob/master/doc/api/autogluon.1.png?raw=true
"""
if filename is None and not plot:
logger.warning('Please either provide filename or allow plot in get_training_curves')
import matplotlib.pyplot as plt
plt.ylabel(self._reward_attr)
plt.xlabel(self._time_attr)
with self.log_lock:
for task_id, task_res in self.training_history.items():
rewards = [x[self._reward_attr] for x in task_res]
x = list(range(len(task_res)))
plt.plot(x, rewards, label='task {}'.format(task_id))
if use_legend:
plt.legend(loc='best')
if filename is not None:
logger.info('Saving Training Curve in {}'.format(filename))
plt.savefig(filename)
if plot: plt.show()
def state_dict(self, destination=None):
"""Returns a dictionary containing a whole state of the Scheduler
Examples
--------
>>> ag.save(scheduler.state_dict(), 'checkpoint.ag')
"""
destination = super(FIFOScheduler, self).state_dict(destination)
destination['searcher'] = pickle.dumps(self.searcher)
with self.log_lock:
destination['training_history'] = json.dumps(self.training_history)
if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
destination['visualizer'] = json.dumps(self.mxboard._scalar_dict)
return destination
def load_state_dict(self, state_dict):
"""Load from the saved state dict.
Examples
--------
>>> scheduler.load_state_dict(ag.load('checkpoint.ag'))
"""
super(FIFOScheduler, self).load_state_dict(state_dict)
self.searcher = pickle.loads(state_dict['searcher'])
with self.log_lock:
self.training_history = json.loads(state_dict['training_history'])
if self.visualizer == 'mxboard' or self.visualizer == 'tensorboard':
self.mxboard._scalar_dict = json.loads(state_dict['visualizer'])
logger.debug('Loading Searcher State {}'.format(self.searcher))
DistributedFIFOScheduler = DeprecationHelper(FIFOScheduler, 'DistributedFIFOScheduler')
| 41.771883 | 121 | 0.605029 |
a326c7c77ee14d642ea073c246c55c110ffec02b | 2,567 | py | Python | nngen/onnx/util.py | m1kit/nngen | c96457aa0f1681a8ba7c12881ea3d455eccffde0 | [
"Apache-2.0"
] | null | null | null | nngen/onnx/util.py | m1kit/nngen | c96457aa0f1681a8ba7c12881ea3d455eccffde0 | [
"Apache-2.0"
] | null | null | null | nngen/onnx/util.py | m1kit/nngen | c96457aa0f1681a8ba7c12881ea3d455eccffde0 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import nngen.basic_types as bt
import nngen.storage as storage
import nngen.operator as operator
def get_name(node):
name = node.name
if name == '': # if isinstance(node, onnx.FOO.BAR):
name = '_'.join([output for output in node.output])
return name
def search_node_from_model(model, name):
for node in model.graph.node:
node_name = get_name(node)
if name == node_name:
return node
return None
def to_shape(node):
return tuple([d.dim_value for d in node.type.tensor_type.shape.dim])
def transpose_layout(value, expected_layout, default_layout):
if value.layout == expected_layout:
return value
if isinstance(value, bt._Storage) and value.layout is None and not value.consumers:
if len(expected_layout) != len(default_layout):
raise ValueError('layout format size mismatch: %d != %d' %
(len(expected_layout), len(default_layout)))
perm = []
new_shape = []
for e in expected_layout:
index = default_layout.find(e)
perm.append(index)
new_shape.append(value.shape[index])
value.shape = tuple(new_shape)
value.layout = expected_layout
value.perm = tuple(perm)
if value.value is not None:
value.value = np.transpose(value.value, perm)
return value
#current_layout = value.layout if value.layout is not None else default_layout
current_layout = get_layout(value)
if current_layout == expected_layout:
return value
if current_layout is None:
current_layout = default_layout
perm = []
for e in expected_layout:
index = current_layout.find(e)
perm.append(index)
value = operator.transpose(value, perm)
value.layout = expected_layout
return value
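# Added illustrative note: with a default_layout of 'NCHW' and an expected_layout of
# 'NHWC' (assumed values for this example only), the loop above computes the
# permutation (0, 2, 3, 1), so a value of shape (1, 3, 32, 32) comes out with
# shape (1, 32, 32, 3).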
def get_layout(value):
if value.layout is not None:
return value.layout
if not isinstance(value, bt._Operator):
return None
if isinstance(value, operator.conv2d):
return get_layout(value.args[0])
if isinstance(value, (operator.normalize, operator.scaled_add)):
return get_layout(value.args[0])
if isinstance(value, bt._ElementwiseOperator):
for arg in value.args:
ret = get_layout(arg)
if ret is not None:
return ret
raise ValueError('can not identify layout.')
| 25.929293 | 87 | 0.651344 |
40fee48eba464574f50b3814184d03fa32d0e63d | 4,452 | py | Python | pyaz/sig/gallery_application/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/sig/gallery_application/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | null | null | null | pyaz/sig/gallery_application/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
] | 1 | 2022-02-03T09:12:01.000Z | 2022-02-03T09:12:01.000Z | from ... pyaz_utils import _call_az
from . import version
def list(gallery_name, resource_group):
'''
Required Parameters:
- gallery_name -- gallery name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az sig gallery-application list", locals())
def show(gallery_name, name, resource_group):
'''
Required Parameters:
- gallery_name -- gallery name
- name -- The name of the gallery Application
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az sig gallery-application show", locals())
def create(gallery_name, name, os_type, resource_group, description=None, location=None, no_wait=None, tags=None):
'''
Create a gallery Application Definition.
Required Parameters:
- gallery_name -- gallery name
- name -- The name of the gallery Application
- os_type -- This property allows you to specify the supported type of the OS that application is built for. <br><br> Possible values are: <br><br> **Windows** <br><br> **Linux**
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- description -- The description of this gallery Application Definition resource. This property is updatable.
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- no_wait -- Do not wait for the long-running operation to finish.
- tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
'''
return _call_az("az sig gallery-application create", locals())
def update(gallery_name, name, resource_group, description=None, location=None, no_wait=None, tags=None):
'''
Update a gallery Application Definition.
Required Parameters:
- gallery_name -- gallery name
- name -- The name of the gallery Application
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- description -- The description of this gallery Application Definition resource. This property is updatable.
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- no_wait -- Do not wait for the long-running operation to finish.
- tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
'''
return _call_az("az sig gallery-application update", locals())
def delete(gallery_name, name, resource_group, no_wait=None, yes=None):
'''
Required Parameters:
- gallery_name -- gallery name
- name -- The name of the gallery Application
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- no_wait -- Do not wait for the long-running operation to finish.
- yes -- Do not prompt for confirmation.
'''
return _call_az("az sig gallery-application delete", locals())
def wait(gallery_name, name, resource_group, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
'''
Place the CLI in a waiting state until a condition of the sig gallery-application is met.
Required Parameters:
- gallery_name -- gallery name
- name -- The name of the gallery Application
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- created -- wait until created with 'provisioningState' at 'Succeeded'
- custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
- deleted -- wait until deleted
- exists -- wait until the resource exists
- interval -- polling interval in seconds
- timeout -- maximum wait in seconds
- updated -- wait until updated with provisioningState at 'Succeeded'
'''
return _call_az("az sig gallery-application wait", locals())
| 44.079208 | 182 | 0.708221 |
b836c54508818ed04706426e6d662f05407d072c | 5,621 | py | Python | examples/02-plot/chart_basics.py | eino/pyvista | b9c4e67d43491958f70b04cd2664965b938910ba | [
"MIT"
] | 25 | 2018-12-03T18:22:58.000Z | 2019-02-26T01:30:35.000Z | examples/02-plot/chart_basics.py | eino/pyvista | b9c4e67d43491958f70b04cd2664965b938910ba | [
"MIT"
] | 108 | 2019-02-27T19:52:12.000Z | 2019-05-08T02:15:21.000Z | examples/02-plot/chart_basics.py | eino/pyvista | b9c4e67d43491958f70b04cd2664965b938910ba | [
"MIT"
] | 8 | 2019-03-02T13:41:48.000Z | 2019-04-22T16:57:44.000Z | """
.. _chart_basics_example:
Chart Basics
~~~~~~~~~~~~
This example shows how different types of charts can be added to the scene. A more complex example, showing how to
combine multiple charts as overlays in the same renderer, is given in :ref:`chart_overlays_example`.
"""
import numpy as np
import pyvista as pv
rng = np.random.default_rng(1) # Seeded random number generator for consistent data generation
###############################################################################
# This example shows how to create a 2D scatter plot from 100 randomly sampled
# datapoints. By default, the chart automatically rescales its axes such that
# all plotted data is visible. By right clicking on the chart you can enable
# zooming and panning of the chart.
x = rng.standard_normal(100)
y = rng.standard_normal(100)
chart = pv.Chart2D()
chart.scatter(x, y, size=10, style="+")
chart.show()
###############################################################################
# To connect datapoints with lines, you can create a 2D line plot as shown in
# the example below. You can also dynamically 'zoom in' on the plotted data
# by specifying a custom axis range yourself.
x = np.linspace(0, 10, 1000)
y = np.sin(x**2)
chart = pv.Chart2D()
chart.line(x, y)
chart.x_range = [5, 10] # Focus on the second half of the curve
chart.show()
###############################################################################
# You can also easily combine scatter and line plots using the general
# :func:`pyvista.Chart2D.plot` function, specifying both the line and marker
# style at once.
x = np.arange(11)
y = rng.integers(-5, 6, 11)
chart = pv.Chart2D()
chart.background_color = (0.5, 0.9, 0.5) # Use custom background color for chart
chart.plot(x, y, 'x--b')  # Marker style 'x', dashed line style '--', blue color 'b'
chart.show()
###############################################################################
# The following example shows how to create filled areas between two polylines.
x = np.linspace(0, 10, 1000)
y1 = np.cos(x) + np.sin(3 * x)
y2 = 0.1 * (x - 5)
chart = pv.Chart2D()
chart.area(x, y1, y2, color=(0.1, 0.1, 0.9, 0.5))
chart.line(x, y1, color=(0.9, 0.1, 0.1), width=4, style="--")
chart.line(x, y2, color=(0.1, 0.9, 0.1), width=4, style="--")
chart.title = "Area plot" # Set custom chart title
chart.show()
###############################################################################
# Bar charts are also supported. Multiple bar plots are placed next to each
# other.
x = np.arange(1, 13)
y1 = rng.integers(1e2, 1e4, 12)
y2 = rng.integers(1e2, 1e4, 12)
chart = pv.Chart2D()
chart.bar(x, y1, color="b", label="2020")
chart.bar(x, y2, color="r", label="2021")
chart.x_axis.tick_locations = x
chart.x_axis.tick_labels = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
chart.x_label = "Month"
chart.y_axis.tick_labels = "2e"
chart.y_label = "# incidents"
chart.show()
###############################################################################
# In case you want to stack the bars, instead of drawing them next to each
# other, pass a sequence of y values.
x = np.arange(1, 11)
ys = [rng.integers(1, 11, 10) for _ in range(5)]
labels = [f"Machine {i}" for i in range(5)]
chart = pv.Chart2D()
chart.bar(x, ys, label=labels)
chart.x_axis.tick_locations = x
chart.x_label = "Configuration"
chart.y_label = "Production"
chart.grid = False # Disable the grid lines
chart.show()
###############################################################################
# In a similar way, you can stack multiple area plots on top of each other.
x = np.arange(0, 11)
ys = [rng.integers(1, 11, 11) for _ in range(5)]
labels = [f"Segment {i}" for i in range(5)]
chart = pv.Chart2D()
chart.stack(x, ys, labels=labels)
chart.show()
###############################################################################
# Besides the flexible Chart2D used in the previous examples, there are a couple
# other dedicated charts you can create. The example below shows how a pie
# chart can be created.
data = np.array([8.4, 6.1, 2.7, 2.4, 0.9])
chart = pv.ChartPie(data)
chart.plot.labels = [f"slice {i}" for i in range(len(data))]
chart.show()
###############################################################################
# To summarize statistics of datasets, you can easily create a boxplot.
data = [rng.poisson(lam, 20) for lam in range(2, 12, 2)]
chart = pv.ChartBox(data)
chart.plot.labels = [f"Experiment {i}" for i in range(len(data))]
chart.show()
###############################################################################
# If you would like to add other types of chart that are currently not
# supported by pyvista or VTK, you can resort to matplotlib to create your
# custom chart and afterwards embed it into a pyvista plotting window.
# The below example shows how you can do this.
import matplotlib.pyplot as plt
# First, create the matplotlib figure
f, ax = plt.subplots(
tight_layout=True
) # Tight layout to keep axis labels visible on smaller figures
alphas = [0.5 + i for i in range(5)]
betas = [*reversed(alphas)]
N = int(1e4)
data = [rng.beta(alpha, beta, N) for alpha, beta in zip(alphas, betas)]
labels = [f"$\\alpha={alpha:.1f}\\,;\\,\\beta={beta:.1f}$" for alpha, beta in zip(alphas, betas)]
ax.violinplot(data)
ax.set_xticks(np.arange(1, 1 + len(labels)))
ax.set_xticklabels(labels)
ax.set_title("$B(\\alpha, \\beta)$")
# Next, embed the figure into a pyvista plotting window
p = pv.Plotter()
chart = pv.ChartMPL(f)
chart.background_color = 'w'
p.add_chart(chart)
p.show()
| 33.658683 | 114 | 0.593845 |
8acae655c79356dd2ade82041c1ffa58fd5bd9ca | 1,686 | py | Python | jmanager/utils/file_utils.py | LaraFerCue/jail_manager | d289145d6b01deb85a5364dcb94b879b220c1235 | [
"BSD-3-Clause"
] | 1 | 2019-10-26T19:42:33.000Z | 2019-10-26T19:42:33.000Z | jmanager/utils/file_utils.py | LaraFerCue/jmanager | d289145d6b01deb85a5364dcb94b879b220c1235 | [
"BSD-3-Clause"
] | null | null | null | jmanager/utils/file_utils.py | LaraFerCue/jmanager | d289145d6b01deb85a5364dcb94b879b220c1235 | [
"BSD-3-Clause"
] | null | null | null | import lzma
import os
import shutil
import sys
import tarfile
from pathlib import PosixPath
from stat import SF_IMMUTABLE
from tempfile import TemporaryDirectory
from typing import Callable
def extract_tarball_into(jail_path: PosixPath, path_to_tarball: PosixPath,
callback: Callable[[str, int, int], None]):
with TemporaryDirectory(prefix="jail_factory_") as temp_dir:
temp_file_path = f"{temp_dir}/{path_to_tarball.name}"
with lzma.open(path_to_tarball.as_posix(), 'r') as lz_file:
with open(temp_file_path, "wb") as temp_file:
temp_file.write(lz_file.read())
msg = f"Extracting {path_to_tarball.name}"
with tarfile.open(temp_file_path, mode='r') as tar_file:
members = tar_file.getmembers()
iteration = 0
for member in members:
if callback is not None:
callback(msg, iteration, len(members))
tar_file.extract(member, path=jail_path.as_posix())
iteration += 1
if callback is not None:
callback(msg, len(members), len(members))
def set_flags_to_folder_recursively(path: PosixPath, flags: int):
if not path.is_dir() and not path.is_symlink():
if sys.platform.startswith('freebsd'):
os.chflags(path.as_posix(), flags)
return
if path.is_symlink():
return
for node in path.iterdir():
set_flags_to_folder_recursively(path=node, flags=flags)
def remove_immutable_path(jail_path: PosixPath):
set_flags_to_folder_recursively(jail_path, not SF_IMMUTABLE)
shutil.rmtree(jail_path, ignore_errors=True)
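# Added usage sketch (the paths and the callback below are illustrative only):
#
#     def progress(msg: str, done: int, total: int) -> None:
#         print(f"{msg}: {done}/{total}")
#
#     extract_tarball_into(PosixPath("/usr/jails/base"), PosixPath("/tmp/base.txz"), progress)
#     # later, to tear the jail down even if files were flagged immutable:
#     remove_immutable_path(PosixPath("/usr/jails/base"))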
| 34.408163 | 74 | 0.661329 |
07f290492902ce737a4adbe61ec8c4028b37fc6f | 336 | py | Python | pytorch_lightning/core/root_module.py | ammaraskar/pytorch-lightning | 539d1291789e2960960a9bfe85096eaed13d8a22 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/core/root_module.py | ammaraskar/pytorch-lightning | 539d1291789e2960960a9bfe85096eaed13d8a22 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/core/root_module.py | ammaraskar/pytorch-lightning | 539d1291789e2960960a9bfe85096eaed13d8a22 | [
"Apache-2.0"
] | 1 | 2020-12-18T19:46:19.000Z | 2020-12-18T19:46:19.000Z | """
.. warning:: `root_module` module has been renamed to `lightning` since v0.6.0.
The deprecated module name will be removed in v0.8.0.
"""
import warnings
warnings.warn("`root_module` module has been renamed to `lightning` since v0.6.0."
" The deprecated module name will be removed in v0.8.0.", DeprecationWarning)
| 33.6 | 91 | 0.705357 |
117488b7ea531c637eaca3d7aefc9324bdc056ed | 11,714 | py | Python | mlflow/cli.py | vic0777/mlflow | ac210a39b45c8d28a181d11178abc6229bb392aa | [
"Apache-2.0"
] | null | null | null | mlflow/cli.py | vic0777/mlflow | ac210a39b45c8d28a181d11178abc6229bb392aa | [
"Apache-2.0"
] | null | null | null | mlflow/cli.py | vic0777/mlflow | ac210a39b45c8d28a181d11178abc6229bb392aa | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import json
import os
import sys
import logging
import click
from click import UsageError
import mlflow.azureml.cli
import mlflow.projects as projects
import mlflow.data
import mlflow.experiments
import mlflow.models.cli
import mlflow.sagemaker.cli
import mlflow.runs
import mlflow.store.db.utils
import mlflow.db
from mlflow.tracking.utils import _is_local_uri
from mlflow.utils.logging_utils import eprint
from mlflow.utils.process import ShellCommandException
from mlflow.utils import cli_args
from mlflow.server import _run_server
from mlflow.store import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
from mlflow import tracking
import mlflow.store.cli
_logger = logging.getLogger(__name__)
@click.group()
@click.version_option()
def cli():
pass
@cli.command()
@click.argument("uri")
@click.option("--entry-point", "-e", metavar="NAME", default="main",
help="Entry point within project. [default: main]. If the entry point is not found, "
"attempts to run the project file with the specified name as a script, "
"using 'python' to run .py files and the default shell (specified by "
"environment variable $SHELL) to run .sh files")
@click.option("--version", "-v", metavar="VERSION",
help="Version of the project to run, as a Git commit reference for Git projects.")
@click.option("--param-list", "-P", metavar="NAME=VALUE", multiple=True,
help="A parameter for the run, of the form -P name=value. Provided parameters that "
"are not in the list of parameters for an entry point will be passed to the "
"corresponding entry point as command-line arguments in the form `--name value`")
@click.option("--experiment-name", envvar=tracking._EXPERIMENT_NAME_ENV_VAR,
help="Name of the experiment under which to launch the run. If not "
"specified, 'experiment-id' option will be used to launch run.")
@click.option("--experiment-id", envvar=tracking._EXPERIMENT_ID_ENV_VAR, type=click.STRING,
help="ID of the experiment under which to launch the run.")
# TODO: Add tracking server argument once we have it working.
@click.option("--backend", "-b", metavar="BACKEND",
help="Execution backend to use for run. Supported values: 'local' (runs project "
"locally) and 'databricks' (runs project on a Databricks cluster). "
"Defaults to 'local'. If running against Databricks, will run against a "
"Databricks workspace determined as follows: if a Databricks tracking URI "
"of the form 'databricks://profile' has been set (e.g. by setting "
"the MLFLOW_TRACKING_URI environment variable), will run against the "
"workspace specified by <profile>. Otherwise, runs against the workspace "
"specified by the default Databricks CLI profile. See "
"https://github.com/databricks/databricks-cli for more info on configuring a "
"Databricks CLI profile.")
@click.option("--backend-config", "-c", metavar="FILE",
help="Path to JSON file (must end in '.json') or JSON string which will be passed "
"as config to the backend. For the Databricks backend, this should be a "
"cluster spec: see "
"https://docs.databricks.com/api/latest/jobs.html#jobsclusterspecnewcluster "
"for more information. Note that MLflow runs are currently launched against "
"a new cluster.")
@cli_args.NO_CONDA
@click.option("--storage-dir", envvar="MLFLOW_TMP_DIR",
help="Only valid when ``backend`` is local."
"MLflow downloads artifacts from distributed URIs passed to parameters of "
"type 'path' to subdirectories of storage_dir.")
@click.option("--run-id", metavar="RUN_ID",
help="If specified, the given run ID will be used instead of creating a new run. "
"Note: this argument is used internally by the MLflow project APIs "
"and should not be specified.")
def run(uri, entry_point, version, param_list, experiment_name, experiment_id, backend,
backend_config, no_conda, storage_dir, run_id):
"""
Run an MLflow project from the given URI.
For local runs, the run will block until it completes.
Otherwise, the project will run asynchronously.
If running locally (the default), the URI can be either a Git repository URI or a local path.
If running on Databricks, the URI must be a Git repository.
By default, Git projects run in a new working directory with the given parameters, while
local projects run from the project's root directory.
"""
if experiment_id is not None and experiment_name is not None:
eprint("Specify only one of 'experiment-name' or 'experiment-id' options.")
sys.exit(1)
param_dict = {}
for s in param_list:
index = s.find("=")
if index == -1:
eprint("Invalid format for -P parameter: '%s'. Use -P name=value." % s)
sys.exit(1)
name = s[:index]
value = s[index + 1:]
if name in param_dict:
eprint("Repeated parameter: '%s'" % name)
sys.exit(1)
param_dict[name] = value
cluster_spec_arg = backend_config
if backend_config is not None and os.path.splitext(backend_config)[-1] != ".json":
try:
cluster_spec_arg = json.loads(backend_config)
except ValueError as e:
eprint("Invalid cluster spec JSON. Parse error: %s" % e)
raise
try:
projects.run(
uri,
entry_point,
version,
experiment_name=experiment_name,
experiment_id=experiment_id,
parameters=param_dict,
backend=backend,
backend_config=cluster_spec_arg,
use_conda=(not no_conda),
storage_dir=storage_dir,
synchronous=backend == "local" or backend is None,
run_id=run_id,
)
except projects.ExecutionException as e:
_logger.error("=== %s ===", e)
sys.exit(1)
@cli.command()
@click.option("--backend-store-uri", metavar="PATH",
default=DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH,
help="URI to which to persist experiment and run data. Acceptable URIs are "
"SQLAlchemy-compatible database connection strings "
"(e.g. 'sqlite:///path/to/file.db') or local filesystem URIs "
"(e.g. 'file:///absolute/path/to/directory'). By default, data will be logged "
"to the ./mlruns directory.")
@click.option("--default-artifact-root", metavar="URI", default=None,
help="Path to local directory to store artifacts, for new experiments. "
"Note that this flag does not impact already-created experiments. "
"Default: " + DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH)
@click.option("--port", "-p", default=5000,
help="The port to listen on (default: 5000).")
def ui(backend_store_uri, default_artifact_root, port):
"""
Launch the MLflow tracking UI for local viewing of run results. To launch a production
server, use the "mlflow server" command instead.
The UI will be visible at http://localhost:5000 by default.
"""
# Ensure that both backend_store_uri and default_artifact_uri are set correctly.
if not backend_store_uri:
backend_store_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
if not default_artifact_root:
if _is_local_uri(backend_store_uri):
default_artifact_root = backend_store_uri
else:
default_artifact_root = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
# TODO: We eventually want to disable the write path in this version of the server.
try:
_run_server(backend_store_uri, default_artifact_root, "127.0.0.1", port, 1, None, [])
except ShellCommandException:
eprint("Running the mlflow server failed. Please see the logs above for details.")
sys.exit(1)
def _validate_static_prefix(ctx, param, value): # pylint: disable=unused-argument
"""
Validate that the static_prefix option starts with a "/" and does not end in a "/".
Conforms to the callback interface of click documented at
http://click.pocoo.org/5/options/#callbacks-for-validation.
"""
if value is not None:
if not value.startswith("/"):
raise UsageError("--static-prefix must begin with a '/'.")
if value.endswith("/"):
raise UsageError("--static-prefix should not end with a '/'.")
return value
@cli.command()
@click.option("--backend-store-uri", metavar="PATH",
default=DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH,
help="URI to which to persist experiment and run data. Acceptable URIs are "
"SQLAlchemy-compatible database connection strings "
"(e.g. 'sqlite:///path/to/file.db') or local filesystem URIs "
"(e.g. 'file:///absolute/path/to/directory'). By default, data will be logged "
"to the ./mlruns directory.")
@click.option("--default-artifact-root", metavar="URI", default=None,
help="Local or S3 URI to store artifacts, for new experiments. "
"Note that this flag does not impact already-created experiments. "
"Default: Within file store, if a file:/ URI is provided. If a sql backend is"
" used, then this option is required.")
@cli_args.HOST
@cli_args.PORT
@cli_args.WORKERS
@click.option("--static-prefix", default=None, callback=_validate_static_prefix,
help="A prefix which will be prepended to the path of all static paths.")
@click.option("--gunicorn-opts", default=None,
help="Additional command line options forwarded to gunicorn processes.")
def server(backend_store_uri, default_artifact_root, host, port,
workers, static_prefix, gunicorn_opts):
"""
Run the MLflow tracking server.
    The server listens on http://localhost:5000 by default, and only accepts connections from
the local machine. To let the server accept connections from other machines, you will need to
pass --host 0.0.0.0 to listen on all network interfaces (or a specific interface address).
"""
# Ensure that both backend_store_uri and default_artifact_uri are set correctly.
if not backend_store_uri:
backend_store_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
if not default_artifact_root:
if _is_local_uri(backend_store_uri):
default_artifact_root = backend_store_uri
else:
eprint("Option 'default-artifact-root' is required, when backend store is not "
"local file based.")
sys.exit(1)
try:
_run_server(backend_store_uri, default_artifact_root, host, port, workers, static_prefix,
gunicorn_opts)
except ShellCommandException:
eprint("Running the mlflow server failed. Please see the logs above for details.")
sys.exit(1)
cli.add_command(mlflow.models.cli.commands)
cli.add_command(mlflow.sagemaker.cli.commands)
cli.add_command(mlflow.experiments.commands)
cli.add_command(mlflow.store.cli.commands)
cli.add_command(mlflow.azureml.cli.commands)
cli.add_command(mlflow.runs.commands)
cli.add_command(mlflow.db.commands)
if __name__ == '__main__':
cli()
| 45.757813 | 100 | 0.657333 |
f854ef81d8ee8ae784c1ffbd5696df4a6131d956 | 5,777 | py | Python | remotools/remote_fs.py | vladstreltsin/remotools | aef8f3c7c040205d2b70f0e07f4a63f946d3a67d | [
"MIT"
] | null | null | null | remotools/remote_fs.py | vladstreltsin/remotools | aef8f3c7c040205d2b70f0e07f4a63f946d3a67d | [
"MIT"
] | null | null | null | remotools/remote_fs.py | vladstreltsin/remotools | aef8f3c7c040205d2b70f0e07f4a63f946d3a67d | [
"MIT"
] | null | null | null | import typing as tp
from remotools.remote_dict import RemoteDict, RemoteBlobDict, RemoteBlobDictWithLRUCache, CompositeRemoteDict
from remotools.savers import BaseSaver
from remotools.remotes import BaseRemote
class RemoteFSError(Exception):
pass
class RemoteFS(CompositeRemoteDict):
def _split_key(self, key):
if self.SEP not in key:
name, key = key, None
else:
name, key = key.split(self.SEP, maxsplit=1)
return name, key
def _get_parent(self):
if self._parent is None:
raise RemoteFSError('Top level reached')
return self._parent
def mkdir(self, key: tp.Optional[str]):
if key is None:
return self
else:
# Split key by separator
name, key = self._split_key(key)
# Stay at current level
if name == '.':
return self.mkdir(key)
# Go up one level
elif name == '..':
return self._get_parent().mkdir(key)
else:
# Create a RemoteFS at current level
if name not in self:
self[name] = self.__class__()
# Error if current level already contains a non-RemoteFS object
elif not isinstance(self[name], RemoteFS):
raise RemoteFSError("Path Already contains a non RemoteFS element")
# Go to to next level
return self[name].mkdir(key)
def __call__(self, key: tp.Optional[str]):
# TODO make it function either as cd or as open (for files)
return self.cd(key=key)
def cd(self, key: tp.Optional[str]):
if key is None:
return self
name, key = self._split_key(key)
if name == '.':
return self.cd(key)
elif name == '..':
return self._get_parent().cd(key)
else:
if name not in self:
raise RemoteFSError(f'No such RemoteFS {name}')
elif not isinstance(self[name], RemoteFS):
raise RemoteFSError(f"Path {name} is not a RemoteFS")
return self[name].cd(key)
def exists(self, key: str):
name, key = self._split_key(key)
if name == '.':
if key is None:
return True
else:
return self.exists(key)
if name == '..':
if self._parent is None:
return False
if key is None:
return True
return self._parent.exists(key)
if name not in self:
return False
if key is None:
return True
if not isinstance(self[name], RemoteFS):
return False
return self[name].exists(key)
def isfile(self, key: str):
name, key = self._split_key(key)
if name == '.':
if key is None:
return False
else:
return self.isfile(key)
if name == '..':
if self._parent is None:
return False
if key is None:
return False
return self._parent.isfile(key)
if name not in self:
return False
if key is None:
return not isinstance(self[name], RemoteFS)
if not isinstance(self[name], RemoteFS):
return False
return self[name].isfile(key)
def isdir(self, key: str):
name, key = self._split_key(key)
if name == '.':
if key is None:
return True
else:
return self.isdir(key)
if name == '..':
if self._parent is None:
return False
if key is None:
return True
return self._parent.isdir(key)
if name not in self:
return False
if key is None:
return isinstance(self[name], RemoteFS)
if not isinstance(self[name], RemoteFS):
return False
return self[name].isdir(key)
def touch(self, key: str,
saver_cls: tp.Optional[tp.Type[BaseSaver]]=None,
cache=True,
ignore_errors=False,
**kwargs):
if self.SEP in key:
path, key = key.rsplit(sep=self.SEP, maxsplit=1)
if key == '.' or key == '..':
raise RemoteFSError(f'Illegal file name {key}')
fs = self.cd(path)
else:
fs = self
if fs.exists(key):
if ignore_errors:
if not isinstance(fs[key], RemoteFS):
return fs[key]
else:
return None
else:
raise RemoteFSError(f'File exists: {key}')
# Create the appropriate RemoteDict
if saver_cls is None:
fs[key] = RemoteDict(**kwargs)
elif not cache:
fs[key] = RemoteBlobDict(saver_cls=saver_cls, **kwargs)
else:
fs[key] = RemoteBlobDictWithLRUCache(saver_cls=saver_cls, **kwargs)
return fs[key]
def open(self, key: str):
if self.SEP in key:
path, key = key.rsplit(sep=self.SEP, maxsplit=1)
if key == '.' or key == '..':
raise RemoteFSError(f'Illegal file name {key}')
fs = self.cd(path)
else:
fs = self
if not fs.isfile(key):
raise RemoteFSError(f'No such file {key}')
return fs[key]
def ls(self, key: tp.Optional[str]=None):
if key is None:
return list(self.keys())
else:
fs = self.cd(key)
return fs.ls(key=None)
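# Usage sketch (added for illustration, not part of the original module). It assumes the
# key separator inherited from CompositeRemoteDict is '/' and that RemoteDict needs no
# extra constructor arguments:
#   fs = RemoteFS()
#   fs.mkdir('datasets/images')        # nested "directories" are RemoteFS instances
#   fs.isdir('datasets/images')        # -> True
#   fs.touch('datasets/images/meta')   # a "file" backed by a RemoteDict
#   fs.isfile('datasets/images/meta')  # -> True
#   fs.ls('datasets')                  # -> ['images']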
| 25.449339 | 109 | 0.50978 |
d3e04ac56764d91658b77352db8676b4b16653b0 | 6,550 | py | Python | src/demos/python/vehicle/demo_VEH_SteeringController.py | lucasw/chrono | e79d8c761c718ecb4c796725cff37026f357da8c | [
"BSD-3-Clause"
] | null | null | null | src/demos/python/vehicle/demo_VEH_SteeringController.py | lucasw/chrono | e79d8c761c718ecb4c796725cff37026f357da8c | [
"BSD-3-Clause"
] | null | null | null | src/demos/python/vehicle/demo_VEH_SteeringController.py | lucasw/chrono | e79d8c761c718ecb4c796725cff37026f357da8c | [
"BSD-3-Clause"
] | null | null | null | # =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Demonstration of a steering path-follower and cruise control PID controllers.
#
# The vehicle reference frame has Z up, X towards the front of the vehicle, and
# Y pointing to the left.
#
# =============================================================================
import pychrono as chrono
import pychrono.vehicle as veh
import pychrono.irrlicht as irr
import math as m
# =============================================================================
def main():
#print("Copyright (c) 2017 projectchrono.org\nChrono version: ", CHRONO_VERSION , "\n\n")
# Create the HMMWV vehicle, set parameters, and initialize
my_hmmwv = veh.HMMWV_Full()
my_hmmwv.SetContactMethod(contact_method)
my_hmmwv.SetChassisFixed(False)
my_hmmwv.SetInitPosition(chrono.ChCoordsysD(initLoc, chrono.ChQuaternionD(1, 0, 0, 0)))
my_hmmwv.SetPowertrainType(powertrain_model)
my_hmmwv.SetDriveType(drive_type)
my_hmmwv.SetSteeringType(steering_type)
my_hmmwv.SetTireType(tire_model)
my_hmmwv.SetTireStepSize(tire_step_size)
my_hmmwv.Initialize()
my_hmmwv.SetChassisVisualizationType(chassis_vis_type)
my_hmmwv.SetSuspensionVisualizationType(suspension_vis_type)
my_hmmwv.SetSteeringVisualizationType(steering_vis_type)
my_hmmwv.SetWheelVisualizationType(wheel_vis_type)
my_hmmwv.SetTireVisualizationType(tire_vis_type)
# Create the terrain
terrain = veh.RigidTerrain(my_hmmwv.GetSystem())
if (contact_method == chrono.ChContactMethod_NSC):
patch_mat = chrono.ChMaterialSurfaceNSC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
elif (contact_method == chrono.ChContactMethod_SMC):
patch_mat = chrono.ChMaterialSurfaceSMC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
patch_mat.SetYoungModulus(2e7)
patch = terrain.AddPatch(patch_mat,
chrono.ChVectorD(0, 0, 0), chrono.ChVectorD(0, 0, 1),
300, 50)
patch.SetTexture(veh.GetDataFile("terrain/textures/tile4.jpg"), 200, 200)
patch.SetColor(chrono.ChColor(0.8, 0.8, 0.5))
terrain.Initialize()
# Create the path-follower, cruise-control driver
# Use a parameterized ISO double lane change (to left)
path = veh.DoubleLaneChangePath(initLoc, 13.5, 4.0, 11.0, 50.0, True)
driver = veh.ChPathFollowerDriver(my_hmmwv.GetVehicle(), path, "my_path", target_speed)
driver.GetSteeringController().SetLookAheadDistance(5)
driver.GetSteeringController().SetGains(0.8, 0, 0)
driver.GetSpeedController().SetGains(0.4, 0, 0)
driver.Initialize()
# Create the vehicle Irrlicht interface
vis = veh.ChWheeledVehicleVisualSystemIrrlicht()
my_hmmwv.GetVehicle().SetVisualSystem(vis)
vis.SetWindowTitle('HMMWV steering controller')
vis.SetWindowSize(1280, 1024)
vis.SetChaseCamera(chrono.ChVectorD(0.0, 0.0, 1.75), 6.0, 0.5)
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddTypicalLights()
vis.AddSkyBox()
# Visualization of controller points (sentinel & target)
ballS = vis.GetSceneManager().addSphereSceneNode(0.1);
ballT = vis.GetSceneManager().addSphereSceneNode(0.1);
ballS.getMaterial(0).EmissiveColor = irr.SColor(0, 255, 0, 0);
ballT.getMaterial(0).EmissiveColor = irr.SColor(0, 0, 255, 0);
# Simulation loop
realtime_timer = chrono.ChRealtimeStepTimer()
while vis.Run() :
time = my_hmmwv.GetSystem().GetChTime()
# End simulation
if (time >= t_end):
break
# Update sentinel and target location markers for the path-follower controller.
pS = driver.GetSteeringController().GetSentinelLocation()
pT = driver.GetSteeringController().GetTargetLocation()
ballS.setPosition(irr.vector3df(pS.x, pS.y, pS.z))
ballT.setPosition(irr.vector3df(pT.x, pT.y, pT.z))
# Draw scene
vis.BeginScene()
vis.DrawAll()
vis.EndScene()
# Get driver inputs
driver_inputs = driver.GetInputs()
# Update modules (process inputs from other modules)
driver.Synchronize(time)
terrain.Synchronize(time)
my_hmmwv.Synchronize(time, driver_inputs, terrain)
vis.Synchronize("", driver_inputs)
# Advance simulation for one timestep for all modules
driver.Advance(step_size)
terrain.Advance(step_size)
my_hmmwv.Advance(step_size)
vis.Advance(step_size)
# Spin in place for real time to catch up
realtime_timer.Spin(step_size)
return 0
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')
# Initial vehicle location
initLoc = chrono.ChVectorD(-50, 0, 0.7)
# Vehicle target speed (cruise-control)
target_speed = 12
# Visualization type for vehicle parts (PRIMITIVES, MESH, or NONE)
chassis_vis_type = veh.VisualizationType_NONE
suspension_vis_type = veh.VisualizationType_PRIMITIVES
steering_vis_type = veh.VisualizationType_PRIMITIVES
wheel_vis_type = veh.VisualizationType_MESH
tire_vis_type = veh.VisualizationType_MESH
# Type of powertrain model (SHAFTS, SIMPLE)
powertrain_model = veh.PowertrainModelType_SHAFTS
# Drive type (FWD, RWD, or AWD)
drive_type = veh.DrivelineTypeWV_AWD
# Steering type (PITMAN_ARM or PITMAN_ARM_SHAFTS)
steering_type = veh.SteeringTypeWV_PITMAN_ARM
# Type of tire model (RIGID, RIGID_MESH, PACEJKA, LUGRE, FIALA, PAC89)
tire_model = veh.TireModelType_TMEASY
# Contact method
contact_method = chrono.ChContactMethod_SMC
# Simulation step sizes
step_size = 2e-3;
tire_step_size = 1e-3;
# Simulation end time
t_end = 100;
main()
| 36.592179 | 96 | 0.679542 |
943afea58b9a15da0bb070b81dd651edb4b22afa | 1,716 | py | Python | bk/z_main_bk.py | racheliurui/vscode-hello-python | de4da54b2787fe1d0a1c624864e2773f2ec99e77 | [
"MIT"
] | null | null | null | bk/z_main_bk.py | racheliurui/vscode-hello-python | de4da54b2787fe1d0a1c624864e2773f2ec99e77 | [
"MIT"
] | null | null | null | bk/z_main_bk.py | racheliurui/vscode-hello-python | de4da54b2787fe1d0a1c624864e2773f2ec99e77 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''
Main entrance to the logic
1) Read the config from Config.py
2) Initiate Sensor Reading from SensorReading.py
3) Keep updating the Sensor Reading
'''
from threading import Thread
import debug
import ggpub
import refreshSensorReading
import button
import sound
import vars
import movemotor
import powerSupply
####Debug
import debug_devicetest
import os
import sys
import time
#https://sites.google.com/site/ev3devpython/learn_ev3_python/threads
def main():
main_content()
def debug_print(*args, **kwargs):
'''Print debug messages to stderr.
This shows up in the output panel in VS Code.
'''
print(*args, **kwargs, file=sys.stderr)
def debug_getreading():
debug_print(powerSupply.getPowerSupplyReading())
#https://sites.google.com/site/ev3devpython/learn_ev3_python/threads
def main_content():
t5=Thread(target=ggpub.publish,args=())
t5.daemon=True
t5.start()
movemotor.goforward(10)
while not vars.buttonPressed :
pass
#https://sites.google.com/site/ev3devpython/learn_ev3_python/threads
def main_content_multithread():
t1=Thread(target=refreshSensorReading.refresh_colorsensor_reading_lt,args=())
t2=Thread(target=refreshSensorReading.refresh_colorsensor_reading_rt,args=())
t3=Thread(target=refreshSensorReading.refresh_ultrasonicSensorReading,args=())
t4=Thread(target=button.wait_buttonPressed,args=())
t5=Thread(target=ggpub.publish,args=())
t1.daemon=True
t2.daemon=True
t3.daemon=True
t4.daemon=True
t5.daemon=True
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
movemotor.goforward(10)
while not vars.buttonPressed :
pass
if __name__ == '__main__':
main()
| 23.189189 | 81 | 0.744172 |
bd8f06d692d6a165e2e8fba9350693da2930281f | 1,298 | py | Python | django2/myblog/blog/migrations/0001_initial.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | null | null | null | django2/myblog/blog/migrations/0001_initial.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | 1 | 2020-07-17T09:25:42.000Z | 2020-07-17T09:25:42.000Z | django2/myblog/blog/migrations/0001_initial.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2018-08-06 02:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Pose',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-publish',),
},
),
]
| 37.085714 | 147 | 0.608629 |
f1dc90c8bea50784feeb341285a3bfed13590a49 | 5,515 | py | Python | spotlight/losses.py | DEVESHTARASIA/spotlight | 675de3b57523b850b9cd2b4ceb29a8205e3221e0 | [
"MIT"
] | 2 | 2020-02-09T03:50:52.000Z | 2021-06-07T10:06:55.000Z | spotlight/losses.py | DEVESHTARASIA/spotlight | 675de3b57523b850b9cd2b4ceb29a8205e3221e0 | [
"MIT"
] | null | null | null | spotlight/losses.py | DEVESHTARASIA/spotlight | 675de3b57523b850b9cd2b4ceb29a8205e3221e0 | [
"MIT"
] | 2 | 2017-09-17T01:28:44.000Z | 2018-04-28T22:26:57.000Z | """
Loss functions for recommender models.
The pointwise, BPR, and hinge losses are a good fit for
implicit feedback models trained through negative sampling.
The regression and Poisson losses are used for explicit feedback
models.
"""
import torch
import torch.nn.functional as F
from spotlight.torch_utils import assert_no_grad
def pointwise_loss(positive_predictions, negative_predictions, mask=None):
"""
Logistic loss function.
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Tensor containing predictions for sampled negative items.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
"""
positives_loss = (1.0 - F.sigmoid(positive_predictions))
negatives_loss = F.sigmoid(negative_predictions)
loss = (positives_loss + negatives_loss)
if mask is not None:
mask = mask.float()
loss = loss * mask
return loss.sum() / mask.sum()
return loss.mean()
def bpr_loss(positive_predictions, negative_predictions, mask=None):
"""
Bayesian Personalised Ranking [1]_ pairwise loss function.
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Tensor containing predictions for sampled negative items.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
References
----------
.. [1] Rendle, Steffen, et al. "BPR: Bayesian personalized ranking from
implicit feedback." Proceedings of the twenty-fifth conference on
uncertainty in artificial intelligence. AUAI Press, 2009.
"""
loss = (1.0 - F.sigmoid(positive_predictions -
negative_predictions))
if mask is not None:
mask = mask.float()
loss = loss * mask
return loss.sum() / mask.sum()
return loss.mean()
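# Illustrative usage of bpr_loss (added example, not part of the original module):
#   pos = torch.tensor([0.9, 0.7])   # model scores for observed (positive) items
#   neg = torch.tensor([0.1, 0.8])   # model scores for sampled negative items
#   bpr_loss(pos, neg)               # mean of 1 - sigmoid(pos - neg)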
def hinge_loss(positive_predictions, negative_predictions, mask=None):
"""
Hinge pairwise loss function.
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Tensor containing predictions for sampled negative items.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
"""
loss = torch.clamp(negative_predictions -
positive_predictions +
1.0, 0.0)
if mask is not None:
mask = mask.float()
loss = loss * mask
return loss.sum() / mask.sum()
return loss.mean()
def adaptive_hinge_loss(positive_predictions, negative_predictions, mask=None):
"""
Adaptive hinge pairwise loss function. Takes a set of predictions
for implicitly negative items, and selects those that are highest,
    thus sampling those negatives that are closest to violating the
ranking implicit in the pattern of user interactions.
Approximates the idea of weighted approximate-rank pairwise loss
introduced in [2]_
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Iterable of tensors containing predictions for sampled negative items.
More tensors increase the likelihood of finding ranking-violating
pairs, but risk overfitting.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
References
----------
.. [2] Weston, Jason, Samy Bengio, and Nicolas Usunier. "Wsabie:
Scaling up to large vocabulary image annotation." IJCAI.
Vol. 11. 2011.
"""
stacked_negative_predictions = torch.stack(negative_predictions, dim=0)
highest_negative_predictions, _ = torch.max(stacked_negative_predictions, 0)
return hinge_loss(positive_predictions, highest_negative_predictions.squeeze(), mask=mask)
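# Illustrative usage of adaptive_hinge_loss (added example): several candidate negatives
# are scored per interaction and only the hardest (highest-scoring) one is penalised.
#   pos = torch.tensor([0.9, 0.7])
#   negs = [torch.tensor([0.1, 0.8]), torch.tensor([0.3, 0.2])]
#   adaptive_hinge_loss(pos, negs)   # hinge loss against the element-wise max of negs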
def regression_loss(observed_ratings, predicted_ratings):
"""
Regression loss.
Parameters
----------
observed_ratings: tensor
Tensor containing observed ratings.
    predicted_ratings: tensor
Tensor containing rating predictions.
Returns
-------
loss, float
The mean value of the loss function.
"""
assert_no_grad(observed_ratings)
return ((observed_ratings - predicted_ratings) ** 2).mean()
def poisson_loss(observed_ratings, predicted_ratings):
"""
Poisson loss.
Parameters
----------
observed_ratings: tensor
Tensor containing observed ratings.
    predicted_ratings: tensor
Tensor containing rating predictions.
Returns
-------
loss, float
The mean value of the loss function.
"""
assert_no_grad(observed_ratings)
return (predicted_ratings - observed_ratings * torch.log(predicted_ratings)).mean()
| 25.532407 | 94 | 0.667996 |
e0b6b322561c6c165cdfc82e799b926402312bc4 | 2,724 | py | Python | ddsp/training/preprocessing.py | ana-simionescu/ddsp | 9f37ff66e79cf912c3377ba1beddb220196aa1a3 | [
"Apache-2.0"
] | 1 | 2021-12-14T04:10:45.000Z | 2021-12-14T04:10:45.000Z | ddsp/training/preprocessing.py | ana-simionescu/ddsp | 9f37ff66e79cf912c3377ba1beddb220196aa1a3 | [
"Apache-2.0"
] | null | null | null | ddsp/training/preprocessing.py | ana-simionescu/ddsp | 9f37ff66e79cf912c3377ba1beddb220196aa1a3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of preprocess functions."""
import ddsp
import gin
import tensorflow.compat.v2 as tf
hz_to_midi = ddsp.core.hz_to_midi
F0_RANGE = ddsp.spectral_ops.F0_RANGE
LD_RANGE = ddsp.spectral_ops.LD_RANGE
# ---------------------- Preprocess Helpers ------------------------------------
def at_least_3d(x):
"""Optionally adds time, batch, then channel dimension."""
x = x[tf.newaxis] if not x.shape else x
x = x[tf.newaxis, :] if len(x.shape) == 1 else x
x = x[:, :, tf.newaxis] if len(x.shape) == 2 else x
return x
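# Shape behaviour of at_least_3d (worked examples, added for clarity):
#   ()        -> (1, 1, 1)    a scalar gains batch, time and channel axes
#   (n,)      -> (1, n, 1)    a 1-D signal gains batch and channel axes
#   (b, n)    -> (b, n, 1)    a 2-D batch gains a channel axis
#   (b, n, c) -> unchanged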
# ---------------------- Preprocess objects ------------------------------------
class Preprocessor(object):
"""Base class for chaining a series of preprocessing functions."""
def __init__(self):
pass
def __call__(self, features, training=True):
"""Get outputs after preprocessing functions.
Copy inputs if in graph mode to preserve pure function (no side-effects).
Args:
features: dict of feature key and tensors
      training: boolean for controlling training-specific preprocessing behavior
Returns:
Dictionary of transformed features
"""
return ddsp.core.copy_if_tf_function(features)
@gin.register
class DefaultPreprocessor(Preprocessor):
"""Default class that resamples features and adds `f0_hz` key."""
def __init__(self, time_steps=1000):
super().__init__()
self.time_steps = time_steps
def __call__(self, features, training=True):
features = super().__call__(features, training)
return self._default_processing(features)
def _default_processing(self, features):
"""Always resample to `time_steps` and scale 'loudness_db' and 'f0_hz'."""
for k in ['loudness_db', 'f0_hz']:
features[k] = at_least_3d(features[k])
features[k] = ddsp.core.resample(features[k], n_timesteps=self.time_steps)
# For NN training, scale frequency and loudness to the range [0, 1].
    # Log-scale f0 features. Loudness is shifted from [-1, 0] to [0, 1].
features['f0_scaled'] = hz_to_midi(features['f0_hz']) / F0_RANGE
features['ld_scaled'] = (features['loudness_db'] / LD_RANGE) + 1.0
return features
| 33.62963 | 80 | 0.686858 |
f50e18e78659029a79e8a7c700233f06aa151ac6 | 5,881 | py | Python | src/test_infinitam.py | DaoyiG/Dense-Monocular-3D-Mapping-for-AR | 1ccec6f7a886737623390912a5e1c35cac3e7b4d | [
"MIT"
] | 9 | 2021-03-24T06:24:51.000Z | 2021-12-06T03:32:59.000Z | src/test_infinitam.py | DaoyiG/Dense-Monocular-3D-Mapping-for-AR | 1ccec6f7a886737623390912a5e1c35cac3e7b4d | [
"MIT"
] | null | null | null | src/test_infinitam.py | DaoyiG/Dense-Monocular-3D-Mapping-for-AR | 1ccec6f7a886737623390912a5e1c35cac3e7b4d | [
"MIT"
] | 2 | 2020-12-28T21:12:33.000Z | 2021-03-24T06:24:50.000Z | # Copyright Niantic 2019. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the Monodepth2 licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
from __future__ import absolute_import, division, print_function
import os
import sys
import glob
import argparse
import numpy as np
import PIL.Image as pil
import matplotlib as mpl
import matplotlib.cm as cm
import torch
from torchvision import transforms, datasets
import networks
from layers import disp_to_depth
from utils import download_model_if_doesnt_exist
import cv2
def parse_args():
parser = argparse.ArgumentParser(
        description='Simple testing function for Monodepthv2 models.')
parser.add_argument('--image_path', type=str,
help='path to a test image or folder of images', required=True)
parser.add_argument('--output_depth', type=str,
help='path to depth output of the model', required=True)
parser.add_argument('--output_npy', type=str,
help='path to pose output of the model', required=True)
parser.add_argument('--model_name', type=str,
help='name of a pretrained model to use',
choices=[
"mono_640x192",
"stereo_640x192",
"mono+stereo_640x192",
"mono_no_pt_640x192",
"stereo_no_pt_640x192",
"mono+stereo_no_pt_640x192",
"mono_1024x320",
"stereo_1024x320",
"mono+stereo_1024x320"])
parser.add_argument('--ext', type=str,
help='image extension to search for in folder', default="jpg")
parser.add_argument("--no_cuda",
help='if set, disables CUDA',
action='store_true')
return parser.parse_args()
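# Example invocation (added for illustration; the paths are placeholders):
#   python test_infinitam.py --image_path ./frames --output_depth ./depth \
#       --output_npy ./poses --model_name mono_640x192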
def test_simple(args):
"""Function to predict for a single image or folder of images
"""
assert args.model_name is not None, \
"You must specify the --model_name parameter; see README.md for an example"
if torch.cuda.is_available() and not args.no_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
download_model_if_doesnt_exist(args.model_name)
model_path = os.path.join("models", args.model_name)
# print("-> Loading model from ", model_path)
encoder_path = os.path.join(model_path, "encoder.pth")
depth_decoder_path = os.path.join(model_path, "depth.pth")
# LOADING PRETRAINED MODEL
# print(" Loading pretrained encoder")
encoder = networks.ResnetEncoder(18, False)
loaded_dict_enc = torch.load(encoder_path, map_location=device)
# extract the height and width of image that this model was trained with
feed_height = loaded_dict_enc['height']
feed_width = loaded_dict_enc['width']
filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()}
encoder.load_state_dict(filtered_dict_enc)
encoder.to(device)
encoder.eval()
# print(" Loading pretrained decoder")
depth_decoder = networks.DepthDecoder(
num_ch_enc=encoder.num_ch_enc, scales=range(4))
loaded_dict = torch.load(depth_decoder_path, map_location=device)
depth_decoder.load_state_dict(loaded_dict)
depth_decoder.to(device)
depth_decoder.eval()
# FINDING INPUT IMAGES
if os.path.isfile(args.image_path):
# Only testing on a single image
paths = [args.image_path]
elif os.path.isdir(args.image_path):
# Searching folder for images
paths = glob.glob(os.path.join(args.image_path, '*.{}'.format(args.ext)))
else:
raise Exception("Can not find args.image_path: {}".format(args.image_path))
# SETTING OUTPUT PATH FOR DEPTH IMAGES
if os.path.isfile(args.output_depth):
depth_output_directory = os.path.dirname(args.output_depth)
elif os.path.isdir(args.output_depth):
depth_output_directory = args.output_depth
else:
raise Exception("Can not find args.output_depth: {}".format(args.output_depth))
# PREDICTING ON EACH IMAGE IN TURN
with torch.no_grad():
for idx, image_path in enumerate(paths):
if image_path.endswith("_disp.jpg"):
# don't try to predict disparity for a disparity image!
continue
# Load image and preprocess
input_image = pil.open(image_path).convert('RGB')
original_width, original_height = input_image.size
input_image = input_image.resize((feed_width, feed_height), pil.LANCZOS)
input_image = transforms.ToTensor()(input_image).unsqueeze(0)
# PREDICTION
input_image = input_image.to(device)
features = encoder(input_image)
outputs = depth_decoder(features)
disp = outputs[("disp", 0)]
disp_resized = torch.nn.functional.interpolate(
disp, (original_height, original_width), mode="bilinear", align_corners=False)
            # Construct the output file name for this input image
output_name = os.path.splitext(os.path.basename(image_path))[0]
# Saving grey scale disparity image with type uint16
disp_resized *= 10000.0
disp_resized_np = disp_resized.squeeze().cpu().numpy().astype(np.uint16)
name_dest_im = os.path.join(depth_output_directory, "{}.png".format(output_name))
cv2.imwrite(name_dest_im, disp_resized_np)
print("Saved depth prediction for InfiniTAM to {}".format(name_dest_im))
# print('-> Done!')
if __name__ == '__main__':
args = parse_args()
test_simple(args)
| 37.221519 | 95 | 0.642408 |
a4e9bad7e491ba38ba9cefc7ccc10ed3f9d8ca8e | 1,125 | py | Python | FeatureExtractionModule/src/bandreader.py | Aekai/Wi-Mind | a02a2f4cd10fc362e6a17d3c67c2662c90b1a980 | [
"0BSD"
] | 1 | 2021-08-14T13:49:51.000Z | 2021-08-14T13:49:51.000Z | FeatureExtractionModule/src/bandreader.py | Aekai/Wi-Mind | a02a2f4cd10fc362e6a17d3c67c2662c90b1a980 | [
"0BSD"
] | null | null | null | FeatureExtractionModule/src/bandreader.py | Aekai/Wi-Mind | a02a2f4cd10fc362e6a17d3c67c2662c90b1a980 | [
"0BSD"
] | 1 | 2020-08-18T08:39:58.000Z | 2020-08-18T08:39:58.000Z | import numpy as np
import pandas as pd
from datetime import datetime
class HeartRateBand:
"""Provides basic functions to extract heart beat rate from the given path.
Heart beat rates are acquired with Microsoft Band and custom Android app."""
def __init__(self, path, ident):
"""
        :param path: (str) Path to the files
:param ident: (str) User identifier
"""
self.path = path
self.ident = ident
def load(self):
"""Parse and return time of acquired HR and HR value for this time."""
data = pd.read_csv(self.path+self.ident+'.txt', sep=';|,', engine='python')
data = data.dropna(axis=1, how='all')
date_time, rate = data.iloc[:, 0], data.iloc[:, 1]
rate = rate.tolist()
date_time = [convert_to_epoch(i) for i in date_time]
return [np.array(date_time), np.array(rate)]
def convert_to_epoch(value):
"""Converts to Unix epoch time format"""
datetime_object = datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
return int(datetime_object.timestamp()) * 1000 + int(datetime_object.microsecond / 1000)
| 33.088235 | 92 | 0.637333 |
bb7c6a2415566149645d785a0aa835d67216e6c8 | 1,456 | py | Python | trace/google/cloud/trace_v1/types.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | 1 | 2019-04-16T08:13:06.000Z | 2019-04-16T08:13:06.000Z | trace/google/cloud/trace_v1/types.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | null | null | null | trace/google/cloud/trace_v1/types.py | erikwebb/google-cloud-python | 288a878e9a07239015c78a193eca1cc15e926127 | [
"Apache-2.0"
] | 1 | 2020-11-15T11:44:36.000Z | 2020-11-15T11:44:36.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.api import http_pb2
from google.cloud.trace_v1.proto import trace_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
_shared_modules = [http_pb2, descriptor_pb2, empty_pb2, timestamp_pb2]
_local_modules = [trace_pb2]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = "google.cloud.trace_v1.types"
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
| 32.355556 | 74 | 0.758929 |
96f96877dc9bd67c94f75abe620e7d5d7ea99359 | 1,408 | py | Python | misc/renumber.py | DaviNakamuraCardoso/Projeto-ATMCP | ac4bbde04294681091d916e83c20c27be67c623a | [
"MIT"
] | null | null | null | misc/renumber.py | DaviNakamuraCardoso/Projeto-ATMCP | ac4bbde04294681091d916e83c20c27be67c623a | [
"MIT"
] | null | null | null | misc/renumber.py | DaviNakamuraCardoso/Projeto-ATMCP | ac4bbde04294681091d916e83c20c27be67c623a | [
"MIT"
] | null | null | null | # renumber performs a renumbering of the files in a folder
import re
import os
import shutil
# define a regular expression to detect the numbered files
numbered_file = re.compile(r'''^(.*?)
(\d{1,3})
(\..*)$
''', re.VERBOSE)
def renumber_file(dir, new_dir):
"""Esta função redefine os números de arquivo em uma pasta, para que não haja lacunas
:param dir: Nome do diretório em que se deseja fazer a renumeração
:return: Os arquivos são retornados renomeados, na mesma pasta
"""
numbered_files = []
for filename in os.listdir(dir):
if numbered_file.search(filename):
numbered_files.append(filename)
numbered_files.sort()
a = 1
new_names = []
for filename in numbered_files:
mo = numbered_file.search(filename)
if len(str(a)) == 1:
new_filename = mo.group(1) + '00' + str(a) + mo.group(3)
elif len(str(a)) == 2:
new_filename = mo.group(1) + '0' + str(a) + mo.group(3)
else:
new_filename = mo.group(1) + str(a) + mo.group(3)
new_names.append(new_filename)
a += 1
for filename, new_filename in zip(numbered_files, new_names):
actual_doc = os.path.join(dir, filename)
next_doc = os.path.join(new_dir, new_filename)
shutil.move(actual_doc, next_doc)
renumber_file('..\\documentos', '..\\MyPyhtonScriptsBackup')
| 30.608696 | 89 | 0.636364 |
8943d28575d13c32ef6bcb04b4fb1bf7f6f1c473 | 1,998 | py | Python | boost/tools/build/v2/test/copy_time.py | randolphwong/mcsema | eb5b376736e7f57ff0a61f7e4e5a436bbb874720 | [
"BSD-3-Clause"
] | 130 | 2015-01-04T23:34:53.000Z | 2022-02-09T19:22:35.000Z | boost/tools/build/v2/test/copy_time.py | randolphwong/mcsema | eb5b376736e7f57ff0a61f7e4e5a436bbb874720 | [
"BSD-3-Clause"
] | 13 | 2015-01-12T19:30:52.000Z | 2020-10-27T10:53:56.000Z | boost/tools/build/v2/test/copy_time.py | randolphwong/mcsema | eb5b376736e7f57ff0a61f7e4e5a436bbb874720 | [
"BSD-3-Clause"
] | 25 | 2015-01-15T16:33:54.000Z | 2021-02-27T17:45:57.000Z | #!/usr/bin/python
#
# Copyright (c) 2008
# Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test that the common.copy rule set the modification
# date of the new file the current time.
import BoostBuild
tester = BoostBuild.Tester()
tester.write("test1.cpp", """
#include <iostream>
template<bool, int M, class Next>
struct time_waster {
typedef typename time_waster<true, M-1, time_waster>::type type1;
typedef typename time_waster<false, M-1, time_waster>::type type2;
typedef void type;
};
template<bool B, class Next>
struct time_waster<B, 0, Next> {
typedef void type;
};
typedef time_waster<true, 10, void>::type type;
int f() { return 0; }
""")
tester.write("test2.cpp", """
#include <iostream>
template<bool, int M, class Next>
struct time_waster {
typedef typename time_waster<true, M-1, time_waster>::type type1;
typedef typename time_waster<false, M-1, time_waster>::type type2;
typedef void type;
};
template<bool B, class Next>
struct time_waster<B, 0, Next> {
typedef void type;
};
typedef time_waster<true, 10, void>::type type;
int g() { return 0; }
""")
tester.write("jamroot.jam", """
obj test2 : test2.cpp ;
obj test1 : test1.cpp : <dependency>test2 ;
install test2i : test2 : <dependency>test1 ;
""")
tester.run_build_system()
tester.expect_addition("bin/$toolset/debug/test2.obj")
tester.expect_addition("bin/$toolset/debug/test1.obj")
tester.expect_addition("test2i/test2.obj")
tester.expect_nothing_more()
test2src = tester.read("test2i/test2.obj")
test2dest = tester.read("bin/$toolset/debug/test2.obj")
if test2src != test2dest:
BoostBuild.annotation("failure", "The object file was not copied correctly")
tester.fail_test(1)
del test2src
del test2dest
tester.run_build_system("-d1")
tester.expect_output_line("common.copy*", expected_to_exist=False)
tester.expect_nothing_more()
tester.cleanup()
| 25.948052 | 80 | 0.729229 |
18a9e51c089bfe1b37f9ec0e0801e8334b62c7fa | 1,151 | py | Python | manage.py | saengate/wagtail-blog | db875d8f455fc92d5f9ac7ce98a4efdac1744381 | [
"Apache-2.0"
] | null | null | null | manage.py | saengate/wagtail-blog | db875d8f455fc92d5f9ac7ce98a4efdac1744381 | [
"Apache-2.0"
] | 9 | 2021-04-06T18:47:10.000Z | 2021-09-22T19:48:02.000Z | manage.py | saengate/wagtail-blog | db875d8f455fc92d5f9ac7ce98a4efdac1744381 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
import json
def main():
environments = {
'local': 'local',
'production': 'production',
}
environment = os.environ.get('ENV', 'local')
settings = os.environ.setdefault(
'DJANGO_SETTINGS_MODULE', 'config.settings.' + environment)
env = environments[environment]
os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings)
with open('zappa_settings.json') as environment:
data = json.load(environment)
env_vars = data[env]['environment_variables']
for item in env_vars:
os.environ.setdefault(item, env_vars[item])
try:
from django.core.management import execute_from_command_line # NOQA
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?" # NOQA
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.512821 | 76 | 0.662033 |
9c0fb62b49e81659e4786945d908d34df967ef82 | 49,948 | py | Python | pennylane/operation.py | kareem1925/pennylane | 04bb5ba0fcced558e1273b94b3ea8c39622c5ca4 | [
"Apache-2.0"
] | null | null | null | pennylane/operation.py | kareem1925/pennylane | 04bb5ba0fcced558e1273b94b3ea8c39622c5ca4 | [
"Apache-2.0"
] | null | null | null | pennylane/operation.py | kareem1925/pennylane | 04bb5ba0fcced558e1273b94b3ea8c39622c5ca4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=protected-access
r"""
This module contains the abstract base classes for defining PennyLane
operations and observables.
Description
-----------
Qubit Operations
~~~~~~~~~~~~~~~~
The :class:`Operator` class serves as a base class for operators,
and is inherited by both the :class:`Observable` class and the
:class:`Operation` class. These classes are subclassed to implement quantum operations
and measure observables in PennyLane.
* Each :class:`~.Operator` subclass represents a general type of
map between physical states. Each instance of these subclasses
represents either
- an application of the operator or
- an instruction to measure and return the respective result.
Operators act on a sequence of wires (subsystems) using given parameter values.
* Each :class:`~.Operation` subclass represents a type of quantum operation,
for example a unitary quantum gate. Each instance of these subclasses
represents an application of the operation with given parameter values to
a given sequence of wires (subsystems).
* Each :class:`~.Observable` subclass represents a type of physical observable.
Each instance of these subclasses represents an instruction to measure and
return the respective result for the given parameter values on a
sequence of wires (subsystems).
Differentiation
^^^^^^^^^^^^^^^
In general, an :class:`Operation` is differentiable (at least using the finite-difference
method) with respect to a parameter iff
* the domain of that parameter is continuous.
For an :class:`Operation` to be differentiable with respect to a parameter using the
analytic method of differentiation, it must satisfy an additional constraint:
* the parameter domain must be real.
.. note::
These conditions are *not* sufficient for analytic differentiation. For example,
CV gates must also define a matrix representing their Heisenberg linear
transformation on the quadrature operators.
For gates that *are* supported via the analytic method, the gradient recipe
(with multiplier :math:`c_k`, parameter shift :math:`s_k` for parameter :math:`\phi_k`)
works as follows:
.. math:: \frac{\partial}{\partial\phi_k}O = c_k\left[O(\phi_k+s_k)-O(\phi_k-s_k)\right].
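For instance, for standard qubit rotation gates such as :class:`~.RX`, a common choice is
:math:`c_k = 1/2` and :math:`s_k = \pi/2`, giving
.. math:: \frac{\partial}{\partial\phi_k}O = \frac{1}{2}\left[O(\phi_k+\pi/2)-O(\phi_k-\pi/2)\right].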
CV Operation base classes
~~~~~~~~~~~~~~~~~~~~~~~~~
Due to additional requirements, continuous-variable (CV) operations must subclass the
:class:`~.CVOperation` or :class:`~.CVObservable` classes instead of :class:`~.Operation`
and :class:`~.Observable`.
Differentiation
^^^^^^^^^^^^^^^
In addition, to enable gradient computation using the analytic method for Gaussian CV operations, you need to
provide the static class method :meth:`~.CV._heisenberg_rep` that returns the Heisenberg representation of
the operation given its list of parameters, namely:
* For Gaussian CV Operations this method should return the matrix of the linear transformation carried out by the
operation on the vector of quadrature operators :math:`\mathbf{r}` for the given parameter
values.
* For Gaussian CV Observables this method should return a real vector (first-order observables)
or symmetric matrix (second-order observables) of coefficients of the quadrature
operators :math:`\x` and :math:`\p`.
PennyLane uses the convention :math:`\mathbf{r} = (\I, \x, \p)` for single-mode operations and observables
and :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)` for multi-mode operations and observables.
.. note::
Non-Gaussian CV operations and observables are currently only supported via
the finite-difference method of gradient computation.
"""
import abc
import itertools
import functools
import numbers
from collections.abc import Sequence
from enum import Enum, IntEnum
from pennylane.wires import Wires
import numpy as np
from numpy.linalg import multi_dot
import pennylane as qml
from .utils import pauli_eigs
from .variable import Variable
# =============================================================================
# Wire types
# =============================================================================
class ActsOn(IntEnum):
"""Integer enumeration class
to represent the number of wires
an operation acts on"""
AnyWires = -1
AllWires = 0
AllWires = ActsOn.AllWires
"""IntEnum: An enumeration which represents all wires in the
subsystem. It is equivalent to an integer with value 0."""
AnyWires = ActsOn.AnyWires
"""IntEnum: An enumeration which represents any wires in the
subsystem. It is equivalent to an integer with value -1."""
# =============================================================================
# ObservableReturnTypes types
# =============================================================================
class ObservableReturnTypes(Enum):
"""Enumeration class to represent the return types of an observable."""
Sample = "sample"
Variance = "var"
Expectation = "expval"
Probability = "probs"
def __repr__(self):
"""String representation of the return types."""
return str(self.value)
Sample = ObservableReturnTypes.Sample
"""Enum: An enumeration which represents sampling an observable."""
Variance = ObservableReturnTypes.Variance
"""Enum: An enumeration which represents returning the variance of
an observable on specified wires."""
Expectation = ObservableReturnTypes.Expectation
"""Enum: An enumeration which represents returning the expectation
value of an observable on specified wires."""
Probability = ObservableReturnTypes.Probability
"""Enum: An enumeration which represents returning probabilities
of all computational basis states."""
# =============================================================================
# Class property
# =============================================================================
class ClassPropertyDescriptor: # pragma: no cover
"""Allows a class property to be defined"""
# pylint: disable=too-few-public-methods
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, klass=None):
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
if not self.fset:
raise AttributeError("can't set attribute")
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def setter(self, func):
"""Set the function as a class method, and store as an attribute."""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(func):
"""The class property decorator"""
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
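# Illustrative use of ``classproperty`` (added example, not part of the original file):
#   class MyOperator:
#       @classproperty
#       def num_wires(cls):
#           return 2
#   MyOperator.num_wires  # -> 2, available on the class itself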
# =============================================================================
# Base Operator class
# =============================================================================
class Operator(abc.ABC):
r"""Base class for quantum operators supported by a device.
The following class attributes must be defined for all Operators:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
Args:
params (tuple[float, int, array, Variable]): operator parameters
Keyword Args:
wires (Iterable): Iterable containing the wires that the operator acts
on. If not given, args[-1] is interpreted as wires.
do_queue (bool): Indicates whether the operator should be
immediately pushed into the Operator queue.
"""
do_check_domain = True #: bool: flag: should we perform a domain check for the parameters?
@classmethod
def _matrix(cls, *params):
"""Matrix representation of the operator
in the computational basis.
This is a *class method* that should be defined for all
new operations and observables, that returns the matrix representing
the operator in the computational basis.
This private method allows matrices to be computed
directly without instantiating the operators first.
To return the matrices of *instantiated* operators,
please use the :attr:`~.Operator.matrix` property instead.
**Example:**
>>> qml.RY._matrix(0.5)
>>> array([[ 0.96891242+0.j, -0.24740396+0.j],
[ 0.24740396+0.j, 0.96891242+0.j]])
Returns:
array: matrix representation
"""
raise NotImplementedError
@property
def matrix(self):
r"""Matrix representation of an instantiated operator
in the computational basis.
**Example:**
>>> U = qml.RY(0.5, wires=1)
>>> U.matrix
>>> array([[ 0.96891242+0.j, -0.24740396+0.j],
[ 0.24740396+0.j, 0.96891242+0.j]])
Returns:
array: matrix representation
"""
return self._matrix(*self.parameters)
@classmethod
def _eigvals(cls, *params):
"""Eigenvalues of the operator.
This is a *class method* that should be defined for all
new operations and observables that returns the eigenvalues
of the operator. Note that the eigenvalues are not guaranteed
to be in any particular order.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
The default implementation relies on the presence of the
:attr:`_matrix` method.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.RZ._eigvals(0.5)
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigenvalue representation
"""
return np.linalg.eigvals(cls._matrix(*params))
@property
def eigvals(self):
r"""Eigenvalues of an instantiated operator.
Note that the eigenvalues are not guaranteed to be in any
particular order.
**Example:**
>>> U = qml.RZ(0.5, wires=1)
>>> U.eigvals
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigvals representation
"""
return self._eigvals(*self.parameters)
@property
@abc.abstractmethod
def num_params(self):
"""Number of parameters the operator takes."""
@property
@abc.abstractmethod
def num_wires(self):
"""Number of wires the operator acts on."""
@property
@abc.abstractmethod
def par_domain(self):
"""Domain of the gate parameters.
* ``'N'``: natural numbers (including zero).
* ``'R'``: floats.
* ``'A'``: arrays of real or complex values.
* ``None``: if there are no parameters.
"""
@property
def name(self):
"""String for the name of the operator.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
def __init__(self, *params, wires=None, do_queue=True):
# pylint: disable=too-many-branches
self._name = self.__class__.__name__ #: str: name of the operator
self.queue_idx = None #: int, None: index of the Operator in the circuit queue, or None if not in a queue
if wires is None:
raise ValueError("Must specify the wires that {} acts on".format(self.name))
wires = Wires(wires)
self._wires = wires.tolist() #: list[int]: wires on which the operator acts
# check that the number of wires given corresponds to required number
if (
self.num_wires != AllWires
and self.num_wires != AnyWires
and len(self._wires) != self.num_wires
):
raise ValueError(
"{}: wrong number of wires. "
"{} wires given, {} expected.".format(self.name, len(self._wires), self.num_wires)
)
if len(params) != self.num_params:
raise ValueError(
"{}: wrong number of parameters. "
"{} parameters passed, {} expected.".format(self.name, len(params), self.num_params)
)
# check the validity of the params
if self.do_check_domain:
for p in params:
self.check_domain(p)
self.params = list(params) #: list[Any]: parameters of the operator
if do_queue:
self.queue()
def __str__(self):
"""Operator name and some information."""
return "{}: {} params, wires {}".format(self.name, len(self.params), self.wires)
def __repr__(self):
"""Constructor-call-like representation."""
# FIXME using self.parameters here instead of self.params is dangerous, it assumes the params can be evaluated
# which is only true if something suitable happens to remain in VariableRef.positional_arg_values etc. after
# the last evaluation.
if self.parameters:
params = ", ".join([repr(p) for p in self.parameters])
return "{}({}, wires={})".format(self.name, params, self.wires)
return "{}(wires={})".format(self.name, self.wires)
def check_domain(self, p, flattened=False):
"""Check the validity of a parameter.
:class:`.Variable` instances can represent any real scalars (but not arrays).
Args:
p (Number, array, Variable): parameter to check
flattened (bool): True means p is an element of a flattened parameter
sequence (affects the handling of 'A' parameters)
Raises:
TypeError: parameter is not an element of the expected domain
ValueError: parameter is an element of an unknown domain
Returns:
Number, array, Variable: p
"""
# pylint: disable=too-many-branches
# If parameter is a NumPy scalar, convert it into a Python scalar.
if isinstance(p, np.ndarray) and p.ndim == 0:
p = p.item()
if isinstance(p, Variable):
if self.par_domain == "A":
raise TypeError(
"{}: Array parameter expected, got a Variable, "
"which can only represent real scalars.".format(self.name)
)
return p
# p is not a Variable
if self.par_domain == "A":
if flattened:
if isinstance(p, np.ndarray):
raise TypeError(
"{}: Flattened array parameter expected, got {}.".format(self.name, type(p))
)
else:
if not isinstance(p, np.ndarray):
raise TypeError(
"{}: Array parameter expected, got {}.".format(self.name, type(p))
)
elif self.par_domain in ("R", "N"):
if not isinstance(p, numbers.Real):
raise TypeError(
"{}: Real scalar parameter expected, got {}.".format(self.name, type(p))
)
if self.par_domain == "N":
if not isinstance(p, numbers.Integral):
raise TypeError(
"{}: Natural number parameter expected, got {}.".format(self.name, type(p))
)
if p < 0:
raise TypeError(
"{}: Natural number parameter expected, got {}.".format(self.name, p)
)
else:
raise ValueError(
"{}: Unknown parameter domain '{}'.".format(self.name, self.par_domain)
)
return p
@property
def wires(self):
"""Wires of this operator.
Returns:
list[int]: wires
"""
return self._wires
@property
def parameters(self):
"""Current parameter values.
Fixed parameters are returned as is, free parameters represented by
:class:`.Variable` instances are replaced by their
current numerical value.
Returns:
list[Any]: parameter values
"""
# TODO profiling
def evaluate(p):
"""Evaluate a single parameter."""
if isinstance(p, np.ndarray):
# object arrays may have Variables inside them
if p.dtype == object:
temp = np.array([x.val if isinstance(x, Variable) else x for x in p.flat])
return temp.reshape(p.shape)
return p
if isinstance(p, Variable):
p = self.check_domain(p.val)
return p
return [evaluate(p) for p in self.params]
def queue(self):
"""Append the operator to the Operator queue."""
qml.QueuingContext.append_operator(self)
return self # so pre-constructed Observable instances can be queued and returned in a single statement
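# --- Illustrative usage sketch (editor's addition, not in the original source).
# It assumes the standard ``qml.RZ`` gate referenced in the docstrings above:
#
#     >>> op = qml.RZ(0.5, wires=0)
#     >>> op.num_params, op.num_wires, op.par_domain
#     (1, 1, 'R')
#     >>> op.parameters
#     [0.5]
#     >>> op.eigvals
#     array([0.96891242-0.24740396j, 0.96891242+0.24740396j])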
# =============================================================================
# Base Operation class
# =============================================================================
class Operation(Operator):
r"""Base class for quantum operations supported by a device.
As with :class:`~.Operator`, the following class attributes must be
defined for all operations:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
The following two class attributes are optional, but in most cases
should be clearly defined to avoid unexpected behavior during
differentiation.
* :attr:`~.Operation.grad_method`
* :attr:`~.Operation.grad_recipe`
Finally, there are some additional optional class attributes
that may be set, and used by certain quantum optimizers:
* :attr:`~.Operation.generator`
Args:
params (tuple[float, int, array, Variable]): operation parameters
Keyword Args:
wires (Sequence[int]): Subsystems it acts on. If not given, args[-1]
is interpreted as wires.
do_queue (bool): Indicates whether the operation should be
immediately pushed into a :class:`BaseQNode` circuit queue.
This flag is useful if there is some reason to run an Operation
outside of a BaseQNode context.
"""
# pylint: disable=abstract-method
string_for_inverse = ".inv"
@property
def grad_method(self):
"""Gradient computation method.
* ``'A'``: analytic differentiation using the parameter-shift method.
* ``'F'``: finite difference numerical differentiation.
* ``None``: the operation may not be differentiated.
Default is ``'F'``, or ``None`` if the Operation has zero parameters.
"""
return None if self.num_params == 0 else "F"
grad_recipe = None
r"""list[tuple[float]] or None: Gradient recipe for the parameter-shift method.
This is a list with one tuple per operation parameter. For parameter
:math:`k`, the tuple is of the form :math:`(c_k, s_k)`, resulting in
a gradient recipe of
.. math:: \frac{\partial}{\partial\phi_k}O = c_k\left[O(\phi_k+s_k)-O(\phi_k-s_k)\right].
If ``None``, the default gradient recipe
:math:`(c_k, s_k)=(1/2, \pi/2)` is assumed for every parameter.
"""
def get_parameter_shift(self, idx):
"""Multiplier and shift for the given parameter, based on its gradient recipe.
Args:
idx (int): parameter index
Returns:
float, float: multiplier, shift
"""
# get the gradient recipe for this parameter
recipe = self.grad_recipe[idx]
multiplier, shift = (0.5, np.pi / 2) if recipe is None else recipe
# internal multiplier in the Variable
var_mult = self.params[idx].mult
multiplier *= var_mult
if var_mult != 0:
# zero multiplier means the shift is unimportant
shift /= var_mult
return multiplier, shift
@property
def generator(self):
r"""Generator of the operation.
A length-2 list ``[generator, scaling_factor]``, where
* ``generator`` is an existing PennyLane
operation class or :math:`2\times 2` Hermitian array
that acts as the generator of the current operation
* ``scaling_factor`` represents a scaling factor applied
to the generator operation
For example, if :math:`U(\theta)=e^{i0.7\theta \sigma_x}`, then
        :math:`\sigma_x`, with scaling factor :math:`0.7`, is the generator
of operator :math:`U(\theta)`:
.. code-block:: python
generator = [PauliX, 0.7]
Default is ``[None, 1]``, indicating the operation has no generator.
"""
return [None, 1]
@property
def inverse(self):
"""Boolean determining if the inverse of the operation was requested.
"""
return self._inverse
@inverse.setter
def inverse(self, boolean):
self._inverse = boolean
@staticmethod
def decomposition(*params, wires):
"""Returns a template decomposing the operation into other
quantum operations."""
raise NotImplementedError
def inv(self):
"""Inverts the operation, such that the inverse will
be used for the computations by the specific device.
This method concatenates a string to the name of the operation,
to indicate that the inverse will be used for computations.
Any subsequent call of this method will toggle between the original
operation and the inverse of the operation.
Returns:
:class:`Operator`: operation to be inverted
"""
self.inverse = not self._inverse
return self
@property
def matrix(self):
op_matrix = self._matrix(*self.parameters)
if self.inverse:
return op_matrix.conj().T
return op_matrix
@property
def eigvals(self):
op_eigvals = self._eigvals(*self.parameters)
if self.inverse:
return op_eigvals.conj()
return op_eigvals
@property
def base_name(self):
"""Get base name of the operator.
"""
return self.__class__.__name__
@property
def name(self):
"""Get and set the name of the operator.
"""
return self._name + Operation.string_for_inverse if self.inverse else self._name
def __init__(self, *params, wires=None, do_queue=True):
self._inverse = False
# check the grad_method validity
if self.par_domain == "N":
assert (
self.grad_method is None
), "An operation may only be differentiated with respect to real scalar parameters."
elif self.par_domain == "A":
assert self.grad_method in (
None,
"F",
), "Operations that depend on arrays containing free variables may only be differentiated using the F method."
# check the grad_recipe validity
if self.grad_method == "A":
if self.grad_recipe is None:
# default recipe for every parameter
self.grad_recipe = [None] * self.num_params
else:
assert (
len(self.grad_recipe) == self.num_params
), "Gradient recipe must have one entry for each parameter!"
else:
assert self.grad_recipe is None, "Gradient recipe is only used by the A method!"
super().__init__(*params, wires=wires, do_queue=do_queue)
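# --- Illustrative sketch (editor's addition, not in the original source) ---
# A minimal concrete Operation satisfies the abstract interface with plain
# class attributes; the gate below is hypothetical and only shows which hooks
# a subclass provides:
#
#     class MyPhase(Operation):
#         num_params = 1
#         num_wires = 1
#         par_domain = "R"
#         grad_method = "A"        # differentiable via the parameter-shift rule
#
#         @classmethod
#         def _matrix(cls, *params):
#             phi = params[0]
#             return np.diag([1, np.exp(1j * phi)])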
class DiagonalOperation(Operation):
r"""Base class for diagonal quantum operations supported by a device.
As with :class:`~.Operation`, the following class attributes must be
defined for all operations:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
The following two class attributes are optional, but in most cases
should be clearly defined to avoid unexpected behavior during
differentiation.
* :attr:`~.Operation.grad_method`
* :attr:`~.Operation.grad_recipe`
Finally, there are some additional optional class attributes
that may be set, and used by certain quantum optimizers:
* :attr:`~.Operation.generator`
Args:
params (tuple[float, int, array, Variable]): operation parameters
Keyword Args:
wires (Sequence[int]): Subsystems it acts on. If not given, args[-1]
is interpreted as wires.
do_queue (bool): Indicates whether the operation should be
immediately pushed into a :class:`BaseQNode` circuit queue.
This flag is useful if there is some reason to run an Operation
outside of a BaseQNode context.
"""
# pylint: disable=abstract-method
@classmethod
def _eigvals(cls, *params):
"""Eigenvalues of the operator.
The order of the eigenvalues needs to match the order of
the computational basis vectors.
This is a *class method* that must be defined for all
new diagonal operations, that returns the eigenvalues
of the operator in the computational basis.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.RZ._eigvals(0.5)
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigenvalue representation
"""
raise NotImplementedError
@property
def eigvals(self):
r"""Eigenvalues of an instantiated diagonal operation.
The order of the eigenvalues needs to match the order of
the computational basis vectors.
**Example:**
>>> U = qml.RZ(0.5, wires=1)
>>> U.eigvals
>>> array([0.96891242-0.24740396j, 0.96891242+0.24740396j])
Returns:
array: eigvals representation
"""
return super().eigvals
@classmethod
def _matrix(cls, *params):
return np.diag(cls._eigvals(*params))
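# Illustrative note (editor's addition): a diagonal operation only needs to
# supply ``_eigvals``; the matrix then follows from the relation above, e.g.
# eigenvalues ``[1, -1]`` give
#
#     np.diag([1, -1])  ->  [[ 1,  0],
#                            [ 0, -1]]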
# =============================================================================
# Base Observable class
# =============================================================================
class Observable(Operator):
"""Base class for observables supported by a device.
:class:`Observable` is used to describe Hermitian quantum observables.
As with :class:`~.Operator`, the following class attributes must be
defined for all observables:
* :attr:`~.Operator.num_params`
* :attr:`~.Operator.num_wires`
* :attr:`~.Operator.par_domain`
Args:
params (tuple[float, int, array, Variable]): observable parameters
Keyword Args:
wires (Sequence[int]): subsystems it acts on.
Currently, only one subsystem is supported.
do_queue (bool): Indicates whether the operation should be
immediately pushed into the Operator queue.
"""
# pylint: disable=abstract-method
return_type = None
@classmethod
def _eigvals(cls, *params):
"""Eigenvalues of the observable.
The order of the eigenvalues needs to match the order of
the computational basis vectors when the observable is
diagonalized using :attr:`diagonalizing_gates`.
This is a *class method* that must be defined for all
new diagonal operations, that returns the eigenvalues
of the operator in the computational basis.
This private method allows eigenvalues to be computed
directly without instantiating the operators first.
To return the eigenvalues of *instantiated* operators,
please use the :attr:`~.Operator.eigvals` property instead.
**Example:**
>>> qml.PauliZ._eigvals()
>>> array([1, -1])
Returns:
array: eigenvalue representation
"""
raise NotImplementedError
@property
def eigvals(self):
r"""Eigenvalues of an instantiated observable.
The order of the eigenvalues needs to match the order of
the computational basis vectors when the observable is
diagonalized using :attr:`diagonalizing_gates`. This is a requirement for using qubit observables in quantum functions.
**Example:**
>>> U = qml.PauliZ(wires=1)
>>> U.eigvals
>>> array([1, -1])
Returns:
array: eigvals representation
"""
return super().eigvals
def __init__(self, *params, wires=None, do_queue=True):
# extract the arguments
if wires is None:
wires = params[-1]
params = params[:-1]
super().__init__(*params, wires=wires, do_queue=do_queue)
def __repr__(self):
"""Constructor-call-like representation."""
temp = super().__repr__()
if self.return_type is None:
return temp
if self.return_type is Probability:
return repr(self.return_type) + "(wires={})".format(self.wires)
return repr(self.return_type) + "(" + temp + ")"
def __matmul__(self, other):
if isinstance(other, Tensor):
return other.__rmatmul__(self)
if isinstance(other, Observable):
return Tensor(self, other)
raise ValueError("Can only perform tensor products between observables.")
def diagonalizing_gates(self):
r"""Returns the list of operations such that they
diagonalize the observable in the computational basis.
Returns:
list(qml.Operation): A list of gates that diagonalize
the observable in the computational basis.
"""
raise NotImplementedError
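# --- Illustrative usage sketch (editor's addition, not in the original source).
# It assumes the standard ``qml.PauliX`` observable:
#
#     >>> obs = qml.PauliX(wires=0)
#     >>> obs.eigvals
#     array([ 1, -1])
#     >>> obs.diagonalizing_gates()   # rotates PauliX into the computational basis
#     [Hadamard(wires=[0])]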
class Tensor(Observable):
"""Container class representing tensor products of observables.
To create a tensor, simply initiate it like so:
>>> T = Tensor(qml.PauliX(0), qml.Hermitian(A, [1, 2]))
You can also create a tensor from other Tensors:
>>> T = Tensor(T, qml.PauliZ(4))
The ``@`` symbol can be used as a tensor product operation:
>>> T = qml.PauliX(0) @ qml.Hadamard(2)
"""
# pylint: disable=abstract-method
return_type = None
tensor = True
par_domain = None
def __init__(self, *args): # pylint: disable=super-init-not-called
self._eigvals_cache = None
self.obs = []
for o in args:
if isinstance(o, Tensor):
self.obs.extend(o.obs)
elif isinstance(o, Observable):
self.obs.append(o)
else:
raise ValueError("Can only perform tensor products between observables.")
def __str__(self):
"""Print the tensor product and some information."""
return "Tensor product {}: {} params, wires {}".format(
[i.name for i in self.obs], len(self.params), self.wires
)
def __repr__(self):
"""Constructor-call-like representation."""
return "Tensor(" + ", ".join([repr(o) for o in self.obs]) + ")"
@property
def name(self):
"""All constituent observable names making up the tensor product.
Returns:
list[str]: list containing all observable names
"""
return [o.name for o in self.obs]
@property
def num_wires(self):
"""Number of wires the tensor product acts on.
Returns:
int: number of wires
"""
return len(self.wires)
@property
def wires(self):
"""All wires in the system the tensor product acts on.
Returns:
list[int]: wires addressed by the observables in the tensor product
"""
return [w for o in self.obs for w in o.wires]
@property
def params(self):
"""Raw parameters of all constituent observables in the tensor product.
Returns:
list[Any]: flattened list containing all dependent parameters
"""
return [p for sublist in [o.params for o in self.obs] for p in sublist]
@property
def num_params(self):
"""Raw parameters of all constituent observables in the tensor product.
Returns:
list[Any]: flattened list containing all dependent parameters
"""
return len(self.params)
@property
def parameters(self):
"""Evaluated parameter values of all constituent observables in the tensor product.
Returns:
list[list[Any]]: nested list containing the parameters per observable
in the tensor product
"""
return [o.parameters for o in self.obs]
@property
def non_identity_obs(self):
"""Returns the non-identity observables contained in the tensor product.
Returns:
list[:class:`~.Observable`]: list containing the non-identity observables
in the tensor product
"""
return [obs for obs in self.obs if not isinstance(obs, qml.Identity)]
def __matmul__(self, other):
if isinstance(other, Tensor):
self.obs.extend(other.obs)
return self
if isinstance(other, Observable):
self.obs.append(other)
return self
raise ValueError("Can only perform tensor products between observables.")
def __rmatmul__(self, other):
if isinstance(other, Observable):
self.obs[:0] = [other]
return self
raise ValueError("Can only perform tensor products between observables.")
__imatmul__ = __matmul__
@property
def eigvals(self):
"""Return the eigenvalues of the specified tensor product observable.
This method uses pre-stored eigenvalues for standard observables where
possible.
Returns:
array[float]: array containing the eigenvalues of the tensor product
observable
"""
if self._eigvals_cache is not None:
return self._eigvals_cache
standard_observables = {"PauliX", "PauliY", "PauliZ", "Hadamard"}
# observable should be Z^{\otimes n}
self._eigvals_cache = pauli_eigs(len(self.wires))
# TODO: check for edge cases of the sorting, e.g. Tensor(Hermitian(obs, wires=[0, 2]),
# Hermitian(obs, wires=[1, 3, 4])
# Sorting the observables based on wires, so that the order of
# the eigenvalues is correct
obs_sorted = sorted(self.obs, key=lambda x: x.wires)
# check if there are any non-standard observables (such as Identity)
if set(self.name) - standard_observables:
# Tensor product of observables contains a mixture
# of standard and non-standard observables
self._eigvals_cache = np.array([1])
for k, g in itertools.groupby(obs_sorted, lambda x: x.name in standard_observables):
if k:
# Subgroup g contains only standard observables.
self._eigvals_cache = np.kron(self._eigvals_cache, pauli_eigs(len(list(g))))
else:
# Subgroup g contains only non-standard observables.
for ns_ob in g:
# loop through all non-standard observables
self._eigvals_cache = np.kron(self._eigvals_cache, ns_ob.eigvals)
return self._eigvals_cache
def diagonalizing_gates(self):
"""Return the gate set that diagonalizes a circuit according to the
specified tensor observable.
This method uses pre-stored eigenvalues for standard observables where
possible and stores the corresponding eigenvectors from the eigendecomposition.
Returns:
list: list containing the gates diagonalizing the tensor observable
"""
diag_gates = []
for o in self.obs:
diag_gates.extend(o.diagonalizing_gates())
return diag_gates
@property
def matrix(self):
r"""Matrix representation of the tensor operator
in the computational basis.
**Example:**
Note that the returned matrix *only includes explicitly
declared observables* making up the tensor product;
that is, it only returns the matrix for the specified
subsystem it is defined for.
>>> O = qml.PauliZ(0) @ qml.PauliZ(2)
>>> O.matrix
array([[ 1, 0, 0, 0],
[ 0, -1, 0, 0],
[ 0, 0, -1, 0],
[ 0, 0, 0, 1]])
To get the full :math:`2^3\times 2^3` Hermitian matrix
acting on the 3-qubit system, the identity on wire 1
must be explicitly included:
>>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
>>> O.matrix
array([[ 1., 0., 0., 0., 0., 0., 0., 0.],
[ 0., -1., 0., -0., 0., -0., 0., -0.],
[ 0., 0., 1., 0., 0., 0., 0., 0.],
[ 0., -0., 0., -1., 0., -0., 0., -0.],
[ 0., 0., 0., 0., -1., -0., -0., -0.],
[ 0., -0., 0., -0., -0., 1., -0., 0.],
[ 0., 0., 0., 0., -0., -0., -1., -0.],
[ 0., -0., 0., -0., -0., 0., -0., 1.]])
Returns:
array: matrix representation
"""
# group the observables based on what wires they act on
U_list = []
for _, g in itertools.groupby(self.obs, lambda x: x.wires):
# extract the matrices of each diagonalizing gate
mats = [i.matrix for i in g]
if len(mats) > 1:
# multiply all unitaries together before appending
mats = [multi_dot(mats)]
# append diagonalizing unitary for specific wire to U_list
U_list.append(mats[0])
# Return the Hermitian matrix representing the observable
# over the defined wires.
return functools.reduce(np.kron, U_list)
def prune(self):
"""Returns a pruned tensor product of observables by removing :class:`~.Identity` instances from
the observables building up the :class:`~.Tensor`.
The ``return_type`` attribute is preserved while pruning.
If the tensor product only contains one observable, then this observable instance is
returned.
Note that, as a result, this method can return observables that are not a :class:`~.Tensor`
instance.
**Example:**
Pruning that returns a :class:`~.Tensor`:
>>> O = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
>>> O.prune()
        <pennylane.operation.Tensor at 0x7fc1642d1590>
>>> [(o.name, o.wires) for o in O.prune().obs]
[('PauliZ', [0]), ('PauliZ', [2])]
Pruning that returns a single observable:
>>> O = qml.PauliZ(0) @ qml.Identity(1)
>>> O_pruned = O.prune()
>>> (O_pruned.name, O_pruned.wires)
('PauliZ', [0])
Returns:
~.Observable: the pruned tensor product of observables
"""
if len(self.non_identity_obs) == 0:
# Return a single Identity as the tensor only contains Identities
obs = qml.Identity(0)
elif len(self.non_identity_obs) == 1:
obs = self.non_identity_obs[0]
else:
obs = Tensor(*self.non_identity_obs)
obs.return_type = self.return_type
return obs
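# --- Illustrative usage sketch (editor's addition, not in the original source).
# It assumes the standard qubit observables used in the docstrings above:
#
#     >>> T = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)
#     >>> T.name
#     ['PauliZ', 'Identity', 'PauliZ']
#     >>> T.prune()                   # Identity factors are dropped
#     Tensor(PauliZ(wires=[0]), PauliZ(wires=[2]))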
# =============================================================================
# CV Operations and observables
# =============================================================================
class CV:
"""A mixin base class denoting a continuous-variable operation."""
# pylint: disable=no-member
def heisenberg_expand(self, U, num_wires):
"""Expand the given local Heisenberg-picture array into a full-system one.
Args:
U (array[float]): array to expand (expected to be of the dimension ``1+2*self.num_wires``)
num_wires (int): total number of wires in the quantum circuit. If zero, return ``U`` as is.
Raises:
ValueError: if the size of the input matrix is invalid or `num_wires` is incorrect
Returns:
array[float]: expanded array, dimension ``1+2*num_wires``
"""
# TODO: re-assess this function for non-consec wires
U_dim = len(U)
nw = len(self.wires)
if U.ndim > 2:
raise ValueError("Only order-1 and order-2 arrays supported.")
if U_dim != 1 + 2 * nw:
raise ValueError("{}: Heisenberg matrix is the wrong size {}.".format(self.name, U_dim))
if num_wires == 0 or self.wires == list(range(num_wires)):
# no expansion necessary (U is a full-system matrix in the correct order)
return U
if num_wires < len(self.wires):
raise ValueError(
"{}: Number of wires {} is too small to fit Heisenberg matrix".format(
self.name, num_wires
)
)
# expand U into the I, x_0, p_0, x_1, p_1, ... basis
dim = 1 + num_wires * 2
def loc(w):
"Returns the slice denoting the location of (x_w, p_w) in the basis."
ind = 2 * w + 1
return slice(ind, ind + 2)
if U.ndim == 1:
W = np.zeros(dim)
W[0] = U[0]
for k, w in enumerate(self.wires):
W[loc(w)] = U[loc(k)]
elif U.ndim == 2:
if isinstance(self, Observable):
W = np.zeros((dim, dim))
else:
W = np.eye(dim)
W[0, 0] = U[0, 0]
for k1, w1 in enumerate(self.wires):
s1 = loc(k1)
d1 = loc(w1)
# first column
W[d1, 0] = U[s1, 0]
# first row (for gates, the first row is always (1, 0, 0, ...), but not for observables!)
W[0, d1] = U[0, s1]
for k2, w2 in enumerate(self.wires):
W[d1, loc(w2)] = U[s1, loc(k2)] # block k1, k2 in U goes to w1, w2 in W.
return W
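    # Worked example (editor's addition): the vector case for a gate acting on
    # wire 1 of a 2-wire circuit,
    #
    #     U = [u0, u1, u2]           local basis  (I, x_1, p_1)
    #     W = [u0, 0, 0, u1, u2]     full basis   (I, x_0, p_0, x_1, p_1)
    #
    # i.e. the block belonging to the untouched wire 0 stays zero.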
@staticmethod
def _heisenberg_rep(p):
r"""Heisenberg picture representation of the operation.
* For Gaussian CV gates, this method returns the matrix of the linear
transformation carried out by the gate for the given parameter values.
The method is not defined for non-Gaussian gates.
**The existence of this method is equivalent to setting** ``grad_method = 'A'``.
* For observables, returns a real vector (first-order observables) or
symmetric matrix (second-order observables) of expansion coefficients
of the observable.
For single-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x, \p)`.
For multi-mode Operations we use the basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
.. note::
For gates, we assume that the inverse transformation is obtained
by negating the first parameter.
Args:
p (Sequence[float]): parameter values for the transformation
Returns:
array[float]: :math:`\tilde{U}` or :math:`q`
"""
# pylint: disable=unused-argument
return None
@classproperty
def supports_heisenberg(self):
"""Returns True iff the CV Operation has overridden the :meth:`~.CV._heisenberg_rep`
static method, thereby indicating that it is Gaussian and does not block the use
of the parameter-shift differentiation method if found between the differentiated gate
and an observable.
"""
return CV._heisenberg_rep != self._heisenberg_rep
class CVOperation(CV, Operation):
"""Base class for continuous-variable quantum operations."""
# pylint: disable=abstract-method
@classproperty
def supports_parameter_shift(self):
"""Returns True iff the CV Operation supports the parameter-shift differentiation method.
This means that it has ``grad_method='A'`` and
has overridden the :meth:`~.CV._heisenberg_rep` static method.
"""
return self.grad_method == "A" and self.supports_heisenberg
def heisenberg_pd(self, idx):
"""Partial derivative of the Heisenberg picture transform matrix.
Computed using grad_recipe.
Args:
idx (int): index of the parameter with respect to which the
partial derivative is computed.
Returns:
array[float]: partial derivative
"""
# get the gradient recipe for this parameter
recipe = self.grad_recipe[idx]
multiplier = 0.5 if recipe is None else recipe[0]
shift = np.pi / 2 if recipe is None else recipe[1]
p = self.parameters
# evaluate the transform at the shifted parameter values
p[idx] += shift
U2 = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
p[idx] -= 2 * shift
U1 = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
return (U2 - U1) * multiplier # partial derivative of the transformation
def heisenberg_tr(self, num_wires, inverse=False):
r"""Heisenberg picture representation of the linear transformation carried
out by the gate at current parameter values.
Given a unitary quantum gate :math:`U`, we may consider its linear
transformation in the Heisenberg picture, :math:`U^\dagger(\cdot) U`.
If the gate is Gaussian, this linear transformation preserves the polynomial order
of any observables that are polynomials in :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
This also means it maps :math:`\text{span}(\mathbf{r})` into itself:
.. math:: U^\dagger \mathbf{r}_i U = \sum_j \tilde{U}_{ij} \mathbf{r}_j
For Gaussian CV gates, this method returns the transformation matrix for
the current parameter values of the Operation. The method is not defined
for non-Gaussian (and non-CV) gates.
Args:
num_wires (int): total number of wires in the quantum circuit
inverse (bool): if True, return the inverse transformation instead
Raises:
RuntimeError: if the specified operation is not Gaussian or is missing the `_heisenberg_rep` method
Returns:
array[float]: :math:`\tilde{U}`, the Heisenberg picture representation of the linear transformation
"""
p = self.parameters
if inverse:
if self.par_domain == "A":
# TODO: expand this for the new par domain class, for non-unitary matrices.
p[0] = np.linalg.inv(p[0])
else:
p[0] = -p[0] # negate first parameter
U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
# not defined?
if U is None:
raise RuntimeError(
"{} is not a Gaussian operation, or is missing the _heisenberg_rep method.".format(
self.name
)
)
return self.heisenberg_expand(U, num_wires)
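# --- Illustrative sketch (editor's addition, not in the original source).
# Assuming the standard Gaussian ``qml.Rotation`` gate, ``heisenberg_tr``
# returns the linear phase-space transformation acting on (I, x, p):
#
#     >>> op = qml.Rotation(0.1, wires=0)
#     >>> op.heisenberg_tr(num_wires=1)    # approximately
#     array([[1.        , 0.        ,  0.        ],
#            [0.        , 0.99500417, -0.09983342],
#            [0.        , 0.09983342,  0.99500417]])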
class CVObservable(CV, Observable):
r"""Base class for continuous-variable observables.
The class attribute :attr:`~.ev_order` can be defined to indicate
to PennyLane whether the corresponding CV observable is a polynomial in the
quadrature operators. If so,
* ``ev_order = 1`` indicates a first order polynomial in quadrature
operators :math:`(\x, \p)`.
* ``ev_order = 2`` indicates a second order polynomial in quadrature
operators :math:`(\x, \p)`.
If :attr:`~.ev_order` is not ``None``, then the Heisenberg representation
of the observable should be defined in the static method :meth:`~.CV._heisenberg_rep`,
returning an array of the correct dimension.
"""
# pylint: disable=abstract-method
ev_order = None #: None, int: if not None, the observable is a polynomial of the given order in `(x, p)`.
def heisenberg_obs(self, num_wires):
r"""Representation of the observable in the position/momentum operator basis.
Returns the expansion :math:`q` of the observable, :math:`Q`, in the
basis :math:`\mathbf{r} = (\I, \x_0, \p_0, \x_1, \p_1, \ldots)`.
* For first-order observables returns a real vector such
that :math:`Q = \sum_i q_i \mathbf{r}_i`.
* For second-order observables returns a real symmetric matrix
such that :math:`Q = \sum_{ij} q_{ij} \mathbf{r}_i \mathbf{r}_j`.
Args:
num_wires (int): total number of wires in the quantum circuit
Returns:
array[float]: :math:`q`
"""
p = self.parameters
U = self._heisenberg_rep(p) # pylint: disable=assignment-from-none
return self.heisenberg_expand(U, num_wires)
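# Illustrative sketch (editor's addition, not in the original source): for a
# first-order quadrature observable (assuming the standard ``qml.X`` observable
# with ``ev_order = 1``), the expansion is just the coefficient vector that
# selects x:
#
#     >>> qml.X(wires=0).heisenberg_obs(num_wires=1)
#     array([0, 1, 0])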
| 34.446897 | 127 | 0.603708 |
7c7fb725690eab171ba37743a782da368b6ce205 | 2,619 | py | Python | administration/src/services/images_service.py | shivamvku/flakrfid | 559198d23907eea6e87f38fac1c5fb5c2b6fbca8 | [
"MIT"
] | null | null | null | administration/src/services/images_service.py | shivamvku/flakrfid | 559198d23907eea6e87f38fac1c5fb5c2b6fbca8 | [
"MIT"
] | 1 | 2019-05-13T16:19:36.000Z | 2019-05-19T11:21:22.000Z | administration/src/services/images_service.py | shivamvku/flakrfid | 559198d23907eea6e87f38fac1c5fb5c2b6fbca8 | [
"MIT"
] | null | null | null |
import os
from ..db import SqliteManager as dbm
from ..config import UPLOAD_URI as upu
def get_img_rel_url(img_url):
start_of_rel_url = img_url.find('/static')
return img_url[start_of_rel_url:]
def get_img_url(pic_id, is_relative=False):
if None is pic_id:
return os.path.join(upu, 'default.png')
db = dbm.get_db()
cur = db.cursor()
sql_command = 'SELECT location FROM ImageStore WHERE id = ?'
params = (pic_id,)
cur.execute(sql_command, params)
result = cur.fetchone()
print('From server image service - Image location is %s' % result)
pic_url = result[0]
dbm.close_connection(db)
return pic_url if not is_relative else get_img_rel_url(pic_url)
def get_img_id(pic_url):
if None is pic_url or '' is pic_url:
pic_url = os.path.join(upu, 'default.png')
db = dbm.get_db()
cur = db.cursor()
sql_command = 'SELECT id FROM ImageStore WHERE location = ?'
params = (pic_url,)
cur.execute(sql_command, params)
result = cur.fetchone()
print('From server image service - Image id is %s' % result)
pic_id = result[0]
dbm.close_connection(db)
return pic_id
def save_img(image):
if None is image:
return os.path.join(upu, 'default.png'), 1
print('From server - image service - image file received %s' % image)
if '' == image.filename:
return os.path.join(upu, 'default.png'), 1
file = image
file_name = image.filename
print('From server - image service - image filename is %s' % file_name)
file_src = os.path.join(upu, file_name)
db = dbm.get_db()
cur = db.cursor()
sql_command = 'SELECT id FROM ImageStore WHERE name = ? AND location = ?'
params = (file_name, file_src,)
cur.execute(sql_command, params)
stored_pic = cur.fetchone()
if None is not stored_pic and os.path.isfile(file_src):
pic_id = stored_pic[0]
print('From server image service - already stored image - id is %s' % pic_id)
else:
file.save(file_src)
print('From server - image service - new file src is %s' % file_src)
sql_command = 'INSERT OR IGNORE INTO ImageStore (name, location) VALUES (?, ?)'
params = (file_name, file_src,)
cur.execute(sql_command, params)
db.commit()
sql_command = 'SELECT id FROM ImageStore WHERE name = ? AND location = ?'
cur.execute(sql_command, params)
stored_pic = cur.fetchone()
pic_id = stored_pic[0]
print('From server image service - newly stored image - id is %s' % pic_id)
dbm.close_connection(db)
return file_src, pic_id
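# Minimal usage sketch (editor's addition, not part of the original module);
# the variable names below are hypothetical:
#
#     file_src, pic_id = save_img(uploaded_file)       # store an upload
#     url = get_img_url(pic_id, is_relative=True)      # resolve id -> URL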
| 35.391892 | 87 | 0.654448 |
dcf7019eae532d329b70ca6c37ceb9543b61e52a | 1,159 | py | Python | tests/conftest.py | python-trio/trio-click | 0cf915e5f444103f248749cc0a6c1f6fd30ed7c5 | [
"BSD-3-Clause"
] | 41 | 2018-01-25T07:55:25.000Z | 2020-09-12T18:15:56.000Z | tests/conftest.py | python-trio/trio-click | 0cf915e5f444103f248749cc0a6c1f6fd30ed7c5 | [
"BSD-3-Clause"
] | 6 | 2018-01-25T08:04:55.000Z | 2020-09-14T05:56:14.000Z | tests/conftest.py | python-trio/trio-click | 0cf915e5f444103f248749cc0a6c1f6fd30ed7c5 | [
"BSD-3-Clause"
] | 4 | 2018-01-28T08:45:38.000Z | 2020-09-12T10:55:22.000Z | import os
import tempfile
import pytest
import anyio
from functools import partial
from threading import Thread
from asyncclick.testing import CliRunner
class SyncCliRunner(CliRunner):
    def invoke(self, *a, _sync=False, **k):
fn = super().invoke
if _sync:
return fn(*a,**k)
# anyio now protects against nested calls, so we use a thread
result = None
def f():
nonlocal result,fn
async def r():
return await fn(*a,**k)
result = anyio.run(r) ## , backend="trio")
        t = Thread(target=f, name="TEST")
t.start()
t.join()
return result
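# Illustrative test sketch (editor's addition, not part of the original
# conftest); it assumes ``import asyncclick`` and a hypothetical command:
#
#     def test_hello(runner):
#         @asyncclick.command()
#         async def hello():
#             asyncclick.echo("Hello!")
#
#         result = runner.invoke(hello)
#         assert result.output == "Hello!\n"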
@pytest.fixture(scope="function")
def runner(request):
return SyncCliRunner()
def _check_symlinks_supported():
with tempfile.TemporaryDirectory(prefix="click-pytest-") as tempdir:
target = os.path.join(tempdir, "target")
open(target, "w").close()
link = os.path.join(tempdir, "link")
try:
os.symlink(target, link)
return True
except OSError:
return False
symlinks_supported = _check_symlinks_supported()
| 24.145833 | 72 | 0.604832 |
e2e803b327adda63fcc168ff7c94afad60d04788 | 16,058 | py | Python | txdav/common/datastore/upgrade/sql/test/test_upgrade.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 462 | 2016-08-14T17:43:24.000Z | 2022-03-17T07:38:16.000Z | txdav/common/datastore/upgrade/sql/test/test_upgrade.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 72 | 2016-09-01T23:19:35.000Z | 2020-02-05T02:09:26.000Z | txdav/common/datastore/upgrade/sql/test/test_upgrade.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 171 | 2016-08-16T03:50:30.000Z | 2022-03-26T11:49:55.000Z |
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for L{txdav.common.datastore.upgrade.sql.upgrade}.
"""
from twext.enterprise.dal.parseschema import schemaFromPath
from twext.enterprise.ienterprise import ORACLE_DIALECT, POSTGRES_DIALECT
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.modules import getModule
from twisted.trial.unittest import TestCase
from txdav.base.datastore.suboracle import cleanDatabase
from txdav.common.datastore.sql_dump import dumpSchema
from txdav.common.datastore.test.util import StubNotifierFactory, SQLStoreBuilder, \
DB_TYPE, theStoreBuilder
from txdav.common.datastore.upgrade.sql.upgrade import (
UpgradeDatabaseSchemaStep, UpgradeDatabaseAddressBookDataStep, UpgradeDatabaseCalendarDataStep, NotAllowedToUpgrade)
import re
class SchemaUpgradeTests(TestCase):
"""
Tests for L{UpgradeDatabaseSchemaStep}.
"""
def __init__(self, methodName='runTest'):
super(SchemaUpgradeTests, self).__init__(methodName)
if DB_TYPE[0] == POSTGRES_DIALECT:
self.testStoreBuilder = theStoreBuilder
else:
self.testStoreBuilder = SQLStoreBuilder(dsnUser="test_dbUpgrades", noCleanup=True)
@staticmethod
def _getRawSchemaVersion(fp, versionKey):
schema = fp.getContent()
found = re.search("insert into CALENDARSERVER (\(NAME, VALUE\) )?values \('%s', '(\d+)'\);" % (versionKey,), schema)
return int(found.group(2)) if found else None
def _getSchemaVersion(self, fp, versionKey):
found = SchemaUpgradeTests._getRawSchemaVersion(fp, versionKey)
if found is None:
if versionKey == "VERSION":
self.fail("Could not determine schema version for: %s" % (fp,))
else:
return 1
return found
def test_scanUpgradeFiles(self):
upgrader = UpgradeDatabaseSchemaStep(None)
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema1")
files = upgrader.scanForUpgradeFiles("fake_dialect")
self.assertEqual(
files,
[(3, 4, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql"))],
)
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema2")
files = upgrader.scanForUpgradeFiles("fake_dialect")
self.assertEqual(
files,
[
(3, 4, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql")),
(3, 5, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_5.sql")),
(4, 5, upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql")),
]
)
def test_determineUpgradeSequence(self):
upgrader = UpgradeDatabaseSchemaStep(None)
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema1")
files = upgrader.scanForUpgradeFiles("fake_dialect")
upgrades = upgrader.determineUpgradeSequence(3, 4, files, "fake_dialect")
self.assertEqual(
upgrades,
[upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql")],
)
self.assertRaises(RuntimeError, upgrader.determineUpgradeSequence, 3, 5, files, "fake_dialect")
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema2")
files = upgrader.scanForUpgradeFiles("fake_dialect")
upgrades = upgrader.determineUpgradeSequence(3, 5, files, "fake_dialect")
self.assertEqual(
upgrades,
[upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_5.sql")]
)
upgrades = upgrader.determineUpgradeSequence(4, 5, files, "fake_dialect")
self.assertEqual(
upgrades,
[upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql")]
)
upgrader.schemaLocation = getModule(__name__).filePath.sibling("fake_schema3")
files = upgrader.scanForUpgradeFiles("fake_dialect")
upgrades = upgrader.determineUpgradeSequence(3, 5, files, "fake_dialect")
self.assertEqual(
upgrades,
[
upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_3_to_4.sql"),
upgrader.schemaLocation.child("upgrades").child("fake_dialect").child("upgrade_from_4_to_5.sql"),
]
)
def test_upgradeAvailability(self):
"""
Make sure that each old schema has a valid upgrade path to the current one.
"""
for dialect in (POSTGRES_DIALECT, ORACLE_DIALECT,):
upgrader = UpgradeDatabaseSchemaStep(None)
files = upgrader.scanForUpgradeFiles(dialect)
current_version = self._getSchemaVersion(upgrader.schemaLocation.child(DB_TYPE[2]), "VERSION")
for child in upgrader.schemaLocation.child("old").child(dialect).globChildren("*.sql"):
old_version = self._getSchemaVersion(child, "VERSION")
upgrades = upgrader.determineUpgradeSequence(old_version, current_version, files, dialect)
self.assertNotEqual(len(upgrades), 0)
# def test_upgradeDataAvailability(self):
# """
# Make sure that each upgrade file has a valid data upgrade file or None.
# """
#
# for dialect in (POSTGRES_DIALECT, ORACLE_DIALECT,):
# upgrader = UpgradeDatabaseSchemaStep(None)
# files = upgrader.scanForUpgradeFiles(dialect)
# for _ignore_from, _ignore_to, fp in files:
# result = upgrader.getDataUpgrade(fp)
# if result is not None:
# self.assertIsInstance(result, types.FunctionType)
@inlineCallbacks
def _dbSchemaUpgrades(self, child):
"""
This does a full DB test of all possible upgrade paths. For each old schema, it loads it into the DB
then runs the upgrade service. This ensures all the upgrade.sql files work correctly - at least for
postgres.
"""
store = yield self.testStoreBuilder.buildStore(
self, {"push": StubNotifierFactory()}, enableJobProcessing=False
)
@inlineCallbacks
def _loadOldSchema(path):
"""
Use the postgres schema mechanism to do tests under a separate "namespace"
in postgres that we can quickly wipe clean afterwards.
"""
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("create schema test_dbUpgrades")
yield startTxn.execSQL("set search_path to test_dbUpgrades")
yield startTxn.execSQLBlock(path.getContent())
yield startTxn.commit()
@inlineCallbacks
def _loadVersion():
startTxn = store.newTransaction("test_dbUpgrades")
new_version = yield startTxn.execSQL("select value from calendarserver where name = 'VERSION'")
yield startTxn.commit()
returnValue(int(new_version[0][0]))
@inlineCallbacks
def _loadSchemaFromDatabase():
startTxn = store.newTransaction("test_dbUpgrades")
schema = yield dumpSchema(startTxn, "Upgraded from %s" % (child.basename(),), "test_dbUpgrades")
yield startTxn.commit()
returnValue(schema)
@inlineCallbacks
def _unloadOldSchema():
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("set search_path to public")
yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
elif startTxn.dbtype.dialect == ORACLE_DIALECT:
yield cleanDatabase(startTxn)
yield startTxn.commit()
@inlineCallbacks
def _cleanupOldSchema():
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("set search_path to public")
yield startTxn.execSQL("drop schema if exists test_dbUpgrades cascade")
elif startTxn.dbtype.dialect == ORACLE_DIALECT:
yield cleanDatabase(startTxn)
yield startTxn.commit()
self.addCleanup(_cleanupOldSchema)
test_upgrader = UpgradeDatabaseSchemaStep(None)
expected_version = self._getSchemaVersion(test_upgrader.schemaLocation.child(DB_TYPE[2]), "VERSION")
# Upgrade allowed
upgrader = UpgradeDatabaseSchemaStep(store)
yield _loadOldSchema(child)
yield upgrader.databaseUpgrade()
new_version = yield _loadVersion()
# Compare the upgraded schema with the expected current schema
new_schema = yield _loadSchemaFromDatabase()
currentSchema = schemaFromPath(test_upgrader.schemaLocation.child(DB_TYPE[2]))
mismatched = currentSchema.compare(new_schema)
# These are special case exceptions
for i in (
"Table: CALENDAR_HOME, column name DATAVERSION default mismatch",
"Table: CALENDAR_HOME, mismatched constraints: set([<Constraint: (NOT NULL ('DATAVERSION',) None)>])",
"Table: ADDRESSBOOK_HOME, column name DATAVERSION default mismatch",
"Table: ADDRESSBOOK_HOME, mismatched constraints: set([<Constraint: (NOT NULL ('DATAVERSION',) None)>])",
"Table: PUSH_NOTIFICATION_WORK, column name PUSH_PRIORITY default mismatch",
):
try:
mismatched.remove(i)
except ValueError:
pass
if mismatched and mismatched[0].startswith("Comparing schema: current.sql to Upgraded from"):
del mismatched[0]
self.assertEqual(len(mismatched), 0, "Schema mismatch:\n" + "\n".join(mismatched))
yield _unloadOldSchema()
self.assertEqual(new_version, expected_version)
# Upgrade disallowed
upgrader = UpgradeDatabaseSchemaStep(store, failIfUpgradeNeeded=True)
yield _loadOldSchema(child)
old_version = yield _loadVersion()
try:
yield upgrader.databaseUpgrade()
except NotAllowedToUpgrade:
pass
except Exception:
self.fail("NotAllowedToUpgrade not raised")
else:
self.fail("NotAllowedToUpgrade not raised")
new_version = yield _loadVersion()
yield _unloadOldSchema()
self.assertEqual(old_version, new_version)
@inlineCallbacks
def _dbDataUpgrades(self, version, versionKey, upgraderClass):
"""
This does a full DB test of all possible data upgrade paths. For each old schema, it loads it into the DB
then runs the data upgrade service. This ensures all the upgrade_XX.py files work correctly - at least for
postgres.
TODO: this currently does not create any data to test with. It simply runs the upgrade on an empty
store.
"""
store = yield self.testStoreBuilder.buildStore(
self, {"push": StubNotifierFactory()}, enableJobProcessing=False
)
@inlineCallbacks
def _loadOldData(path, oldVersion):
"""
Use the postgres schema mechanism to do tests under a separate "namespace"
in postgres that we can quickly wipe clean afterwards.
"""
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("create schema test_dbUpgrades")
yield startTxn.execSQL("set search_path to test_dbUpgrades")
yield startTxn.execSQLBlock(path.getContent())
yield startTxn.execSQL("update CALENDARSERVER set VALUE = '%s' where NAME = '%s'" % (oldVersion, versionKey,))
yield startTxn.commit()
@inlineCallbacks
def _loadVersion():
startTxn = store.newTransaction("test_dbUpgrades")
new_version = yield startTxn.execSQL("select value from calendarserver where name = '%s'" % (versionKey,))
yield startTxn.commit()
returnValue(int(new_version[0][0]))
@inlineCallbacks
def _unloadOldData():
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("set search_path to public")
yield startTxn.execSQL("drop schema test_dbUpgrades cascade")
elif startTxn.dbtype.dialect == ORACLE_DIALECT:
yield cleanDatabase(startTxn)
yield startTxn.commit()
@inlineCallbacks
def _cleanupOldData():
startTxn = store.newTransaction("test_dbUpgrades")
if startTxn.dbtype.dialect == POSTGRES_DIALECT:
yield startTxn.execSQL("set search_path to public")
yield startTxn.execSQL("drop schema if exists test_dbUpgrades cascade")
elif startTxn.dbtype.dialect == ORACLE_DIALECT:
yield cleanDatabase(startTxn)
yield startTxn.commit()
self.addCleanup(_cleanupOldData)
test_upgrader = UpgradeDatabaseSchemaStep(None)
expected_version = self._getSchemaVersion(test_upgrader.schemaLocation.child(DB_TYPE[2]), versionKey)
oldVersion = version
upgrader = upgraderClass(store)
yield _loadOldData(test_upgrader.schemaLocation.child(DB_TYPE[2]), oldVersion)
yield upgrader.databaseUpgrade()
new_version = yield _loadVersion()
yield _unloadOldData()
self.assertEqual(new_version, expected_version)
test_upgrader = UpgradeDatabaseSchemaStep(None)
DIALECT = DB_TYPE[0]
# Bind test methods for each schema version
for child in test_upgrader.schemaLocation.child("old").child(DIALECT).globChildren("*.sql"):
def f(self, lchild=child):
return self._dbSchemaUpgrades(lchild)
setattr(SchemaUpgradeTests, "test_dbSchemaUpgrades_%s" % (child.basename().split(".", 1)[0],), f)
# Bind test methods for each addressbook data version
versions = set()
for child in test_upgrader.schemaLocation.child("old").child(DIALECT).globChildren("*.sql"):
version = SchemaUpgradeTests._getRawSchemaVersion(child, "ADDRESSBOOK-DATAVERSION")
versions.add(version if version else 1)
for version in sorted(versions):
def f(self, lversion=version):
return self._dbDataUpgrades(lversion, "ADDRESSBOOK-DATAVERSION", UpgradeDatabaseAddressBookDataStep)
setattr(SchemaUpgradeTests, "test_dbAddressBookDataUpgrades_%s" % (version,), f)
# Bind test methods for each calendar data version
versions = set()
for child in test_upgrader.schemaLocation.child("old").child(DIALECT).globChildren("*.sql"):
version = SchemaUpgradeTests._getRawSchemaVersion(child, "CALENDAR-DATAVERSION")
versions.add(version if version else 1)
for version in sorted(versions):
def f(self, lversion=version):
return self._dbDataUpgrades(lversion, "CALENDAR-DATAVERSION", UpgradeDatabaseCalendarDataStep)
setattr(SchemaUpgradeTests, "test_dbCalendarDataUpgrades_%s" % (version,), f)
| 44.481994 | 124 | 0.670196 |
c8485a96e231f2f24e145233f82350bd7c4aa2f8 | 1,317 | py | Python | theil_sen_example.py | rsiverd/pure_python_theil_sen | ac76de50ddb18f404b9e58958957028fdf7d926c | [
"BSD-2-Clause"
] | 1 | 2020-05-07T01:01:30.000Z | 2020-05-07T01:01:30.000Z | theil_sen_example.py | rsiverd/pure_python_theil_sen | ac76de50ddb18f404b9e58958957028fdf7d926c | [
"BSD-2-Clause"
] | null | null | null | theil_sen_example.py | rsiverd/pure_python_theil_sen | ac76de50ddb18f404b9e58958957028fdf7d926c | [
"BSD-2-Clause"
] | 1 | 2020-05-07T06:22:01.000Z | 2020-05-07T06:22:01.000Z |
#!/usr/bin/env python
import os, sys, time
reload = sys.modules['imp' if 'imp' in sys.modules else 'importlib'].reload
import pure_python_theil_sen
reload(pure_python_theil_sen)
ppts = pure_python_theil_sen
## -----------------------------------------------------------------------
## Example focus sequence dataset (focus, fwhm_avg, fwhm_med):
test_data = [
(+2.0, 13.0, 13.6),
(+1.5, 11.8, 13.2),
(+1.0, 11.9, 11.4),
(+0.5, 10.5, 9.5),
(+0.0, 8.2, 8.1),
(-0.5, 7.6, 7.3),
(-1.0, 6.9, 6.2),
(-1.5, 6.2, 6.2),
(-2.0, 4.9, 4.8)]
## Make sure data are sorted by focus:
data_points = sorted(test_data)
## Run the fit:
foc_vals, fwhm_avg, fwhm_med = zip(*data_points)
icept, slope = ppts.linefit(foc_vals, fwhm_avg)
## Calculate residuals:
res_avg = [(y - icept - slope*x) for x,y,_ in data_points]
res_med = [(y - icept - slope*x) for x,_,y in data_points]
## Best-fit line for plotting:
ts_x = [min(foc_vals), max(foc_vals)]
ts_y = [(icept + slope*x) for x in ts_x]
## -----------------------------------------------------------------------
## Plot of results for illustration:
import matplotlib.pyplot as plt
plt.clf()
plt.grid()
plt.scatter(foc_vals, fwhm_avg)
plt.plot(ts_x, ts_y, c='r')
plt.savefig('theil_sen_fit.png')
| 26.34 | 75 | 0.549734 |
530d868a258f9994d1f3777dfc0196d7c59ae310 | 5,612 | py | Python | awstwitter/__init__.py | hsbakshi/awstwitter | 8163b35ac9b9ac0b6ee6d1926be9dcfccf67ba06 | [
"Apache-2.0"
] | null | null | null | awstwitter/__init__.py | hsbakshi/awstwitter | 8163b35ac9b9ac0b6ee6d1926be9dcfccf67ba06 | [
"Apache-2.0"
] | null | null | null | awstwitter/__init__.py | hsbakshi/awstwitter | 8163b35ac9b9ac0b6ee6d1926be9dcfccf67ba06 | [
"Apache-2.0"
] | null | null | null |
"""This module allows securely storing and retrieving Twitter API
credentials in AWS EC2 System manager."""
import sys
import argparse
import pprint
import boto3
TWITTER_CREDENTIALS = {
'consumer_key': {
'Description': 'Twitter consumer key',
'Type': 'String'
},
'consumer_secret': {
'Description': 'Twitter consumer secret',
'Type': 'SecureString'
},
'access_token': {
'Description': 'Twitter access token',
'Type': 'String'
},
'access_token_secret': {
'Description': 'Twitter access token secret',
'Type': 'SecureString'
}
}
SSM_NAME_SEPARATOR = '.'
def manage():
"Process arguments to manage credentials"
parser = argparse.ArgumentParser(description='Manage credentials.')
parser.add_argument('mode', choices=['store', 'retrieve'],
help='Choose mode')
args = parser.parse_args()
mode = args.mode
if mode == 'store':
add_credentials()
elif mode == 'retrieve':
retrieve()
def retrieve():
"Retrieve credentials"
namespace = input("Please enter namespace: ")
pprint.pprint(retrieve_credentials(namespace))
def add_credentials():
"Store credentials into EC2 Systems manager"
namespace = input('Please enter namespace: ')
credentials_dict = {}
key_id = input('Enter KMS key id (blank to create one): ')
for key in TWITTER_CREDENTIALS:
description = TWITTER_CREDENTIALS[key]['Description']
value = input('Please enter ' + description + ': ')
credentials_dict[key] = value
upload_credentials(namespace, credentials_dict)
def retrieve_credentials(credential_namespace, aws_region=None,
aws_access_key_id=None, aws_secret_access_key=None):
"""Retrieves Twitter credentials from AWS EC2 System manager.
The credentials are returned as a dict.
They are decrypted before returning.
Args:
credential_namespace (string):
Project name used as credential prefix.
aws_region (string): AWS region name override
The aws_* parameters are overrides passed to boto.
Returns:
dict: This function returns a dict with the following keys:
* consumer_key
* consumer_secret
* access_token
* access_token_secret
"""
ssmname_to_key = _make_ssmname_to_key_map(credential_namespace)
client = boto3.client('ssm', region_name=aws_region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
response = client.get_parameters(Names=list(ssmname_to_key.keys()),
WithDecryption=True)
responsedict = _credentials_response_to_dict(ssmname_to_key, response)
if not responsedict:
raise LookupError('No credentials found for namespace:' + credential_namespace)
return responsedict
def upload_credentials(credential_namespace, credentials_dict,
kms_key_id=None, aws_region=None,
aws_access_key_id=None, aws_secret_access_key=None):
"""Save Twitter credentials securely into EC2 system manager.
    Credentials are stored in AWS EC2 Systems Manager under the given namespace.
    Secret values are encrypted and stored as SecureString parameters.
Args:
credential_namespace (string): Used as prefix for storing
credentials.
        credentials_dict (dict): Twitter credentials dict.
            The dict should have the following keys:
* consumer_key
* consumer_secret
* access_token
* access_token_secret
kms_key_id (string)
aws_region (string): AWS region name override
The aws_* parameters are optional overrides passed to boto.
By default, boto library will read from env variables or
~/.aws/credentials
"""
ssmname_to_key = _make_ssmname_to_key_map(credential_namespace)
if not kms_key_id:
kms = boto3.client('kms')
kms_key_id = kms.create_key()['KeyMetadata']['KeyId']
print('Created key: %s' % kms_key_id)
client = boto3.client(service_name='ssm', region_name=aws_region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
for ssmname, key in ssmname_to_key.items():
credential_details = TWITTER_CREDENTIALS[key]
description = credential_details['Description']
param_type = credential_details['Type']
        if param_type == 'SecureString':
client.put_parameter(Name=ssmname, Description=description,
Value=credentials_dict[key], Type=param_type,
Overwrite=True, KeyId=kms_key_id)
        else:
client.put_parameter(Name=ssmname, Description=description,
Value=credentials_dict[key], Type=param_type,
Overwrite=True)
def _make_ssmname_to_key_map(credential_prefix):
ssmname_to_key = {}
for required_credential in TWITTER_CREDENTIALS:
ssmname = credential_prefix + SSM_NAME_SEPARATOR + required_credential
ssmname_to_key[ssmname] = required_credential
return ssmname_to_key
def _credentials_response_to_dict(ssmname_to_key, response):
responsedict = {}
for credential in response['Parameters']:
key = ssmname_to_key[credential['Name']]
responsedict[key] = credential['Value']
return responsedict
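# Minimal library-usage sketch (editor's addition, not part of the original
# module); the namespace and values below are hypothetical:
#
#     creds = retrieve_credentials('myproject', aws_region='us-east-1')
#     # creds now holds consumer_key / consumer_secret /
#     # access_token / access_token_secret for a Twitter client
#
#     upload_credentials('myproject', {
#         'consumer_key': '...', 'consumer_secret': '...',
#         'access_token': '...', 'access_token_secret': '...',
#     })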
if __name__ == "__main__":
    manage()
 | 37.413333 | 87 | 0.653599 |
8a8e4bb0118ffe90335aa5ade104684ae009227f | 496 | py | Python | lint.py | rtmigo/vtcff_py | e26a5e55ea455b10995932dccd319c1f7fc28385 | [
"MIT"
] | null | null | null | lint.py | rtmigo/vtcff_py | e26a5e55ea455b10995932dccd319c1f7fc28385 | [
"MIT"
] | null | null | null | lint.py | rtmigo/vtcff_py | e26a5e55ea455b10995932dccd319c1f7fc28385 | [
"MIT"
] | null | null | null |
import subprocess
def lint():
print("Running pylint...")
r = subprocess.call(['pylint', 'vtcff'])
if r & 1 or r & 2 or r & 32:
print(f"pylint return code is {r}")
if r & 1:
print(f"fatal message")
if r & 2:
print(f"error message")
exit(1)
print("Running mypy...")
if subprocess.call(['mypy', 'vtcff',
'--ignore-missing-imports']) != 0:
exit(1)
if __name__ == "__main__":
lint()
| 20.666667 | 58 | 0.489919 |
65d22aedb33ebc4da32a9f96ed2e58ac3e784a8f | 1,085 | py | Python | src/main/python/app/helpers/ItemFactory.py | karlpet/WadLauncher | 512f5d28de5c57e4dffdc642b170891a99a00ea8 | [
"MIT"
] | 2 | 2020-09-06T11:16:30.000Z | 2020-09-15T17:11:34.000Z | src/main/python/app/helpers/ItemFactory.py | karlpet/WadLauncher | 512f5d28de5c57e4dffdc642b170891a99a00ea8 | [
"MIT"
] | 74 | 2020-09-07T16:40:54.000Z | 2021-06-18T00:22:39.000Z | src/main/python/app/helpers/ItemFactory.py | karlpet/WadLauncher | 512f5d28de5c57e4dffdc642b170891a99a00ea8 | [
"MIT"
] | null | null | null |
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QStandardItem
ID_ROLE = Qt.UserRole + 1
TYPE_ROLE = Qt.UserRole + 2
def make_generic(text, id):
item = QStandardItem(text)
item.setData(id, ID_ROLE)
return item
def make_wad_item(data, flags=None, item=None):
if not item:
item = make_generic(data.name, data.id)
item.setText(data.display_name)
if flags:
item.setFlags(flags)
item.setData(data.id, ID_ROLE)
item.setData(data.model_type, TYPE_ROLE)
return item
def make_iwad_item(data):
return make_generic(data.name, data.id)
def make_source_port_item(data):
return make_generic(data.name, data.id)
def make_category_item(data, item=None):
if not item:
item = make_generic(data.name, data.id)
item.setData(data.id, ID_ROLE)
item.setText(data.display_name)
item.setData(data.model_type, TYPE_ROLE)
nameItemFont = item.font()
nameItemFont.setBold(True)
item.setFont(nameItemFont)
return item
def make_pending_item(data, item=None):
return make_generic('loading...', data.id)
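# Illustrative usage (editor's addition, not part of the original module):
# reading the custom roles back from an item built above, where ``wad`` is a
# hypothetical model object:
#
#     item = make_wad_item(wad)
#     wad_id = item.data(ID_ROLE)
#     model_type = item.data(TYPE_ROLE)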
| 26.463415 | 47 | 0.709677 |
299abb5fdb707467177fece08376ebe8c95b1751 | 8,588 | py | Python | tools/profiling/microbenchmarks/bm_diff.py | soramitsu/libp2p-grpc | 83a50ee39b86617612b55ac79f843d55c03b5633 | [
"BSD-3-Clause"
] | null | null | null | tools/profiling/microbenchmarks/bm_diff.py | soramitsu/libp2p-grpc | 83a50ee39b86617612b55ac79f843d55c03b5633 | [
"BSD-3-Clause"
] | null | null | null | tools/profiling/microbenchmarks/bm_diff.py | soramitsu/libp2p-grpc | 83a50ee39b86617612b55ac79f843d55c03b5633 | [
"BSD-3-Clause"
] | 1 | 2020-11-04T04:19:45.000Z | 2020-11-04T04:19:45.000Z |
#!/usr/bin/env python2.7
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import json
import bm_json
import tabulate
import argparse
from scipy import stats
import subprocess
import multiprocessing
import collections
import pipes
import os
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import comment_on_pr
import jobset
import itertools
import speedup
import random
import shutil
import errno
_INTERESTING = (
'cpu_time',
'real_time',
'locks_per_iteration',
'allocs_per_iteration',
'writes_per_iteration',
'atm_cas_per_iteration',
'atm_add_per_iteration',
'cli_transport_stalls_per_iteration',
'cli_stream_stalls_per_iteration',
'svr_transport_stalls_per_iteration',
  'svr_stream_stalls_per_iteration',
'nows_per_iteration',
)
def changed_ratio(n, o):
if float(o) <= .0001: o = 0
if float(n) <= .0001: n = 0
if o == 0 and n == 0: return 0
if o == 0: return 100
return (float(n)-float(o))/float(o)
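# For example, changed_ratio(120, 100) returns 0.2 (a 20% increase); values at
# or below 1e-4 are clamped to zero so that noise around zero cannot blow up
# the ratio.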
def median(ary):
ary = sorted(ary)
n = len(ary)
if n%2 == 0:
    return (ary[n/2 - 1] + ary[n/2]) / 2.0
else:
return ary[n/2]
def min_change(pct):
return lambda n, o: abs(changed_ratio(n,o)) > pct/100.0
_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
'bm_fullstack_streaming_ping_pong',
'bm_fullstack_streaming_pump',
'bm_closure',
'bm_cq',
'bm_call_create',
'bm_error',
'bm_chttp2_hpack',
'bm_chttp2_transport',
'bm_pollset',
'bm_metadata',
'bm_fullstack_trickle']
argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
argp.add_argument('-t', '--track',
choices=sorted(_INTERESTING),
nargs='+',
default=sorted(_INTERESTING),
help='Which metrics to track')
argp.add_argument('-b', '--benchmarks', nargs='+', choices=_AVAILABLE_BENCHMARK_TESTS, default=['bm_cq'])
argp.add_argument('-d', '--diff_base', type=str)
argp.add_argument('-r', '--repetitions', type=int, default=1)
argp.add_argument('-l', '--loops', type=int, default=20)
argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
args = argp.parse_args()
assert args.diff_base
def avg(lst):
sum = 0.0
n = 0.0
for el in lst:
sum += el
n += 1
return sum / n
def make_cmd(cfg):
return ['make'] + args.benchmarks + [
'CONFIG=%s' % cfg, '-j', '%d' % args.jobs]
def build(dest):
shutil.rmtree('bm_diff_%s' % dest, ignore_errors=True)
subprocess.check_call(['git', 'submodule', 'update'])
try:
subprocess.check_call(make_cmd('opt'))
subprocess.check_call(make_cmd('counters'))
except subprocess.CalledProcessError, e:
subprocess.check_call(['make', 'clean'])
subprocess.check_call(make_cmd('opt'))
subprocess.check_call(make_cmd('counters'))
os.rename('bins', 'bm_diff_%s' % dest)
def collect1(bm, cfg, ver, idx):
cmd = ['bm_diff_%s/%s/%s' % (ver, cfg, bm),
'--benchmark_out=%s.%s.%s.%d.json' % (bm, cfg, ver, idx),
'--benchmark_out_format=json',
'--benchmark_repetitions=%d' % (args.repetitions)
]
return jobset.JobSpec(cmd, shortname='%s %s %s %d/%d' % (bm, cfg, ver, idx+1, args.loops),
verbose_success=True, timeout_seconds=None)
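# Each benchmark binary is run `loops` times against both the working tree
# ('new') and the diff base ('old'), in both the opt and counters configs; the
# job list is shuffled further below so the two builds are interleaved rather
# than measured back to back.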
build('new')
where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
subprocess.check_call(['git', 'checkout', args.diff_base])
try:
build('old')
finally:
subprocess.check_call(['git', 'checkout', where_am_i])
subprocess.check_call(['git', 'submodule', 'update'])
jobs = []
for loop in range(0, args.loops):
jobs.extend(x for x in itertools.chain(
(collect1(bm, 'opt', 'new', loop) for bm in args.benchmarks),
(collect1(bm, 'counters', 'new', loop) for bm in args.benchmarks),
(collect1(bm, 'opt', 'old', loop) for bm in args.benchmarks),
(collect1(bm, 'counters', 'old', loop) for bm in args.benchmarks),
))
random.shuffle(jobs, random.SystemRandom().random)
jobset.run(jobs, maxjobs=args.jobs)
class Benchmark:
def __init__(self):
self.samples = {
True: collections.defaultdict(list),
False: collections.defaultdict(list)
}
self.final = {}
def add_sample(self, data, new):
for f in args.track:
if f in data:
self.samples[new][f].append(float(data[f]))
def process(self):
for f in sorted(args.track):
new = self.samples[True][f]
old = self.samples[False][f]
if not new or not old: continue
mdn_diff = abs(median(new) - median(old))
print '%s: new=%r old=%r mdn_diff=%r' % (f, new, old, mdn_diff)
s = speedup.speedup(new, old)
if abs(s) > 3 and mdn_diff > 0.5:
self.final[f] = '%+d%%' % s
return self.final.keys()
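  # A metric is only flagged when the speedup estimate exceeds 3 (rendered as
  # a percentage below) and the medians differ by more than 0.5 in absolute
  # terms; smaller differences are treated as noise.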
def skip(self):
return not self.final
def row(self, flds):
return [self.final[f] if f in self.final else '' for f in flds]
def eintr_be_gone(fn):
"""Run fn until it doesn't stop because of EINTR"""
while True:
try:
return fn()
except IOError, e:
if e.errno != errno.EINTR:
raise
def read_json(filename):
try:
with open(filename) as f: return json.loads(f.read())
except ValueError, e:
return None
def finalize():
benchmarks = collections.defaultdict(Benchmark)
for bm in args.benchmarks:
for loop in range(0, args.loops):
js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
if js_new_ctr:
for row in bm_json.expand_json(js_new_ctr, js_new_opt):
print row
name = row['cpp_name']
if name.endswith('_mean') or name.endswith('_stddev'): continue
benchmarks[name].add_sample(row, True)
if js_old_ctr:
for row in bm_json.expand_json(js_old_ctr, js_old_opt):
print row
name = row['cpp_name']
if name.endswith('_mean') or name.endswith('_stddev'): continue
benchmarks[name].add_sample(row, False)
really_interesting = set()
for name, bm in benchmarks.items():
print name
really_interesting.update(bm.process())
fields = [f for f in args.track if f in really_interesting]
headers = ['Benchmark'] + fields
rows = []
for name in sorted(benchmarks.keys()):
if benchmarks[name].skip(): continue
rows.append([name] + benchmarks[name].row(fields))
if rows:
text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
else:
text = 'No significant performance differences'
print text
comment_on_pr.comment_on_pr('```\n%s\n```' % text)
eintr_be_gone(finalize)
| 33.030769 | 105 | 0.651956 |
b2e5c0d3b26aa0ac570a56f4b3cee2057991d17a | 311 | py | Python | catkin_ws/src/o2as_routines/setup.py | DevwratJoshi/ur-o2as | 265249c27908a79a301014168394db0c0dc2204c | [
"MIT"
] | null | null | null | catkin_ws/src/o2as_routines/setup.py | DevwratJoshi/ur-o2as | 265249c27908a79a301014168394db0c0dc2204c | [
"MIT"
] | null | null | null | catkin_ws/src/o2as_routines/setup.py | DevwratJoshi/ur-o2as | 265249c27908a79a301014168394db0c0dc2204c | [
"MIT"
] | null | null | null | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['o2as_routines'],
package_dir={'': 'src'})
setup(**setup_args)
| 25.916667 | 61 | 0.768489 |
f91150abbd4c095f8dae42fcc302222a3ef1e2c9 | 28,233 | py | Python | android/.buildozer/android/platform/build/build/python-installs/youtb_dc/kivy/core/window/window_sdl2.py | mw3tv123/youtube-dc | 84cfdac4b7548285084f453f1716eee83b9288aa | [
"MIT"
] | 1 | 2019-04-26T19:07:48.000Z | 2019-04-26T19:07:48.000Z | kivy/core/window/window_sdl2.py | basharbme/kivy | 88d456d56b872feeb1b3d7d98bccbfea4137c338 | [
"MIT"
] | null | null | null | kivy/core/window/window_sdl2.py | basharbme/kivy | 88d456d56b872feeb1b3d7d98bccbfea4137c338 | [
"MIT"
] | null | null | null | # found a way to include it more easily.
'''
SDL2 Window
===========
Windowing provider directly based on our own wrapped version of SDL.
TODO:
- fix keys
- support scrolling
- clean code
- manage correctly all sdl events
'''
__all__ = ('WindowSDL2', )
from os.path import join
import sys
from kivy import kivy_data_dir
from kivy.logger import Logger
from kivy.base import EventLoop, ExceptionManager, stopTouchApp
from kivy.clock import Clock
from kivy.config import Config
from kivy.core.window import WindowBase
from kivy.core.window._window_sdl2 import _WindowSDL2Storage
from kivy.input.provider import MotionEventProvider
from kivy.input.motionevent import MotionEvent
from kivy.resources import resource_find
from kivy.utils import platform, deprecated
from kivy.compat import unichr
from collections import deque
KMOD_LCTRL = 64
KMOD_RCTRL = 128
KMOD_RSHIFT = 2
KMOD_LSHIFT = 1
KMOD_RALT = 512
KMOD_LALT = 256
KMOD_LMETA = 1024
KMOD_RMETA = 2048
SDLK_SHIFTL = 1073742049
SDLK_SHIFTR = 1073742053
SDLK_LCTRL = 1073742048
SDLK_RCTRL = 1073742052
SDLK_LALT = 1073742050
SDLK_RALT = 1073742054
SDLK_LEFT = 1073741904
SDLK_RIGHT = 1073741903
SDLK_UP = 1073741906
SDLK_DOWN = 1073741905
SDLK_HOME = 1073741898
SDLK_END = 1073741901
SDLK_PAGEUP = 1073741899
SDLK_PAGEDOWN = 1073741902
SDLK_SUPER = 1073742051
SDLK_CAPS = 1073741881
SDLK_INSERT = 1073741897
SDLK_KEYPADNUM = 1073741907
SDLK_KP_DEVIDE = 1073741908
SDLK_KP_MULTIPLY = 1073741909
SDLK_KP_MINUS = 1073741910
SDLK_KP_PLUS = 1073741911
SDLK_KP_ENTER = 1073741912
SDLK_KP_1 = 1073741913
SDLK_KP_2 = 1073741914
SDLK_KP_3 = 1073741915
SDLK_KP_4 = 1073741916
SDLK_KP_5 = 1073741917
SDLK_KP_6 = 1073741918
SDLK_KP_7 = 1073741919
SDLK_KP_8 = 1073741920
SDLK_KP_9 = 1073741921
SDLK_KP_0 = 1073741922
SDLK_KP_DOT = 1073741923
SDLK_F1 = 1073741882
SDLK_F2 = 1073741883
SDLK_F3 = 1073741884
SDLK_F4 = 1073741885
SDLK_F5 = 1073741886
SDLK_F6 = 1073741887
SDLK_F7 = 1073741888
SDLK_F8 = 1073741889
SDLK_F9 = 1073741890
SDLK_F10 = 1073741891
SDLK_F11 = 1073741892
SDLK_F12 = 1073741893
SDLK_F13 = 1073741894
SDLK_F14 = 1073741895
SDLK_F15 = 1073741896
class SDL2MotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.profile = ('pos', )
self.sx, self.sy = args
win = EventLoop.window
super(SDL2MotionEvent, self).depack(args)
class SDL2MotionEventProvider(MotionEventProvider):
win = None
q = deque()
touchmap = {}
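    # `q` is filled by the window's main loop with raw (action, fid, x, y)
    # touch tuples; update() drains it and turns each entry into a Kivy
    # MotionEvent, tracked per finger id in `touchmap`.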
def update(self, dispatch_fn):
touchmap = self.touchmap
while True:
try:
value = self.q.pop()
except IndexError:
return
action, fid, x, y = value
y = 1 - y
if fid not in touchmap:
touchmap[fid] = me = SDL2MotionEvent('sdl', fid, (x, y))
else:
me = touchmap[fid]
me.move((x, y))
if action == 'fingerdown':
dispatch_fn('begin', me)
elif action == 'fingerup':
me.update_time_end()
dispatch_fn('end', me)
del touchmap[fid]
else:
dispatch_fn('update', me)
class WindowSDL(WindowBase):
_do_resize_ev = None
def __init__(self, **kwargs):
self._pause_loop = False
self._win = _WindowSDL2Storage()
super(WindowSDL, self).__init__()
self._mouse_x = self._mouse_y = -1
self._meta_keys = (KMOD_LCTRL, KMOD_RCTRL, KMOD_RSHIFT,
KMOD_LSHIFT, KMOD_RALT, KMOD_LALT, KMOD_LMETA,
KMOD_RMETA)
self.command_keys = {
27: 'escape',
9: 'tab',
8: 'backspace',
13: 'enter',
127: 'del',
271: 'enter',
273: 'up',
274: 'down',
275: 'right',
276: 'left',
278: 'home',
279: 'end',
280: 'pgup',
281: 'pgdown'}
self._mouse_buttons_down = set()
self.key_map = {SDLK_LEFT: 276, SDLK_RIGHT: 275, SDLK_UP: 273,
SDLK_DOWN: 274, SDLK_HOME: 278, SDLK_END: 279,
SDLK_PAGEDOWN: 281, SDLK_PAGEUP: 280, SDLK_SHIFTR: 303,
SDLK_SHIFTL: 304, SDLK_SUPER: 309, SDLK_LCTRL: 305,
SDLK_RCTRL: 306, SDLK_LALT: 308, SDLK_RALT: 307,
SDLK_CAPS: 301, SDLK_INSERT: 277, SDLK_F1: 282,
SDLK_F2: 283, SDLK_F3: 284, SDLK_F4: 285, SDLK_F5: 286,
SDLK_F6: 287, SDLK_F7: 288, SDLK_F8: 289, SDLK_F9: 290,
SDLK_F10: 291, SDLK_F11: 292, SDLK_F12: 293,
SDLK_F13: 294, SDLK_F14: 295, SDLK_F15: 296,
SDLK_KEYPADNUM: 300, SDLK_KP_DEVIDE: 267,
SDLK_KP_MULTIPLY: 268, SDLK_KP_MINUS: 269,
SDLK_KP_PLUS: 270, SDLK_KP_ENTER: 271,
SDLK_KP_DOT: 266, SDLK_KP_0: 256, SDLK_KP_1: 257,
SDLK_KP_2: 258, SDLK_KP_3: 259, SDLK_KP_4: 260,
SDLK_KP_5: 261, SDLK_KP_6: 262, SDLK_KP_7: 263,
SDLK_KP_8: 264, SDLK_KP_9: 265}
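        # The map above translates SDL2 keysyms (SDLK_*) into the legacy
        # pygame-style keycodes that Kivy keyboard events have historically
        # exposed.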
if platform == 'ios':
            # XXX the iOS keyboard sends the delete keycode when backspace
            # is hit; remap it to backspace.
self.key_map[127] = 8
elif platform == 'android':
# map android back button to escape
self.key_map[1073742094] = 27
self.bind(minimum_width=self._set_minimum_size,
minimum_height=self._set_minimum_size)
self.bind(allow_screensaver=self._set_allow_screensaver)
def get_window_info(self):
return self._win.get_window_info()
def _set_minimum_size(self, *args):
minimum_width = self.minimum_width
minimum_height = self.minimum_height
if minimum_width and minimum_height:
self._win.set_minimum_size(minimum_width, minimum_height)
elif minimum_width or minimum_height:
Logger.warning(
'Both Window.minimum_width and Window.minimum_height must be '
'bigger than 0 for the size restriction to take effect.')
def _set_allow_screensaver(self, *args):
self._win.set_allow_screensaver(self.allow_screensaver)
def _event_filter(self, action, *largs):
from kivy.app import App
if action == 'app_terminating':
EventLoop.quit = True
elif action == 'app_lowmemory':
self.dispatch('on_memorywarning')
elif action == 'app_willenterbackground':
from kivy.base import stopTouchApp
app = App.get_running_app()
if not app:
Logger.info('WindowSDL: No running App found, exit.')
stopTouchApp()
return 0
if not app.dispatch('on_pause'):
Logger.info(
'WindowSDL: App doesn\'t support pause mode, stop.')
stopTouchApp()
return 0
self._pause_loop = True
elif action == 'app_didenterforeground':
            # on iOS, the 'did enter foreground' event is sent at the start of
            # the application; in our case, we only want it when the app is
            # resumed
if self._pause_loop:
self._pause_loop = False
app = App.get_running_app()
app.dispatch('on_resume')
elif action == 'windowresized':
self._size = largs
self._win.resize_window(*self._size)
# Force kivy to render the frame now, so that the canvas is drawn.
EventLoop.idle()
return 0
def create_window(self, *largs):
if self._fake_fullscreen:
if not self.borderless:
self.fullscreen = self._fake_fullscreen = False
elif not self.fullscreen or self.fullscreen == 'auto':
self.borderless = self._fake_fullscreen = False
if self.fullscreen == 'fake':
self.borderless = self._fake_fullscreen = True
Logger.warning("The 'fake' fullscreen option has been "
"deprecated, use Window.borderless or the "
"borderless Config option instead.")
if not self.initialized:
if self.position == 'auto':
pos = None, None
elif self.position == 'custom':
pos = self.left, self.top
# ensure we have an event filter
self._win.set_event_filter(self._event_filter)
# setup window
w, h = self.system_size
resizable = Config.getboolean('graphics', 'resizable')
state = (Config.get('graphics', 'window_state')
if self._is_desktop else None)
self.system_size = _size = self._win.setup_window(
pos[0], pos[1], w, h, self.borderless,
self.fullscreen, resizable, state)
# calculate density
sz = self._win._get_gl_size()[0]
self._density = density = sz / _size[0]
if self._is_desktop and self.size[0] != _size[0]:
self.dpi = density * 96.
            # never leave pos as None; applications observing w.center expect
            # position events to be fired.
self._pos = (0, 0)
self._set_minimum_size()
self._set_allow_screensaver()
if state == 'hidden':
self._focus = False
else:
w, h = self.system_size
self._win.resize_window(w, h)
self._win.set_border_state(self.borderless)
self._win.set_fullscreen_mode(self.fullscreen)
super(WindowSDL, self).create_window()
# set mouse visibility
self._set_cursor_state(self.show_cursor)
if self.initialized:
return
# auto add input provider
Logger.info('Window: auto add sdl2 input provider')
from kivy.base import EventLoop
SDL2MotionEventProvider.win = self
EventLoop.add_input_provider(SDL2MotionEventProvider('sdl', ''))
# set window icon before calling set_mode
try:
filename_icon = self.icon or Config.get('kivy', 'window_icon')
if filename_icon == '':
logo_size = 32
if platform == 'macosx':
logo_size = 512
elif platform == 'win':
logo_size = 64
filename_icon = 'kivy-icon-{}.png'.format(logo_size)
filename_icon = resource_find(
join(kivy_data_dir, 'logo', filename_icon))
self.set_icon(filename_icon)
except:
Logger.exception('Window: cannot set icon')
def close(self):
self._win.teardown_window()
super(WindowSDL, self).close()
self.initialized = False
def maximize(self):
if self._is_desktop:
self._win.maximize_window()
else:
Logger.warning('Window: maximize() is used only on desktop OSes.')
def minimize(self):
if self._is_desktop:
self._win.minimize_window()
else:
Logger.warning('Window: minimize() is used only on desktop OSes.')
def restore(self):
if self._is_desktop:
self._win.restore_window()
else:
Logger.warning('Window: restore() is used only on desktop OSes.')
def hide(self):
if self._is_desktop:
self._win.hide_window()
else:
Logger.warning('Window: hide() is used only on desktop OSes.')
def show(self):
if self._is_desktop:
self._win.show_window()
else:
Logger.warning('Window: show() is used only on desktop OSes.')
def raise_window(self):
if self._is_desktop:
self._win.raise_window()
else:
            Logger.warning('Window: raise_window() is used only on desktop '
                           'OSes.')
@deprecated
def toggle_fullscreen(self):
if self.fullscreen in (True, 'auto'):
self.fullscreen = False
else:
self.fullscreen = 'auto'
def set_title(self, title):
self._win.set_window_title(title)
def set_icon(self, filename):
self._win.set_window_icon(str(filename))
def screenshot(self, *largs, **kwargs):
filename = super(WindowSDL, self).screenshot(*largs, **kwargs)
if filename is None:
return
from kivy.graphics.opengl import glReadPixels, GL_RGB, GL_UNSIGNED_BYTE
width, height = self.size
data = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)
self._win.save_bytes_in_png(filename, data, width, height)
Logger.debug('Window: Screenshot saved at <%s>' % filename)
return filename
def flip(self):
self._win.flip()
super(WindowSDL, self).flip()
def set_system_cursor(self, cursor_name):
result = self._win.set_system_cursor(cursor_name)
return result
def _get_window_pos(self):
return self._win.get_window_pos()
def _set_window_pos(self, x, y):
self._win.set_window_pos(x, y)
# Transparent Window background
def _is_shaped(self):
return self._win.is_window_shaped()
def _set_shape(self, shape_image, mode='default',
cutoff=False, color_key=None):
modes = ('default', 'binalpha', 'reversebinalpha', 'colorkey')
color_key = color_key or (0, 0, 0, 1)
if mode not in modes:
Logger.warning(
'Window: shape mode can be only '
'{}'.format(', '.join(modes))
)
return
        if not isinstance(color_key, (tuple, list)):
            Logger.warning(
                'Window: Shape color_key must be a tuple or list'
            )
            return
        if len(color_key) not in (3, 4):
            Logger.warning(
                'Window: Shape color_key must have 3 or 4 components'
            )
            return
        if len(color_key) == 3:
            color_key = (color_key[0], color_key[1], color_key[2], 1)
color_key = (
color_key[0] * 255,
color_key[1] * 255,
color_key[2] * 255,
color_key[3] * 255
)
assert cutoff in (1, 0)
shape_image = shape_image or Config.get('kivy', 'window_shape')
shape_image = resource_find(shape_image) or shape_image
self._win.set_shape(shape_image, mode, cutoff, color_key)
def _get_shaped_mode(self):
return self._win.get_shaped_mode()
def _set_shaped_mode(self, value):
self._set_shape(
shape_image=self.shape_image,
mode=value, cutoff=self.shape_cutoff,
color_key=self.shape_color_key
)
return self._win.get_shaped_mode()
# twb end
def _set_cursor_state(self, value):
self._win._set_cursor_state(value)
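    # SDL reports mouse coordinates from the top-left corner in window units;
    # the helper below updates `mouse_pos` with density-scaled, bottom-left
    # origin coordinates while still returning top-left window coordinates for
    # the event dispatching code.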
def _fix_mouse_pos(self, x, y):
y -= 1
self.mouse_pos = (x * self._density,
(self.system_size[1] - y) * self._density)
return x, y
def _mainloop(self):
EventLoop.idle()
        # for android/iOS, we don't want to process any event or run our main
        # loop while the pause is going on. This loop waits for any event not
        # handled by the event filter and removes it from the queue.
        # Nothing happens during the pause on iOS, except gyroscope values
        # sent over the joystick interface, so it's safe.
while self._pause_loop:
self._win.wait_event()
if not self._pause_loop:
break
event = self._win.poll()
if event is None:
continue
            # dropfile can be sent while the app is still in the pause loop,
            # so we need to dispatch it here
action, args = event[0], event[1:]
if action == 'dropfile':
dropfile = args
self.dispatch('on_dropfile', dropfile[0])
while True:
event = self._win.poll()
if event is False:
break
if event is None:
continue
action, args = event[0], event[1:]
if action == 'quit':
if self.dispatch('on_request_close'):
continue
EventLoop.quit = True
break
elif action in ('fingermotion', 'fingerdown', 'fingerup'):
# for finger, pass the raw event to SDL motion event provider
# XXX this is problematic. On OSX, it generates touches with 0,
                # 0 coordinates, at the same time as the mouse. But it works.
# We have a conflict of using either the mouse or the finger.
# Right now, we have no mechanism that we could use to know
# which is the preferred one for the application.
if platform in ('ios', 'android'):
SDL2MotionEventProvider.q.appendleft(event)
pass
elif action == 'mousemotion':
x, y = args
x, y = self._fix_mouse_pos(x, y)
self._mouse_x = x
self._mouse_y = y
                # don't dispatch motion if no buttons are pressed
if len(self._mouse_buttons_down) == 0:
continue
self._mouse_meta = self.modifiers
self.dispatch('on_mouse_move', x, y, self.modifiers)
elif action in ('mousebuttondown', 'mousebuttonup'):
x, y, button = args
x, y = self._fix_mouse_pos(x, y)
btn = 'left'
if button == 3:
btn = 'right'
elif button == 2:
btn = 'middle'
eventname = 'on_mouse_down'
self._mouse_buttons_down.add(button)
if action == 'mousebuttonup':
eventname = 'on_mouse_up'
self._mouse_buttons_down.remove(button)
self._mouse_x = x
self._mouse_y = y
self.dispatch(eventname, x, y, btn, self.modifiers)
elif action.startswith('mousewheel'):
self._update_modifiers()
x, y, button = args
btn = 'scrolldown'
if action.endswith('up'):
btn = 'scrollup'
elif action.endswith('right'):
btn = 'scrollright'
elif action.endswith('left'):
btn = 'scrollleft'
self._mouse_meta = self.modifiers
self._mouse_btn = btn
# times = x if y == 0 else y
# times = min(abs(times), 100)
# for k in range(times):
self._mouse_down = True
self.dispatch('on_mouse_down',
self._mouse_x, self._mouse_y, btn, self.modifiers)
self._mouse_down = False
self.dispatch('on_mouse_up',
self._mouse_x, self._mouse_y, btn, self.modifiers)
elif action == 'dropfile':
dropfile = args
self.dispatch('on_dropfile', dropfile[0])
# video resize
elif action == 'windowresized':
self._size = self._win.window_size
# don't use trigger here, we want to delay the resize event
ev = self._do_resize_ev
if ev is None:
ev = Clock.schedule_once(self._do_resize, .1)
self._do_resize_ev = ev
else:
ev()
elif action == 'windowrestored':
self.dispatch('on_restore')
self.canvas.ask_update()
elif action == 'windowexposed':
self.canvas.ask_update()
elif action == 'windowminimized':
self.dispatch('on_minimize')
if Config.getboolean('kivy', 'pause_on_minimize'):
self.do_pause()
elif action == 'windowmaximized':
self.dispatch('on_maximize')
elif action == 'windowhidden':
self.dispatch('on_hide')
elif action == 'windowshown':
self.dispatch('on_show')
elif action == 'windowfocusgained':
self._focus = True
elif action == 'windowfocuslost':
self._focus = False
elif action == 'windowenter':
self.dispatch('on_cursor_enter')
elif action == 'windowleave':
self.dispatch('on_cursor_leave')
elif action == 'joyaxismotion':
stickid, axisid, value = args
self.dispatch('on_joy_axis', stickid, axisid, value)
elif action == 'joyhatmotion':
stickid, hatid, value = args
self.dispatch('on_joy_hat', stickid, hatid, value)
elif action == 'joyballmotion':
stickid, ballid, xrel, yrel = args
self.dispatch('on_joy_ball', stickid, ballid, xrel, yrel)
elif action == 'joybuttondown':
stickid, buttonid = args
self.dispatch('on_joy_button_down', stickid, buttonid)
elif action == 'joybuttonup':
stickid, buttonid = args
self.dispatch('on_joy_button_up', stickid, buttonid)
elif action in ('keydown', 'keyup'):
mod, key, scancode, kstr = args
try:
key = self.key_map[key]
except KeyError:
pass
if action == 'keydown':
self._update_modifiers(mod, key)
else:
# ignore the key, it has been released
self._update_modifiers(mod)
# if mod in self._meta_keys:
if (key not in self._modifiers and
key not in self.command_keys.keys()):
try:
kstr_chr = unichr(key)
try:
# On android, there is no 'encoding' attribute.
# On other platforms, if stdout is redirected,
# 'encoding' may be None
encoding = getattr(sys.stdout, 'encoding',
'utf8') or 'utf8'
kstr_chr.encode(encoding)
kstr = kstr_chr
except UnicodeError:
pass
except ValueError:
pass
# if 'shift' in self._modifiers and key\
# not in self.command_keys.keys():
# return
if action == 'keyup':
self.dispatch('on_key_up', key, scancode)
continue
                # don't dispatch further key events if the down event was
                # accepted
if self.dispatch('on_key_down', key,
scancode, kstr,
self.modifiers):
continue
self.dispatch('on_keyboard', key,
scancode, kstr,
self.modifiers)
elif action == 'textinput':
text = args[0]
self.dispatch('on_textinput', text)
elif action == 'textedit':
text = args[0]
self.dispatch('on_textedit', text)
# unhandled event !
else:
Logger.trace('WindowSDL: Unhandled event %s' % str(event))
def _do_resize(self, dt):
Logger.debug('Window: Resize window to %s' % str(self.size))
self._win.resize_window(*self._size)
self.dispatch('on_pre_resize', *self.size)
def do_pause(self):
# should go to app pause mode (desktop style)
from kivy.app import App
from kivy.base import stopTouchApp
app = App.get_running_app()
if not app:
Logger.info('WindowSDL: No running App found, exit.')
stopTouchApp()
return
if not app.dispatch('on_pause'):
Logger.info('WindowSDL: App doesn\'t support pause mode, stop.')
stopTouchApp()
return
# XXX FIXME wait for sdl resume
while True:
event = self._win.poll()
if event is False:
continue
if event is None:
continue
action, args = event[0], event[1:]
if action == 'quit':
EventLoop.quit = True
break
elif action == 'app_willenterforeground':
break
elif action == 'windowrestored':
break
app.dispatch('on_resume')
def mainloop(self):
        # don't know why, but pygame required a resize event for opengl
        # before the mainloop... window reinit?
# self.dispatch('on_resize', *self.size)
while not EventLoop.quit and EventLoop.status == 'started':
try:
self._mainloop()
except BaseException as inst:
# use exception manager first
r = ExceptionManager.handle_exception(inst)
if r == ExceptionManager.RAISE:
stopTouchApp()
raise
else:
pass
Logger.info("WindowSDL: exiting mainloop and closing.")
self.close()
#
# Pygame wrapper
#
def _update_modifiers(self, mods=None, key=None):
# Available mod, from dir(pygame)
# 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',
# 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',
# 'KMOD_MODE', 'KMOD_NONE'
if mods is None and key is None:
return
modifiers = set()
if mods is not None:
if mods & (KMOD_RSHIFT | KMOD_LSHIFT):
modifiers.add('shift')
if mods & (KMOD_RALT | KMOD_LALT):
modifiers.add('alt')
if mods & (KMOD_RCTRL | KMOD_LCTRL):
modifiers.add('ctrl')
if mods & (KMOD_RMETA | KMOD_LMETA):
modifiers.add('meta')
if key is not None:
if key in (KMOD_RSHIFT, KMOD_LSHIFT):
modifiers.add('shift')
if key in (KMOD_RALT, KMOD_LALT):
modifiers.add('alt')
if key in (KMOD_RCTRL, KMOD_LCTRL):
modifiers.add('ctrl')
if key in (KMOD_RMETA, KMOD_LMETA):
modifiers.add('meta')
self._modifiers = list(modifiers)
return
def request_keyboard(self, callback, target, input_type='text'):
self._sdl_keyboard = super(WindowSDL, self).\
request_keyboard(callback, target, input_type)
self._win.show_keyboard(self._system_keyboard, self.softinput_mode)
Clock.schedule_interval(self._check_keyboard_shown, 1 / 5.)
return self._sdl_keyboard
def release_keyboard(self, *largs):
super(WindowSDL, self).release_keyboard(*largs)
self._win.hide_keyboard()
self._sdl_keyboard = None
return True
def _check_keyboard_shown(self, dt):
if self._sdl_keyboard is None:
return False
if not self._win.is_keyboard_shown():
self._sdl_keyboard.release()
def map_key(self, original_key, new_key):
self.key_map[original_key] = new_key
def unmap_key(self, key):
if key in self.key_map:
del self.key_map[key]
def grab_mouse(self):
self._win.grab_mouse(True)
def ungrab_mouse(self):
self._win.grab_mouse(False)
| 34.941832 | 79 | 0.546276 |
d17c2e40c02914839537d117c74d35dc059bb123 | 1,698 | py | Python | test/transactions.py | bitseaman/bitcoin_tools | f1bbc32795854483d398a252399d13866b27bc26 | [
"BSD-3-Clause"
] | 248 | 2017-05-12T00:41:00.000Z | 2022-01-29T18:50:35.000Z | test/transactions.py | bitseaman/bitcoin_tools | f1bbc32795854483d398a252399d13866b27bc26 | [
"BSD-3-Clause"
] | 25 | 2017-10-21T12:05:18.000Z | 2021-01-18T19:59:49.000Z | test/transactions.py | bitseaman/bitcoin_tools | f1bbc32795854483d398a252399d13866b27bc26 | [
"BSD-3-Clause"
] | 75 | 2017-05-18T17:38:51.000Z | 2022-02-27T12:35:12.000Z | from bitcoin_tools.core.keys import serialize_pk, load_keys
from bitcoin_tools.core.transaction import TX
# BUILD TRANSACTIONS
prev_tx_ids = ["f0315ffc38709d70ad5647e22048358dd3745f3ce3874223c80a7c92fab0c8ba", # P2PK
"7767a9eb2c8adda3ffce86c06689007a903b6f7e78dbc049ef0dbaf9eeebe075", # P2PKH
"adcd6d269d5e0713fa9650099e9ab54ebf845a0d95f3740b44361bdb287959a7"] # P2MS
prev_out_index = [0, 0, 0]
btc_addrs = ["mgwpBW3g4diqasfxzWDgSi5fBrsFKmNdva", "mwryy9YdVezq2Wo1DukA5ADhrNemqCKTmy",
"n4kDrXT6BuvgjjitGxLr4wepmeATWwvBJy"]
value = [1000, 2000, 3000]
pks = []
sks = []
# Just for testing
fee_multiplier = 240
fee = 235 * fee_multiplier
value = 6083510 - fee
for addr in btc_addrs:
sk, pk = load_keys(addr)
sks.append(sk)
pks.append(pk)
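# Build one spending transaction for every (input type, output type) pair:
# P2PK, P2PKH and P2MS inputs (the multisig input is signed with two of the
# loaded keys) are each sent to a P2PK, a P2PKH and a 2-of-2 P2MS output.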
for i in range(3):
    if i == 0:
print "\n#############\n# FROM P2PK #\n#############"
sk = sks[i]
    elif i == 1:
print "##############\n# FROM P2PKH #\n##############"
sk = sks[i]
    elif i == 2:
print "#############\n# FROM P2MS #\n#############"
sk = sks[:i]
for j in range(3):
        if j == 0:
print "\nTO: P2PK\n"
dest = serialize_pk(pks[j])
        elif j == 1:
print "\nTO: P2PKH\n"
dest = btc_addrs[j]
        elif j == 2:
print "\nTO: P2MS\n"
dest = [2, serialize_pk(pks[0]), serialize_pk(pks[1])]
tx = TX.build_from_io(prev_tx_ids[i], prev_out_index[i], value, dest)
tx.sign(sk, 0)
print tx.serialize()
tx.display()
print "\n---------------------------------------------------------------------------------------------\n"
| 31.444444 | 109 | 0.551826 |
9dd92b160a485f5cdd251d491eaafc11e9f70610 | 43,518 | py | Python | purity_fb/purity_fb_1dot9/apis/arrays_api.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot9/apis/arrays_api.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot9/apis/arrays_api.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
    Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ArraysApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def list_arrays(self, **kwargs):
"""
List arrays
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_with_http_info(**kwargs)
else:
(data) = self.list_arrays_with_http_info(**kwargs)
return data
def list_arrays_with_http_info(self, **kwargs):
"""
List arrays
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_http_specific_performance(self, **kwargs):
"""
List instant or historical http specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_http_specific_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayHttpPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_http_specific_performance_with_http_info(**kwargs)
else:
(data) = self.list_arrays_http_specific_performance_with_http_info(**kwargs)
return data
def list_arrays_http_specific_performance_with_http_info(self, **kwargs):
"""
List instant or historical http specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_http_specific_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayHttpPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_http_specific_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays/http-specific-performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayHttpPerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_nfs_specific_performance(self, **kwargs):
"""
List instant or historical nfs specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_nfs_specific_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayNfsPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_nfs_specific_performance_with_http_info(**kwargs)
else:
(data) = self.list_arrays_nfs_specific_performance_with_http_info(**kwargs)
return data
def list_arrays_nfs_specific_performance_with_http_info(self, **kwargs):
"""
List instant or historical nfs specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_nfs_specific_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayNfsPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_nfs_specific_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays/nfs-specific-performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayNfsPerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_performance(self, **kwargs):
"""
List instant or historical array performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param str protocol: to sample performance of a certain protocol
:return: ArrayPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_performance_with_http_info(**kwargs)
else:
(data) = self.list_arrays_performance_with_http_info(**kwargs)
return data
def list_arrays_performance_with_http_info(self, **kwargs):
"""
List instant or historical array performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param str protocol: to sample performance of a certain protocol
:return: ArrayPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution', 'protocol']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'protocol' in params:
query_params.append(('protocol', params['protocol']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayPerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_performance_replication(self, **kwargs):
"""
List instant or historical array replication performance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_performance_replication(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param int start_time: Time to start sample in milliseconds since epoch.
:param str type: to sample space of either file systems, object store, or all
:return: ArrayPerformanceReplicationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_performance_replication_with_http_info(**kwargs)
else:
(data) = self.list_arrays_performance_replication_with_http_info(**kwargs)
return data
def list_arrays_performance_replication_with_http_info(self, **kwargs):
"""
List instant or historical array replication performance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_performance_replication_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param int start_time: Time to start sample in milliseconds since epoch.
:param str type: to sample space of either file systems, object store, or all
:return: ArrayPerformanceReplicationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['end_time', 'resolution', 'start_time', 'type']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_performance_replication" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'type' in params:
query_params.append(('type', params['type']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays/performance/replication', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayPerformanceReplicationResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_s3_specific_performance(self, **kwargs):
"""
List instant or historical object store specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_s3_specific_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayS3PerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_s3_specific_performance_with_http_info(**kwargs)
else:
(data) = self.list_arrays_s3_specific_performance_with_http_info(**kwargs)
return data
def list_arrays_s3_specific_performance_with_http_info(self, **kwargs):
"""
List instant or historical object store specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_s3_specific_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayS3PerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_s3_specific_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays/s3-specific-performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayS3PerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_space(self, **kwargs):
"""
List instant or historical array space
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_space(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param str type: to sample space of either file systems, object store, or all
:return: ArraySpaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_space_with_http_info(**kwargs)
else:
(data) = self.list_arrays_space_with_http_info(**kwargs)
return data
def list_arrays_space_with_http_info(self, **kwargs):
"""
List instant or historical array space
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_space_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param str type: to sample space of either file systems, object store, or all
:return: ArraySpaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution', 'type']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_space" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'type' in params:
query_params.append(('type', params['type']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays/space', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArraySpaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_clients_performance(self, **kwargs):
"""
List client performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_clients_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int limit: limit, should be >= 0
:return: ClientPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_clients_performance_with_http_info(**kwargs)
else:
(data) = self.list_clients_performance_with_http_info(**kwargs)
return data
def list_clients_performance_with_http_info(self, **kwargs):
"""
List client performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_clients_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name).
:param int limit: limit, should be >= 0
:return: ClientPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'sort', 'limit']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_clients_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays/clients/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ClientPerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_arrays(self, array_settings, **kwargs):
"""
Update arrays
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_arrays(array_settings, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PureArray array_settings: (required)
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_arrays_with_http_info(array_settings, **kwargs)
else:
(data) = self.update_arrays_with_http_info(array_settings, **kwargs)
return data
def update_arrays_with_http_info(self, array_settings, **kwargs):
"""
Update arrays
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_arrays_with_http_info(array_settings, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PureArray array_settings: (required)
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['array_settings']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_arrays" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'array_settings' is set
if ('array_settings' not in params) or (params['array_settings'] is None):
raise ValueError("Missing the required parameter `array_settings` when calling `update_arrays`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'array_settings' in params:
body_params = params['array_settings']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/arrays', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 42.91716 | 204 | 0.579967 |
6233546ab8e7817c313f7f2088ef8c4e88355ab8 | 68,271 | py | Python | fairseq/models/speech_to_text/modules/emformer.py | narfanar/fairseq | 7f5ec30b25d906b564cc0ee0e50d1d9193940f58 | ["MIT"] | 16,259 | 2018-05-02T02:31:30.000Z | 2022-03-31T21:50:23.000Z | fairseq/models/speech_to_text/modules/emformer.py | narfanar/fairseq | 7f5ec30b25d906b564cc0ee0e50d1d9193940f58 | ["MIT"] | 3,863 | 2018-05-02T13:42:39.000Z | 2022-03-31T19:03:32.000Z | fairseq/models/speech_to_text/modules/emformer.py | narfanar/fairseq | 7f5ec30b25d906b564cc0ee0e50d1d9193940f58 | ["MIT"] | 4,796 | 2018-05-02T07:55:51.000Z | 2022-03-31T14:46:45.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import re
from functools import partial
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq.models import (
FairseqEncoder,
)
from fairseq.models.speech_to_text.utils import (
NoOp,
lengths_to_padding_mask,
segments_to_sequence,
)
from fairseq.models.speech_to_text.utils import (
attention_suppression,
layer_norm_backward_hook,
)
from torch import Tensor, device as Device
from torch.ao.quantization.qconfig import (
default_dynamic_qconfig,
per_channel_dynamic_qconfig,
)
class RelativePositionEmbedding(nn.Module):
"""
Implementation according to https://arxiv.org/abs/1803.02155
"""
def __init__(self, head_dim, max_position, norm_init=True):
super().__init__()
self.head_dim = head_dim
self.max_position = max_position
self.embeddings = nn.Parameter(torch.Tensor(max_position * 2 + 1, head_dim))
if norm_init:
nn.init.xavier_normal_(self.embeddings)
else:
nn.init.xavier_uniform_(self.embeddings)
def forward(self, input: Tensor):
output = nn.functional.embedding(input.long(), self.embeddings)
return output
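# Sketch (illustrative, not part of the original module): with head_dim=4 and
# max_position=2 the embedding table has 2 * 2 + 1 = 5 rows, one per clamped
# relative distance in [-2, 2]. Callers shift distances into [0, 4] before the
# lookup, e.g.
#
#     rpe = RelativePositionEmbedding(head_dim=4, max_position=2)
#     distance = torch.tensor([[0, 1, 2], [-1, 0, 1]]) + 2  # shift to >= 0
#     out = rpe(distance)  # -> shape (2, 3, 4)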
class Fp32LayerNorm(nn.Module):
def __init__(
self,
input_dim,
clamp_grad=True,
max_grad_value=256,
eps=1e-5,
elementwise_affine=True,
):
super().__init__()
self.torch_module = torch.nn.LayerNorm(
input_dim, eps=eps, elementwise_affine=elementwise_affine
)
if clamp_grad:
hook = partial(layer_norm_backward_hook, clamp_value=max_grad_value)
self.torch_module.register_backward_hook(hook)
def forward(self, input):
output = torch.nn.functional.layer_norm(
input.float(),
self.torch_module.normalized_shape,
self.torch_module.weight.float()
if self.torch_module.weight is not None
else None,
self.torch_module.bias.float()
if self.torch_module.bias is not None
else None,
self.torch_module.eps,
).type_as(input)
return output
# ------------------------------------------------------------------------------
# PositionwiseFF
# ------------------------------------------------------------------------------
class PositionwiseFF(nn.Module):
"""
FFN layer in transformer.
Args:
input_dim: input embedding dimension
ffn_dim: FFN layer inner dimension
dropout_on_fc1: dropout for first linear layer
        dropout_on_fc2: dropout for second linear layer
activation_fn: activation function used after first linear layer. \
Only relu or gelu is supported.
"""
def __init__(
self, input_dim, ffn_dim, dropout_on_fc1, dropout_on_fc2, activation_fn
):
super(PositionwiseFF, self).__init__()
self.input_dim = input_dim
self.ffn_dim = ffn_dim
if activation_fn == "relu":
ac = nn.ReLU()
elif activation_fn == "gelu":
ac = nn.GELU()
else:
raise ValueError("Unsupported activation_fn = ({})".format(activation_fn))
# fc1 -> ac -> dropout -> fc2 -> dropout
self.module = nn.Sequential(
nn.Linear(input_dim, ffn_dim),
ac,
nn.Dropout(dropout_on_fc1),
nn.Linear(ffn_dim, input_dim),
nn.Dropout(dropout_on_fc2),
)
self.layer_norm = Fp32LayerNorm(input_dim)
def forward(self, input):
module_out = self.module(self.layer_norm(input))
output = module_out + input
return output
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.ao.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
# ------------------------------------------------------------------------------
# SummarizationLayer
# ------------------------------------------------------------------------------
class SummarizationLayer(nn.Module):
def __init__(self, method, segment_size, embedding_dim):
super(SummarizationLayer, self).__init__()
self.segment_size = segment_size
self.embedding_dim = embedding_dim
nonlin_match = re.match(r"nonlinear\((?P<act>[a-z]+),(?P<dim>[0-9]+)\)", method)
self.method = method
if method == "mean":
self.module = nn.AvgPool1d(
kernel_size=segment_size,
stride=segment_size,
ceil_mode=True,
)
elif method == "max":
self.module = nn.MaxPool1d(
kernel_size=segment_size,
stride=segment_size,
ceil_mode=True,
)
elif method == "linear":
self.module = nn.Linear(segment_size, 1)
elif nonlin_match:
nonlin_args = nonlin_match.groupdict()
act_type = nonlin_args["act"]
hid_dim = int(nonlin_args["dim"])
if act_type == "relu":
act = nn.ReLU()
elif act_type == "gelu":
act = nn.GELU()
else:
raise ValueError("Unsupported activation_fn = ({})".format(act_type))
self.module = nn.Sequential(
nn.Linear(segment_size, hid_dim),
act,
nn.Linear(hid_dim, 1),
)
else:
raise ValueError("Unsupported summarization method = ({})".format(method))
def forward(self, input):
# T, B, D -> B, D, T
input = input.permute(1, 2, 0)
if self.method == "mean" or self.method == "max":
output = self.module(input)
output = output.permute(2, 0, 1)
return output
full_seg_length = input.size(2) // self.segment_size * self.segment_size
if full_seg_length > 0:
# at least one seg is full
B = input.size(0)
D = input.size(1)
input_todo = (
input[:, :, :full_seg_length]
.contiguous()
.view(B, -1, self.segment_size)
)
output = self.module(input_todo)
output = output.view(B, D, -1)
else:
output = input.new_zeros(input.size(0), input.size(1), 0)
left = input.size(2) - full_seg_length
if left > 0:
# when last seg is not full, use zeros as last memory placeholder
zeros = input.new_zeros(input.size(0), input.size(1), 1)
output = torch.cat([output, zeros], dim=2)
output = output.permute(2, 0, 1)
return output
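    # Shape sketch (assumption, not in the original source): with method="mean",
    # segment_size=4 and an input of shape (T=10, B=2, D=8), the layer returns
    # ceil(10 / 4) = 3 summary vectors, i.e. a tensor of shape (3, 2, 8):
    #
    #     summarizer = SummarizationLayer("mean", segment_size=4, embedding_dim=8)
    #     summaries = summarizer(torch.randn(10, 2, 8))  # -> (3, 2, 8)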
# ------------------------------------------------------------------------------
# NoSegAugmentedMemoryMultiheadAttentionBmm
# ------------------------------------------------------------------------------
class NoSegAugmentedMemoryMultiheadAttentionBmm(nn.Module):
"""
Whole utterance augmented memory multihead attention using BMM.
    Different from the previous augmented memory multihead attention, where
    the utterance is chunked into segments, here we use an attention mask to
    achieve the same effect. The input embedding [right_context, utterance, summary]
    is a concatenation of right context, utterance and summary.
    The right context block is the concatenation of the right context for
    each segment: [right_context_0, right_context_1, ..., right_context_n].
    For example, if we have utterance = [v0, v1, v2, ..., v20], segment
    size 8, and right_context size 4, then the right context blocks =
[v8, v9, v10, v11, v16, v17, v18, v19, 0, 0, 0, 0], where v8, v9, v10,
    and v11 are the right context for the first segment, v16, v17, v18 and v19
    are the right context for the second segment, and 0, 0, 0 and 0 are the
    right context for the last segment.
    utterance corresponds to the input embedding sequence.
    summary is the concatenation of the averages of the segments: [summary_0,
    summary_1, ...].
    In augmented memory multihead attention, the query is [right_context,
    utterance, summary] and the key is [memory, right_context, utterance]. Different
    from AugmentedMemoryMultiheadAttentionBmm, memory here is passed from the
    previous attention layer. For the first attention layer, memory is the average
    of each segment.
    Memory is a concatenation of the memory from each segment in the previous
    attention layer. For example, if the current layer is i, then memory is
    [m_0, m_1, ..., m_n].
Each m_k is the output from seg_k in layer i-1.
args:
input_dim: input embedding dimension
num_heads: number of heads in multihead self-attention
dropout: attention dropout
        std_scale: if std_scale is not None, weak attention suppression is
            turned on. For std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std will be suppressed.
scaled_init: whether to use scaled init for linear weight
tanh_on_mem: whether to use tanh on memory output
use_mem: whether to use memory or not. When max_memory_size is 0, then
we don't have memory anymore.
layer_index: current self-attention layer index that is used in depth
initialization
max_relative_position: max relative position used in relative position
embedding
rpe_old_option: To be compatible with previous model. The previous model
was trained with attention += attention + rpe. The correct equation
should be attention = attention + rpe
"""
def __init__(
self,
input_dim,
num_heads,
dropout=0.0,
std_scale=None,
scaled_init=False,
tanh_on_mem=False,
use_mem=True,
mini_batches=False,
negative_inf="-inf",
layer_index=-1,
max_relative_position=0,
rpe_old_option=True,
):
if input_dim % num_heads:
raise ValueError(
"input_dim ({}) must be divisible by num_heads ({})".format(
input_dim, num_heads
)
)
super().__init__()
embed_dim = input_dim
self.e2h_kv = torch.nn.Linear(input_dim, 2 * input_dim, bias=True)
self.e2h_q = torch.nn.Linear(input_dim, input_dim, bias=True)
self.rpe_old_option = rpe_old_option
if max_relative_position > 0:
self.use_rpe = True
self.rpe_k = RelativePositionEmbedding(
head_dim=input_dim // num_heads,
max_position=max_relative_position,
)
self.rpe_v = RelativePositionEmbedding(
head_dim=input_dim // num_heads,
max_position=max_relative_position,
)
else:
self.use_rpe = False
self.rpe_k = None
self.rpe_v = None
if scaled_init:
if layer_index == -1:
gain = 1.0 / math.sqrt(2)
else:
# https://arxiv.org/abs/2005.09684 depthwise initialization
                # stabilizes the training greatly. Use depthwise initialization to
# replace incremental loss.
gain = 1.0 / math.sqrt(layer_index + 1)
torch.nn.init.xavier_uniform_(self.e2h_kv.weight, gain=gain)
torch.nn.init.xavier_uniform_(self.e2h_q.weight, gain=gain)
self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim ** -0.5
self.std_scale = std_scale
self.use_mem = use_mem
self.mini_batches = mini_batches
self.negative_inf = negative_inf
if tanh_on_mem:
self.squash_mem = torch.tanh
self.nonlinear_squash_mem = True
else:
self.squash_mem = NoOp()
self.nonlinear_squash_mem = False
def prepare_qkv(
self,
input: Tensor,
mems: Tensor,
lengths: Tensor,
summary_length: int,
lc_length: int,
):
# T: right_context length + utterance_length + summary_length
T, B, D = input.shape
mem_length = mems.size(0)
utterance_length = torch.max(lengths)
right_context_blocks_length = T - utterance_length - summary_length
rc_block = input[:right_context_blocks_length, :, :]
utterance_block = input[right_context_blocks_length : T - summary_length, :, :]
if B == 1:
padding_mask = None
else:
klengths = lengths + mem_length + right_context_blocks_length + lc_length
padding_mask = lengths_to_padding_mask(lengths=klengths)
mem_rc_input = torch.cat([mems, rc_block, utterance_block], dim=0)
# In training lc_length = 0
key_length = mem_rc_input.size(0) + lc_length
rc_input_sum = input
q = self.e2h_q(rc_input_sum)
kv = self.e2h_kv(mem_rc_input)
k, v = kv.chunk(chunks=2, dim=2)
result_qkv = (q, k, v)
input_shape = (T, B, D)
result_lengths_info = (
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
)
if padding_mask is not None:
assert padding_mask.size(0) == B
assert padding_mask.size(1) == key_length
return result_qkv, input_shape, result_lengths_info, padding_mask
def prepare_attention_weights(
self,
q: Tensor,
new_k: Tensor,
new_v: Tensor,
input_shape: Tuple[int, int, int],
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor]:
T, B, D = input_shape
q = (
q.contiguous().view(-1, B * self.num_heads, self.head_dim).transpose(0, 1)
* self.scaling
)
k = (
new_k.contiguous()
.view(-1, B * self.num_heads, self.head_dim)
.transpose(0, 1)
)
v = (
new_v.contiguous()
.view(-1, B * self.num_heads, self.head_dim)
.transpose(0, 1)
)
attention_weights = torch.bmm(q, k.transpose(1, 2))
if self.use_rpe and rpe is not None and self.rpe_v is not None:
r_k = self.rpe_k(rpe)
# [q, B*h, d] * [q, k, d] -> [B*h, q, k]
attention_weights_rpe = torch.matmul(
q.transpose(0, 1), r_k.transpose(1, 2)
).transpose(0, 1)
attention_weights = attention_weights + attention_weights_rpe
attention_weights_float = attention_weights.float()
return attention_weights, attention_weights_float, v
def prepare_attention_output(
self,
attention_weights: Tensor,
attention_weights_float: Tensor,
v: Tensor,
input_shape: Tuple[int, int, int],
key_length: int,
padding_mask: Optional[Tensor],
rpe: Optional[Tensor],
) -> Tensor:
T, B, D = input_shape
if padding_mask is not None:
attention_weights_float = attention_weights_float.view(
B, self.num_heads, T, key_length
)
attention_weights_float = attention_weights_float.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
)
attention_weights_float = attention_weights_float.view(
B * self.num_heads, T, key_length
)
if self.std_scale is not None:
attention_weights_float = attention_suppression(
attention_weights_float, self.std_scale
)
attention_weights_float = torch.nn.functional.softmax(
attention_weights_float, dim=-1
)
attention_weights = attention_weights_float.type_as(attention_weights)
attention_probs = torch.nn.functional.dropout(
attention_weights, p=self.dropout, training=self.training
)
        # bmm: [B*n_head, T, key_length] x [B*n_head, key_length, head_dim]
        # -> [B*n_head, T, head_dim]
attention = torch.bmm(attention_probs, v)
if self.use_rpe and rpe is not None and self.rpe_v is not None:
r_v = self.rpe_v(rpe)
attention_rpe = torch.matmul(
attention_probs.transpose(0, 1), r_v
).transpose(0, 1)
if self.rpe_old_option:
attention += attention + attention_rpe
else:
attention = attention + attention_rpe
assert list(attention.shape) == [B * self.num_heads, T, self.head_dim]
attention = attention.transpose(0, 1).contiguous().view(T, B, self.embed_dim)
rc_output_memory = self.out_proj(attention)
return rc_output_memory
@torch.jit.unused
def forward(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
attention_mask: Tensor,
pre_mems: Optional[Tensor] = None,
left_context_key: Optional[Tensor] = None,
left_context_val: Optional[Tensor] = None,
rpe: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in training.
args:
input: formed in the following way
                [right_context_0, right_context_1, ..., seg_0, seg_1,
..., summary_0, summary_1,..]
lengths: the length of query which is [seg_0, seg_1, ....]
mems: [mem_0, mem_1, ...].
attention_mask: attention mask for query = [right_context, query, summary]
                key = [mem, right_context, query]. This is only used for training.
"""
if self.use_mem:
mem_length = mems.size(0)
summary_length = mem_length + 1
if pre_mems is not None:
mems = torch.cat([pre_mems, mems], dim=0)
else:
mem_length = 0
summary_length = 0
# In training, lc_length = 0
if left_context_key is not None:
lc_length = left_context_key.size(0)
else:
lc_length = 0
results = self.prepare_qkv(
input=input,
mems=mems,
lengths=lengths,
summary_length=summary_length,
lc_length=lc_length,
)
result_qkv, input_shape, result_lengths_info, padding_mask = results
q, k, v = result_qkv
(
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
) = result_lengths_info
if left_context_key is not None:
# add the cache key and value
new_k = torch.cat(
[
k[: mem_length + right_context_blocks_length, :, :],
left_context_key,
k[-utterance_length:, :, :],
],
dim=0,
)
new_v = torch.cat(
[
v[: mem_length + right_context_blocks_length, :, :],
left_context_val,
v[-utterance_length:, :, :],
],
dim=0,
)
next_k = new_k[mem_length + right_context_blocks_length :, :, :]
next_v = new_v[mem_length + right_context_blocks_length :, :, :]
else:
new_k = k
new_v = v
next_k = None
next_v = None
attention_weights, attention_weights_float, v = self.prepare_attention_weights(
q=q,
new_k=new_k,
new_v=new_v,
input_shape=input_shape,
rpe=rpe,
)
# mask attention
attention_mask = attention_mask.unsqueeze(0)
attention_weights_float = attention_weights_float.masked_fill(
attention_mask, float(self.negative_inf)
)
rc_output_memory = self.prepare_attention_output(
attention_weights=attention_weights,
attention_weights_float=attention_weights_float,
v=v,
input_shape=input_shape,
key_length=key_length,
padding_mask=padding_mask,
rpe=rpe,
)
if self.use_mem:
# next_m length equals to summary length - 1
# last memory is ignored
if self.mini_batches:
next_m = rc_output_memory[-summary_length:]
else:
next_m = rc_output_memory[-summary_length:-1]
next_m = self.squash_mem(next_m)
# rc and output
rc_output = rc_output_memory[:-summary_length]
if not self.nonlinear_squash_mem:
next_m = torch.clamp(next_m, min=-10, max=10)
else:
next_m = mems
rc_output = rc_output_memory
return rc_output, next_m, next_k, next_v
@torch.jit.export
def forward_jit(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
left_context_key: Tensor,
left_context_val: Tensor,
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in decoding.
args:
input: formed in the following way
                [right_context_0, right_context_1, ..., seg_0, seg_1,
..., summary_0, summary_1,..]
lengths: the length of query which is [seg_0, seg_1, ....]
mems: [mem_0, mem_1, ...].
left_context_key: left_context for key part. This is only used for online
decoding. In training, this is empty tensor
left_context_val: left_context for value part. This is only used for online
decoding. In training, this is empty tensor
"""
lc_length = left_context_key.size(0)
# In decoding, summary_length = 1 or 0
if self.use_mem:
summary_length = 1
else:
summary_length = 0
results = self.prepare_qkv(
input=input,
mems=mems,
lengths=lengths,
summary_length=summary_length,
lc_length=lc_length,
)
result_qkv, input_shape, result_lengths_info, padding_mask = results
q, k, v = result_qkv
(
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
) = result_lengths_info
# add the cache key and value
new_k = torch.cat(
[
k[: mem_length + right_context_blocks_length, :, :],
left_context_key,
k[-utterance_length:, :, :],
],
dim=0,
)
new_v = torch.cat(
[
v[: mem_length + right_context_blocks_length, :, :],
left_context_val,
v[-utterance_length:, :, :],
],
dim=0,
)
next_k = new_k[mem_length + right_context_blocks_length :, :, :]
next_v = new_v[mem_length + right_context_blocks_length :, :, :]
attention_weights, attention_weights_float, v = self.prepare_attention_weights(
q=q,
new_k=new_k,
new_v=new_v,
input_shape=input_shape,
rpe=rpe,
)
# In online decoding, we don't have attention mask. But we still need
# to disable the attention from summary query to memory
attention_weights_float[:, -1, :mem_length] = float(self.negative_inf)
rc_output_memory = self.prepare_attention_output(
attention_weights=attention_weights,
attention_weights_float=attention_weights_float,
v=v,
input_shape=input_shape,
key_length=key_length,
padding_mask=padding_mask,
rpe=rpe,
)
# In decoding, summary length is 1
if self.use_mem:
next_m = rc_output_memory[-1:]
next_m = self.squash_mem(next_m)
# rc and output
rc_output = rc_output_memory[:-1]
if not self.nonlinear_squash_mem:
next_m = torch.clamp(next_m, min=-10, max=10)
else:
rc_output = rc_output_memory
# empty tensor as input mems
next_m = mems
return rc_output, next_m, next_k, next_v
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.ao.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
class NoSegAugmentedMemoryTransformer(nn.Module):
"""
Whole utterance augmented memory transformer.
    This is not a pyspeech nn layer. It is used as a module in a master layer where
    multiple transformers are used.
"""
def __init__(
self,
input_dim,
num_heads,
ffn_dim,
dropout_in_attn=0.0,
dropout_on_attn=None,
dropout_on_fc1=None,
dropout_on_fc2=None,
activation_fn="relu",
tanh_on_mem=False,
std_scale=None,
scaled_init=False,
segment_size=128,
use_mem=True,
mini_batches=False,
negative_inf="-inf",
layer_index=-1,
summarization_method="mean",
max_relative_position=0,
rpe_old_option=True,
):
super(NoSegAugmentedMemoryTransformer, self).__init__()
self.attention = NoSegAugmentedMemoryMultiheadAttentionBmm(
input_dim=input_dim,
num_heads=num_heads,
dropout=dropout_in_attn,
scaled_init=scaled_init,
tanh_on_mem=tanh_on_mem,
std_scale=std_scale,
use_mem=use_mem,
mini_batches=mini_batches,
negative_inf=negative_inf,
layer_index=layer_index,
max_relative_position=max_relative_position,
)
self.dropout = nn.Dropout(dropout_on_attn)
self.pos_ff = PositionwiseFF(
input_dim=input_dim,
ffn_dim=ffn_dim,
dropout_on_fc1=dropout_on_fc1,
dropout_on_fc2=dropout_on_fc2,
activation_fn=activation_fn,
)
self.layer_norm_pre = Fp32LayerNorm(input_dim)
self.layer_norm = Fp32LayerNorm(input_dim)
self.segment_size = segment_size
self.use_mem = use_mem
self.memory_op = SummarizationLayer(
summarization_method, segment_size, input_dim
)
def set_mini_batches(self, mini_batches):
self.attention.mini_batches = mini_batches
def gen_summary_queries(self, input):
sum_input = self.memory_op(input)
return sum_input
def pre_attention_ops(self, input, right_context_blocks):
rc_length = right_context_blocks.size(0)
input_length = input.size(0)
rc_and_input = torch.cat([right_context_blocks, input], dim=0)
residual_input = rc_and_input
rc_and_input = self.layer_norm_pre(rc_and_input)
query_input = rc_and_input[-input_length:, :, :]
return rc_length, input_length, residual_input, query_input, rc_and_input
def after_attention_ops(self, attention_output, residual_input):
output = self.dropout(attention_output)
output = output + residual_input
output = self.pos_ff(output)
output = self.layer_norm(output)
return output
@torch.jit.export
def forward_jit(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
left_context_key: Tensor,
left_context_val: Tensor,
right_context_blocks: Tensor,
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
results = self.pre_attention_ops(input, right_context_blocks)
rc_length, input_length, residual_input, query_input, rc_and_input = results
# In online decoding, the summary query size is always 1 or 0
if self.use_mem:
summary_query = self.gen_summary_queries(query_input)
summary_query = summary_query[0:1, :, :]
rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
else:
rc_qu_su = rc_and_input
rc_output, next_m, next_k, next_v = self.attention.forward_jit(
input=rc_qu_su,
lengths=lengths,
mems=mems,
left_context_key=left_context_key,
left_context_val=left_context_val,
rpe=rpe,
)
rc_output = self.after_attention_ops(rc_output, residual_input)
results = (
rc_output[-input_length:, :, :],
next_m,
rc_output[0:rc_length, :, :],
next_k,
next_v,
)
return results
@torch.jit.unused
def forward(
self,
input,
lengths,
mems,
right_context_blocks,
attention_mask,
pre_mems,
left_context_key,
left_context_val,
rpe,
):
results = self.pre_attention_ops(input, right_context_blocks)
rc_length, input_length, residual_input, query_input, rc_and_input = results
if self.use_mem:
summary_query = self.gen_summary_queries(query_input)
rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
else:
rc_qu_su = rc_and_input
rc_output, next_m, next_k, next_v = self.attention(
input=rc_qu_su,
lengths=lengths,
mems=mems,
attention_mask=attention_mask,
pre_mems=pre_mems,
left_context_key=left_context_key,
left_context_val=left_context_val,
rpe=rpe,
)
        # [TODO] Note memory did not go through pos_ff. What happens if we pass
# memory through the pos_ff as well?
rc_output = self.after_attention_ops(rc_output, residual_input)
results = (
rc_output[-input_length:, :, :],
next_m,
rc_output[0:rc_length, :, :],
next_k,
next_v,
)
return results
class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder):
"""
Whole utterance augmented memory transformer encoder layer. This is a master layer
where we can define multiple augmented memory transformers. There are two reasons
    to set up the master layer.
    1. We only need to define the attention mask once. All the layers in the master
       layer share the same mask.
    2. The pyspeech nn layer has a special input and output format. Defining one master
       layer makes it easier to pass memory between different layers inside the master layer.
args:
input_dim: input embedding dimension
num_heads: number of heads in multihead self-attention
ffn_dim: ffn dimension in FFN layer
num_layers: number of augmented memory transformer layers
dropout_in_attn: dropout used in multi-head self-attention
        dropout_on_attn: dropout used for output from the multihead self-attention
dropout_on_fc1: dropout used in FFN layer for the first linear layer
dropout_on_fc2: dropout used in FFN layer for the second linear layer
segment_size: segment size for each segment
        context_config: (left_context_size, right_context_size) defines the surrounding context size
for each segment
max_memory_size: maximum memory size used for each segment
scaled_init: whether use scaled init for weight initialization in attention layer
        std_scale: if std_scale is not None, weak attention suppression is
            turned on. For std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std will be suppressed.
activation_fn: activation function used in FFN layer. [ReLU, GELU] supported
tanh_on_mem: whether use tanh on memory
        mini_batches: use mini-batch training
negative_inf: the negative infinity value used in attention masking. default is "-inf".
            For some situations, e.g. LM, it is better to use "-1e8" to avoid NaN issues.
        summarization_method: method to generate segment summarization embedding
        max_relative_position: max relative position for relative position embedding
rpe_old_option: To be compatible with previous model. The previous model
was trained with attention += attention + rpe. The correct equation
should be attention = attention + rpe
[TODO]: remove the rpe_old_option by the end of 2021 Q1.
"""
def __init__(
self,
input_dim,
num_heads,
ffn_dim,
num_layers=1,
dropout_in_attn=0.0,
dropout_on_attn=0.0,
dropout_on_fc1=0.0,
dropout_on_fc2=0.0,
segment_size=128,
context_config=(0, 0),
max_memory_size=0,
scaled_init=True,
std_scale=None,
activation_fn="relu",
tanh_on_mem=False,
mini_batches=False,
negative_inf="-inf",
deep_init=True,
summarization_method="mean",
max_relative_position=0,
rpe_old_option=True,
):
super().__init__(None)
if input_dim % num_heads:
raise ValueError(
"input_dim ({}) must be divisible by num_heads ({})".format(
input_dim, num_heads
)
)
        # we used to support a growing memory size. However, it causes
        # cross-stream batching failures. Now we need to have an exact max memory size
if max_memory_size < 0:
raise ValueError("max_memory_size must be >= 0")
# Only assign right_context. In decoding, left context will be cached.
# No need to let the online decoder to re-assign the left context
self.left_context, self.right_context = context_config
self.segment_size = segment_size
self.memory_dim = input_dim
self.max_memory_size = max_memory_size
self.mini_batches = mini_batches
if self.max_memory_size != 0:
self.use_mem = True
else:
self.use_mem = False
self.memory_op = SummarizationLayer(
summarization_method, segment_size, input_dim
)
self.layers = torch.nn.ModuleList()
self.num_layers = num_layers
self.max_relative_position = max_relative_position
if self.max_relative_position > 0:
self.use_rpe = True
else:
self.use_rpe = False
for i in range(self.num_layers):
if deep_init:
layer_index = i
else:
layer_index = -1
self.layers.append(
NoSegAugmentedMemoryTransformer(
num_heads=num_heads,
input_dim=input_dim,
ffn_dim=ffn_dim,
dropout_in_attn=dropout_in_attn,
dropout_on_attn=dropout_on_attn,
dropout_on_fc1=dropout_on_fc1,
dropout_on_fc2=dropout_on_fc2,
segment_size=segment_size,
std_scale=std_scale,
activation_fn=activation_fn,
tanh_on_mem=tanh_on_mem,
scaled_init=scaled_init,
use_mem=self.use_mem,
mini_batches=mini_batches,
negative_inf=negative_inf,
layer_index=layer_index,
summarization_method=summarization_method,
max_relative_position=max_relative_position,
rpe_old_option=rpe_old_option,
)
)
def set_mini_batches(self, mini_batches):
# handy function only used for unit test
self.mini_batches = mini_batches
for layer in self.layers:
layer.set_mini_batches(mini_batches)
def _get_relative_position(
self,
input: Tensor,
max_relative_position: int,
left_context_length: int,
past_length: int,
is_decoding: bool,
):
# For training, we copy the right context to the start of the utterance
# First dimension in distance is corresponding to query.
# [right context, utterance, summary vector]
# Second dimension in distance is corresponding to key.
# [Memory bank, right context, utterance]
# For summary vector in query part, the distance with
# all other position is 2*max_position. For memory bank in key,
# the distance with all other positions is 0.
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context) / self.segment_size)
# utterance
u_st = past_length * self.segment_size
u_ed = u_st + T
utterance_ranges = torch.arange(u_st, u_ed - self.right_context)
# left context. Only in minibatch or decoding
left_context_ranges = torch.arange(u_st - left_context_length, u_st)
# Right context block
# right context + utterance
right_context_blocks = []
for i in range(0, num_segs - 1):
st = (i + 1) * self.segment_size + u_st
ed = st + self.right_context
assert ed < u_ed
temp = torch.arange(st, ed)
right_context_blocks.append(temp)
right_context_blocks.append(torch.arange(u_ed - self.right_context, u_ed))
right_context_ranges = torch.cat(right_context_blocks)
if self.use_mem:
# Memory bank
# The position for memory -n, .., -1
if is_decoding:
memory_size = min(past_length, self.max_memory_size)
else:
memory_size = num_segs + past_length - 1
memory_bank_ranges = torch.arange(
-max_relative_position - 1, -max_relative_position - 1 - memory_size, -1
)
# summary vector
# The position for summary vector as the T+max_relative_position+1.
# After the clamping, the relative position is max_relative_position
summary_pos_st = u_ed + max_relative_position + 1
summary_vector_ranges = torch.arange(
summary_pos_st, summary_pos_st + num_segs
)
key_ranges = torch.cat(
[
memory_bank_ranges,
right_context_ranges,
left_context_ranges,
utterance_ranges,
]
)
query_ranges = torch.cat(
[right_context_ranges, utterance_ranges, summary_vector_ranges]
)
else:
key_ranges = torch.cat(
[right_context_ranges, left_context_ranges, utterance_ranges]
)
query_ranges = torch.cat([right_context_ranges, utterance_ranges])
distance = key_ranges[None, :] - query_ranges[:, None]
distance_clamp = (
torch.clamp(distance, -max_relative_position, max_relative_position)
+ max_relative_position
)
distance_clamp = distance_clamp.to(input.device).long().detach()
return distance_clamp
def _get_attention_mask(self, input, past_length=0, left_context_cache=0):
# attention mask for each query contains three parts:
# 1. memory part
# 2. left_context + segment
# 3. right_context_block
        # so for each segment and its corresponding right context block,
# the attention matrix is formed by 9 parts:
# [0, m, 0, 0, right_context, 0, 0, seg, 0]
# [before memory, memory, after memory, before right context, right_context,
# after right context, before seg, seg, after seg]
#
# Query is formed in the way as [right_context_blocks, utterance, summary]
#
        # Note: putting m and right_context before the segment is convenient
        # for the padding_mask operation.
# Key lengths = m_length + right_context_block_length + lengths
utterance_length, batch_size, _ = input.shape
summary_length = math.ceil(utterance_length / self.segment_size)
num_segs = summary_length
rc_length = self.right_context * num_segs
rc = self.right_context
lc = self.left_context
        # when using mini-batches, there is a left context cache available for the
        # current sequence.
lcc = left_context_cache
# max_memory_size is 0 then we don't have memory and summary
# past_length is the memory carry from previous sequence
if self.use_mem:
mem_length = num_segs - 1 + past_length
else:
mem_length = 0
rc_mask = []
query_mask = []
summary_mask = []
for j in range(0, num_segs):
ssize = min(self.segment_size, utterance_length - j * self.segment_size)
rc_size = rc
rc_mat = []
q_mat = []
s_mat = []
m_start = max(j + past_length - self.max_memory_size, 0)
# max_memory_size is 0, then we don't use memory
if self.use_mem:
# part 0: before memory
rc_mat.append(input.new_zeros(rc_size, m_start))
q_mat.append(input.new_zeros(ssize, m_start))
s_mat.append(input.new_zeros(1, m_start))
# part 1: memory
col_1 = j + past_length - m_start
rc_mat.append(torch.ones(rc_size, col_1, device=input.device))
q_mat.append(torch.ones(ssize, col_1, device=input.device))
                # based on D22875746, disabling summary query attention
                # on memory is better for long-form utterances
s_mat.append(input.new_zeros(1, col_1))
# part 2: after memory
col_2 = mem_length - (j + past_length)
rc_mat.append(input.new_zeros(rc_size, col_2))
q_mat.append(input.new_zeros(ssize, col_2))
s_mat.append(input.new_zeros(1, col_2))
# part 3: before right context
rc_start = j * rc
rc_mat.append(input.new_zeros(rc_size, rc_start))
q_mat.append(input.new_zeros(ssize, rc_start))
s_mat.append(input.new_zeros(1, rc_start))
# part 4: right context
rc_end = rc_start + rc
col_4 = rc
rc_mat.append(torch.ones(rc_size, col_4, device=input.device))
q_mat.append(torch.ones(ssize, col_4, device=input.device))
s_mat.append(torch.ones(1, col_4, device=input.device))
# part 5: after right context
col_5 = rc_length - rc_end
rc_mat.append(input.new_zeros(rc_size, col_5))
q_mat.append(input.new_zeros(ssize, col_5))
s_mat.append(input.new_zeros(1, col_5))
# part 6: before query segment
seg_start = max(j * self.segment_size + lcc - lc, 0)
rc_mat.append(input.new_zeros(rc_size, seg_start))
q_mat.append(input.new_zeros(ssize, seg_start))
s_mat.append(input.new_zeros(1, seg_start))
# part 7: query segment
# note: right context is put in right context block
# here we only need to consider about left context
seg_end = min((j + 1) * self.segment_size + lcc, utterance_length + lcc)
col_7 = seg_end - seg_start
rc_mat.append(torch.ones(rc_size, col_7, device=input.device))
q_mat.append(torch.ones(ssize, col_7, device=input.device))
s_mat.append(torch.ones(1, col_7, device=input.device))
# part 8: after query segment
col_8 = utterance_length + lcc - seg_end
rc_mat.append(input.new_zeros(rc_size, col_8))
q_mat.append(input.new_zeros(ssize, col_8))
s_mat.append(input.new_zeros(1, col_8))
rc_mask.append(torch.cat(rc_mat, dim=1))
query_mask.append(torch.cat(q_mat, dim=1))
summary_mask.append(torch.cat(s_mat, dim=1))
# no memory, then we don't need summary either
if self.use_mem:
attention_mask = (
1
- torch.cat(
[
torch.cat(rc_mask, dim=0),
torch.cat(query_mask, dim=0),
torch.cat(summary_mask, dim=0),
],
dim=0,
)
).to(torch.bool)
else:
attention_mask = (
1
- torch.cat(
[torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0)], dim=0
)
).to(torch.bool)
return attention_mask
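    # Shape sketch (assumption): for an utterance split into `num_segs`
    # segments with right context R and memory enabled, the returned boolean
    # mask has
    #
    #     rows    = num_segs * R + utterance_length + num_segs  (rc, query, summary)
    #     columns = mem_length + num_segs * R + left_context_cache + utterance_length
    #
    # with True marking positions that are filled with negative_inf.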
@torch.jit.export
def init_state(
self, batch_size: int, device: Optional[Device] = None
) -> List[Tensor]:
empty_memory = torch.zeros(
self.num_layers,
self.max_memory_size,
batch_size,
self.memory_dim,
device=device,
)
left_context_key = torch.zeros(
self.num_layers,
self.left_context,
batch_size,
self.memory_dim,
device=device,
)
left_context_val = torch.zeros(
self.num_layers,
self.left_context,
batch_size,
self.memory_dim,
device=device,
)
past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device)
return [empty_memory, left_context_key, left_context_val, past_length]
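    # State layout sketch (assumption): for num_layers L, max_memory_size M,
    # left_context C, batch size B and model dim D the streaming state is
    #
    #     state[0]  memory bank          (L, M, B, D)
    #     state[1]  left-context keys    (L, C, B, D)
    #     state[2]  left-context values  (L, C, B, D)
    #     state[3]  past_length          (1, B) int32, segments consumed so far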
@torch.jit.export
def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]:
if len(states) == 0:
return []
batched_m = []
batched_lc_key = []
batched_lc_val = []
batched_past_length = []
for state in states:
if len(state) == 0:
continue
m, lc_key, lc_val, past_length = state
batched_m.append(m)
batched_lc_key.append(lc_key)
batched_lc_val.append(lc_val)
batched_past_length.append(past_length)
if (
(len(batched_m) == 0)
or (len(batched_lc_key) == 0)
or (len(batched_lc_val) == 0)
or (len(batched_past_length) == 0)
):
return [
torch.tensor([]),
torch.tensor([]),
torch.tensor([]),
torch.tensor([]),
]
batched_m = torch.cat(batched_m, dim=2)
batched_lc_key = torch.cat(batched_lc_key, dim=2)
batched_lc_val = torch.cat(batched_lc_val, dim=2)
batched_past_length = torch.cat(batched_past_length, dim=1)
return [batched_m, batched_lc_key, batched_lc_val, batched_past_length]
@torch.jit.export
def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
if len(state) == 0:
return []
m, lc_key, lc_val, past_length = state
indices = indices.to(device=m.device)
reord_m = torch.index_select(m, 2, indices)
reord_lc_key = torch.index_select(lc_key, 2, indices)
reord_lc_val = torch.index_select(lc_val, 2, indices)
reord_past_length = torch.index_select(past_length, 1, indices)
return [reord_m, reord_lc_key, reord_lc_val, reord_past_length]
@torch.jit.export
def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
m, lc_key, lc_val, past_length = state
m = m.index_fill(dim=2, index=indices, value=0.0)
lc_key = lc_key.index_fill(dim=2, index=indices, value=0.0)
lc_val = lc_val.index_fill(dim=2, index=indices, value=0.0)
past_length = past_length.index_fill(dim=1, index=indices, value=0)
return [m, lc_key, lc_val, past_length]
@torch.jit.export
def state_size(self) -> int:
return 4
@torch.jit.export
def batch_size_in_state(
self, state: Optional[List[Tensor]], sloppy: bool = True
) -> Optional[int]:
if state is None:
return None
return state[0].size(2)
def gen_summary_queries(self, input):
sum_input = self.memory_op(input)
return sum_input
def _gen_right_context_padded_input(self, input):
# This function deals with input that is already
# padded with right context (e.g. minibatch training)
right_context_blocks = []
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context) / self.segment_size)
for i in range(0, num_segs - 1):
st = (i + 1) * self.segment_size
ed = st + self.right_context
assert ed < T
temp = input[st:ed, :, :]
right_context_blocks.append(temp)
# last segment right context is already available
right_context_blocks.append(input[T - self.right_context :, :, :])
return torch.cat(right_context_blocks, dim=0)
def _gen_segs_right_context(self, input, lengths):
segments = []
T, B, D = input.size()
nT = T - self.right_context
# assume input is right context padded
num_segs = math.ceil(nT / self.segment_size)
        # pad zeros to the utterance to make sure each
        # segment has the same right context; the last
        # segment simply takes whatever frames remain.
for i in range(0, num_segs - 1):
st = i * self.segment_size
ed = min(T, st + self.segment_size + self.right_context)
temp = input[st:ed, :, :]
rest_lengths = torch.clamp(
lengths - self.segment_size, min=0, max=nT - (i + 1) * self.segment_size
)
segments.append((temp, lengths - rest_lengths + self.right_context))
lengths = rest_lengths
last_seg = input[st + self.segment_size :, :, :]
segments.append((last_seg, rest_lengths + self.right_context))
return segments
@torch.jit.unused
def forward(
self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
# Xutai: originally the second argument is lengths.
lengths = (~padding_masks).sum(dim=1).long()
# mini batch training.
if self.mini_batches:
return self.forward_mini_batches(input, lengths, state)
        # regular full sequence training. Note, assume the right context is provided
# in the input.
T, B, D = input.size()
right_context_blocks = self._gen_right_context_padded_input(input)
# generate the relative positional embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=0,
past_length=0,
is_decoding=False,
)
else:
rpe = None
input = input[: T - self.right_context, :, :]
attention_mask = self._get_attention_mask(input)
        # first layer uses each segment mean as memory,
        # ignoring the last segment average
if self.use_mem:
mems = self.gen_summary_queries(input)[:-1, :, :]
else:
mems = torch.zeros(0, input.size(1), input.size(2), device=input.device)
mems = mems.type_as(input)
output = input
all_outputs = []
for layer in self.layers:
output, mems, right_context_blocks, _, _ = layer(
input=output,
lengths=lengths,
attention_mask=attention_mask,
mems=mems,
right_context_blocks=right_context_blocks,
pre_mems=None,
left_context_key=None,
left_context_val=None,
rpe=rpe,
)
all_outputs.append(output)
return output, padding_masks, [], all_outputs
def forward_jit_mini_batch_init(
self,
seg: Tensor,
state: Optional[List[Tensor]] = None,
is_decoding: bool = False,
):
# Prepare state. In whole sequence training, state is ignored.
# For minibatch training, we need to prepare state
if state is None:
state = self.init_state(batch_size=seg.size(1), device=seg.device)
if seg.dtype == torch.half:
state = [state[0].half(), state[1].half(), state[2].half(), state[3]]
if self.use_mem:
            # note: input is averaged only over seg, not over the right context.
            # The first layer uses each segment mean as memory; the last
            # segment average is used in the state
full_mems = self.gen_summary_queries(seg)
if is_decoding:
mems = full_mems[0:1, :, :]
state_mems = torch.cat([state[0][0], mems], dim=0)
else:
mems = full_mems[:-1, :, :]
state_mems = torch.cat([state[0][0], full_mems], dim=0)
else:
mems = state[0][0]
state_mems = mems
        # track the number of processed segments (i.e. memory entries);
        # sequences in the same batch share the same past length
past_length = state[3][0][0].item()
past_left_context = min(past_length * self.segment_size, self.left_context)
past_length = min(self.max_memory_size, past_length)
return state, mems, state_mems, past_length, past_left_context
def state_update_before(
self, layer: int, state: List[Tensor], past_length: int, past_left_context: int
):
pre_mems = state[0][layer][self.max_memory_size - past_length :, :, :]
lc_key = state[1][layer][self.left_context - past_left_context :, :, :]
lc_val = state[2][layer][self.left_context - past_left_context :, :, :]
return pre_mems, lc_key, lc_val
def state_update_after(
self,
layer: int,
state: List[Tensor],
mems: Tensor,
next_key: Tensor,
next_val: Tensor,
mems_list: List[Tensor],
lc_key_list: List[Tensor],
lc_val_list: List[Tensor],
):
# mems is used for next layer
if layer < self.num_layers - 1:
state_mems = torch.cat([state[0][layer + 1], mems], dim=0)
mems_list.append(state_mems[-self.max_memory_size :, :, :])
        # when mems are passed to the next sequence, we need the last memory. when mems
        # are used for the next layer, we can ignore the last memory
mems = mems[:-1, :, :]
# note state[1][i] and state[2][i] original length equals to self.left_context
new_k = torch.cat([state[1][layer], next_key], dim=0)
new_v = torch.cat([state[2][layer], next_val], dim=0)
lc_key_list.append(new_k[-self.left_context :, :, :])
lc_val_list.append(new_v[-self.left_context :, :, :])
return mems_list, lc_key_list, lc_val_list, mems
def state_update_after_loop(
self,
state: List[Tensor],
mems_list: List[Tensor],
lc_key_list: List[Tensor],
lc_val_list: List[Tensor],
update_length: int,
):
state[0] = torch.stack(mems_list, dim=0)
state[1] = torch.stack(lc_key_list, dim=0)
state[2] = torch.stack(lc_val_list, dim=0)
state[3] = state[3] + update_length
return state
@torch.jit.unused
def forward_mini_batches(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
T, B, D = input.size()
# input without right context
seg = input[: T - self.right_context, :, :]
# get right context blocks
right_context_blocks = self._gen_right_context_padded_input(input)
mems_list = []
lc_key_list = []
lc_val_list = []
results = self.forward_jit_mini_batch_init(seg, state, False)
state, mems, state_mems, past_length, past_left_context = results
# relative position embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=past_left_context,
past_length=past_length,
is_decoding=False,
)
else:
rpe = None
# get attention mask based on seg (not include right context) and available
# left context
attention_mask = self._get_attention_mask(seg, past_length, past_left_context)
mems_list.append(state_mems[-self.max_memory_size :, :, :])
output = seg
i = 0
all_outputs = []
for layer in self.layers:
# In order to make cross stream batching work, mem, left context key
# and left context value in the state should always be the same shape.
# We use the past length to track the processed segment number. In this
# way, we take out the essential memory, left context key and left
            # context val from the state. After finishing the forward pass for the
            # current segment we add the new memory, left context key and left context
            # value into the state and trim out the oldest part to keep the shape consistent.
pre_mems, lc_key, lc_val = self.state_update_before(
i, state, past_length, past_left_context
)
output, mems, right_context_blocks, next_key, next_val = layer.forward(
input=output,
lengths=lengths,
attention_mask=attention_mask,
mems=mems,
right_context_blocks=right_context_blocks,
pre_mems=pre_mems,
left_context_key=lc_key,
left_context_val=lc_val,
rpe=rpe,
)
all_outputs.append(output)
mems_list, lc_key_list, lc_val_list, mems = self.state_update_after(
layer=i,
state=state,
mems=mems,
next_key=next_key,
next_val=next_val,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
)
i += 1
# update state
update_length = math.ceil((T - self.right_context) / self.segment_size)
state = self.state_update_after_loop(
state=state,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
update_length=update_length,
)
return output, lengths, state, all_outputs
def forward_jit_test(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""
This one simulate sequence encoder forward jit. This is for unit test purpose.
It is not used in training or decoding. Note, extra_right_context is set in
the model. In unit test, input = [utterance, right_context], lengths =
[utterance_length].
args:
input: input utterance
lengths: utterance input length
state: None here. input is whole utterance
"""
# [TODO] sequence_to_segment has bug in lengths.
seg_src_tokens_lengths = self._gen_segs_right_context(input, lengths)
seg_enc_tokens_lengths: List[Tuple[Tensor, Tensor]] = []
state: Optional[List[Tensor]] = None
for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
seg_enc_tokens, seg_enc_lengths, state = self.forward_jit(
input=seg_src_tokens, lengths=seg_src_lengths, state=state
)
seg_enc_tokens_lengths.append((seg_enc_tokens, seg_enc_lengths))
enc_tokens, enc_lengths = segments_to_sequence(
segments=seg_enc_tokens_lengths, time_axis=0
)
state = [] # returns trivial state
return enc_tokens, enc_lengths, state
@torch.jit.export
def forward_jit(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""
Forward helper for online decoding.
args:
input: [seg, right_context]. We assume in online we
always padding the right context to the preset right context size.
For the last segment, we may have short segment size, but right
context size is the same as other segments
lengths: utterance input length is the utterance segment length and
right context size
state: [memory, left_context_key, left_context_val]. To improve throughput,
in addition to memory, we also cache key and value for left_context in
multihead self-attention
"""
# In online decoding, input = [segment, right_context]
# Lengths = [segment_length, right_context_length]
# so we need strip right context in output
T, B, D = input.size()
rc_str = T - self.right_context
rc_end = T
right_context_blocks = input[rc_str:rc_end, :, :]
seg = input[:rc_str, :, :]
lengths = torch.clamp(lengths - self.right_context, min=0)
mems_list = []
lc_key_list = []
lc_val_list = []
results = self.forward_jit_mini_batch_init(seg, state, True)
state, mems, state_mems, past_length, past_left_context = results
# relative position embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=past_left_context,
past_length=past_length,
is_decoding=True,
)
else:
rpe = None
# memory for first layer.
mems_list.append(state_mems[-self.max_memory_size :, :, :])
output = seg
i = 0
for layer in self.layers:
# In order to make cross stream batching work, mem, left context key
# and left context value in the state should always be the same shape.
# We use the past length to track the processed segment number. In this
# way, we take out the essential memory, left context key and left
            # context val from the state. After finishing the forward pass for the
            # current segment we add the new memory, left context key and left context
            # value into the state and trim out the oldest part to keep the shape consistent.
true_mems, lc_key, lc_val = self.state_update_before(
layer=i,
state=state,
past_length=past_length,
past_left_context=past_left_context,
)
output, mems, right_context_blocks, next_key, next_val = layer.forward_jit(
input=output,
lengths=lengths,
mems=true_mems,
right_context_blocks=right_context_blocks,
left_context_key=lc_key,
left_context_val=lc_val,
rpe=rpe,
)
# mems is used for next layer
mems_list, lc_key_list, lc_val_list, _ = self.state_update_after(
layer=i,
state=state,
mems_list=mems_list,
mems=mems,
next_key=next_key,
next_val=next_val,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
)
i += 1
# update state
state = self.state_update_after_loop(
state=state,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
update_length=1,
)
return output, lengths, state
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.ao.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
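    # Illustrative usage of quantize_ (hedged sketch): calling
    # `encoder.quantize_(params={"per_channel": True})` applies per-channel
    # dynamic quantization to the nn.Linear modules in place, while calling it
    # with no arguments falls back to the default dynamic qconfig.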
# ------------------------------------------------------------------------------
# Emformer encoder for seq2seq model
# This is a wrapper over the original emformer
# ------------------------------------------------------------------------------
def emformer_encoder(klass):
class SpeechEncoder(klass):
def __init__(self, args):
super().__init__(args)
stride = SpeechEncoder.conv_layer_stride(args)
trf_left_context = args.segment_left_context // stride
trf_right_context = args.segment_right_context // stride
context_config = [trf_left_context, trf_right_context]
self.transformer_layers = nn.ModuleList(
[
NoSegAugmentedMemoryTransformerEncoderLayer(
input_dim=args.encoder_embed_dim,
num_heads=args.encoder_attention_heads,
ffn_dim=args.encoder_ffn_embed_dim,
num_layers=args.encoder_layers,
dropout_in_attn=args.dropout,
dropout_on_attn=args.dropout,
dropout_on_fc1=args.dropout,
dropout_on_fc2=args.dropout,
activation_fn=args.activation_fn,
context_config=context_config,
segment_size=args.segment_length,
max_memory_size=args.max_memory_size,
scaled_init=True, # TODO: use constant for now.
tanh_on_mem=args.amtrf_tanh_on_mem,
)
]
)
def forward(self, src_tokens, src_lengths):
encoder_out = super().forward(src_tokens, src_lengths)
output = encoder_out["encoder_out"][0]
encoder_padding_masks = encoder_out["encoder_padding_mask"][0]
            # This is because, in the original implementation, the output did
            # not treat the last segment as right context.
encoder_padding_masks = encoder_padding_masks[:, : output.size(0)]
return {
"encoder_out": [output],
"encoder_padding_mask": [encoder_padding_masks],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@staticmethod
def conv_layer_stride(args):
# TODO: make it configurable from the args
return 4
SpeechEncoder.__name__ = klass.__name__
return SpeechEncoder
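# Illustrative usage of the wrapper (hedged sketch; `ConvTransformerEncoder` is
# an assumed base encoder class with a convolutional front-end, and `args` is an
# assumed namespace carrying the segment_left_context / segment_right_context /
# segment_length / max_memory_size fields used above):
#
#   EmformerSpeechEncoder = emformer_encoder(ConvTransformerEncoder)
#   encoder = EmformerSpeechEncoder(args)
#   encoder_out = encoder(src_tokens, src_lengths)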
| 37.144178 | 97 | 0.584655 |
de83133e2fccf8eba4f37b0932b5452763262a54 | 3,254 | py | Python | dynamic_preferences/admin.py | calvin620707/django-dynamic-preferences | 09026f5aea085fba24df8ba961c629a133242c81 | [
"BSD-3-Clause"
] | null | null | null | dynamic_preferences/admin.py | calvin620707/django-dynamic-preferences | 09026f5aea085fba24df8ba961c629a133242c81 | [
"BSD-3-Clause"
] | null | null | null | dynamic_preferences/admin.py | calvin620707/django-dynamic-preferences | 09026f5aea085fba24df8ba961c629a133242c81 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from django import forms
from .settings import preferences_settings
from .registries import global_preferences_registry
from .models import GlobalPreferenceModel
from .forms import GlobalSinglePreferenceForm, SinglePerInstancePreferenceForm
class SectionFilter(admin.AllValuesFieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
super(SectionFilter, self).__init__(field, request, params, model, model_admin, field_path)
parent_model, reverse_path = admin.utils.reverse_field_path(model, field_path)
if model == parent_model:
queryset = model_admin.get_queryset(request)
else:
queryset = parent_model._default_manager.all()
self.registries = []
registry_name_set = set()
for preferenceModel in queryset.distinct():
l = len(registry_name_set)
registry_name_set.add(preferenceModel.registry.__class__.__name__)
if len(registry_name_set) != l:
self.registries.append(preferenceModel.registry)
def choices(self, changelist):
choices = super(SectionFilter, self).choices(changelist)
for choice in choices:
display = choice['display']
try:
for registry in self.registries:
display = registry.section_objects[display].verbose_name
choice["display"] = display
except (KeyError):
pass
yield choice
class DynamicPreferenceAdmin(admin.ModelAdmin):
list_display = ('verbose_name', 'name', 'section_name', 'help_text', 'raw_value', 'default_value')
fields = ('raw_value', 'default_value', 'name', 'section_name')
readonly_fields = ('name', 'section_name', 'default_value')
if preferences_settings.ADMIN_ENABLE_CHANGELIST_FORM:
list_editable = ('raw_value',)
search_fields = ['name', 'section', 'raw_value']
list_filter = (('section', SectionFilter),)
if preferences_settings.ADMIN_ENABLE_CHANGELIST_FORM:
def get_changelist_form(self, request, **kwargs):
return self.changelist_form
def default_value(self, obj):
return obj.preference.default
def section_name(self, obj):
try:
return obj.registry.section_objects[obj.section].verbose_name
except KeyError:
pass
return obj.section
class GlobalPreferenceAdmin(DynamicPreferenceAdmin):
form = GlobalSinglePreferenceForm
changelist_form = GlobalSinglePreferenceForm
def get_queryset(self, *args, **kwargs):
        # Instantiate default prefs
manager = global_preferences_registry.manager()
manager.all()
return super(GlobalPreferenceAdmin, self).get_queryset(*args, **kwargs)
admin.site.register(GlobalPreferenceModel, GlobalPreferenceAdmin)
class PerInstancePreferenceAdmin(DynamicPreferenceAdmin):
list_display = ('instance',) + DynamicPreferenceAdmin.list_display
fields = ('instance',) + DynamicPreferenceAdmin.fields
raw_id_fields = ('instance',)
form = SinglePerInstancePreferenceForm
changelist_form = SinglePerInstancePreferenceForm
list_select_related = True
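# Illustrative usage (hedged sketch; `MyPreferenceModel` is an assumed
# per-instance preference model defined elsewhere in a project):
#
#   class MyPreferenceAdmin(PerInstancePreferenceAdmin):
#       pass
#   admin.site.register(MyPreferenceModel, MyPreferenceAdmin)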
| 38.282353 | 102 | 0.697296 |
96dc64547c0f0a2644601d7855b6ff036658ead3 | 67 | py | Python | app/__init__.py | zhangshen20/capstone-project-sparkify | 78c01e3670745a343b4dacebe9d10f1b1f0d7715 | [
"MIT"
] | null | null | null | app/__init__.py | zhangshen20/capstone-project-sparkify | 78c01e3670745a343b4dacebe9d10f1b1f0d7715 | [
"MIT"
] | null | null | null | app/__init__.py | zhangshen20/capstone-project-sparkify | 78c01e3670745a343b4dacebe9d10f1b1f0d7715 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
from app import run | 13.4 | 23 | 0.776119 |
f09dda13c00f9bfa4e4fa1edebd62366c71cc249 | 5,960 | py | Python | docs/conf.py | paultag/billy | 70f4c55d760552829a86b30baa6d6eac3f6dc47f | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | paultag/billy | 70f4c55d760552829a86b30baa6d6eac3f6dc47f | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | paultag/billy | 70f4c55d760552829a86b30baa6d6eac3f6dc47f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# flake8: noqa
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
from billy import __version__
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'billy'
copyright = u'2012, Sunlight Labs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme_path = ['.']
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'nosidebar': True}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = ''
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 32.391304 | 80 | 0.716946 |
619dc6963d07e9de95d093734ca4c86008b5c8a0 | 89,924 | py | Python | voc/java/opcodes.py | Mariatta/voc | 348346646a8c0755729dd9ca961a2958e0f6ad50 | [
"BSD-3-Clause"
] | 1 | 2021-01-03T00:59:50.000Z | 2021-01-03T00:59:50.000Z | voc/java/opcodes.py | Mariatta/voc | 348346646a8c0755729dd9ca961a2958e0f6ad50 | [
"BSD-3-Clause"
] | null | null | null | voc/java/opcodes.py | Mariatta/voc | 348346646a8c0755729dd9ca961a2958e0f6ad50 | [
"BSD-3-Clause"
] | null | null | null | from .constants import Classref, Fieldref, Methodref, InterfaceMethodref, String, Integer, Long, Float, Double, Constant
# From: https://en.wikipedia.org/wiki/Java_bytecode_instruction_listings
# Reference" http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-6.html#jvms-6.2
##########################################################################
# 6.2. Opcodes
##########################################################################
class Opcode:
opcodes = None
def __init__(self):
self.references = []
self.starts_line = None
def __repr__(self):
return '<%s%s>' % (self.__class__.__name__, self.__arg_repr__())
def __arg_repr__(self):
return ''
def __len__(self):
return 1
@classmethod
def read(cls, reader, dump=None):
code = reader.read_u1()
# Create an index of all known opcodes.
if Opcode.opcodes is None:
Opcode.opcodes = {}
for name in globals():
klass = globals()[name]
try:
if name != 'Opcode' and issubclass(klass, Opcode):
Opcode.opcodes[klass.code] = klass
except TypeError:
pass
instance = Opcode.opcodes[code].read_extra(reader, dump)
if dump:
reader.debug(" " * dump, '%3d: %s' % (reader.offset, instance))
return instance
@classmethod
def read_extra(cls, reader, dump=None):
return cls()
def write(self, writer):
writer.write_u1(self.code)
self.write_extra(writer)
def write_extra(self, writer):
pass
def resolve(self, constant_pool):
pass
@property
def stack_effect(self):
return self.produce_count - self.consume_count
def process(self, context):
return True
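# Illustrative usage (hedged sketch): each concrete opcode below reports how
# many stack slots it consumes and produces, so a code generator can track
# stack depth via `stack_effect`, e.g. with the classes defined in this module:
#
#   IADD().stack_effect     # -1: consumes two ints, produces one result
#   BIPUSH(3).stack_effect  # +1: pushes one constant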
class AALOAD(Opcode):
# Load onto the stack a reference from an array
# Stack: arrayref, index → value
code = 0x32
def __init__(self):
super(AALOAD, self).__init__()
@property
def consume_count(self):
return 2
@property
def produce_count(self):
return 1
class AASTORE(Opcode):
# Store into a reference in an array
# Stack: arrayref, index, value →
code = 0x53
def __init__(self):
super(AASTORE, self).__init__()
@property
def consume_count(self):
return 3
@property
def produce_count(self):
return 0
class ACONST_NULL(Opcode):
# Push a null reference onto the stack
# Stack: → null
code = 0x01
def __init__(self):
super(ACONST_NULL, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ALOAD(Opcode):
# Load a reference onto the stack from a local variable #index
# Args(1): index
# Stack: → objectref
code = 0x19
def __init__(self, var):
super(ALOAD, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ALOAD_0(Opcode):
# Load a reference onto the stack from local variable 0
# Stack: → objectref
code = 0x2a
def __init__(self):
super(ALOAD_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ALOAD_1(Opcode):
# Load a reference onto the stack from local variable 1
# Stack: → objectref
code = 0x2b
def __init__(self):
super(ALOAD_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ALOAD_2(Opcode):
# Load a reference onto the stack from local variable 2
# Stack: → objectref
code = 0x2c
def __init__(self):
super(ALOAD_2, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ALOAD_3(Opcode):
# Load a reference onto the stack from local variable 3
# Stack: → objectref
code = 0x2d
def __init__(self):
super(ALOAD_3, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ANEWARRAY(Opcode):
# Create a new array of references of length count and component type identified
# by the class reference index (indexbyte1 << 8 + indexbyte2) in the constant
# pool
# Args(2): indexbyte1, indexbyte2
# Stack: count → arrayref
code = 0xbd
def __init__(self, class_name):
super(ANEWARRAY, self).__init__()
self.klass = Classref(class_name)
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.klass.name
@classmethod
def read_extra(cls, reader, dump=None):
klass = reader.read_u2()
return cls(reader.constant_pool[klass].name.bytes.decode('mutf-8'))
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.klass))
def resolve(self, constant_pool):
self.klass.resolve(constant_pool)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
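# Illustrative example (hedged): ANEWARRAY('java/lang/String') pops the array
# length from the stack and pushes a reference to a new String[] of that
# length, i.e. the bytecode form of `new String[count]`.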
class ARETURN(Opcode):
# Return a reference from a method
# Stack: objectref → [empty]
code = 0xb0
def __init__(self):
super(ARETURN, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ARRAYLENGTH(Opcode):
# Get the length of an array
# arrayref → length
code = 0xbe
def __init__(self):
super(ARRAYLENGTH, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class ASTORE(Opcode):
# Store a reference into a local variable #index
# Args(1): index
# Stack: objectref →
code = 0x3a
def __init__(self, var):
super(ASTORE, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ASTORE_0(Opcode):
# Store a reference into local variable 0
# Stack: objectref →
code = 0x4b
def __init__(self):
super(ASTORE_0, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ASTORE_1(Opcode):
# Store a reference into local variable 1
# Stack: objectref →
code = 0x4c
def __init__(self):
super(ASTORE_1, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ASTORE_2(Opcode):
# Store a reference into local variable 2
# Stack: objectref →
code = 0x4d
def __init__(self):
super(ASTORE_2, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ASTORE_3(Opcode):
# Store a reference into local variable 3
# Stack: objectref →
code = 0x4e
def __init__(self):
super(ASTORE_3, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ATHROW(Opcode):
# Throws an error or exception (notice that the rest of the stack is cleared,
# leaving only a reference to the Throwable)
# Stack: objectref → [empty], objectref
code = 0xbf
def __init__(self):
super(ATHROW, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 0
class BALOAD(Opcode):
# Load a byte or Boolean value from an array
# Stack: arrayref, index → value
code = 0x33
def __init__(self):
super(BALOAD, self).__init__()
    @property
    def produce_count(self):
        return 1
    @property
    def consume_count(self):
        return 2
class BASTORE(Opcode):
# Store a byte or Boolean value into an array
# Stack: arrayref, index, value →
code = 0x54
def __init__(self):
super(BASTORE, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 3
class BIPUSH(Opcode):
# Push a byte onto the stack as an integer value
# Args(1) byte
# Stack: → value
code = 0x10
def __init__(self, const):
super(BIPUSH, self).__init__()
self.const = const
def __len__(self):
return 2
def __arg_repr__(self):
return ' ' + repr(self.const)
@classmethod
def read_extra(cls, reader, dump=None):
const = reader.read_u1()
return cls(const)
def write_extra(self, writer):
writer.write_s1(self.const)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
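# Illustrative example (hedged): BIPUSH(42) pushes the int 42. The operand is a
# single signed byte, so only values in -128..127 fit here; larger constants
# need a wider push or a constant-pool load instead.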
class BREAKPOINT(Opcode):
# Reserved for breakpoints in Java debuggers; should not appear in any class file
code = 0xca
def __init__(self):
super(BREAKPOINT, self).__init__()
class CALOAD(Opcode):
# Load a char from an array
# Stack: arrayref, index → value
code = 0x34
def __init__(self):
super(CALOAD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class CASTORE(Opcode):
# Store a char into an array
# Stack: arrayref, index, value →
code = 0x55
def __init__(self):
super(CASTORE, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 3
class CHECKCAST(Opcode):
# Checks whether an objectref is of a certain type, the class reference of which
# is in the constant pool at index (indexbyte1 << 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: objectref → objectref
code = 0xc0
def __init__(self, class_name):
super(CHECKCAST, self).__init__()
self.klass = Classref(class_name)
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % (self.klass)
@classmethod
def read_extra(cls, reader, dump=None):
class_name = reader.constant_pool[reader.read_u2()].name.bytes.decode('mutf-8')
return cls(class_name)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.klass))
def resolve(self, constant_pool):
self.klass.resolve(constant_pool)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
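# Illustrative example (hedged): CHECKCAST('java/util/List') verifies that the
# reference on top of the stack is a java.util.List (or null) and leaves it in
# place; at run time the JVM throws ClassCastException if the check fails.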
class D2F(Opcode):
# Convert a double to a float
# Stack: value → result
code = 0x90
def __init__(self):
super(D2F, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class D2I(Opcode):
# Convert a double to an int
# Stack: value → result
code = 0x8e
def __init__(self):
super(D2I, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class D2L(Opcode):
# Convert a double to a long
# Stack: value → result
code = 0x8f
def __init__(self):
super(D2L, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class DADD(Opcode):
# Add two doubles
# Stack: value1, value2 → result
code = 0x63
def __init__(self):
super(DADD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class DALOAD(Opcode):
# Load a double from an array
# Stack: arrayref, index → value
code = 0x31
def __init__(self):
super(DALOAD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class DASTORE(Opcode):
# Store a double into an array
# Stack: arrayref, index, value →
code = 0x52
def __init__(self):
super(DASTORE, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 3
class DCMPG(Opcode):
# Compare two doubles
# Stack: value1, value2 → result
code = 0x98
def __init__(self):
super(DCMPG, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class DCMPL(Opcode):
# Compare two doubles
# Stack: value1, value2 → result
code = 0x97
def __init__(self):
super(DCMPL, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class DCONST_0(Opcode):
# Push the constant 0.0 onto the stack
# Stack: → 0.0
code = 0x0e
def __init__(self):
super(DCONST_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class DCONST_1(Opcode):
# Push the constant 1.0 onto the stack
# Stack: → 1.0
code = 0x0f
def __init__(self):
super(DCONST_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class DDIV(Opcode):
# Divide two doubles
# Stack: value1, value2 → result
code = 0x6f
def __init__(self):
super(DDIV, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class DLOAD(Opcode):
# Load a double value from a local variable #index
# Args(1): index
# Stack: → value
code = 0x18
def __init__(self, var):
super(DLOAD, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class DLOAD_0(Opcode):
# Load a double from local variable 0
# Stack: → value
code = 0x26
    def __init__(self):
        super(DLOAD_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class DLOAD_1(Opcode):
# Load a double from local variable 1
# Stack: → value
code = 0x27
def __init__(self):
super(DLOAD_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class DLOAD_2(Opcode):
# Load a double from local variable 2
# Stack: → value
code = 0x28
def __init__(self):
super(DLOAD_2, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class DLOAD_3(Opcode):
# Load a double from local variable 3
# Stack: → value
code = 0x29
def __init__(self):
super(DLOAD_3, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class DMUL(Opcode):
# Multiply two doubles
# Stack: value1, value2 → result
code = 0x6b
def __init__(self):
super(DMUL, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class DNEG(Opcode):
# Negate a double
# Stack: value → result
code = 0x77
def __init__(self):
super(DNEG, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class DREM(Opcode):
# Get the remainder from a division between two doubles
# value1, value2 → result
code = 0x73
def __init__(self):
super(DREM, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class DRETURN(Opcode):
# Return a double from a method
# Stack:value → [empty]
code = 0xaf
def __init__(self):
super(DRETURN, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class DSTORE(Opcode):
# Store a double value into a local variable #index
# Args(1): index
# Stack: value →
code = 0x39
def __init__(self, var):
super(DSTORE, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class DSTORE_0(Opcode):
# Store a double into local variable 0
# Stack: value →
code = 0x47
def __init__(self):
super(DSTORE_0, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class DSTORE_1(Opcode):
# Store a double into local variable 1
# Stack: value →
code = 0x48
def __init__(self):
super(DSTORE_1, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class DSTORE_2(Opcode):
# Store a double into local variable 2
# Stack: value →
code = 0x49
def __init__(self):
super(DSTORE_2, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class DSTORE_3(Opcode):
# Store a double into local variable 3
# Stack: value →
code = 0x4a
def __init__(self):
super(DSTORE_3, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class DSUB(Opcode):
# Subtract a double from another
# Stack: value1, value2 → result
code = 0x67
def __init__(self):
super(DSUB, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class DUP(Opcode):
# Duplicate the value on top of the stack
# value → value, value
code = 0x59
def __init__(self):
super(DUP, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class DUP_X1(Opcode):
# Insert a copy of the top value into the stack two values from the top. value1
# and value2 must not be of the type double or long.
# Stack: value2, value1 → value1, value2, value1
code = 0x5a
def __init__(self):
super(DUP_X1, self).__init__()
@property
def produce_count(self):
return 3
@property
def consume_count(self):
return 2
class DUP_X2(Opcode):
# Insert a copy of the top value into the stack two (if value2 is double or long
# it takes up the entry of value3, too) or three values (if value2 is neither
# double nor long) from the top
# Stack: value3, value2, value1 → value1, value3, value2, value1
code = 0x5b
def __init__(self):
super(DUP_X2, self).__init__()
@property
def produce_count(self):
return 4
@property
def consume_count(self):
return 3
class DUP2(Opcode):
# Duplicate top two stack words (two values, if value1 is not double nor long; a
# single value, if value1 is double or long)
# Stack: {value2, value1} → {value2, value1}, {value2, value1}
code = 0x5c
def __init__(self):
super(DUP2, self).__init__()
@property
def produce_count(self):
return 4
@property
def consume_count(self):
return 2
class DUP2_X1(Opcode):
code = 0x5d
def __init__(self):
super(DUP2_X1, self).__init__()
# value3, {value2, value1} → {value2, value1}, value3, {value2, value1}
# Duplicate two words and insert beneath third word (see explanation above)
@property
def produce_count(self):
return 4
@property
def consume_count(self):
return 3
class DUP2_X2(Opcode):
# Duplicate two words and insert beneath fourth word
# Stack: {value4, value3}, {value2, value1} → {value2, value1}, {value4, value3}, {value2, value1}
code = 0x5e
def __init__(self):
super(DUP2_X2, self).__init__()
@property
def produce_count(self):
return 6
@property
def consume_count(self):
return 4
class F2D(Opcode):
# Convert a float to a double
# Stack: value → result
code = 0x8d
def __init__(self):
super(F2D, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class F2I(Opcode):
# Convert a float to an int
# Stack: value → result
code = 0x8b
def __init__(self):
super(F2I, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class F2L(Opcode):
# Convert a float to a long
# Stack: value → result
code = 0x8c
def __init__(self):
super(F2L, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class FADD(Opcode):
# Add two floats
# value1, value2 → result
code = 0x62
def __init__(self):
super(FADD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class FALOAD(Opcode):
# Load a float from an array
# Stack: arrayref, index → value
code = 0x30
def __init__(self):
super(FALOAD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class FASTORE(Opcode):
# Store a float in an array
# Stack: arrayref, index, value →
code = 0x51
def __init__(self):
super(FASTORE, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 3
class FCMPG(Opcode):
# Compare two floats
# Stack: value1, value2 → result
code = 0x96
def __init__(self):
super(FCMPG, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class FCMPL(Opcode):
# Compare two floats
# Stack: value1, value2 → result
code = 0x95
def __init__(self):
super(FCMPL, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class FCONST_0(Opcode):
# Push 0.0f on the stack
# Stack: → 0.0f
code = 0x0b
def __init__(self):
super(FCONST_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class FCONST_1(Opcode):
# Push 1.0f on the stack
# Stack: → 1.0f
code = 0x0c
def __init__(self):
super(FCONST_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class FCONST_2(Opcode):
# Push 2.0f on the stack
# Stack: → 2.0f
code = 0x0d
def __init__(self):
super(FCONST_2, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class FDIV(Opcode):
# Divide two floats
# Stack: value1, value2 → result
code = 0x6e
def __init__(self):
super(FDIV, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class FLOAD(Opcode):
# Load a float value from a local variable #index
# Args(1): index
# Stack: → value
code = 0x17
def __init__(self, var):
super(FLOAD, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class FLOAD_0(Opcode):
# Load a float value from local variable 0
# Stack: → value
code = 0x22
def __init__(self):
super(FLOAD_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class FLOAD_1(Opcode):
# Load a float value from local variable 1
# Stack: → value
code = 0x23
def __init__(self):
super(FLOAD_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class FLOAD_2(Opcode):
# Load a float value from local variable 2
# Stack: → value
code = 0x24
def __init__(self):
super(FLOAD_2, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class FLOAD_3(Opcode):
# Load a float value from local variable 3
# Stack: → value
code = 0x25
def __init__(self):
super(FLOAD_3, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class FMUL(Opcode):
# Multiply two floats
# Stack: value1, value2 → result
code = 0x6a
def __init__(self):
super(FMUL, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class FNEG(Opcode):
# Negate a float
# Stack: value → result
code = 0x76
def __init__(self):
super(FNEG, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class FREM(Opcode):
# Get the remainder from a division between two floats
# Stack: value1, value2 → result
code = 0x72
def __init__(self):
super(FREM, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class FRETURN(Opcode):
# Return a float
# Stack: value → [empty]
code = 0xae
def __init__(self):
super(FRETURN, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class FSTORE(Opcode):
# Store a float value into a local variable #index
# Args(1): index
# Stack: value →
code = 0x38
def __init__(self, var):
super(FSTORE, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class FSTORE_0(Opcode):
# Store a float value into local variable 0
# Stack: value →
code = 0x43
def __init__(self):
super(FSTORE_0, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class FSTORE_1(Opcode):
# Store a float value into local variable 1
# Stack: value →
code = 0x44
def __init__(self):
super(FSTORE_1, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class FSTORE_2(Opcode):
# Store a float value into local variable 2
# Stack: value →
code = 0x45
def __init__(self):
super(FSTORE_2, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class FSTORE_3(Opcode):
# Store a float value into local variable 3
# Stack: value →
code = 0x46
def __init__(self):
super(FSTORE_3, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class FSUB(Opcode):
# Subtract two floats
# Stack: value1, value2 → result
code = 0x66
def __init__(self):
super(FSUB, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class GETFIELD(Opcode):
# Get a field value of an object objectref, where the field is identified by
# field reference in the constant pool index (index1 << 8 + index2)
# Args(2): index1, index2
# Stack: objectref → value
code = 0xb4
def __init__(self, class_name, field_name, descriptor):
super(GETFIELD, self).__init__()
self.field = Fieldref(class_name, field_name, descriptor)
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s.%s (%s)' % (self.field.class_name, self.field.name, self.field.name_and_type.descriptor)
@classmethod
def read_extra(cls, reader, dump=None):
field = reader.constant_pool[reader.read_u2()]
return cls(
field.class_name,
field.name,
field.name_and_type.descriptor.bytes.decode('mutf-8')
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.field))
def resolve(self, constant_pool):
self.field.resolve(constant_pool)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
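# Illustrative example (hedged): reading the instance field java/awt/Point.x
# would be encoded as GETFIELD('java/awt/Point', 'x', 'I'), popping the Point
# reference and pushing the int value of its x field.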
class GETSTATIC(Opcode):
# Get a static field value of a class, where the field is identified by field
# reference in the constant pool index (index 1 << 8 + index2)
# Args(2): index1, index2
# Stack: → value
code = 0xb2
def __init__(self, class_name, field_name, descriptor):
super(GETSTATIC, self).__init__()
self.field = Fieldref(class_name, field_name, descriptor)
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s.%s (%s)' % (self.field.class_name, self.field.name, self.field.name_and_type.descriptor)
@classmethod
def read_extra(cls, reader, dump=None):
field = reader.constant_pool[reader.read_u2()]
return cls(
field.class_name,
field.name,
field.name_and_type.descriptor.bytes.decode('mutf-8')
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.field))
def resolve(self, constant_pool):
self.field.resolve(constant_pool)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
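# Illustrative example (hedged): pushing the well-known static field
# java/lang/System.out is encoded as
# GETSTATIC('java/lang/System', 'out', 'Ljava/io/PrintStream;').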
class GOTO(Opcode):
# Goes to another instruction at branchoffset (signed short constructed from
# unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: [no change]
code = 0xa7
def __init__(self, offset):
super(GOTO, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 0
class GOTO_W(Opcode):
# Goes to another instruction at branchoffset (signed int constructed from
# unsigned bytes branchbyte1 << 24 + branchbyte2 << 16 + branchbyte3 << 8 +
# branchbyte4)
# Args(4): branchbyte1, branchbyte2, branchbyte3, branchbyte4
# Stack: [no change]
code = 0xc8
def __init__(self, offset):
super(GOTO_W, self).__init__()
self.offset = offset
def __len__(self):
return 5
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s4()
return cls(offset)
def write_extra(self, writer):
writer.write_s4(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 0
class I2B(Opcode):
# Convert an int into a byte
# Stack: value → result
code = 0x91
def __init__(self):
super(I2B, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class I2C(Opcode):
# Convert an int into a character
# Stack: value → result
code = 0x92
def __init__(self):
super(I2C, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class I2D(Opcode):
# Convert an int into a double
# Stack: value → result
code = 0x87
def __init__(self):
super(I2D, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class I2F(Opcode):
# Convert an int into a float
# Stack: value → result
code = 0x86
def __init__(self):
super(I2F, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class I2L(Opcode):
# Convert an int into a long
# Stack: value → result
code = 0x85
def __init__(self):
super(I2L, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class I2S(Opcode):
# Convert an int into a short
# Stack: value → result
code = 0x93
def __init__(self):
super(I2S, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class IADD(Opcode):
# Add two ints
# Stack: value1, value2 → result
code = 0x60
def __init__(self):
super(IADD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class IALOAD(Opcode):
# Load an int from an array
# Stack: arrayref, index → value
code = 0x2e
def __init__(self):
super(IALOAD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class IAND(Opcode):
# Perform a bitwise and on two integers
# Stack: value1, value2 → result
code = 0x7e
def __init__(self):
super(IAND, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class IASTORE(Opcode):
# Store an int into an array
# Stack: arrayref, index, value →
code = 0x4f
def __init__(self):
super(IASTORE, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 3
class ICONST_M1(Opcode):
# Load the int value -1 onto the stack
# Stack: → -1
code = 0x02
def __init__(self):
super(ICONST_M1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ICONST_0(Opcode):
# Load the int value 0 onto the stack
# Stack: → 0
code = 0x03
def __init__(self):
super(ICONST_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ICONST_1(Opcode):
# Load the int value 1 onto the stack
# Stack: → 1
code = 0x04
def __init__(self):
super(ICONST_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ICONST_2(Opcode):
# Load the int value 2 onto the stack
# Stack: → 2
code = 0x05
def __init__(self):
super(ICONST_2, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ICONST_3(Opcode):
# Load the int value 3 onto the stack
# Stack: → 3
code = 0x06
def __init__(self):
super(ICONST_3, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ICONST_4(Opcode):
# Load the int value 4 onto the stack
# Stack: → 4
code = 0x07
def __init__(self):
super(ICONST_4, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ICONST_5(Opcode):
# Load the int value 5 onto the stack
# Stack: → 5
code = 0x08
def __init__(self):
super(ICONST_5, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class IDIV(Opcode):
# Divide two integers
# Stack: value1, value2 → result
code = 0x6c
def __init__(self):
super(IDIV, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class IF_ACMPEQ(Opcode):
# If references are equal, branch to instruction at branchoffset (signed short
# constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value1, value2 →
code = 0xa5
def __init__(self, offset):
super(IF_ACMPEQ, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class IF_ACMPNE(Opcode):
# If references are not equal, branch to instruction at branchoffset (signed
# short constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value1, value2 →
code = 0xa6
def __init__(self, offset):
super(IF_ACMPNE, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class IF_ICMPEQ(Opcode):
# If ints are equal, branch to instruction at branchoffset (signed short
# constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value1, value2 →
code = 0x9f
def __init__(self, offset):
super(IF_ICMPEQ, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class IF_ICMPGE(Opcode):
# If value1 is greater than or equal to value2, branch to instruction at
# Iranchoffset (signed short constructed from unsigned bytes branchbyte1 << 8 +
# branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value1, value2 →
code = 0xa2
def __init__(self, offset):
super(IF_ICMPGE, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class IF_ICMPGT(Opcode):
# If value1 is greater than value2, branch to instruction at branchoffset
# (signed short constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value1, value2 →
code = 0xa3
def __init__(self, offset):
super(IF_ICMPGT, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class IF_ICMPLE(Opcode):
# If value1 is less than or equal to value2, branch to instruction at
# Iranchoffset (signed short constructed from unsigned bytes branchbyte1 << 8 +
# branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value1, value2 →
code = 0xa4
def __init__(self, offset):
super(IF_ICMPLE, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class IF_ICMPLT(Opcode):
# If value1 is less than value2, branch to instruction at branchoffset (signed
# short constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value1, value2 →
code = 0xa1
def __init__(self, offset):
super(IF_ICMPLT, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class IF_ICMPNE(Opcode):
# If ints are not equal, branch to instruction at branchoffset (signed short
# constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value1, value2 →
code = 0xa0
def __init__(self, offset):
super(IF_ICMPNE, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class IFEQ(Opcode):
# If value is 0, branch to instruction at branchoffset (signed short constructed
# from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value →
code = 0x99
def __init__(self, offset):
super(IFEQ, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class IFGE(Opcode):
# If value is greater than or equal to 0, branch to instruction at branchoffset
# (signed short constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value →
code = 0x9c
def __init__(self, offset):
super(IFGE, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class IFGT(Opcode):
# If value is greater than 0, branch to instruction at branchoffset (signed
# short constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value →
code = 0x9d
def __init__(self, offset):
super(IFGT, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class IFLE(Opcode):
# If value is less than or equal to 0, branch to instruction at branchoffset
# (signed short constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value →
code = 0x9e
def __init__(self, offset):
super(IFLE, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class IFLT(Opcode):
# If value is less than 0, branch to instruction at branchoffset (signed short
# constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value →
code = 0x9b
def __init__(self, offset):
super(IFLT, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class IFNE(Opcode):
# If value is not 0, branch to instruction at branchoffset (signed short
# constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value →
code = 0x9a
def __init__(self, offset):
super(IFNE, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class IFNONNULL(Opcode):
# If value is not null, branch to instruction at branchoffset (signed short
# constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value →
code = 0xc7
def __init__(self, offset):
super(IFNONNULL, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class IFNULL(Opcode):
# If value is null, branch to instruction at branchoffset (signed short
# constructed from unsigned bytes branchbyte1 << 8 + branchbyte2)
# Args(2): branchbyte1, branchbyte2
# Stack: value →
code = 0xc6
def __init__(self, offset):
super(IFNULL, self).__init__()
self.offset = offset
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.offset
@classmethod
def read_extra(cls, reader, dump=None):
offset = reader.read_s2()
return cls(offset)
def write_extra(self, writer):
writer.write_s2(self.offset)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class IINC(Opcode):
# Increment local variable #index by signed byte const
# Args(2): index, const
# Stack: [No change]
code = 0x84
def __init__(self, index, const):
super(IINC, self).__init__()
self.index = index
self.const = const
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s %s' % (self.index, self.const)
@classmethod
def read_extra(cls, reader, dump=None):
index = reader.read_u1()
const = reader.read_u1()
return cls(index, const)
def write_extra(self, writer):
writer.write_u1(self.index)
writer.write_u1(self.const)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 0
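# Illustrative example (hedged): IINC(2, 1) increments local variable 2 by 1 in
# place without touching the operand stack; this is the usual encoding of `i++`
# in a counted loop.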
class ILOAD(Opcode):
# Load an int value from a local variable #index
# Args(1): index
# Stack: → value
code = 0x15
def __init__(self, var):
super(ILOAD, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ILOAD_0(Opcode):
    # Load an int value onto the stack from local variable 0
# Stack: → value
code = 0x1a
def __init__(self):
super(ILOAD_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ILOAD_1(Opcode):
    # Load an int value onto the stack from local variable 1
# Stack: → value
code = 0x1b
def __init__(self):
super(ILOAD_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ILOAD_2(Opcode):
    # Load an int value onto the stack from local variable 2
# Stack: → value
code = 0x1c
def __init__(self):
super(ILOAD_2, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class ILOAD_3(Opcode):
# Load a reference onto the stack from local variable 3
# Stack: → value
code = 0x1d
def __init__(self):
super(ILOAD_3, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class IMPDEP1(Opcode):
# Reserved for implementation-dependent operations within debuggers; should not
# appear in any class file
code = 0xfe
def __init__(self):
super(IMPDEP1, self).__init__()
class IMPDEP2(Opcode):
# Reserved for implementation-dependent operations within debuggers; should not
# appear in any class file
code = 0xff
def __init__(self):
super(IMPDEP2, self).__init__()
class IMUL(Opcode):
# Multiply two integers
# Stack: value1, value2 → result
code = 0x68
def __init__(self):
super(IMUL, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class INEG(Opcode):
# Negate int
# Stack: value → result
code = 0x74
def __init__(self):
super(INEG, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class INSTANCEOF(Opcode):
# Determines if an object objectref is of a given type, identified by class
# reference index in constant pool (indexbyte1 << 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: objectref → result
code = 0xc1
def __init__(self, class_name):
super(INSTANCEOF, self).__init__()
self.klass = Classref(class_name)
def __arg_repr__(self):
return ' %s' % self.klass.class_name
def __len__(self):
return 3
@classmethod
def read_extra(cls, reader, dump=None):
klass = reader.constant_pool[reader.read_u2()]
return cls(
klass.name.bytes.decode('mutf-8'),
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.klass))
def resolve(self, constant_pool):
self.klass.resolve(constant_pool)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class INVOKEDYNAMIC(Opcode):
# Invokes a dynamic method and puts the result on the stack (might be void); the
# method is identified by method reference index in constant pool (indexbyte1 <<
# 8 + indexbyte2)
# Args(4): indexbyte1, indexbyte2, 0, 0
# Stack: [arg1, [arg2 ...]] → result
code = 0xba
def __init__(self, class_name, method_name, descriptor):
super(INVOKEDYNAMIC, self).__init__()
self.method = Methodref(class_name, method_name, descriptor)
def __arg_repr__(self):
return ' %s.%s %s' % (self.method.class_name, self.method.name, self.method.name_and_type.descriptor)
def __len__(self):
return 5
@classmethod
def read_extra(cls, reader, dump=None):
method = reader.constant_pool[reader.read_u2()]
reader.read_u2()
return cls(
method.klass.name.bytes.decode('mutf-8'),
method.name_and_type.name.bytes.decode('mutf-8'),
method.name_and_type.descriptor.bytes.decode('mutf-8')
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.method))
writer.write_u2(0)
def resolve(self, constant_pool):
self.method.resolve(constant_pool)
@property
def produce_count(self):
return 0 if self.method.name_and_type.descriptor.bytes[-1] == ord('V') else 1
@property
def consume_count(self):
return 1 + len(self.method.descriptor.parameters)
class INVOKEINTERFACE(Opcode):
# Invokes an interface method on object objectref and puts the result on the
# stack (might be void); the interface method is identified by method reference
# index in constant pool (indexbyte1 << 8 + indexbyte2)
# Args(4): indexbyte1, indexbyte2, count, 0
# Stack: objectref, [arg1, arg2, ...] → result
code = 0xb9
def __init__(self, class_name, method_name, descriptor):
super(INVOKEINTERFACE, self).__init__()
self.method = InterfaceMethodref(class_name, method_name, descriptor)
def __arg_repr__(self):
return ' %s.%s %s' % (self.method.class_name, self.method.name, self.method.name_and_type.descriptor)
def __len__(self):
return 5
@classmethod
def read_extra(cls, reader, dump=None):
method = reader.constant_pool[reader.read_u2()]
reader.read_u1() # Count (can be interpolated from parameters)
reader.read_u1() # Blank value
return cls(
method.class_name,
method.name,
method.name_and_type.descriptor.bytes.decode('mutf-8'),
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.method))
writer.write_u1(len(self.method.descriptor.parameters) + 1)
writer.write_u1(0)
def resolve(self, constant_pool):
self.method.resolve(constant_pool)
@property
def produce_count(self):
return 0 if self.method.name_and_type.descriptor.bytes[-1] == ord('V') else 1
@property
def consume_count(self):
return 1 + len(self.method.descriptor.parameters)
class INVOKESPECIAL(Opcode):
# Invoke instance method on object objectref and puts the result on the stack
# (might be void); the method is identified by method reference index in
# constant pool (indexbyte1 << 8 + indexbyte2)
# Args (2): indexbyte1, indexbyte2
# Stack: objectref, [arg1, arg2, ...] → result
code = 0xb7
def __init__(self, class_name, method_name, descriptor):
super(INVOKESPECIAL, self).__init__()
self.method = Methodref(class_name, method_name, descriptor)
def __arg_repr__(self):
return ' %s.%s %s' % (self.method.klass.name, self.method.name_and_type.name, self.method.name_and_type.descriptor)
def __len__(self):
return 3
@property
def class_name(self):
return self.method.class_name
@property
def method_name(self):
return self.method.method_name
@property
def descriptor(self):
return self.method.descriptor
@classmethod
def read_extra(cls, reader, dump=None):
method = reader.constant_pool[reader.read_u2()]
return cls(
method.klass.name.bytes.decode('mutf-8'),
method.name_and_type.name.bytes.decode('mutf-8'),
method.name_and_type.descriptor.bytes.decode('mutf-8')
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.method))
def resolve(self, constant_pool):
self.method.resolve(constant_pool)
@property
def produce_count(self):
return 0 if self.method.name_and_type.descriptor.bytes[-1] == ord('V') else 1
@property
def consume_count(self):
return 1 + len(self.method.descriptor.parameters)
class INVOKESTATIC(Opcode):
# Invoke a static method and puts the result on the stack (might be void); the
# method is identified by method reference index in constant pool (indexbyte1 <<
# 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: [arg1, arg2, ...] → result
code = 0xb8
def __init__(self, class_name, method_name, descriptor):
super(INVOKESTATIC, self).__init__()
self.method = Methodref(class_name, method_name, descriptor)
def __arg_repr__(self):
return ' %s.%s %s' % (self.method.klass.name, self.method.name_and_type.name, self.method.name_and_type.descriptor)
def __len__(self):
return 3
@classmethod
def read_extra(cls, reader, dump=None):
method = reader.constant_pool[reader.read_u2()]
return cls(
method.klass.name.bytes.decode('mutf-8'),
method.name_and_type.name.bytes.decode('mutf-8'),
method.name_and_type.descriptor.bytes.decode('mutf-8')
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.method))
def resolve(self, constant_pool):
self.method.resolve(constant_pool)
@property
def produce_count(self):
return 0 if self.method.name_and_type.descriptor.bytes[-1] == ord('V') else 1
@property
def consume_count(self):
return len(self.method.descriptor.parameters)
class INVOKEVIRTUAL(Opcode):
# Invoke virtual method on object objectref and puts the result on the stack
# (might be void); the method is identified by method reference index in
# constant pool (indexbyte1 << 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: objectref, [arg1, arg2, ...] → result
code = 0xb6
def __init__(self, class_name, method_name, descriptor):
super(INVOKEVIRTUAL, self).__init__()
self.method = Methodref(class_name, method_name, descriptor)
def __arg_repr__(self):
return ' %s.%s %s' % (self.method.klass.name, self.method.name_and_type.name, self.method.name_and_type.descriptor)
def __len__(self):
return 3
@classmethod
def read_extra(cls, reader, dump=None):
method = reader.constant_pool[reader.read_u2()]
return cls(
method.klass.name.bytes.decode('mutf-8'),
method.name_and_type.name.bytes.decode('mutf-8'),
method.name_and_type.descriptor.bytes.decode('mutf-8')
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.method))
def resolve(self, constant_pool):
self.method.resolve(constant_pool)
@property
def produce_count(self):
return 0 if self.method.name_and_type.descriptor.bytes[-1] == ord('V') else 1
@property
def consume_count(self):
return 1 + len(self.method.descriptor.parameters)
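# Illustrative usage (assumption, not in the original file): a virtual call such
# as String.length() would be emitted as
#     INVOKEVIRTUAL('java/lang/String', 'length', '()I')
# which consumes one stack slot (the receiver) and produces one (the int result),
# matching the produce_count/consume_count properties above.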
class IOR(Opcode):
# Bitwise int or
# Stack: value1, value2 → result
code = 0x80
def __init__(self):
super(IOR, self).__init__()
class IREM(Opcode):
# Logical int remainder
# Stack: value1, value2 → result
code = 0x70
def __init__(self):
super(IREM, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class IRETURN(Opcode):
# Return an integer from a method
# Stack: value → [empty]
code = 0xac
def __init__(self):
super(IRETURN, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ISHL(Opcode):
# Int shift left
# Stack: value1, value2 → result
code = 0x78
def __init__(self):
super(ISHL, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class ISHR(Opcode):
# Int arithmetic shift right
# Stack: value1, value2 → result
code = 0x7a
def __init__(self):
super(ISHR, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class ISTORE(Opcode):
# Store a reference into a local variable #index
# Args(1): index
# Stack: objectref →
code = 0x36
def __init__(self, var):
super(ISTORE, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ISTORE_0(Opcode):
# Store int value into variable 0
# Stack: value →
code = 0x3b
def __init__(self):
super(ISTORE_0, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ISTORE_1(Opcode):
# Store int value into variable 1
# Stack: value →
code = 0x3c
def __init__(self):
super(ISTORE_1, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ISTORE_2(Opcode):
# Store int value into variable 2
# Stack: value →
code = 0x3d
def __init__(self):
super(ISTORE_2, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ISTORE_3(Opcode):
# Store int value into variable 3
# Stack: value →
code = 0x3e
def __init__(self):
super(ISTORE_3, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class ISUB(Opcode):
# Int subtract
# Stack: value1, value2 → result
code = 0x64
def __init__(self):
super(ISUB, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class IUSHR(Opcode):
# Int logical shift right
# Stack: value1, value2 → result
code = 0x7c
def __init__(self):
super(IUSHR, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class IXOR(Opcode):
# Int xor
# Stack: value1, value2 → result
code = 0x82
def __init__(self):
super(IXOR, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class JSR(Opcode):
code = 0xa8
def __init__(self):
super(JSR, self).__init__()
# 2: branchbyte1, branchbyte2 → address
# Jump to subroutine at branchoffset (signed short constructed from unsigned
# bytes branchbyte1 << 8 + branchbyte2) and place the return address on the
# stack
class JSR_W(Opcode):
code = 0xc9
def __init__(self):
super(JSR_W, self).__init__()
# 4: branchbyte1, branchbyte2, branchbyte3, branchbyte4 → address
# Jump to subroutine at branchoffset (signed int constructed from unsigned bytes
# branchbyte1 << 24 + branchbyte2 << 16 + branchbyte3 << 8 + branchbyte4) and
# place the return address on the stack
class L2D(Opcode):
# Convert a long to a double
# Stack: value → result
code = 0x8a
def __init__(self):
super(L2D, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class L2F(Opcode):
# Convert a long to a float
# Stack: value → result
code = 0x89
def __init__(self):
super(L2F, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class L2I(Opcode):
# Convert a long to a int
# Stack: value → result
code = 0x88
def __init__(self):
super(L2I, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class LADD(Opcode):
# add two longs
# Stack: value1, value2 → result
code = 0x61
def __init__(self):
super(LADD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class LALOAD(Opcode):
# Load a long from an array
# Stack: arrayref, index → value
code = 0x2f
def __init__(self):
super(LALOAD, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class LAND(Opcode):
# bitwise and of two longs
# Stack: value1, value2 → result
code = 0x7f
def __init__(self):
super(LAND, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class LASTORE(Opcode):
# Store a long to an array
# Stack: arrayref, index, value →
code = 0x50
def __init__(self):
super(LASTORE, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 3
class LCMP(Opcode):
# Compare two longs values
# Stack: value1, value2 → result
code = 0x94
def __init__(self):
super(LCMP, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class LCONST_0(Opcode):
# Push the long 0 onto the stack
# Stack: → 0L
code = 0x09
def __init__(self):
super(LCONST_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class LCONST_1(Opcode):
# Push the long 1 onto the stack
# Stack: → 1L
code = 0x0a
def __init__(self):
super(LCONST_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class LDC(Opcode):
# Push a constant #index from a constant pool (String, int or float) onto the
# stack
# Args(1): index
# Stack: → value
code = 0x12
def __init__(self, const):
super(LDC, self).__init__()
if isinstance(const, str):
self.const = String(const)
elif isinstance(const, int):
self.const = Integer(const)
# elif isinstance(const, float):
# self.const = Float(const)
elif isinstance(const, Constant):
self.const = const
else:
raise TypeError('Invalid type for LDC: %s' % type(const))
def __arg_repr__(self):
return ' %s' % self.const
def __len__(self):
return 2
@classmethod
def read_extra(cls, reader, dump=None):
const = reader.read_u1()
return cls(reader.constant_pool[const])
def write_extra(self, writer):
writer.write_u1(writer.constant_pool.index(self.const))
def resolve(self, constant_pool):
self.const.resolve(constant_pool)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class LDC_W(Opcode):
# Push a constant #index from a constant pool (String, int or float) onto the
# stack (wide index is constructed as indexbyte1 << 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: → value
code = 0x13
def __init__(self, const):
super(LDC_W, self).__init__()
if isinstance(const, str):
self.const = String(const)
elif isinstance(const, int):
self.const = Integer(const)
elif isinstance(const, float):
self.const = Float(const)
elif isinstance(const, Constant):
self.const = const
else:
raise TypeError('Invalid type for LDC_W: %s' % type(const))
def __arg_repr__(self):
return ' %s' % self.const
def __len__(self):
return 3
@classmethod
def read_extra(cls, reader, dump=None):
const = reader.read_u2()
return cls(reader.constant_pool[const])
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.const))
def resolve(self, constant_pool):
self.const.resolve(constant_pool)
@property
def produce_count(self):
# Although it only produces one value, the wide value
# takes up 2 slots on the stack.
return 2
@property
def consume_count(self):
return 0
class LDC2_W(Opcode):
# Push a constant #index from a constant pool (double or long) onto the stack
# (wide index is constructed as indexbyte1 << 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: → value
code = 0x14
def __init__(self, const):
super(LDC2_W, self).__init__()
if isinstance(const, float):
self.const = Double(const)
elif isinstance(const, int):
self.const = Long(const)
elif isinstance(const, Constant):
self.const = const
else:
raise TypeError('Invalid type for LDC_W: %s' % type(const))
def __arg_repr__(self):
return ' %s' % self.const
def __len__(self):
return 3
@classmethod
def read_extra(cls, reader, dump=None):
const = reader.read_u2()
return cls(reader.constant_pool[const])
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.const))
def resolve(self, constant_pool):
self.const.resolve(constant_pool)
@property
def produce_count(self):
# Although it only produces one value, the wide value
# takes up 2 slots on the stack.
return 2
@property
def consume_count(self):
return 0
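# Illustrative note (not in the original source): the three constant-load opcodes
# above differ only in index width and constant category. LDC takes a one-byte
# constant-pool index, LDC_W loads the same kinds of constants with a two-byte
# index, and LDC2_W handles the two-slot constants: LDC2_W(3.14) wraps its
# argument in a Double, while LDC2_W(10 ** 10) becomes a Long.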
class LDIV(Opcode):
# Divide two longs
# Stack: value1, value2 → result
code = 0x6d
def __init__(self):
super(LDIV, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class LLOAD(Opcode):
# Load a long value from a local variable #index
# Args(1): index
# Stack: → value
code = 0x16
def __init__(self, var):
super(LLOAD, self).__init__()
self.var = var
def __len__(self):
return 2
def __arg_repr__(self):
return ' %s' % self.var
@classmethod
def read_extra(cls, reader, dump=None):
var = reader.read_u1()
return cls(var)
def write_extra(self, writer):
writer.write_u1(self.var)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class LLOAD_0(Opcode):
# Load a long value from a local variable 0
# Stack: → value
code = 0x1e
def __init__(self):
super(LLOAD_0, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class LLOAD_1(Opcode):
# Load a long value from a local variable 1
# Stack: → value
code = 0x1f
def __init__(self):
super(LLOAD_1, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class LLOAD_2(Opcode):
# Load a long value from a local variable 2
# Stack: → value
code = 0x20
def __init__(self):
super(LLOAD_2, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class LLOAD_3(Opcode):
# Load a long value from a local variable 3
# Stack: → value
code = 0x21
def __init__(self):
super(LLOAD_3, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class LMUL(Opcode):
# Multiply two longs
# Stack: value1, value2 → result
code = 0x69
def __init__(self):
super(LMUL, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class LNEG(Opcode):
# Negate a long
# Stack: value → result
code = 0x75
def __init__(self):
super(LNEG, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 1
class LOOKUPSWITCH(Opcode):
code = 0xab
def __init__(self):
super(LOOKUPSWITCH, self).__init__()
# 4+: <0-3 bytes padding>, defaultbyte1, defaultbyte2, defaultbyte3, defaultbyte4, npairs1, npairs2, npairs3, npairs4, match-offset pairs...
# key →
# A target address is looked up from a table using a key and execution continues
# from the instruction at that address
class LOR(Opcode):
# Bitwise or of two longs
# Stack: value1, value2 → result
code = 0x81
def __init__(self):
super(LOR, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class LREM(Opcode):
# Remainder of division of two longs
# Stack: value1, value2 → result
code = 0x71
def __init__(self):
super(LREM, self).__init__()
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 2
class LRETURN(Opcode):
# Return a long value
# Stack: value → [empty]
code = 0xad
def __init__(self):
super(LRETURN, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class LSHL(Opcode):
code = 0x79
def __init__(self):
super(LSHL, self).__init__()
# value1, value2 → result
# Bitwise shift left of a long value1 by int value2 positions
class LSHR(Opcode):
code = 0x7b
def __init__(self):
super(LSHR, self).__init__()
# value1, value2 → result
# Bitwise shift right of a long value1 by int value2 positions
class LSTORE(Opcode):
# Store a long value in a local variable #index
# Args(1): index
# Stack: value →
code = 0x37
def __init__(self):
super(LSTORE, self).__init__()
class LSTORE_0(Opcode):
# Store a long value in a local variable 0
# Stack: value →
code = 0x3f
def __init__(self):
super(LSTORE_0, self).__init__()
class LSTORE_1(Opcode):
# Store a long value in a local variable 1
# Stack: value →
code = 0x40
def __init__(self):
super(LSTORE_1, self).__init__()
class LSTORE_2(Opcode):
# Store a long value in a local variable 2
# Stack: value →
code = 0x41
def __init__(self):
super(LSTORE_2, self).__init__()
class LSTORE_3(Opcode):
# Store a long value in a local variable 3
# Stack: value →
code = 0x42
def __init__(self):
super(LSTORE_3, self).__init__()
class LSUB(Opcode):
code = 0x65
def __init__(self):
super(LSUB, self).__init__()
    # value1, value2 → result
    # Subtract two longs
class LUSHR(Opcode):
code = 0x7d
def __init__(self):
super(LUSHR, self).__init__()
# value1, value2 → result
# Bitwise shift right of a long value1 by int value2 positions, unsigned
class LXOR(Opcode):
code = 0x83
def __init__(self):
super(LXOR, self).__init__()
# value1, value2 → result
# Bitwise exclusive or of two longs
class MONITORENTER(Opcode):
code = 0xc2
def __init__(self):
super(MONITORENTER, self).__init__()
# objectref →
# Enter monitor for object ("grab the lock" - start of synchronized() section)
class MONITOREXIT(Opcode):
code = 0xc3
def __init__(self):
super(MONITOREXIT, self).__init__()
# objectref →
# Exit monitor for object ("release the lock" - end of synchronized() section)
class MULTIANEWARRAY(Opcode):
code = 0xc5
def __init__(self):
super(MULTIANEWARRAY, self).__init__()
# 3: indexbyte1, indexbyte2, dimensions
# count1, [count2,...] → arrayref
# Create a new array of dimensions dimensions with elements of type identified
# by class reference in constant pool index (indexbyte1 << 8 + indexbyte2); the
# sizes of each dimension is identified by count1, [count2, etc.]
class NEW(Opcode):
# Create new object of type identified by class reference in constant pool index
# (indexbyte1 << 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: → objectref
code = 0xbb
def __init__(self, class_name):
super(NEW, self).__init__()
self.classref = Classref(class_name)
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.classref.name
@classmethod
def read_extra(cls, reader, dump=None):
classref = reader.constant_pool[reader.read_u2()]
return cls(
classref.name.bytes.decode('mutf-8'),
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.classref))
def resolve(self, constant_pool):
self.classref.resolve(constant_pool)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class NEWARRAY(Opcode):
code = 0xbc
def __init__(self):
super(NEWARRAY, self).__init__()
# 1: atype
# count → arrayref
# Create new array with count elements of primitive type identified by atype
class NOP(Opcode):
# Perform no operation
# Stack: [No change]
code = 0x00
def __init__(self):
super(NOP, self).__init__()
class POP(Opcode):
# Discard the top value on the stack
# Stack: value →
code = 0x57
def __init__(self):
super(POP, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class POP2(Opcode):
# Discard the top two values on the stack (or one value, if it is a double or long)
# {value2, value1} →
code = 0x58
def __init__(self):
super(POP2, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class PUTFIELD(Opcode):
# Set field to value in an object objectref, where the field is identified by a
# field reference index in constant pool (indexbyte1 << 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: objectref, value →
code = 0xb5
def __init__(self, class_name, field_name, descriptor):
super(PUTFIELD, self).__init__()
self.field = Fieldref(class_name, field_name, descriptor)
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s.%s (%s)' % (self.field.klass.name, self.field.name, self.field.name_and_type.descriptor)
@classmethod
def read_extra(cls, reader, dump=None):
field = reader.constant_pool[reader.read_u2()]
return cls(
field.class_name,
field.name,
field.name_and_type.descriptor.bytes.decode('mutf-8')
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.field))
def resolve(self, constant_pool):
self.field.resolve(constant_pool)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 2
class PUTSTATIC(Opcode):
# Set static field to value in a class, where the field is identified by a field
# reference index in constant pool (indexbyte1 << 8 + indexbyte2)
# Args(2): indexbyte1, indexbyte2
# Stack: value →
code = 0xb3
def __init__(self, class_name, field_name, descriptor):
super(PUTSTATIC, self).__init__()
self.field = Fieldref(class_name, field_name, descriptor)
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s.%s (%s)' % (self.field.klass.name, self.field.name, self.field.name_and_type.descriptor)
@classmethod
def read_extra(cls, reader, dump=None):
field = reader.constant_pool[reader.read_u2()]
return cls(
field.class_name,
field.name,
field.name_and_type.descriptor.bytes.decode('mutf-8')
)
def write_extra(self, writer):
writer.write_u2(writer.constant_pool.index(self.field))
def resolve(self, constant_pool):
self.field.resolve(constant_pool)
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 1
class RET(Opcode):
code = 0xa9
def __init__(self):
super(RET, self).__init__()
# 1: index
# [No change]
# Continue execution from address taken from a local variable #index (the
# asymmetry with jsr is intentional)
class RETURN(Opcode):
# Return void from method
    # Stack: → [empty]
code = 0xb1
def __init__(self):
super(RETURN, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 0
class SALOAD(Opcode):
code = 0x35
def __init__(self):
super(SALOAD, self).__init__()
# arrayref, index → value
# Load short from array
class SASTORE(Opcode):
code = 0x56
def __init__(self):
super(SASTORE, self).__init__()
# arrayref, index, value →
# Store short to array
class SIPUSH(Opcode):
# push a short onto the stack
# Args(2): byte1, byte2
# Stack: → value
code = 0x11
def __init__(self, const):
super(SIPUSH, self).__init__()
self.const = const
def __len__(self):
return 3
def __arg_repr__(self):
return ' %s' % self.const
@classmethod
def read_extra(cls, reader, dump=None):
const = reader.read_s2()
return cls(const)
def write_extra(self, writer):
writer.write_s2(self.const)
@property
def produce_count(self):
return 1
@property
def consume_count(self):
return 0
class SWAP(Opcode):
# Swaps two top words on the stack (note that value1 and value2 must not be
# double or long)
# Stack: value2, value1 → value1, value2
code = 0x5f
def __init__(self):
super(SWAP, self).__init__()
@property
def produce_count(self):
return 0
@property
def consume_count(self):
return 0
class TABLESWITCH(Opcode):
code = 0xaa
def __init__(self):
super(TABLESWITCH, self).__init__()
# 4+: [0-3 bytes padding], defaultbyte1, defaultbyte2, defaultbyte3, defaultbyte4, lowbyte1, lowbyte2, lowbyte3, lowbyte4, highbyte1, highbyte2, highbyte3, highbyte4, jump offsets...
# index →
# continue execution from an address in the table at offset index
class WIDE(Opcode):
code = 0xc4
def __init__(self):
super(WIDE, self).__init__()
# 3/5: Opcode, indexbyte1, indexbyte2
# or
# iinc, indexbyte1, indexbyte2, countbyte1, countbyte2
# Execute Opcode, where Opcode is either iload, fload, aload, lload, dload,
# istore, fstore, astore, lstore, dstore, or ret, but assume the index is 16
# bit; or execute iinc, where the index is 16 bits and the constant to
# increment by is a signed 16 bit short
cd89e3aa619182b4eed1d6985a95558b117a15f7 | 3,314 | py | Python | contrib/python/api/skydive/graph.py | EmilienM/skydive | ["Apache-2.0"] | stars: 1 | forks: 2
#
# Copyright (C) 2017 Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
class GraphElement(object):
"""
Definition of a Skydive graph element.
"""
def __init__(self, id, host,
created_at=0, updated_at=0, deleted_at=0, revision=0,
metadata=None):
self.id = id
self.host = host
self.metadata = metadata
self.created_at = created_at
self.updated_at = updated_at
self.deleted_at = deleted_at
self.revision = revision
def repr_json(self):
obj = {
"ID": self.id,
"Host": self.host,
"Metadata": self.metadata,
}
if self.created_at:
obj["CreatedAt"] = self.created_at
if self.updated_at:
obj["UpdatedAt"] = self.updated_at
if self.deleted_at:
obj["DeletedAt"] = self.deleted_at
return obj
@classmethod
def from_object(self, obj):
return self(obj["ID"], obj["Host"],
created_at=obj.get("CreatedAt", 0),
updated_at=obj.get("UpdatedAt", 0),
deleted_at=obj.get("DeletedAt", 0),
revision=obj.get("Revision", 0),
metadata=obj.get("Metadata"))
class Node(GraphElement):
"""
Definition of a Skydive graph Node, see GraphElement.
"""
pass
class Edge(GraphElement):
"""
Definition of a Skydive graph Edge, see GraphElement.
"""
def __init__(self, id, host, parent, child,
created_at=0, updated_at=0, deleted_at=0, revision=0,
metadata=None):
        super(Edge, self).__init__(id, host,
                                   created_at=created_at,
                                   updated_at=updated_at,
                                   deleted_at=deleted_at,
                                   revision=revision,
                                   metadata=metadata)
self.parent = parent
self.child = child
def repr_json(self):
obj = super(Edge, self).repr_json()
obj["Parent"] = self.parent
obj["Child"] = self.child
return obj
@classmethod
def from_object(self, obj):
return self(obj["ID"], obj["Host"],
obj["Parent"], obj["Child"],
created_at=obj.get("CreatedAt", 0),
updated_at=obj.get("UpdatedAt", 0),
deleted_at=obj.get("DeletedAt", 0),
revision=obj.get("Revision", 0),
metadata=obj.get("Metadata"))
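# Illustrative helper (not part of the original skydive API): shows the intended
# round trip between the wire-format dictionaries and the classes above.
def _example_node_roundtrip():
    obj = {"ID": "n1", "Host": "host1", "CreatedAt": 1, "Metadata": {"Name": "eth0"}}
    node = Node.from_object(obj)
    return node.repr_json()  # dict equivalent to `obj`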
7fb0e2b79e78b0fb482526a112708c5e86c7be8d | 2,181 | py | Python | game/engine.py | tushortz/Football-Match-Engine | ["MIT"]
import random
class Engine:
def __init__(self, home_team, away_team):
self.home_team = home_team
self.away_team = away_team
def scoreboard(self):
print("%s %s : %s %s" % (self.home_team.name, self.home_team.score, self.away_team.score, self.away_team.name))
def play(self, commetary=False):
if commetary == True:
print("%s vs %s\n" % (self.home_team, self.away_team))
for x in range(self.home_team.chances):
away_gk = self.away_team.players[0]
away_def = random.choice(self.away_team.players[1:6])
away_def_luck = random.random()
home_player = random.choice(self.home_team.players[1:])
# Opponent defends before player shoots
if ((away_def.skill.defence * away_def_luck) < (home_player.skill.attack * home_player.luck)):
if commetary == True:
print("%s dribbles past %s" % (home_player.get_name(), away_def.get_name()))
                # Shoot once and reuse the result so the score matches the commentary
                shot = home_player.shoot(away_gk)
                if shot == 1:
                    self.home_team.score += shot
                    if commetary == True:
                        print("GOAL!!! Clinical finish from %s\n" % (home_player.get_name()))
                else:
                    if commetary == True:
                        print("MISS! Fine save from %s\n" % (away_gk.get_name()))
for x in range(self.away_team.chances):
home_gk = self.home_team.players[0]
home_def = random.choice(self.home_team.players[1:6])
home_def_luck = random.random()
away_player = random.choice(self.away_team.players[1:])
# Opponent defends before player shoots
if ((home_def.skill.defence * home_def_luck) < (away_player.skill.attack * away_player.luck)):
if commetary == True:
print("%s dribbles past %s" % (away_player.get_name(), home_def.get_name()))
                # Shoot once and reuse the result so the score matches the commentary
                shot = away_player.shoot(home_gk)
                if shot == 1:
                    self.away_team.score += shot
                    if commetary == True:
                        print("GOAL!!! Clinical finish from %s\n" % (away_player.get_name()))
                else:
                    if commetary == True:
                        print("MISS! Fine save from %s\n" % (home_gk.get_name()))
def winner(self):
winner = None
if self.home_team.score > self.away_team.score:
winner = self.home_team
elif self.home_team.score < self.away_team.score:
winner = self.away_team
        return winner
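# Example usage (sketch only; the Team and Player classes live elsewhere in the
# repository and are assumed here):
#     engine = Engine(home_team, away_team)
#     engine.play(commetary=True)
#     engine.scoreboard()
#     print(engine.winner())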
8a871cde4daa6984a4518c2ca9a6e0d67bbaf8b9 | 4,386 | py | Python | src/Summarizer.py | Rohit-More/Machine-Learning-and-Data-Science (stars: 1) | rayruchira/Machine-Learning-and-Data-Science (issues: 2, forks: 16) | ["MIT"]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
from langdetect import detect
text_str = input("Enter the text : ")
if(detect(text_str)=='ta'):
a=[]
with open('tamil Stop words.txt', 'r', encoding='utf-8') as f:
a+=f.readlines()
f.close()
for i in range(0,len(a)):
a[i]=a[i].rstrip('\n')
stopWords = a
elif(detect(text_str)=='en'):
stopWords = set(stopwords.words("english"))
elif(detect(text_str)=='hi'):
a=[]
with open('hindi stopwords.txt', 'r', encoding='utf-8') as f:
a+=f.readlines()
f.close()
for i in range(0,len(a)):
a[i]=a[i].rstrip('\n')
stopWords = a
def _create_frequency_table(text_string) -> dict:
"""
we create a dictionary for the word frequency table.
For this, we should only use the words that are not part of the stopWords array.
Removing stop words and making frequency table
Stemmer - an algorithm to bring words to its root word.
:rtype: dict
"""
#
words = word_tokenize(text_string)
ps = PorterStemmer()
freqTable = dict()
for word in words:
word = ps.stem(word)
if word in stopWords:
continue
if word in freqTable:
freqTable[word] += 1
else:
freqTable[word] = 1
return freqTable
def _score_sentences(sentences, freqTable) -> dict:
"""
score a sentence by its words
Basic algorithm: adding the frequency of every non-stop word in a sentence divided by total no of words in a sentence.
:rtype: dict
"""
sentenceValue = dict()
for sentence in sentences:
word_count_in_sentence = (len(word_tokenize(sentence)))
word_count_in_sentence_except_stop_words = 0
for wordValue in freqTable:
if wordValue in sentence.lower():
word_count_in_sentence_except_stop_words += 1
if sentence in sentenceValue:
sentenceValue[sentence] += freqTable[wordValue]
else:
sentenceValue[sentence] = freqTable[wordValue]
if sentence in sentenceValue:
sentenceValue[sentence] = sentenceValue[sentence] / word_count_in_sentence_except_stop_words
    '''
    A potential issue with this scoring algorithm is that long sentences get an
    advantage over short ones. To compensate, every sentence score is divided by
    the number of frequency-table words found in that sentence, and the full
    sentence string is used as the dictionary key.
    '''
return sentenceValue
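# Worked example (illustrative): with freqTable = {"cat": 2, "sat": 1} and the
# sentence "The cat sat.", both table words match, so the raw score is 2 + 1 = 3
# and the stored score becomes 3 / 2 matching words = 1.5.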
def _find_average_score(sentenceValue) -> int:
"""
Find the average score from the sentence value dictionary
:rtype: int
"""
sumValues = 0
for entry in sentenceValue:
sumValues += sentenceValue[entry]
# Average value of a sentence from original text
average = (sumValues / len(sentenceValue))
return average
def _generate_summary(sentences, sentenceValue, threshold):
sentence_count = 0
summary = ''
for sentence in sentences:
if sentence in sentenceValue and sentenceValue[sentence] >= (threshold):
summary += " " + sentence
sentence_count += 1
return summary
def run_summarization(text):
# 1 Create the word frequency table
freq_table = _create_frequency_table(text)
'''
We already have a sentence tokenizer, so we just need
to run the sent_tokenize() method to create the array of sentences.
'''
# 2 Tokenize the sentences
sentences = sent_tokenize(text)
# 3 Important Algorithm: score the sentences
sentence_scores = _score_sentences(sentences, freq_table)
# 4 Find the threshold
threshold = _find_average_score(sentence_scores)
# 5 Important Algorithm: Generate the summary
summary = _generate_summary(sentences, sentence_scores, 1.0 * threshold)
return summary
if __name__ == '__main__':
result = run_summarization(text_str)
print("\nSummarized text is: \n")
print(result)
6ea5667cc3357e9ed68632011b599f8014dd47ce | 5,720 | py | Python | src/pyscripts/sqlalchemy/test/config.py | paddybu/xsbs (stars: 2) | harryd/xsbs-minimal (forks: 2) | ["BSD-3-Clause"]
import optparse, os, sys, re, ConfigParser, StringIO, time, warnings
logging = None
__all__ = 'parser', 'configure', 'options',
db = None
db_label, db_url, db_opts = None, None, {}
options = None
file_config = None
base_config = """
[db]
sqlite=sqlite:///:memory:
sqlite_file=sqlite:///querytest.db
postgres=postgres://scott:tiger@127.0.0.1:5432/test
mysql=mysql://scott:tiger@127.0.0.1:3306/test
oracle=oracle://scott:tiger@127.0.0.1:1521
oracle8=oracle://scott:tiger@127.0.0.1:1521/?use_ansi=0
mssql=mssql://scott:tiger@SQUAWK\\SQLEXPRESS/test
firebird=firebird://sysdba:masterkey@localhost//tmp/test.fdb
maxdb=maxdb://MONA:RED@/maxdb1
"""
def _log(option, opt_str, value, parser):
global logging
if not logging:
import logging
logging.basicConfig()
if opt_str.endswith('-info'):
logging.getLogger(value).setLevel(logging.INFO)
elif opt_str.endswith('-debug'):
logging.getLogger(value).setLevel(logging.DEBUG)
def _list_dbs(*args):
print "Available --db options (use --dburi to override)"
for macro in sorted(file_config.options('db')):
print "%20s\t%s" % (macro, file_config.get('db', macro))
sys.exit(0)
def _server_side_cursors(options, opt_str, value, parser):
db_opts['server_side_cursors'] = True
def _engine_strategy(options, opt_str, value, parser):
if value:
db_opts['strategy'] = value
class _ordered_map(object):
def __init__(self):
self._keys = list()
self._data = dict()
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._data[key] = value
def __iter__(self):
for key in self._keys:
yield self._data[key]
# at one point in refactoring, modules were injecting into the config
# process. this could probably just become a list now.
post_configure = _ordered_map()
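# Illustrative note (not in the original file): each configuration step below is
# registered under a key and later run in insertion order, roughly:
#
#     for fn in post_configure:
#         fn(options, file_config)
#
# so the ordering of the assignments in this module is significant.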
def _engine_uri(options, file_config):
global db_label, db_url
db_label = 'sqlite'
if options.dburi:
db_url = options.dburi
db_label = db_url[:db_url.index(':')]
elif options.db:
db_label = options.db
db_url = None
if db_url is None:
if db_label not in file_config.options('db'):
raise RuntimeError(
"Unknown engine. Specify --dbs for known engines.")
db_url = file_config.get('db', db_label)
post_configure['engine_uri'] = _engine_uri
def _require(options, file_config):
if not(options.require or
(file_config.has_section('require') and
file_config.items('require'))):
return
try:
import pkg_resources
except ImportError:
raise RuntimeError("setuptools is required for version requirements")
cmdline = []
for requirement in options.require:
pkg_resources.require(requirement)
cmdline.append(re.split('\s*(<!>=)', requirement, 1)[0])
if file_config.has_section('require'):
for label, requirement in file_config.items('require'):
if not label == db_label or label.startswith('%s.' % db_label):
continue
seen = [c for c in cmdline if requirement.startswith(c)]
if seen:
continue
pkg_resources.require(requirement)
post_configure['require'] = _require
def _engine_pool(options, file_config):
if options.mockpool:
from sqlalchemy import pool
db_opts['poolclass'] = pool.AssertionPool
post_configure['engine_pool'] = _engine_pool
def _create_testing_engine(options, file_config):
from sqlalchemy.test import engines, testing
global db
db = engines.testing_engine(db_url, db_opts)
testing.db = db
post_configure['create_engine'] = _create_testing_engine
def _prep_testing_database(options, file_config):
from sqlalchemy.test import engines
from sqlalchemy import schema
try:
# also create alt schemas etc. here?
if options.dropfirst:
e = engines.utf8_engine()
existing = e.table_names()
if existing:
print "Dropping existing tables in database: " + db_url
try:
print "Tables: %s" % ', '.join(existing)
except:
pass
print "Abort within 5 seconds..."
time.sleep(5)
md = schema.MetaData(e, reflect=True)
md.drop_all()
e.dispose()
except (KeyboardInterrupt, SystemExit):
raise
except Exception, e:
warnings.warn(RuntimeWarning(
"Error checking for existing tables in testing "
"database: %s" % e))
post_configure['prep_db'] = _prep_testing_database
def _set_table_options(options, file_config):
from sqlalchemy.test import schema
table_options = schema.table_options
for spec in options.tableopts:
key, value = spec.split('=')
table_options[key] = value
if options.mysql_engine:
table_options['mysql_engine'] = options.mysql_engine
post_configure['table_options'] = _set_table_options
def _reverse_topological(options, file_config):
if options.reversetop:
from sqlalchemy.orm import unitofwork
from sqlalchemy import topological
class RevQueueDepSort(topological.QueueDependencySorter):
def __init__(self, tuples, allitems):
self.tuples = list(tuples)
self.allitems = list(allitems)
self.tuples.reverse()
self.allitems.reverse()
topological.QueueDependencySorter = RevQueueDepSort
unitofwork.DependencySorter = RevQueueDepSort
post_configure['topological'] = _reverse_topological
9dba7d522c93b48e076d0070d4803869d73f4077 | 75,001 | py | Python | examples/processing/obstacle_detection.py | ManishaMohane/acconeer-python-exploration | ["BSD-3-Clause-Clear"]
import logging
import os
import numpy as np
import pyqtgraph as pg
import yaml
from numpy import pi, unravel_index
from scipy.fftpack import fft, fftshift
from PyQt5 import QtCore
from acconeer.exptool import configs, utils
from acconeer.exptool.clients import SocketClient, SPIClient, UARTClient
from acconeer.exptool.pg_process import PGProccessDiedException, PGProcess
log = logging.getLogger("acconeer.exptool.examples.obstacle_detection")
MAX_SPEED = 8.00 # Max speed to be resolved with FFT in cm/s
WAVELENGTH = 0.49 # Wavelength of radar in cm
FUSION_MAX_OBSTACLES = 5 # Max obstacles for sensor fusion
FUSION_MAX_SHADOWS = 10 # Max obstacles for sensor fusion shadows
FUSION_HISTORY = 30 # Max history for sensor fusion
BACKGROUND_PLATEAU_INTERPOLATION = 0.25 # Default plateau for background parameterization
BACKGROUND_PLATEAU_FACTOR = 1.25 # Default factor for background parameterization
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
def main():
args = utils.ExampleArgumentParser(num_sens=1).parse_args()
utils.config_logging(args)
if args.socket_addr:
client = SocketClient(args.socket_addr)
elif args.spi:
client = SPIClient()
else:
port = args.serial_port or utils.autodetect_serial_port()
client = UARTClient(port)
sensor_config = get_sensor_config()
processing_config = get_processing_config()
sensor_config.sensor = args.sensors
session_info = client.setup_session(sensor_config)
pg_updater = PGUpdater(sensor_config, processing_config, session_info)
pg_process = PGProcess(pg_updater)
pg_process.start()
client.start_session()
interrupt_handler = utils.ExampleInterruptHandler()
print("Press Ctrl-C to end session")
processor = ObstacleDetectionProcessor(sensor_config, processing_config, session_info)
while not interrupt_handler.got_signal:
info, sweep = client.get_next()
plot_data = processor.process(sweep)
if plot_data is not None:
try:
pg_process.put_data(plot_data)
except PGProccessDiedException:
break
print("Disconnecting...")
pg_process.close()
client.disconnect()
def get_sensor_config():
config = configs.IQServiceConfig()
config.range_interval = [0.1, 0.5]
config.repetition_mode = configs.IQServiceConfig.RepetitionMode.SENSOR_DRIVEN
config.update_rate = int(np.ceil(MAX_SPEED * 4 / WAVELENGTH))
config.gain = 0.7
return config
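# Worked example of the update-rate choice above: with MAX_SPEED = 8.00 cm/s and
# WAVELENGTH = 0.49 cm, update_rate = ceil(8.00 * 4 / 0.49) = ceil(65.3) = 66 Hz,
# which keeps the fastest expected motion inside the unambiguous velocity span of
# the FFT (v_max = WAVELENGTH * update_rate / 4 ≈ 8.1 cm/s).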
def get_processing_config():
return {
"fft_length": {
"name": "FFT length",
"value": 16,
"limits": [2, 512],
"type": int,
},
"threshold": { # Ignore data below threshold in FFT window for moving objects
"name": "Moving Threshold",
"value": 0.05,
"limits": [0.0, 100],
"type": float,
},
"v_max": {
"name": None,
"value": None,
"limits": None,
"type": None,
"text": "Max velocity = 4.9mm * freq / 4",
},
"downsampling": {
"name": "Downsample scale",
"value": 1,
"limits": [0, 124],
"type": int,
"advanced": True,
},
"calib": {
"name": "Background iterations",
"value": 10,
"limits": [0, 1000],
"type": int,
"advanced": True,
},
"bg_offset": {
"name": "Background Scale",
"value": 1.6,
"limits": [0, 1000],
"type": float,
"advanced": True,
},
"static_threshold": { # Ignore data below threshold in FFT window for static objects
"name": "Stationary Threshold",
"value": 0.1,
"limits": [0.0, 100],
"type": float,
"advanced": True,
},
"close_threshold_addition": { # Ignore data below threshold for very close range
"name": "Close Threshold Addition",
"value": 0.1,
"limits": [0.0, 100],
"type": float,
"advanced": True,
},
"static_distance": {
"name": "Distance limit far",
"value": 45,
"limits": [0.0, 1000],
"type": float,
"advanced": True,
},
"static_grad": {
"name": "Static distance gradient",
"value": 6,
"limits": [0.0, 100],
"type": float,
"advanced": True,
},
"close_dist": {
"name": "Distance limit near",
"value": 16,
"limits": [0.0, 100],
"type": float,
"advanced": True,
},
"static_freq": {
"name": "Static frequency gradient",
"value": 2,
"limits": [0.0, 100],
"type": float,
"advanced": True,
},
"nr_peaks": {
"name": "Number of peaks",
"value": 1,
"limits": [0, 100],
"type": int,
"advanced": True,
},
"edge_to_peak": {
"name": "Edge to peak ratio",
"value": 1,
"limits": [0, 1],
"type": float,
"advanced": True,
},
"peak_hist": {
"name": "Peak history",
"value": 500,
"limits": [50, 2000],
"type": int,
"advanced": True,
},
"robot_velocity": {
"name": "Robot Velocity [cm/s]",
"value": 6,
"limits": [-1000, 1000],
"type": float,
"advanced": True,
},
"use_parameterization": {
"name": "Use bg parameterization",
"value": False,
"advanced": True,
},
"background_map": {
"name": "Show background",
"value": True,
"advanced": True,
},
"threshold_map": {
"name": "Show threshold map",
"value": False,
"advanced": True,
},
"show_line_outs": {
"name": "Show extra line outs",
"value": False,
"advanced": True,
},
"distance_history": {
"name": "Show distance history",
"value": True,
"advanced": True,
},
"velocity_history": {
"name": "Show velocity history",
"value": True,
"advanced": True,
},
"angle_history": {
"name": "Show angle history",
"value": True,
"advanced": True,
},
"amplitude_history": {
"name": "Show amplitude history",
"value": False,
"advanced": True,
},
"fusion_map": {
"name": "Show fusion (2 sensors)",
"value": False,
"advanced": True,
},
"show_shadows": {
"name": "Show shadows",
"value": False,
"advanced": True,
},
"sensor_separation": {
"name": "Sensor separation [cm]",
"value": 15,
"limits": [1, 100],
"type": float,
"advanced": True,
},
"fusion_spread": {
"name": "Fusion spread [degrees]",
"value": 15,
"limits": [1, 45],
"type": float,
"advanced": True,
},
# Allows saving and loading from GUI
"send_process_data": {
"value": None,
"text": "FFT background",
},
}
class ObstacleDetectionProcessor:
def __init__(self, sensor_config, processing_config, session_info):
self.sensor_config = sensor_config
self.sensor_separation = processing_config["sensor_separation"]["value"]
self.fusion_spread = processing_config["fusion_spread"]["value"]
self.fft_len = processing_config["fft_length"]["value"]
self.threshold = processing_config["threshold"]["value"]
self.static_threshold = processing_config["static_threshold"]["value"]
self.static_distance = processing_config["static_distance"]["value"]
self.close_threshold_addition = processing_config["close_threshold_addition"]["value"]
self.sweep_index = 0
self.use_bg = max(processing_config["calib"]["value"], 0)
self.bg_off = processing_config["bg_offset"]["value"]
self.saved_bg = processing_config["send_process_data"]["value"]
self.bg_avg = 0
self.peak_hist_len = processing_config["peak_hist"]["value"]
self.nr_locals = processing_config["nr_peaks"]["value"]
self.static_freq_limit = processing_config["static_freq"]["value"]
self.static_dist_gradient = processing_config["static_grad"]["value"]
self.close_dist_limit = processing_config["close_dist"]["value"]
self.robot_velocity = processing_config["robot_velocity"]["value"]
self.edge_ratio = processing_config["edge_to_peak"]["value"]
self.downsampling = processing_config["downsampling"]["value"]
self.fusion_enabled = processing_config["fusion_map"]["value"]
self.fusion_handle = None
self.fusion_max_obstacles = FUSION_MAX_OBSTACLES
self.fusion_history = FUSION_HISTORY
self.fusion_max_shadows = FUSION_MAX_SHADOWS
self.use_bg_parameterization = processing_config["use_parameterization"]["value"]
self.bg_params = []
def process(self, sweep):
if len(sweep.shape) == 1:
sweep = np.expand_dims(sweep, 0)
if self.downsampling:
sweep = sweep[:, ::self.downsampling]
sweep = sweep / 2**12
nr_sensors, len_range = sweep.shape
nr_sensors = min(len(self.sensor_config.sensor), nr_sensors)
if self.sweep_index == 0 and self.bg_avg == 0:
if self.fusion_enabled and nr_sensors <= 2:
self.fusion_handle = SensorFusion()
fusion_params = {
"separation": self.sensor_separation,
"obstacle_spread": self.fusion_spread,
"min_y_spread": 4, # cm
"min_x_spread": 4, # cm
"decay_time": 5, # cycles
"step_time": 1 / self.sensor_config.update_rate, # s
"min_distance": self.sensor_config.range_start * 100, # cm
}
self.fusion_handle.setup(fusion_params)
self.sweep_map = np.zeros((nr_sensors, len_range, self.fft_len), dtype="complex")
self.fft_bg = np.zeros((nr_sensors, len_range, self.fft_len))
self.hamming_map = np.zeros((len_range, self.fft_len))
self.fusion_data = {
"fused_y": np.full((self.fusion_history, self.fusion_max_obstacles), np.nan),
"fused_x": np.full((self.fusion_history, self.fusion_max_obstacles), np.nan),
"left_shadow_y": np.full((self.fusion_history, self.fusion_max_shadows), np.nan),
"left_shadow_x": np.full((self.fusion_history, self.fusion_max_shadows), np.nan),
"right_shadow_y": np.full((self.fusion_history, self.fusion_max_shadows), np.nan),
"right_shadow_x": np.full((self.fusion_history, self.fusion_max_shadows), np.nan),
}
for i in range(len_range):
self.hamming_map[i, :] = np.hamming(self.fft_len)
self.env_xs = np.linspace(*self.sensor_config.range_interval * 100, len_range)
self.peak_prop_num = 4
self.peak_hist = np.zeros((nr_sensors, self.nr_locals,
self.peak_prop_num, self.peak_hist_len))
self.peak_hist *= float(np.nan)
self.mask = np.zeros((len_range, self.fft_len))
self.threshold_map = np.zeros((len_range, self.fft_len))
if self.saved_bg is not None:
if isinstance(self.saved_bg, dict):
try:
for s in range(nr_sensors):
# Generate background from piece-wise-linear (pwl) interpolation
self.bg_params.append(self.saved_bg)
self.generate_background_from_pwl_params(
self.fft_bg[s, :, :],
self.saved_bg
)
if s == 0:
self.dump_bg_params_to_yaml()
self.use_bg = False
self.use_bg_parameterization = False
log.info("Using saved parameterized FFT background data!")
except Exception:
log.warning("Could not reconstruct background!")
elif hasattr(self.saved_bg, "shape"):
if self.saved_bg.shape == self.fft_bg.shape:
self.fft_bg = self.saved_bg
self.use_bg = False
log.info("Using saved FFT background data!")
else:
log.warning("Saved background has wrong shape/type!")
log.warning("Required shape {}".format(self.fft_bg.shape))
if hasattr(self.saved_bg, "shape"):
log.warning("Supplied shape {}".format(self.saved_bg.shape))
else:
log.warning("Received unsupported background data!")
for dist in range(len_range):
for freq in range(self.fft_len):
self.threshold_map[dist, freq] = self.variable_thresholding(
freq, dist, self.threshold, self.static_threshold)
fft_psd = np.empty((nr_sensors, len_range, self.fft_len))
fused_obstacles = {}
for s in range(nr_sensors):
self.push(sweep[s, :], self.sweep_map[s, :, :])
signalFFT = fftshift(fft(self.sweep_map[s, :, :] * self.hamming_map, axis=1), axes=1)
fft_psd[s, :, :] = np.square(np.abs(signalFFT))
signalPSD = fft_psd[s, :, :]
if self.use_bg and self.sweep_index == self.fft_len - 1:
self.fft_bg[s, :, :] = np.maximum(self.bg_off * signalPSD, self.fft_bg[s, :, :])
if s == nr_sensors - 1:
self.bg_avg += 1
if self.bg_avg == self.use_bg and self.use_bg_parameterization:
for i in range(nr_sensors):
self.bg_params.append(self.parameterize_bg(self.fft_bg[i, :, :]))
# only dump first sensor params
if i == 0:
self.dump_bg_params_to_yaml()
if self.use_bg_parameterization:
self.generate_background_from_pwl_params(
self.fft_bg[i, :, :],
self.bg_params[i]
)
signalPSD_sub = signalPSD - self.fft_bg[s, :, :]
signalPSD_sub[signalPSD_sub < 0] = 0
env = np.abs(sweep[s, :])
fft_peaks, peaks_found = self.find_peaks(signalPSD_sub)
fft_max_env = signalPSD[:, 8]
angle = None
velocity = None
peak_idx = np.argmax(env)
obstacles = []
if self.sweep_index < self.fft_len:
fft_peaks = None
if fft_peaks is not None:
fft_max_env = signalPSD[:, int(fft_peaks[0, 1])]
zero = np.floor(self.fft_len / 2)
for i in range(self.nr_locals):
bin_index = (fft_peaks[i, 2] - zero)
velocity = (bin_index / zero) * WAVELENGTH * self.sensor_config.update_rate / 4
angle = np.arccos(self.clamp(abs(velocity) / self.robot_velocity, -1.0, 1.0))
angle = np.sign(velocity) * angle / pi * 180
peak_idx = int(fft_peaks[i, 0])
distance = self.env_xs[int(fft_peaks[i, 0])]
amp = fft_peaks[i, 3]
if not amp:
distance = float(np.nan)
velocity = float(np.nan)
angle = float(np.nan)
amp = float(np.nan)
elif self.fusion_handle:
obstacles.append(
{"angle": abs(angle),
"velocity": velocity,
"distance": distance,
"amplitude": amp,
"sensor": s}
)
self.push_vec(distance, self.peak_hist[s, i, 0, :])
self.push_vec(velocity, self.peak_hist[s, i, 1, :])
self.push_vec(angle, self.peak_hist[s, i, 2, :])
self.push_vec(amp, self.peak_hist[s, i, 3, :])
fft_peaks = fft_peaks[:peaks_found, :]
else:
for i in range(self.nr_locals):
for j in range(self.peak_prop_num):
self.push_vec(float(np.nan), self.peak_hist[s, i, j, :])
fused_obstacles["{}".format(s)] = obstacles
if self.fusion_handle:
fused_obstacles, shadow_obstacles = self.fusion_handle.update(fused_obstacles)
for position in self.fusion_data:
self.fusion_data[position] = np.roll(self.fusion_data[position], 1, axis=0)
self.fusion_data[position][0, :] = np.nan
if fused_obstacles is not None and nr_sensors == 2:
for i, o in enumerate(fused_obstacles):
if i < self.fusion_max_obstacles:
self.fusion_data["fused_x"][0, i] = o["pos"][0]
self.fusion_data["fused_y"][0, i] = o["pos"][1]
else:
print("Too many fused obstacles ({})".format(len(fused_obstacles)))
if shadow_obstacles is not None:
for i, o in enumerate(shadow_obstacles):
if i < self.fusion_max_shadows:
tag = o["sensor"]
self.fusion_data["{}_shadow_x".format(tag)][0, i] = o["pos"][0]
self.fusion_data["{}_shadow_y".format(tag)][0, i] = o["pos"][1]
else:
print("Too many shadow obstacles ({})".format(len(shadow_obstacles)))
fft_bg = None
fft_bg_send = None
if self.sweep_index < 3 * self.fft_len: # Make sure data gets sent to plotting
fft_bg = self.fft_bg[0, :, :]
fft_bg_send = self.fft_bg
if self.use_bg:
iterations_left = self.use_bg - self.bg_avg
else:
iterations_left = 0
threshold_map = None
if self.sweep_index < 3 * self.fft_len: # Make sure data gets sent to plotting
threshold_map = self.threshold_map
f_data = self.fusion_data
out_data = {
"env_ampl": env,
"fft_max_env": fft_max_env,
"fft_map": fft_psd,
"peak_idx": peak_idx,
"angle": angle,
"velocity": velocity,
"fft_peaks": fft_peaks,
"peak_hist": self.peak_hist[0, :, :, :],
"fft_bg": fft_bg,
"fft_bg_iterations_left": iterations_left,
"threshold_map": threshold_map,
"send_process_data": fft_bg_send,
"fused_obstacles": [f_data["fused_x"], f_data["fused_y"]],
"left_shadows": [f_data["left_shadow_x"], f_data["left_shadow_y"]],
"right_shadows": [f_data["right_shadow_x"], f_data["right_shadow_y"]],
"sweep_index": self.sweep_index,
"peaks_found": peaks_found,
}
if (self.bg_avg < self.use_bg) and (self.sweep_index == self.fft_len - 1):
self.sweep_index = 0
else:
self.sweep_index += 1
return out_data
def dump_bg_params_to_yaml(self, filename=None, bg_params=None):
if filename is None:
filename = os.path.join(DIR_PATH, "obstacle_bg_params_dump.yaml")
if bg_params is None:
bg_params = self.bg_params[0]
with open(filename, 'w') as f_handle:
yaml.dump(bg_params, f_handle, default_flow_style=False)
def remap(self, val, x1, x2, y1, y2):
if x1 == x2:
return y1
m = (y2 - y1) / (x2 - x1)
b = y1 - m * x1
return val * m + b
def clamp(self, val, a, b):
val = max(val, a)
val = min(val, b)
return val
def push(self, sweep, arr):
res = np.empty_like(arr)
res[:, 0] = sweep
res[:, 1:] = arr[:, :-1]
arr[...] = res
def push_vec(self, val, vec):
res = np.empty_like(vec)
res[0] = val
res[1:] = vec[:-1]
vec[...] = res
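# Reduce a measured FFT background map to a small set of piecewise-linear (PWL) parameters:
# a 5-point PWL profile for the static (zero-velocity) bin, a 2-point PWL step for the moving
# bins, the static/adjacent-bin ratio and the overall moving maximum. The parameters can be
# dumped to YAML and later used to regenerate the full background map.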
def parameterize_bg(self, fft_bg):
dist_len = fft_bg.shape[0]
static_idx = int(self.fft_len / 2)
adjacent_idx = [int(static_idx - 1), int(static_idx + 1)]
moving_range = [*range(adjacent_idx[0]), *range(adjacent_idx[1] + 1, self.fft_len)]
pwl_static_points = 5
static_pwl_amp = []
static_pwl_dist = []
static_sum = 0.0
adjacent_sum = np.zeros(2)
moving_sum = 0.0
moving_sum_sqr = 0.0
moving_mean_array = np.zeros(dist_len)
moving_max = 0.0
moving_min = 0.0
for dist_index in range(dist_len):
static_sum += fft_bg[dist_index, static_idx]
adjacent_sum[0] += fft_bg[dist_index, adjacent_idx[0]]
adjacent_sum[1] += fft_bg[dist_index, adjacent_idx[1]]
current_dist_moving_sum = 0.0
for freq_index in moving_range:
val = fft_bg[dist_index, freq_index]
current_dist_moving_sum += val
if val > moving_max:
moving_max = val
if val < moving_min:
moving_min = val
moving_sum += val
moving_sum_sqr += val * val
moving_mean_array[dist_index] = current_dist_moving_sum / len(moving_range)
segs = self.adapt_background_segment_step(moving_mean_array, dist_len)
segs_nr = int(len(segs) / 2)
moving_pwl_dist = segs[0:segs_nr]
moving_pwl_amp = segs[segs_nr:]
adjacent_sum_max = max(adjacent_sum)
if adjacent_sum_max == 0:
static_adjacent_factor = 0
else:
static_adjacent_factor = static_sum / adjacent_sum_max
bin_width = dist_len / pwl_static_points
pwl_y_max = 0
pwl_x_max = 0
for point_index in range(pwl_static_points):
dist_begin = int(point_index * bin_width)
dist_end = int((point_index + 1) * bin_width)
if point_index == pwl_static_points:
dist_end = dist_len
pwl_x_max = 0.0
pwl_y_max = 0.0
for dist_index in range(dist_begin, dist_end):
val = fft_bg[dist_index, static_idx]
if val > pwl_y_max:
pwl_x_max = dist_index
pwl_y_max = val
static_pwl_dist.append(pwl_x_max)
static_pwl_amp.append(pwl_y_max)
for i in range(pwl_static_points):
if static_pwl_amp[i] < moving_max:
static_pwl_amp[i] = moving_max
bg_params = {
"static_pwl_dist": [float(i) for i in static_pwl_dist],
"static_pwl_amp": [float(i) for i in static_pwl_amp],
"moving_pwl_dist": [float(i) for i in moving_pwl_dist],
"moving_pwl_amp": [float(i) for i in moving_pwl_amp],
"static_adjacent_factor": [float(static_adjacent_factor)],
"moving_max": [float(moving_max)],
}
return bg_params
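# Fit a two-point piecewise-linear step [x1, x2, y1, y2] to a 1-D background profile:
# a plateau (y1) close to the sensor that falls off to a lower level (y2) further away.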
def adapt_background_segment_step(self, data_y, data_length):
mid_index = int(data_length / 2)
y1 = 0
for i in range(mid_index):
if y1 < data_y[i]:
y1 = data_y[i]
y2 = 0
for i in range(mid_index, int(data_length)):
if y2 < data_y[i]:
y2 = data_y[i]
# Check the plateau levels and return early if the step is non-decreasing
if y1 <= y2:
y1 = y2
x1 = 0
x2 = float(data_length - 1)
return [x1, x2, y1, y2]
intersection_threshold = self.remap(BACKGROUND_PLATEAU_INTERPOLATION, 0, 1, y2, y1)
if (intersection_threshold > y2 * BACKGROUND_PLATEAU_FACTOR):
intersection_threshold = y2 * BACKGROUND_PLATEAU_FACTOR
intersection_index = mid_index
while (intersection_index > 1 and data_y[intersection_index - 1] < intersection_threshold):
intersection_index -= 1
x2 = float(intersection_index)
y2 = data_y[intersection_index]
if intersection_index <= 1:
x1 = 0
return [x1, x2, y1, y2]
max_neg_slope = 0
for i in range(intersection_index):
neg_slope = (data_y[i] - y2) / float(intersection_index - i)
if max_neg_slope < neg_slope:
max_neg_slope = neg_slope
x1 = x2 - (y1 - y2) / max_neg_slope
return [x1, x2, y1, y2]
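# Inverse of parameterize_bg(): rebuild a full FFT background map from stored PWL parameters.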
def generate_background_from_pwl_params(self, fft_bg, bg_params):
static_idx = int(self.fft_len / 2)
adjacent_idx = [int(static_idx - 1), int(static_idx + 1)]
moving_range = [*range(adjacent_idx[0]), *range(adjacent_idx[1] + 1, self.fft_len)]
pwl_static_points = 5
pwl_moving_points = 2
dist_len = fft_bg.shape[0]
fac = bg_params["static_adjacent_factor"][0]
static_pwl_amp = bg_params["static_pwl_amp"]
static_pwl_dist = bg_params["static_pwl_dist"]
moving_pwl_amp = bg_params["moving_pwl_amp"]
moving_pwl_dist = bg_params["moving_pwl_dist"]
moving_max = bg_params["moving_max"][0]
self.apply_pwl_segments(
fft_bg, static_idx, static_pwl_dist, static_pwl_amp, pwl_static_points
)
for dist_index in range(dist_len):
static_val = fft_bg[dist_index, 8]
if static_val < moving_max:
static_val = moving_max
fft_bg[dist_index, static_idx] = static_val
interp_adjacent = moving_max
if fac > 0:
interp_adjacent = static_val / fac
fft_bg[dist_index, adjacent_idx] = interp_adjacent
for freq_index in moving_range:
self.apply_pwl_segments(
fft_bg, freq_index, moving_pwl_dist, moving_pwl_amp, pwl_moving_points
)
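# Fill one frequency column of the background map by linearly interpolating between
# successive PWL breakpoints along the distance axis.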
def apply_pwl_segments(self, fft_bg, freq_index, pwl_dist, pwl_amp, pwl_points):
dist_len = fft_bg.shape[0]
x_start = 0
x_stop = pwl_dist[0]
y_start = pwl_amp[0]
y_stop = pwl_amp[0]
segment_stop = 0
for dist_index in range(dist_len):
if x_stop < dist_index:
x_start = x_stop
y_start = y_stop
segment_stop += 1
if segment_stop < pwl_points:
x_stop = pwl_dist[segment_stop]
y_stop = pwl_amp[segment_stop]
else:
x_stop = dist_len - 1
interp = self.remap(dist_index, x_start, x_stop, y_start, y_stop)
fft_bg[dist_index, freq_index] = interp
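# Find up to nr_locals local maxima in the (background-subtracted) FFT map that exceed the
# variable threshold. Each returned row holds [distance edge index, frequency bin,
# amplitude-weighted frequency, amplitude]; the area around each found peak is masked out
# before searching for the next one.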
def find_peaks(self, arr):
if not self.nr_locals:
return None, 0
peaks_found = 0
peak = np.asarray(unravel_index(np.argmax(arr), arr.shape))
peak_avg = peak[1]
peak_val = arr[peak[0], peak[1]]
thresh = self.variable_thresholding(peak[1], peak[0], self.threshold,
self.static_threshold)
if peak_val < thresh:
peak = None
peak_avg = None
if peak is not None:
peak_avg = self.calc_peak_avg(peak, arr)
local_peaks = np.zeros((self.nr_locals, self.peak_prop_num), dtype=float)
dist_edge = self.edge(arr[:, peak[1]], peak[0], self.edge_ratio)
local_peaks[0, 0] = dist_edge
local_peaks[0, 1] = peak[1]
local_peaks[0, 2] = peak_avg
local_peaks[0, 3] = np.round(peak_val, decimals=4)
peaks_found += 1
self.mask = arr.copy()
for i in range(self.nr_locals - 1):
self.peak_masking(local_peaks[i, :])
p = np.asarray(unravel_index(np.argmax(self.mask), arr.shape))
thresh = self.variable_thresholding(p[1], p[0],
self.threshold, self.static_threshold)
peak_val = arr[p[0], p[1]]
if peak_val > thresh:
dist_edge = self.edge(arr[:, p[1]], p[0], self.edge_ratio)
local_peaks[i + 1, 0] = dist_edge
local_peaks[i + 1, 1] = p[1]
local_peaks[i + 1, 2] = self.calc_peak_avg(p, arr)
local_peaks[i + 1, 3] = np.round(peak_val, decimals=4)
peaks_found += 1
else:
break
return local_peaks, peaks_found
else:
return None, 0
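# Refine the peak frequency estimate with an amplitude-weighted average over the peak bin
# and its two neighbours, wrapping around the FFT edges.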
def calc_peak_avg(self, peak, arr):
amp_sum = 0
s = 0
for i in range(3):
idx_real = peak[1] - 1 + i
idx = idx_real
if idx == self.fft_len:
idx = 0
elif idx == -1:
idx = self.fft_len - 1
s += arr[peak[0], idx] * idx_real
amp_sum += arr[peak[0], idx]
peak_avg = s / amp_sum
peak_avg = max(0, peak_avg)
return peak_avg % self.fft_len
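# Suppress the region of the search mask around an already-found peak so that the next
# argmax over the mask returns a different local maximum.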
def peak_masking(self, peak, angle_depth=2, depth=180, amplitude_margin=1.2):
dist_depth = depth / self.downsampling
distance_index = peak[0]
angle_index = peak[1]
peak_val = peak[3]
dist_len, angle_len = self.mask.shape
distance_scaling_per_index = 1 / dist_depth
angle_scaling_per_index = 1 / (1 + angle_depth)
distance_start_index = - dist_depth
if distance_start_index + distance_index < 0:
distance_start_index = - distance_index
distance_end_index = dist_depth
if distance_end_index + distance_index >= dist_len:
distance_end_index = dist_len - distance_index - 1
for i in range(-angle_depth, angle_depth + 1):
wrapped_row = (angle_len + i + angle_index) % angle_len
for j in range(int(distance_start_index), int(distance_end_index) + 1):
dist_from_peak = abs(i) * angle_scaling_per_index \
+ abs(j) * distance_scaling_per_index
mask_val = (1 - dist_from_peak**2) * peak_val * amplitude_margin
if self.mask[int(j + distance_index), int(wrapped_row)] < mask_val:
self.mask[int(j + distance_index), int(wrapped_row)] = 0
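# Walk from the peak towards smaller distance indices until the amplitude drops below
# ratio * peak amplitude; the returned index is used as the leading edge of the reflection.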
def edge(self, arr, peak_idx, ratio=0.5):
if ratio == 1.0:
return peak_idx
s0 = arr[peak_idx]
for i in range(peak_idx):
if peak_idx - i < 0:
peak_idx = 0
break
if arr[peak_idx - i] < s0 * ratio:
peak_idx -= i
break
return peak_idx
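# Interpolate between the moving (min) and static (max) thresholds depending on distance
# and on how close the frequency bin is to the zero-velocity bin, and add an extra margin
# for the first few centimeters of the measurement range.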
def variable_thresholding(self, freq_index, dist_index, min_thresh, max_thresh):
dist = self.env_xs[dist_index]
distance_gradient = self.static_dist_gradient
thresh = self.remap(dist, self.static_distance - distance_gradient, self.static_distance,
max_thresh, min_thresh)
thresh = self.clamp(thresh, min_thresh, max_thresh)
null_frequency = self.fft_len / 2
frequency_gradient = self.static_freq_limit
if freq_index <= null_frequency:
freq = self.clamp(freq_index, null_frequency - frequency_gradient, null_frequency)
thresh = self.remap(freq,
null_frequency - frequency_gradient, null_frequency,
min_thresh, thresh)
else:
freq = self.clamp(freq_index, null_frequency, null_frequency + frequency_gradient)
thresh = self.remap(freq,
null_frequency, null_frequency + frequency_gradient,
thresh, min_thresh)
thresh_add = self.remap(
dist,
self.sensor_config.range_start * 100,
self.sensor_config.range_start * 100 + self.close_dist_limit,
self.close_threshold_addition,
0.0,
)
thresh += self.clamp(thresh_add, 0, self.close_threshold_addition)
return thresh
class PGUpdater:
def __init__(self, sensor_config, processing_config, session_info):
self.sensor_config = sensor_config
self.map_max = 0
self.width = 3
self.max_velocity = WAVELENGTH / 4 * self.sensor_config.update_rate # cm/s
self.peak_hist_len = processing_config["peak_hist"]["value"]
self.dist_index = processing_config["downsampling"]["value"]
self.nr_locals = processing_config["nr_peaks"]["value"]
self.downsampling = processing_config["downsampling"]["value"]
self.threshold = processing_config["static_threshold"]["value"]
self.sensor_separation = processing_config["sensor_separation"]["value"]
self.fusion_max_obstacles = FUSION_MAX_OBSTACLES
self.fusion_max_shadows = FUSION_MAX_SHADOWS
self.fusion_history = FUSION_HISTORY
self.fft_bg_data = None
self.threshold_data = None
self.hist_plots = {
"velocity": [[], processing_config["velocity_history"]["value"]],
"angle": [[], processing_config["angle_history"]["value"]],
"distance": [[], processing_config["distance_history"]["value"]],
"amplitude": [[], processing_config["amplitude_history"]["value"]],
}
self.num_hist_plots = 0
for hist in self.hist_plots:
if self.hist_plots[hist][1]:
self.num_hist_plots += 1
self.advanced_plots = {
"background_map": processing_config["background_map"]["value"],
"threshold_map": processing_config["threshold_map"]["value"],
"show_line_outs": processing_config["show_line_outs"]["value"],
"fusion_map": processing_config["fusion_map"]["value"],
"show_shadows": False,
}
if self.advanced_plots["fusion_map"] and len(sensor_config.sensor) > 2:
self.advanced_plots["fusion_map"] = False
log.warning("Sensor fusion needs 2 sensors!")
if self.advanced_plots["fusion_map"] and len(sensor_config.sensor) == 1:
log.warning("Sensor fusion needs 2 sensors! Plotting single sensor data.")
if self.advanced_plots["fusion_map"]:
self.advanced_plots["show_shadows"] = processing_config["show_shadows"]["value"]
def setup(self, win):
win.setWindowTitle("Acconeer obstacle detection example")
row_idx = 0
self.env_ax = win.addPlot(row=row_idx, col=0, colspan=4, title="Envelope and max FFT")
self.env_ax.setMenuEnabled(False)
self.env_ax.setLabel("bottom", "Depth (cm)")
self.env_ax.setXRange(*(self.sensor_config.range_interval * 100))
self.env_ax.showGrid(True, True)
self.env_ax.addLegend(offset=(-10, 10))
self.env_ax.setYRange(0, 0.1)
self.env_ampl = self.env_ax.plot(pen=utils.pg_pen_cycler(0), name="Envelope")
self.fft_max = self.env_ax.plot(pen=utils.pg_pen_cycler(1, "--"), name="FFT @ max")
if self.advanced_plots["show_line_outs"]:
self.fft_bg = self.env_ax.plot(pen=utils.pg_pen_cycler(2, "--"), name="BG @ max")
self.fft_thresh = self.env_ax.plot(
pen=utils.pg_pen_cycler(3, "--"),
name="Threshold @ max"
)
self.peak_dist_text = pg.TextItem(color="k", anchor=(0, 1))
self.env_ax.addItem(self.peak_dist_text)
self.peak_dist_text.setPos(self.sensor_config.range_start * 100, 0)
self.peak_dist_text.setZValue(3)
self.env_peak_vline = pg.InfiniteLine(pos=0, angle=90, pen=pg.mkPen(width=2,
style=QtCore.Qt.DotLine))
self.env_ax.addItem(self.env_peak_vline)
row_idx += 1
self.obstacle_ax = win.addPlot(row=row_idx, col=0, colspan=self.num_hist_plots,
title="Obstacle map")
self.obstacle_ax.setMenuEnabled(False)
self.obstacle_im = pg.ImageItem()
self.obstacle_ax.setLabel("bottom", "Velocity (cm/s)")
self.obstacle_ax.setLabel("left", "Distance (cm)")
self.obstacle_im.setLookupTable(utils.pg_mpl_cmap("viridis"))
self.obstacle_ax.addItem(self.obstacle_im)
self.obstacle_ax.setXRange(-self.max_velocity, self.max_velocity)
self.obstacle_ax.setYRange(*self.sensor_config.range_interval * 100)
self.obstacle_ax.setXRange(-self.max_velocity, self.max_velocity)
self.obstacle_ax.setYRange(*self.sensor_config.range_interval * 100)
self.obstacle_peak = pg.ScatterPlotItem(brush=pg.mkBrush("k"), size=15)
self.obstacle_ax.addItem(self.obstacle_peak)
self.peak_fft_text = pg.TextItem(color="w", anchor=(0, 1))
self.obstacle_ax.addItem(self.peak_fft_text)
self.peak_fft_text.setPos(-self.max_velocity, self.sensor_config.range_start * 100)
self.peak_val_text = pg.TextItem(color="w", anchor=(0, 0))
self.obstacle_ax.addItem(self.peak_val_text)
self.peak_val_text.setPos(-self.max_velocity, self.sensor_config.range_end * 100)
self.bg_estimation_text = pg.TextItem(color="w", anchor=(0, 1))
self.obstacle_ax.addItem(self.bg_estimation_text)
self.bg_estimation_text.setPos(-self.max_velocity, self.sensor_config.range_start * 100)
row_idx += 1
if self.advanced_plots["background_map"]:
self.obstacle_bg_ax = win.addPlot(
row=row_idx, col=0, colspan=self.num_hist_plots, title="Obstacle background")
self.obstacle_bg_ax.setMenuEnabled(False)
self.obstacle_bg_im = pg.ImageItem()
self.obstacle_bg_ax.setLabel("bottom", "Velocity (cm/s)")
self.obstacle_bg_ax.setLabel("left", "Distance (cm)")
self.obstacle_bg_im.setLookupTable(utils.pg_mpl_cmap("viridis"))
self.obstacle_bg_ax.addItem(self.obstacle_bg_im)
row_idx += 1
if self.advanced_plots["threshold_map"]:
self.obstacle_thresh_ax = win.addPlot(
row=row_idx, col=0, colspan=self.num_hist_plots, title="Obstacle threshold")
self.obstacle_thresh_ax.setMenuEnabled(False)
self.obstacle_thresh_im = pg.ImageItem()
self.obstacle_thresh_ax.setLabel("bottom", "Velocity (cm/s)")
self.obstacle_thresh_ax.setLabel("left", "Distance (cm)")
self.obstacle_thresh_im.setLookupTable(utils.pg_mpl_cmap("viridis"))
self.obstacle_thresh_ax.addItem(self.obstacle_thresh_im)
row_idx += 1
if self.advanced_plots["fusion_map"]:
b = utils.color_cycler(0)
r = utils.color_cycler(1)
p = utils.color_cycler(4)
spos = self.sensor_separation / 2
pos0 = self.sensor_config.range_start * 100
pos1 = self.sensor_config.range_end * 100
self.fusion_ax = win.addPlot(
row=row_idx,
col=0,
colspan=self.num_hist_plots,
title="Fused obstacle map",
)
self.fusion_ax.setMenuEnabled(False)
self.fusion_legend = self.fusion_ax.addLegend()
self.fusion_ax.showGrid(True, True)
self.fusion_ax.setLabel("bottom", "X (cm)")
self.fusion_ax.setLabel("left", "Y (cm)")
self.fusion_ax.setXRange(-pos0, pos0)
self.fusion_ax.setYRange(pos0 - 10, pos1)
# Add plots for fused obstacles
self.fusion_scatter = pg.ScatterPlotItem(brush=pg.mkBrush("k"), size=10)
self.fusion_legend.addItem(self.fusion_scatter, "Fused")
self.fusion_ax.addItem(self.fusion_scatter)
self.fusion_scatter.setZValue(10)
self.fusion_hist = []
for i in range(self.fusion_max_obstacles):
self.fusion_hist.append(self.fusion_ax.plot(pen=utils.pg_pen_cycler(2, "--")))
self.fusion_ax.addItem(self.fusion_hist[i])
self.fusion_hist[i].setZValue(10)
# Add plots for shadow obstacles
if self.advanced_plots["show_shadows"]:
self.left_sensor_shadows = pg.ScatterPlotItem(brush=pg.mkBrush(r), size=10)
self.right_sensor_shadows = pg.ScatterPlotItem(brush=pg.mkBrush(b), size=10)
self.fusion_ax.addItem(self.left_sensor_shadows)
self.fusion_ax.addItem(self.right_sensor_shadows)
self.fusion_left_shadow_hist = []
self.fusion_right_shadow_hist = []
for i in range(self.fusion_max_shadows):
self.fusion_left_shadow_hist.append(
self.fusion_ax.plot(pen=utils.pg_pen_cycler(0, "--")))
self.fusion_right_shadow_hist.append(
self.fusion_ax.plot(pen=utils.pg_pen_cycler(1, "--")))
self.fusion_ax.addItem(self.fusion_left_shadow_hist[i])
self.fusion_ax.addItem(self.fusion_right_shadow_hist[i])
self.left_sensor = pg.InfiniteLine(
pos=-spos,
angle=90,
pen=pg.mkPen(width=2, style=QtCore.Qt.DotLine),
)
self.right_sensor = pg.InfiniteLine(
pos=spos,
angle=90,
pen=pg.mkPen(width=2, style=QtCore.Qt.DotLine),
)
self.detection_limit = pg.InfiniteLine(
pos=self.sensor_config.range_start * 100,
angle=0,
pen=pg.mkPen(color=p, width=2, style=QtCore.Qt.DotLine),
)
self.fusion_ax.addItem(self.left_sensor)
self.fusion_ax.addItem(self.right_sensor)
self.fusion_ax.addItem(self.detection_limit)
self.right_sensor_rect = pg.QtGui.QGraphicsRectItem(spos - 1, pos0 - 1, 2, 2)
self.right_sensor_rect.setPen(pg.mkPen(None))
self.right_sensor_rect.setBrush(pg.mkBrush(b))
self.fusion_ax.addItem(self.right_sensor_rect)
self.left_sensor_rect = pg.QtGui.QGraphicsRectItem(-spos - 1, pos0 - 1, 2, 2)
self.left_sensor_rect.setPen(pg.mkPen(None))
self.left_sensor_rect.setBrush(pg.mkBrush(r))
self.fusion_ax.addItem(self.left_sensor_rect)
id1 = self.sensor_config.sensor[0]
if len(self.sensor_config.sensor) == 2:
id2 = self.sensor_config.sensor[1]
else:
id2 = -1
self.right_sensor_text = pg.TextItem("Sensor {}".format(id1), color=b, anchor=(0, 1))
self.left_sensor_text = pg.TextItem("Sensor {}".format(id2), color=r, anchor=(1, 1))
self.fusion_ax.addItem(self.left_sensor_text)
self.fusion_ax.addItem(self.right_sensor_text)
self.left_sensor_text.setPos(-spos, spos)
self.right_sensor_text.setPos(spos, spos)
if id2 == -1:
self.left_sensor_text.hide()
self.left_sensor_rect.hide()
row_idx += 1
hist_col = 0
row_idx += self.num_hist_plots
if self.hist_plots["distance"][1]:
self.peak_hist_ax_l = win.addPlot(row=row_idx, col=hist_col, title="Distance history")
self.peak_hist_ax_l.setMenuEnabled(False)
self.peak_hist_ax_l.setLabel("bottom", "Sweep")
self.peak_hist_ax_l.setXRange(0, self.peak_hist_len)
self.peak_hist_ax_l.showGrid(True, True)
self.peak_hist_ax_l.addLegend(offset=(-10, 10))
self.peak_hist_ax_l.setYRange(
self.sensor_config.range_start * 100, self.sensor_config.range_end * 100)
hist_col += 1
if self.hist_plots["velocity"][1]:
self.peak_hist_ax_c = win.addPlot(row=row_idx, col=hist_col, title="Velocity history")
self.peak_hist_ax_c.setMenuEnabled(False)
self.peak_hist_ax_c.setLabel("bottom", "Sweep")
self.peak_hist_ax_c.setXRange(0, self.peak_hist_len)
limit = np.round(self.max_velocity / 10) * 10
if limit < 1.0:
limit = self.max_velocity
self.peak_hist_ax_c.setYRange(-limit, limit)
self.peak_hist_ax_c.showGrid(True, True)
self.peak_hist_ax_c.addLegend(offset=(-10, 10))
hist_col += 1
if self.hist_plots["angle"][1]:
self.peak_hist_ax_r = win.addPlot(row=row_idx, col=hist_col, title="Angle history")
self.peak_hist_ax_r.setMenuEnabled(False)
self.peak_hist_ax_r.setLabel("bottom", "Sweep")
self.peak_hist_ax_r.setXRange(0, self.peak_hist_len)
self.peak_hist_ax_r.showGrid(True, True)
self.peak_hist_ax_r.addLegend(offset=(-10, 10))
self.peak_hist_ax_r.setYRange(-100, 100)
hist_col += 1
if self.hist_plots["amplitude"][1]:
self.peak_hist_ax_r1 = win.addPlot(row=row_idx, col=hist_col,
title="Amplitude history")
self.peak_hist_ax_r1.setMenuEnabled(False)
self.peak_hist_ax_r1.setLabel("bottom", "Sweep")
self.peak_hist_ax_r1.setXRange(0, self.peak_hist_len)
self.peak_hist_ax_r1.showGrid(True, True)
self.peak_hist_ax_r1.addLegend(offset=(-10, 10))
hist_col += 1
for i in range(self.nr_locals):
if self.hist_plots["velocity"][1]:
self.hist_plots["velocity"][0].append(
self.peak_hist_ax_c.plot(pen=utils.pg_pen_cycler(i),
name="Veloctiy {:d}".format(i)))
if self.hist_plots["angle"][1]:
self.hist_plots["angle"][0].append(
self.peak_hist_ax_r.plot(pen=utils.pg_pen_cycler(i),
name="Angle {:d}".format(i)))
if self.hist_plots["distance"][1]:
self.hist_plots["distance"][0].append(
self.peak_hist_ax_l.plot(pen=utils.pg_pen_cycler(i),
name="Distance {:d}".format(i)))
if self.hist_plots["amplitude"][1]:
self.hist_plots["amplitude"][0].append(
self.peak_hist_ax_r1.plot(pen=utils.pg_pen_cycler(i),
name="Amplitude {:d}".format(i)))
self.smooth_max = utils.SmoothMax(
self.sensor_config.update_rate,
tau_decay=0.2,
tau_grow=0.2,
)
self.plot_index = 0
def update(self, data):
nfft = data["fft_map"].shape[2]
if self.plot_index == 0:
nr_sensors = data["fft_map"].shape[0]
spos = self.sensor_separation / 2
pos0 = self.sensor_config.range_start * 100
pos1 = self.sensor_config.range_end * 100
num_points = data["env_ampl"].size
self.env_xs = np.linspace(*self.sensor_config.range_interval * 100, num_points)
self.peak_x = self.env_xs[data["peak_idx"]]
self.obstacle_im.translate(-self.max_velocity, pos0)
self.obstacle_im.scale(
2 * self.max_velocity / nfft,
self.sensor_config.range_length * 100 / num_points
)
if self.advanced_plots["background_map"]:
self.obstacle_bg_im.translate(-self.max_velocity, pos0)
self.obstacle_bg_im.scale(
2 * self.max_velocity / nfft,
self.sensor_config.range_length * 100 / num_points
)
if self.advanced_plots["threshold_map"]:
self.obstacle_thresh_im.translate(-self.max_velocity, pos0)
self.obstacle_thresh_im.scale(
2 * self.max_velocity / nfft,
self.sensor_config.range_length * 100 / num_points
)
show_fusion = self.advanced_plots["fusion_map"]
show_shadows = self.advanced_plots["show_shadows"]
if show_fusion:
self.fusion_ax.setXRange(-pos1, pos1)
self.fusion_ax.setYRange(pos0 - 10, pos1)
self.left_sensor_text.setPos(-spos, pos0 - 10)
self.right_sensor_text.setPos(spos, pos0 - 10)
self.left_sensor.setValue(spos)
self.right_sensor.setValue(-spos)
self.left_sensor_rect.setRect(-spos - 1, pos0 - 1, 2, 2)
self.right_sensor_rect.setRect(spos - 1, pos0 - 1, 2, 2)
id1 = self.sensor_config.sensor[0]
self.right_sensor_text.setText("Sensor {}".format(id1))
if show_shadows:
self.fusion_legend.addItem(self.right_sensor_shadows, "Sensor {}".format(id1))
if len(self.sensor_config.sensor) == 2:
id2 = self.sensor_config.sensor[1]
self.left_sensor_text.setText("Sensor {}".format(id2))
if show_shadows:
self.fusion_legend.addItem(
self.left_sensor_shadows, "Sensor {}".format(id2))
if nr_sensors == 1:
self.left_sensor_text.hide()
self.left_sensor_rect.hide()
else:
self.left_sensor_text.show()
self.left_sensor_rect.show()
else:
self.peak_x = self.peak_x * 0.7 + 0.3 * self.env_xs[data["peak_idx"]]
peak_dist_text = "Peak: {:.1f} cm".format(self.peak_x)
peak_fft_text = "No peaks found"
if data["fft_peaks"] is not None:
dist = self.env_xs[data["fft_peaks"][:, 0].astype(int)]
vel = (data["fft_peaks"][:, 1] / data["fft_map"].shape[2] * 2 - 1) * self.max_velocity
peak_fft_text = "Dist: {:.1f}cm, Speed/Angle: {:.1f}cm/s / {:.0f}".format(
dist[0], data["velocity"], data["angle"])
half_pixel = self.max_velocity / np.floor(data["fft_map"].shape[2] / 2) / 2
self.obstacle_peak.setData(vel + half_pixel, dist)
else:
self.obstacle_peak.setData([], [])
if data["fft_bg_iterations_left"]:
bg_text = "Stay clear of sensors, estimating background! {} iterations left"
bg_text = bg_text.format(data["fft_bg_iterations_left"])
peak_fft_text = ""
else:
bg_text = ""
for i in range(self.nr_locals):
if self.hist_plots["distance"][1]:
self.hist_plots["distance"][0][i].setData(data["peak_hist"][i, 0, :],
connect="finite")
if self.hist_plots["velocity"][1]:
self.hist_plots["velocity"][0][i].setData(data["peak_hist"][i, 1, :],
connect="finite")
if self.hist_plots["angle"][1]:
self.hist_plots["angle"][0][i].setData(data["peak_hist"][i, 2, :],
connect="finite")
if self.hist_plots["amplitude"][1]:
self.hist_plots["amplitude"][0][i].setData(data["peak_hist"][i, 3, :],
connect="finite")
map_max = np.max(np.max(data["fft_map"][0]))
self.peak_dist_text.setText(peak_dist_text)
self.peak_fft_text.setText(peak_fft_text)
self.bg_estimation_text.setText(bg_text)
self.env_ampl.setData(self.env_xs, data["env_ampl"])
self.env_peak_vline.setValue(self.peak_x)
fft_max = np.max(data["fft_max_env"])
env_max = np.max(data["env_ampl"])
env_max = max(env_max, fft_max)
self.fft_max.setData(self.env_xs, data["fft_max_env"])
if data["fft_bg"] is not None:
self.fft_bg_data = data["fft_bg"]
if self.advanced_plots["show_line_outs"]:
max_index = 8
max_bg = None
if data["fft_peaks"] is not None:
max_index = int(data["fft_peaks"][0, 1])
else:
try:
max_index = np.asarray(unravel_index(
np.argmax(data["fft_map"][0]), data["fft_map"][0].shape)
)[1]
except Exception:
pass
if self.fft_bg_data is not None:
max_bg = self.fft_bg_data[:, max_index]
self.fft_bg.setData(self.env_xs, max_bg)
env_max = max(np.max(max_bg), env_max)
if data["threshold_map"] is not None:
self.threshold_data = data["threshold_map"]
if self.threshold_data is not None:
thresh_max = self.threshold_data[:, max_index]
if max_bg is not None:
thresh_max = thresh_max + max_bg
env_max = max(np.max(thresh_max), env_max)
self.fft_thresh.setData(self.env_xs, thresh_max)
self.env_ax.setYRange(0, self.smooth_max.update(env_max))
fft_data = data["fft_map"][0].T
if self.fft_bg_data is not None:
max_wo_bg = map_max
fft_data = fft_data - self.fft_bg_data.T
fft_data[fft_data < 0] = 0
map_max = np.max(fft_data)
self.peak_val_text.setText("FFT max: {:.3f} ({:.3f})".format(map_max, max_wo_bg))
else:
self.peak_val_text.setText("FFT max: {:.3f}".format(map_max))
g = 1 / 2.2
fft_data = 254 / (map_max + 1.0e-9)**g * fft_data**g
fft_data[fft_data > 254] = 254
map_min = -1
map_max = 257
self.obstacle_im.updateImage(fft_data, levels=(map_min, map_max))
if data["threshold_map"] is not None and self.advanced_plots["threshold_map"]:
thresh_max = np.max(data["threshold_map"])
levels = (0, thresh_max * 1.05)
self.obstacle_thresh_im.updateImage(data["threshold_map"].T, levels=levels)
if data["fft_bg"] is not None and self.advanced_plots["background_map"]:
map_max = np.max(np.max(data["fft_bg"]))
fft_data = data["fft_bg"].T
fft_data = 254 / map_max**g * fft_data**g
fft_data[fft_data > 254] = 254
map_min = -1
map_max = 257
self.obstacle_bg_im.updateImage(fft_data, levels=(map_min, map_max))
if self.advanced_plots["fusion_map"]:
self.fusion_scatter.setData(
data["fused_obstacles"][0][0, :],
data["fused_obstacles"][1][0, :]
)
for i in range(self.fusion_max_obstacles):
self.fusion_hist[i].setData(
data["fused_obstacles"][0][:, i],
data["fused_obstacles"][1][:, i]
)
if self.advanced_plots["show_shadows"]:
self.left_sensor_shadows.setData(
data["left_shadows"][0][0, :],
data["left_shadows"][1][0, :]
)
self.right_sensor_shadows.setData(
data["right_shadows"][0][0, :],
data["right_shadows"][1][0, :]
)
self.plot_index += 1
class SensorFusion():
def __init__(self):
self.debug = False
self.iteration = 0
self.fusion_params = {
"separation": 15, # cm
"obstacle_spread": 10, # degrees
"min_y_spread": 2, # cm
"min_x_spread": 4, # cm
"max_x_spread": 15, # cm
"decay_time": 5, # cycles
"step_time": 0, # s
"min_distance": 6, # cm
"use_as_noise_filter:": False # increases decaytime if known obstacle
}
self.fused_obstacles = []
def setup(self, params):
for key in params:
self.fusion_params[key] = params[key]
def update(self, obstacles):
right = []
left = []
if "0" in obstacles:
right = obstacles["0"]
if "1" in obstacles:
left = obstacles["1"]
obstacle_list = []
# Split obstacles into left and right sensor and convert to xy coordinates
# Each obstacle has two x and one y coordinate, where x1 is the outer
# position and x2 is the inner position between both sensors
for l in left:
obstacle_list.append(
self.convert_to_xy(l, "left"))
for r in right:
obstacle_list.append(
self.convert_to_xy(r, "right"))
# create all possible obstacle locations
shadow_obstacles = self.create_shadow_obstacles(obstacle_list)
# get updated obstacle data
new_fused_obstacle_data = self.extract(obstacle_list)
# combine new obstacles with old obstacles
self.update_fused_obstacle_data(new_fused_obstacle_data)
if self.debug:
self.print_obstacles(obstacle_list, left, right)
return self.fused_obstacles, shadow_obstacles
def extract(self, obstacles):
fused_obstacles = []
if self.debug:
self.debug_updates = []
if len(obstacles) == 0:
return []
ref_obst = obstacles[0]
add_ref = True
for num, obst in enumerate(obstacles):
if num == 0:
continue
# Check overlap with current obstacle
overlap_idx, _ = self.check_overlap(ref_obst, obst)
hist_idx = None
if overlap_idx:
self.add_obstacle(
fused_obstacles,
obst,
obst2=ref_obst,
idx=overlap_idx,
matched=True,
position="inner")
add_ref = False
else:
# Check overlap with obstacle history
overlap_idx, matched, hist_idx = self.check_overlap_with_history(obst)
if overlap_idx:
self.add_obstacle(
fused_obstacles,
obst,
obst2=self.fused_obstacles[hist_idx],
idx=overlap_idx,
matched=matched,
position=self.fused_obstacles[hist_idx]["match_position"],
hist_idx=hist_idx)
if self.debug:
if overlap_idx:
if hist_idx is None:
self.debug_updates.append(
"obstacle 0 matched with {} at x{:d}".format(num, overlap_idx))
else:
self.debug_updates.append(
"obstacle {} matched with fused {} at x{:d}".format(
num, hist_idx, overlap_idx))
else:
self.debug_updates.append("obstacle {} not matched".format(num))
# If no overlap with old/new obstacle, assume outer position x1
if overlap_idx is None:
self.add_obstacle(fused_obstacles, obst, idx=1, position=obst["side"])
if add_ref:
# Check ref overlap with obstacle history
overlap_idx, matched, hist_idx = self.check_overlap_with_history(ref_obst)
if overlap_idx:
self.add_obstacle(
fused_obstacles,
ref_obst,
obst2=self.fused_obstacles[hist_idx],
idx=overlap_idx,
matched=matched,
position=self.fused_obstacles[hist_idx]["match_position"],
hist_idx=hist_idx)
else:
self.add_obstacle(fused_obstacles, ref_obst, idx=1, position=ref_obst["side"])
if self.debug:
if overlap_idx:
self.debug_updates.append(
"obstacle 0 matched with fused {} at x{:d}".format(hist_idx, overlap_idx))
else:
self.debug_updates.append("obstacle 0 not matched")
return fused_obstacles
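# Compare an observation against previously fused obstacles (all of them, or a single one
# if hist_idx is given), using their predicted y position, and return the best overlap,
# preferring fused obstacles that were already matched by both sensors.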
def check_overlap_with_history(self, obstacle, hist_idx=None):
overlap_idx = None
idx = None
matched = False
overlaps_matched = []
overlaps_unmatched = []
hist = self.fused_obstacles
if hist_idx:
hist = [self.fused_obstacles[hist_idx]]
for idx, o in enumerate(hist):
# update y spread with prediction
dy = o["y"][0] - o["predicted"][2]
y = o["y"] - dy
y[0] = o["predicted"][2]
obst_hist = {
"vec": o["predicted"],
"x1": o["x1"],
"x2": o["x1"],
"y": y,
"match_position": o["match_position"]
}
overlap_idx, d = self.check_overlap(obstacle, obst_hist, mode="history")
if overlap_idx:
if o["matched"]:
overlaps_matched.append((d[0], overlap_idx, idx))
else:
overlaps_unmatched.append((d[0], overlap_idx, idx))
over = None
if len(overlaps_matched):
over = np.asarray(overlaps_matched)
matched = True
elif len(overlaps_unmatched):
over = np.asarray(overlaps_unmatched)
if over is not None:
overlap_idx = int(over[np.argmin(over[:, 0]), 1])
idx = int(over[np.argmin(over[:, 0]), 2])
if hist_idx is None:
hist_idx = idx
return overlap_idx, matched, hist_idx
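# Determine whether two obstacles overlap within their spread margins. Returns which x
# coordinate overlaps (1 = outer, 2 = inner, None = no overlap) and the smallest x/y deltas.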
def check_overlap(self, obst1, obst2, mode="current"):
overlap_idx = None
delta = None
o1_x_spread = np.asarray(((obst1["x1"][1:]), (obst1["x2"][1:])))
o1_y_spread = np.asarray((obst1["y"][1:]))
o2_y_spread = np.asarray(np.flip(obst2["y"][1:]))
o2_x_spread = np.asarray((np.flip(obst2["x1"][1:]),
np.flip(obst2["x2"][1:])))
# if there is a sign flip between the outer coordinate margins, obstacles overlap
diff_y = o1_y_spread - o2_y_spread
if np.sign(diff_y[0]) == np.sign(diff_y[1]):
# No overlap in y
if not (obst1["y"] == obst2["y"]).any():
return overlap_idx, delta
diff_x1 = o1_x_spread[0] - o2_x_spread[0]
diff_x2 = o1_x_spread[1] - o2_x_spread[1]
# Overlap in x 1 (outer position)
if np.sign(diff_x1[0]) != np.sign(diff_x1[1]):
overlap_idx = 1
if (obst1["x1"] == obst2["x1"]).any():
overlap_idx = 1
# Overlap in x2 (inner position)
if np.sign(diff_x2[0]) != np.sign(diff_x2[1]):
overlap_idx = 2
if (obst1["x2"] == obst2["x2"]).any():
overlap_idx = 1
if obst1.get("side") and mode == "history":
if obst2["match_position"] == "inner" and overlap_idx == 1:
overlap_idx = None
if obst2["match_position"] == "left" and obst1["side"] == "right":
overlap_idx = None
if obst2["match_position"] == "right" and obst1["side"] == "left":
overlap_idx = None
delta = [min(min(abs(diff_x1)), min(abs(diff_x2))), min(abs(diff_y))]
return overlap_idx, delta
def update_fused_obstacle_data(self, new_obstacles):
# compare new obstacles against old obstacles
no_match_found = np.full(len(self.fused_obstacles), True)
add_as_new = np.full(len(new_obstacles), False)
overlaps = []
for idx_new, new in enumerate(new_obstacles):
if new["matched_hist_idx"]:
overlap, matched, idx_old = self.check_overlap_with_history(
new, new["matched_hist_idx"])
else:
overlap, matched, idx_old = self.check_overlap_with_history(new)
if overlap:
overlaps.append((idx_new, idx_old, overlap, matched))
else:
add_as_new[idx_new] = True
for o in overlaps:
# update old obstacles with new obstacle coordinates
idx_new = o[0]
idx_fused = o[1]
self.update_fused_obstacle(idx_fused, new_obstacles[idx_new], idx_new)
no_match_found[idx_fused] = False
# add new obstacles not matching any old obstacles
for idx, add in enumerate(add_as_new):
if add:
self.create_fused_obstacle(new_obstacles[idx])
# update prediction for unmatched old obstacles and destroy if decayed
self.decay_fused_obstacles()
# TODO: remove obstacles we know are not correct
self.destroy_shadows()
return
def decay_fused_obstacles(self):
for fused_obst in self.fused_obstacles:
if not fused_obst["updated"]:
fused_obst["decay_time"] -= 1
fused_obst["predicted"] = self.predict(fused_obst["vec"])
for idx, fused_obst in enumerate(self.fused_obstacles):
if fused_obst["decay_time"] < 1:
self.fused_obstacles.pop(idx)
for fused_obst in self.fused_obstacles:
fused_obst["updated"] = False
def update_fused_obstacle(self, fused_idx, new_obst_data, idx_new):
decay_time = self.fusion_params["decay_time"]
fused_obst = self.fused_obstacles[fused_idx]
for key in new_obst_data:
fused_obst[key] = new_obst_data[key]
if self.fusion_params["use_as_noise_filter"]:
fused_obst["decay_time"] = min(fused_obst["decay_time"] + 1, decay_time * 2)
else:
fused_obst["decay_time"] = min(fused_obst["decay_time"] + 1, decay_time)
fused_obst["predicted"] = self.predict(new_obst_data["vec"])
fused_obst["updated"] = True
if self.debug:
self.debug_updates.append(
"updating fused obstacle {} with new obstacle {}".format(fused_idx, idx_new))
def create_fused_obstacle(self, obst_data):
if obst_data:
new_fused = {
"predicted": self.predict(obst_data["vec"]),
"decay_time": self.fusion_params["decay_time"],
"updated": True,
}
for key in obst_data:
new_fused[key] = obst_data[key]
self.fused_obstacles.append(new_fused)
if self.debug:
self.debug_updates.append("creating new obstacle X/Y {:.1f} / {:.1f}".format(
obst_data["pos"][0], obst_data["pos"][1]))
def destroy_shadows(self):
pass
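# Every single-sensor detection is ambiguous between the outer (x1) and inner (x2) position;
# keep both interpretations as "shadow" obstacles for plotting.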
def create_shadow_obstacles(self, obstacle_data):
shadow_obstacles = []
for obst in obstacle_data:
self.add_obstacle(shadow_obstacles, obst, idx=1, matched=False)
self.add_obstacle(shadow_obstacles, obst, idx=2, matched=False)
return shadow_obstacles
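# Constant-velocity prediction of the obstacle y position one step ahead.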
def predict(self, vec):
step_time = self.fusion_params["step_time"]
predicted = vec.copy()
vy = vec[4]
y = vec[2]
predicted[2] = y + vy * step_time
return predicted
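# Combine one or two observations into a fused obstacle entry: average the selected x
# coordinate (idx 1 = outer, 2 = inner) and the remaining parameters, keep the closest y,
# and recompute the angular spread into clamped x limits.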
def add_obstacle(self, o_list, obst1, obst2=None, idx=None, matched=False, position=None,
hist_idx=None):
if idx is None:
print("Warning: no index for obstacle")
return
idx -= 1
idx_excluded = 0
if idx == 0:
idx_excluded = 1
vec1 = obst1["vec"]
vec2 = vec1
if obst2 is not None:
vec2 = obst2["vec"]
vec = np.empty_like(vec1)
amp1 = 1
amp2 = 1
# write x coordinate as average of both inputs
vec[0] = (vec1[idx] * amp1 + vec2[idx] * amp2) / (amp1 + amp2)
vec[1] = vec[0]
# write remaining params as average of both inputs
for i in range(2, len(vec1)):
vec[i] = (vec1[i] * amp1 + vec2[i] * amp2) / (amp1 + amp2)
# use angle of obstacle 1
vec[5] = vec1[5]
# use closest y distance
vec[2] = min(vec1[2], vec2[2])
spread = self.fusion_params["obstacle_spread"]
fused_angle = np.arctan(vec[0] / vec[2]) / np.pi * 180
dx1 = np.tan((fused_angle - spread) / 180 * np.pi) * vec[2]
dx2 = np.tan((fused_angle + spread) / 180 * np.pi) * vec[2]
fused_x = np.asarray((vec[0], min(dx1, dx2), max(dx1, dx2)))
max_x_spread = self.fusion_params["max_x_spread"]
if abs(fused_x[0] - fused_x[1]) > max_x_spread:
fused_x[1] = fused_x[0] - max_x_spread
if abs(fused_x[0] - fused_x[2]) > max_x_spread:
fused_x[2] = fused_x[0] + max_x_spread
vec[0:6] = np.round(vec[0:6], 1)
obstacle = {
"pos": np.round((vec[0], vec[2]), 1),
"fused_angle": np.round(fused_angle, 1),
"amplitude": max(vec1[6], vec2[6]),
"x1": np.round(fused_x, 1),
"x2": np.round(fused_x, 1),
"y": np.round(obst1["y"], 1),
"excluded": np.round((vec1[idx_excluded], vec2[idx_excluded]), 1),
"vec": vec,
"matched": matched,
"match_position": position,
"matched_hist_idx": hist_idx,
"sensor": obst1["side"],
}
o_list.append(obstacle)
def convert_to_xy(self, data, side):
spread = self.fusion_params["obstacle_spread"]
min_x_spread = self.fusion_params["min_x_spread"]
min_y_spread = self.fusion_params["min_y_spread"]
angle = data["angle"]
distance = data["distance"]
v = data["velocity"]
# each coordinate has a margin given by the obstacle spread
a = np.asarray(
(angle / 180 * np.pi,
max(angle - spread, 0) / 180 * np.pi,
max(angle + spread, 0) / 180 * np.pi
)
)
x = np.sin(a) * distance
y = np.cos(a) * distance
y[0] = max(y[0], self.fusion_params["min_distance"])
vt = np.sin(angle / 180 * np.pi) * v
vy = -np.cos(angle / 180 * np.pi) * v
# x1 is outer position for each sensor side
# x2 is inner position between both sensors
x1 = self.fusion_params["separation"] / 2 + x
x2 = self.fusion_params["separation"] / 2 - x
if side == "left":
x1 *= -1
x2 *= -1
# make sure coordinates are ordered from left to right
if x1[1] > x1[2]:
x1[1:] = np.flip(x1[1:])
if x2[1] > x2[2]:
x2[1:] = np.flip(x2[1:])
if y[1] > y[2]:
y[1:] = np.flip(y[1:])
if abs(x1[0] - x1[1]) < min_x_spread:
x1[1] = x1[0] - min_x_spread
if abs(x1[0] - x1[2]) < min_x_spread:
x1[2] = x1[0] + min_x_spread
if abs(x2[0] - x2[1]) < min_x_spread:
x2[1] = x2[0] - min_x_spread
if abs(x2[0] - x2[2]) < min_x_spread:
x2[2] = x2[0] + min_x_spread
if abs(y[0] - y[1]) < min_y_spread:
y[1] = y[0] - min_y_spread
if abs(y[0] - y[2]) < min_y_spread:
y[2] = y[0] + min_y_spread
data["x1"] = np.round(x1, 1)
data["x2"] = np.round(x2, 1)
data["vx"] = np.round(vt, 1)
data["vy"] = np.round(vy, 1)
data["y"] = np.round(y, 1)
data["side"] = side
data["vec"] = np.asarray((
x1[0],
x2[0],
y[0],
vt,
vy,
data["angle"],
data["amplitude"],
distance,
v,)
)
data["vec"][0:6] = np.round(data["vec"][0:6], 1)
return data
def print_obstacles(self, obstacle_list, left, right):
self.iteration += 1
print(self.iteration)
if len(self.fused_obstacles) == 0:
print("No obstacles!")
else:
print("New obstacles:")
for i, o in enumerate(obstacle_list):
fmt = (
"{}: X {:.1f}({:.1f}>{:.1f})/{:.2f}({:.2}>{:.1f})"
"Y {:.1f}/({:.1f}>{:.1f}) true {:.1f} a {:.1f}"
)
print(fmt.format(i, o["vec"][0], o["x1"][1], o["x1"][2],
o["vec"][1], o["x2"][1], o["x2"][2],
o["vec"][2], o["y"][1], o["y"][2],
o["distance"], o["vec"][5]))
for update in self.debug_updates:
print(update)
if len(self.fused_obstacles):
print("Fused obstalces (L:{}/R:{} ):".format(len(left), len(right)))
else:
print("No fused obstalces!".format(len(left), len(right)))
for i, o in enumerate(self.fused_obstacles):
print("{}: X {:.1f}({:.1f}/{:.2}) Y {:.1f}({:.1f}/{:.1f}) t {} matched {} {}".format(
i, o["vec"][0], o["x1"][1], o["x1"][2],
o["vec"][2], o["y"][1], o["y"][2],
o["decay_time"], o["matched"], o["match_position"]))
print("")
if __name__ == "__main__":
main()
| 39.411981 | 99 | 0.543099 |
9dca22e1f455d96253152de06700b2fba8ba7076 | 724 | py | Python | plots.py | SailikhithGunda/Eigen-values-using-CUDA | 1a77fcd267747919e3f41c3c77c4c545abd4c0e4 | [
"MIT"
] | null | null | null | plots.py | SailikhithGunda/Eigen-values-using-CUDA | 1a77fcd267747919e3f41c3c77c4c545abd4c0e4 | [
"MIT"
] | null | null | null | plots.py | SailikhithGunda/Eigen-values-using-CUDA | 1a77fcd267747919e3f41c3c77c4c545abd4c0e4 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
file = open("C:\\Users\\Likhith\\source\\repos\\PDC_proj\\times.txt", "r")
a = file.read()
# analysing CUDA
l = list(a.split("\n"))
l1 = []
file.close()
for i in l:
l1.append(list(i.split()))
df = pd.DataFrame(l1, columns = ['n', 'time', 'num'])
df1 = df[6:12]
num_matrices_3 = df1['num']
time_3 = df1['time'].map(float)
df2 = df[:6]
num_matrices_4 = df2['num']
time_4 = df2['time'].map(float)
plt.plot(num_matrices_3, time_3, label = "dim 3")
plt.plot(num_matrices_4, time_4, label = 'dim 4')
plt.xlabel("No of matrices")
plt.ylabel("Time taken to execute")
plt.title("No of matrices VS time taken (CUDA)")
plt.legend()
plt.show()
| 23.354839 | 75 | 0.633978 |
34db967c3e2eaf7e7a825e50a45f788ed1ed42b7 | 3,301 | py | Python | api/python/setup.py | viveklak/quilt | 248971a9e2ecd1bcae3a5af77d4d8933b2dba0c7 | [
"Apache-2.0"
] | null | null | null | api/python/setup.py | viveklak/quilt | 248971a9e2ecd1bcae3a5af77d4d8933b2dba0c7 | [
"Apache-2.0"
] | null | null | null | api/python/setup.py | viveklak/quilt | 248971a9e2ecd1bcae3a5af77d4d8933b2dba0c7 | [
"Apache-2.0"
] | null | null | null | import os
import sys
from pathlib import Path
from setuptools import setup, find_packages
from setuptools.command.install import install
VERSION = Path(Path(__file__).parent, "quilt3", "VERSION").read_text().strip()
def readme():
readme_short = """
Quilt manages data like code (with packages, repositories, browsing and
revision history) so that teams can experiment faster in machine learning,
biotech, and other data-driven domains.
The `quilt3` PyPi package allows you to build, push, and install data packages.
Visit the `documentation quickstart <https://docs.quiltdata.com/quickstart>`_
to learn more.
"""
return readme_short
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = 'verify that the git tag matches our version'
def run(self):
tag = os.getenv('CIRCLE_TAG')
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
setup(
name="quilt3",
version=VERSION,
packages=find_packages(),
description='Quilt: where data comes together',
long_description=readme(),
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
author='quiltdata',
author_email='contact@quiltdata.io',
license='LICENSE',
url='https://github.com/quiltdata/quilt',
keywords='',
install_requires=[
'appdirs>=1.4.0',
'aws-requests-auth>=0.4.2',
'boto3>=1.10.0',
'dnspython>=1.16.0',
'flask',
'flask_cors',
'flask_json',
'jsonlines==1.2.0',
'packaging>=16.8',
'python-dateutil<=2.8.0', # 2.8.1 conflicts with botocore
'PyYAML>=5.1',
'requests>=2.12.4',
'tenacity>=5.1.1',
'tqdm>=4.26.0',
'urllib3<1.25,>=1.21.1', # required by requests
'requests_futures==1.0.0',
],
extras_require={
'pyarrow': [
'numpy>=1.14.0', # required by pandas, but missing from its dependencies.
'pandas>=0.19.2',
'pyarrow>=0.14.1', # as of 7/5/19: linux/circleci bugs on 0.14.0
],
'tests': [
'codecov',
'numpy>=1.14.0', # required by pandas, but missing from its dependencies.
'pandas>=0.19.2',
'pyarrow>=0.14.1', # as of 7/5/19: linux/circleci bugs on 0.14.0
'pytest<5.1.0', # TODO: Fix pytest.ensuretemp in conftest.py
'pytest-cov',
'pytest-env',
'responses',
'tox',
'detox',
'tox-pytest-summary',
'git-pylint-commit-hook',
],
},
include_package_data=True,
entry_points={
'console_scripts': ['quilt3=quilt3.main:main'],
},
cmdclass={
'verify': VerifyVersionCommand,
}
)
| 31.141509 | 100 | 0.563163 |
b286a9f74aa6c8d12ebe6da7c83f1b17510840ea | 564 | py | Python | sandbox/finetuning/envs/mujoco/gather/swimmer_gather_env.py | andrewli77/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 23 | 2020-04-27T23:53:44.000Z | 2022-03-10T03:13:16.000Z | sandbox/finetuning/envs/mujoco/gather/swimmer_gather_env.py | WeiChengTseng/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 1 | 2021-11-14T13:30:22.000Z | 2021-11-14T13:30:22.000Z | sandbox/finetuning/envs/mujoco/gather/swimmer_gather_env.py | WeiChengTseng/rllab-finetuning | 2dae9141d0fdc284d04f18931907131d66b43023 | [
"MIT"
] | 8 | 2020-06-17T03:28:34.000Z | 2022-03-09T03:13:03.000Z | from rllab.envs.mujoco.gather.gather_env import GatherEnv
# from rllab.envs.mujoco.gather.baselines_gather_env import GatherEnv
from sandbox.finetuning.envs.mujoco.swimmer_env import SwimmerEnv
import time
class SwimmerGatherEnv(GatherEnv):
MODEL_CLASS = SwimmerEnv
ORI_IND = 2
if __name__ == "__main__":
env = SwimmerGatherEnv()
# import IPython; IPython.embed()
while True:
env.reset()
for _ in range(1000):
env.render()
_, reward, _, _ = env.step(env.action_space.sample()) # take a random action
| 29.684211 | 89 | 0.698582 |
977a0bbbea05425b3ccc71ff48fc292429618687 | 2,815 | py | Python | tests/msgterm/test_msgterm.py | nicoalonso/dory | 3322afb929f0bc310f92b3294426eaefb1ff9353 | [
"MIT"
] | null | null | null | tests/msgterm/test_msgterm.py | nicoalonso/dory | 3322afb929f0bc310f92b3294426eaefb1ff9353 | [
"MIT"
] | 2 | 2021-04-30T21:02:08.000Z | 2021-06-02T00:49:05.000Z | tests/msgterm/test_msgterm.py | nicoalonso/dory | 3322afb929f0bc310f92b3294426eaefb1ff9353 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import unittest
from unittest import TestCase
from unittest.mock import Mock, MagicMock, patch, create_autospec
from io import StringIO
from msgterm import MsgTerm
class TestMsgTerm(TestCase):
'''
MsgTerm Test
Extends:
TestCase
'''
@patch('sys.stdout', new_callable=StringIO)
def test_message(self, _stdout):
# [ When ]
MsgTerm.message('test')
# [ Then ]
self.assertIn('test', _stdout.getvalue())
# [ When ]
MsgTerm.message(['test1', 'test2'], bold=True, par=True, lbl='+')
# [ Then ]
self.assertIn('test1', _stdout.getvalue())
self.assertIn('test2', _stdout.getvalue())
self.assertIn('[+]', _stdout.getvalue())
# [ When ]
MsgTerm.message('reverse', reverse=True, type=11, hr=True)
# [ Then ]
self.assertIn('reverse', _stdout.getvalue())
# [ Given ]
MsgTerm.verbosity(2)
# [ When ]
MsgTerm.message('debug', type=-1)
# [ Then ]
self.assertNotIn('debug', _stdout.getvalue())
# [ Given ]
msg = MsgTerm('test9')
# [ When ]
val = '%s' % msg
# [ Then ]
self.assertEqual('test9', val)
# [ When ]
with self.assertRaises(ValueError):
MsgTerm.message(111)
@patch('sys.stdout', new_callable=StringIO)
def test_msg_defined(self, _stdout):
# [ Given ]
MsgTerm.verbosity(0)
# [ When ]
MsgTerm.text('text')
MsgTerm.debug('debug')
MsgTerm.info('info')
MsgTerm.success('success')
MsgTerm.warning('warning')
MsgTerm.alert('alert')
MsgTerm.error('error')
MsgTerm.fatal('fatal')
MsgTerm.help('help')
# [ Then ]
self.assertIn('text', _stdout.getvalue())
self.assertIn('debug', _stdout.getvalue())
self.assertIn('info', _stdout.getvalue())
self.assertIn('success', _stdout.getvalue())
self.assertIn('warning', _stdout.getvalue())
self.assertIn('alert', _stdout.getvalue())
self.assertIn('error', _stdout.getvalue())
self.assertIn('fatal', _stdout.getvalue())
self.assertIn('help', _stdout.getvalue())
# [ When ]
MsgTerm.fatal( ('fatal2', 'fatal3') )
MsgTerm.help( ('help1', 'help2'), section='Config' )
# [ Then ]
self.assertIn('fatal2', _stdout.getvalue())
self.assertIn('help1', _stdout.getvalue())
self.assertIn('Help :: Config', _stdout.getvalue())
@patch('sys.stdout', new_callable=StringIO)
def test_jsonPrint(self, _stdout):
# [ When ]
MsgTerm.jsonPrint({'test': 'dummy'})
# [ Then ]
self.assertIn('dummy', _stdout.getvalue())
| 24.911504 | 73 | 0.560213 |
cda12ea70d6cf7d58ef5c62b8a28a9b70263ffcc | 892 | py | Python | synapse/replication/slave/storage/appservice.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 1 | 2019-09-14T03:24:03.000Z | 2019-09-14T03:24:03.000Z | synapse/replication/slave/storage/appservice.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 4 | 2020-03-04T23:47:05.000Z | 2021-12-09T21:41:44.000Z | synapse/replication/slave/storage/appservice.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.storage.appservice import (
ApplicationServiceTransactionWorkerStore,
ApplicationServiceWorkerStore,
)
class SlavedApplicationServiceStore(
ApplicationServiceTransactionWorkerStore, ApplicationServiceWorkerStore
):
pass
| 33.037037 | 75 | 0.774664 |
196fe1a32425f08852b840fb97b56f43b92ffadd | 1,818 | py | Python | dot_path_remove/dot_path_remove.py | contrerasjf0/dot_remove_path | 34c48331f777177e891a093ea47773a38b370244 | [
"MIT"
] | null | null | null | dot_path_remove/dot_path_remove.py | contrerasjf0/dot_remove_path | 34c48331f777177e891a093ea47773a38b370244 | [
"MIT"
] | null | null | null | dot_path_remove/dot_path_remove.py | contrerasjf0/dot_remove_path | 34c48331f777177e891a093ea47773a38b370244 | [
"MIT"
] | null | null | null | import copy
from dot_path_remove.utills import get_parts_from_list_format, is_list_format
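# Remove a single "spot" (dict key or list element) from source. A spot of the form
# "name[<index>]" pops that element from the list; "name[all]" removes the whole list.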
def remove_spot(source: dict, spot: str) -> dict:
is_spot_list = is_list_format(spot)
if not is_spot_list and spot in source:
source.pop(spot)
else:
list_parts = get_parts_from_list_format(spot)
list_spot = list_parts.get('spot')
list_index = list_parts.get('index')
if list_index == 'all':
source.pop(list_spot)
else:
list_index = int(list_index)
if list_spot in source and isinstance(source[list_spot], list):
source[list_spot].pop(list_index)
return source
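# Remove the element addressed by a dot-separated path from a deep copy of source, e.g.
# remove_path_dot({"a": {"b": [1, 2]}}, "a.b[0]") -> {"a": {"b": [2]}} (assuming the helpers
# in utills recognise the "name[index]" syntax used above).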
def remove_path_dot(source: dict, paths: str) -> dict:
if not isinstance(source, dict):
raise TypeError("source must be dict")
if paths == '':
return source
source_copy = copy.deepcopy(source)
path_spots = paths.split('.')
source_aux = source_copy
for i, spot in enumerate(path_spots):
if i == len(path_spots) - 1:
remove_spot(source_aux, spot)
else:
is_spot_list = is_list_format(spot)
if is_spot_list:
list_spot_parts = get_parts_from_list_format(spot)
list_spot = list_spot_parts.get('spot')
list_index = list_spot_parts.get('index')
if list_index == 'all':
source_aux = source_aux[list_spot]
else:
list_index = int(list_index)
if list_spot in source_aux and isinstance(source_aux[list_spot], list):
source_aux = source_aux[list_spot][list_index]
else:
if spot in source_aux:
source_aux = source_aux[spot]
return source_copy
| 29.803279 | 91 | 0.59626 |
389ae92a5fae5c8fa104c0f0a03c9accf1246621 | 4,916 | py | Python | env/lib/python3.8/site-packages/arcade/examples/sprite_follow_simple.py | iodaniel/cse210-project | abc7a4e64d35707d04900f8535961fc57ed8f4e1 | [
"MIT"
] | 1 | 2021-07-11T02:51:52.000Z | 2021-07-11T02:51:52.000Z | env/lib/python3.8/site-packages/arcade/examples/sprite_follow_simple.py | iodaniel/cse210-project | abc7a4e64d35707d04900f8535961fc57ed8f4e1 | [
"MIT"
] | null | null | null | env/lib/python3.8/site-packages/arcade/examples/sprite_follow_simple.py | iodaniel/cse210-project | abc7a4e64d35707d04900f8535961fc57ed8f4e1 | [
"MIT"
] | null | null | null | """
Sprite Follow Player
This moves towards the player in both the x and y direction.
Artwork from http://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_follow_simple
"""
import random
import arcade
import os
# --- Constants ---
SPRITE_SCALING_PLAYER = 0.5
SPRITE_SCALING_COIN = 0.2
COIN_COUNT = 50
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprite Follow Player Simple Example"
SPRITE_SPEED = 0.5
class Coin(arcade.Sprite):
"""
This class represents the coins on our screen. It is a child class of
the arcade library's "Sprite" class.
"""
def follow_sprite(self, player_sprite):
"""
This function will move the current sprite towards whatever
other sprite is specified as a parameter.
We use the 'min' function here to get the sprite to line up with
the target sprite, and not jump around if the sprite is not off
an exact multiple of SPRITE_SPEED.
"""
if self.center_y < player_sprite.center_y:
self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y)
elif self.center_y > player_sprite.center_y:
self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y)
if self.center_x < player_sprite.center_x:
self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x)
elif self.center_x > player_sprite.center_x:
self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x)
class MyGame(arcade.Window):
""" Our custom Window Class"""
def __init__(self):
""" Initializer """
# Call the parent class initializer
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Variables that will hold sprite lists
self.player_list = None
self.coin_list = None
# Set up the player info
self.player_sprite = None
self.score = 0
# Don't show the mouse cursor
self.set_mouse_visible(False)
arcade.set_background_color(arcade.color.AMAZON)
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.player_list = arcade.SpriteList()
self.coin_list = arcade.SpriteList()
# Score
self.score = 0
# Set up the player
# Character image from kenney.nl
self.player_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png", SPRITE_SCALING_PLAYER)
self.player_sprite.center_x = 50
self.player_sprite.center_y = 50
self.player_list.append(self.player_sprite)
# Create the coins
for i in range(COIN_COUNT):
# Create the coin instance
# Coin image from kenney.nl
coin = Coin(":resources:images/items/coinGold.png", SPRITE_SCALING_COIN)
# Position the coin
coin.center_x = random.randrange(SCREEN_WIDTH)
coin.center_y = random.randrange(SCREEN_HEIGHT)
# Add the coin to the lists
self.coin_list.append(coin)
def on_draw(self):
""" Draw everything """
arcade.start_render()
self.coin_list.draw()
self.player_list.draw()
# Put the text on the screen.
output = f"Score: {self.score}"
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_mouse_motion(self, x, y, dx, dy):
""" Handle Mouse Motion """
# Move the center of the player sprite to match the mouse x, y
self.player_sprite.center_x = x
self.player_sprite.center_y = y
def on_update(self, delta_time):
""" Movement and game logic """
for coin in self.coin_list:
coin.follow_sprite(self.player_sprite)
# Generate a list of all sprites that collided with the player.
hit_list = arcade.check_for_collision_with_list(self.player_sprite, self.coin_list)
# Loop through each colliding sprite, remove it, and add to the score.
for coin in hit_list:
coin.remove_from_sprite_lists()
self.score += 1
def main():
""" Main method """
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 31.922078 | 143 | 0.629373 |
8f059a794d8b5193b73c4ce1d66938507e8aeb7f | 9,486 | py | Python | docs/conf.py | lumina-networks/topology-yaml | 4294aaaf778503809179f499bd785a56d03c866f | [
"MIT"
] | 1 | 2018-08-21T22:58:35.000Z | 2018-08-21T22:58:35.000Z | docs/conf.py | lumina-networks/topology-yaml | 4294aaaf778503809179f499bd785a56d03c866f | [
"MIT"
] | null | null | null | docs/conf.py | lumina-networks/topology-yaml | 4294aaaf778503809179f499bd785a56d03c866f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# tosca-nfv documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 6 10:12:52 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Topology-Yaml'
copyright = u'2018, Lumina Networks OSS'
author = u'Lumina Networks OSS'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Topology-Yaml'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Topology-Yaml', u'Topology-Yaml Documentation',
u'OSS', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Topology', u'Topology-Yaml Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'topology', u'Topology-Yaml Documentation',
author, 'Lumina Networks OSS', 'Topology File Generator and Parser.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.597938 | 80 | 0.718005 |
3007eedc6df48da431b6f37881b2b289439c44f1 | 1,888 | py | Python | python/ray/autoscaler/_private/constants.py | AIX2/ray | 91a1ac620012e0c779bf1bbf6983af22675cf26a | [
"Apache-2.0"
] | null | null | null | python/ray/autoscaler/_private/constants.py | AIX2/ray | 91a1ac620012e0c779bf1bbf6983af22675cf26a | [
"Apache-2.0"
] | null | null | null | python/ray/autoscaler/_private/constants.py | AIX2/ray | 91a1ac620012e0c779bf1bbf6983af22675cf26a | [
"Apache-2.0"
] | null | null | null | import os
from ray.ray_constants import ( # noqa F401
AUTOSCALER_RESOURCE_REQUEST_CHANNEL, DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES,
DEFAULT_OBJECT_STORE_MEMORY_PROPORTION, LOGGER_FORMAT,
MEMORY_RESOURCE_UNIT_BYTES, RESOURCES_ENVIRONMENT_VARIABLE)
def env_integer(key, default):
if key in os.environ:
return int(os.environ[key])
return default
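# Illustrative note (not from the original file): each constant below that is
# read through env_integer() can be overridden by exporting a variable of the
# same name in the environment of the process running the autoscaler, e.g.
# AUTOSCALER_UPDATE_INTERVAL_S=10 switches updates from every 5s to every 10s.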
# Abort autoscaling if more than this number of errors are encountered. This
# is a safety feature to prevent e.g. runaway node launches.
AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
# The maximum number of nodes to launch in a single request.
# Multiple requests may be made for this batch size, up to
# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
# Max number of nodes to launch at a time.
AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
"AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
# Interval at which to perform autoscaling updates.
AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
# The autoscaler will attempt to restart Ray on nodes it hasn't heard from
# in more than this interval.
AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
30)
# The maximum allowed resource demand vector size to guarantee the resource
# demand scheduler bin packing algorithm takes a reasonable amount of time
# to run.
AUTOSCALER_MAX_RESOURCE_DEMAND_VECTOR_SIZE = 1000
# Max number of retries to AWS (default is 5, time increases exponentially)
BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
# Max number of retries to create an EC2 node (retry different subnet)
BOTO_CREATE_MAX_RETRIES = env_integer("BOTO_CREATE_MAX_RETRIES", 5)
# ray home path in the container image
RAY_HOME = "/home/ray"
| 39.333333 | 79 | 0.786547 |
6da3a93cb8a3ea8f0956fda0437f79a3de65b105 | 10,814 | py | Python | script/t2c_extract_features.py | ThierryDeruyttere/vilbert-Talk2car | 6476b16970cfd0d88e09beb9a57cc5c39b7acb3f | [
"MIT"
] | null | null | null | script/t2c_extract_features.py | ThierryDeruyttere/vilbert-Talk2car | 6476b16970cfd0d88e09beb9a57cc5c39b7acb3f | [
"MIT"
] | null | null | null | script/t2c_extract_features.py | ThierryDeruyttere/vilbert-Talk2car | 6476b16970cfd0d88e09beb9a57cc5c39b7acb3f | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Requires vqa-maskrcnn-benchmark to be built and installed. See Readme
# for more details.
import argparse
import os
import time
import cv2
import numpy as np
import torch
from PIL import Image
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.layers import nms
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
import sys
sys.path.append("/cw/liir/NoCsBack/testliir/thierry/PathProjection/vilbert/vilbert/datasets")
from talk2car import Talk2Car
class FeatureExtractor:
MAX_SIZE = 1333
MIN_SIZE = 800
def __init__(self):
self.args = self.get_parser().parse_args()
self.detection_model = self._build_detection_model()
os.makedirs(self.args.output_folder, exist_ok=True)
def get_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_file", default=None, type=str, help="Detectron model file"
)
parser.add_argument(
"--config_file", default=None, type=str, help="Detectron config file"
)
parser.add_argument(
"--imdb_gt_file",
default=None,
type=str,
help="Imdb file containing file path and bboxes.",
)
parser.add_argument("--batch_size", type=int, default=6, help="Batch size")
parser.add_argument(
"--num_features",
type=int,
default=100,
help="Number of features to extract.",
)
parser.add_argument(
"--output_folder", type=str, default="./output", help="Output folder"
)
parser.add_argument(
"--feature_name",
type=str,
help="The name of the feature to extract",
default="fc6",
)
parser.add_argument(
"--confidence_threshold",
type=float,
default=0,
help="Threshold of detection confidence above which boxes will be selected",
)
parser.add_argument(
"--background",
action="store_true",
help="The model will output predictions for the background class when set",
)
parser.add_argument(
"--partition", type=int, default=0, help="Partition to download."
)
return parser
def _build_detection_model(self):
cfg.merge_from_file(self.args.config_file)
cfg.freeze()
model = build_detection_model(cfg)
checkpoint = torch.load(self.args.model_file, map_location=torch.device("cpu"))
load_state_dict(model, checkpoint.pop("model"))
model.to("cuda")
model.eval()
return model
def get_batch_proposals(self, images, im_scales, im_infos, commands):
proposals_batch = []
for idx, img_info in enumerate(im_infos):
boxes_tensor = self.t2c.get_raw_boxes_for_sample(commands[idx]).to("cuda")
#boxes_tensor = torch.from_numpy(
# proposals[idx]["bbox"][: int(proposals[idx]["num_box"]), 0:]
#).to("cuda")
orig_image_size = (img_info["width"], img_info["height"])
boxes = BoxList(boxes_tensor, orig_image_size)
image_size = (images.image_sizes[idx][1], images.image_sizes[idx][0])
boxes = boxes.resize(image_size)
proposals_batch.append(boxes)
return proposals_batch
def _image_transform(self, path):
img = Image.open(path)
im = np.array(img).astype(np.float32)
# IndexError: too many indices for array, grayscale images
if len(im.shape) < 3:
im = np.repeat(im[:, :, np.newaxis], 3, axis=2)
im = im[:, :, ::-1]
im -= np.array([102.9801, 115.9465, 122.7717])
im_shape = im.shape
im_height = im_shape[0]
im_width = im_shape[1]
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
# Scale based on minimum size
im_scale = self.MIN_SIZE / im_size_min
# Prevent the biggest axis from being more than max_size
# If bigger, scale it down
if np.round(im_scale * im_size_max) > self.MAX_SIZE:
im_scale = self.MAX_SIZE / im_size_max
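        # Worked example (illustrative): for a 900 x 1600 input, the first pass
        # gives im_scale = 800 / 900 ~= 0.889, but 0.889 * 1600 ~= 1422 exceeds
        # 1333, so im_scale drops to 1333 / 1600 ~= 0.833, i.e. roughly a
        # 750 x 1333 result.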
im = cv2.resize(
im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR
)
img = torch.from_numpy(im).permute(2, 0, 1)
im_info = {"width": im_width, "height": im_height}
return img, im_scale, im_info
def _process_feature_extraction(
self, output, im_scales, im_infos, feature_name="fc6", conf_thresh=0
):
#print("here")
batch_size = len(output[0]["proposals"])
n_boxes_per_image = [len(boxes) for boxes in output[0]["proposals"]]
score_list = output[0]["scores"].split(n_boxes_per_image)
score_list = [torch.nn.functional.softmax(x, -1) for x in score_list]
feats = output[0][feature_name].split(n_boxes_per_image)
cur_device = score_list[0].device
feat_list = []
info_list = []
for i in range(batch_size):
dets = output[0]["proposals"][i].bbox / im_scales[i]
scores = score_list[i]
max_conf = torch.zeros((scores.shape[0])).to(cur_device)
conf_thresh_tensor = torch.full_like(max_conf, conf_thresh)
start_index = 1
# Column 0 of the scores matrix is for the background class
if self.args.background:
start_index = 0
for cls_ind in range(start_index, scores.shape[1]):
cls_scores = scores[:, cls_ind]
keep = nms(dets, cls_scores, 0.5)
max_conf[keep] = torch.where(
# Better than max one till now and minimally greater than conf_thresh
(cls_scores[keep] > max_conf[keep])
& (cls_scores[keep] > conf_thresh_tensor[keep]),
cls_scores[keep],
max_conf[keep],
)
feat_list.append(feats[i])
num_boxes = len(feats[i])
bbox = output[0]["proposals"][i]
bbox = bbox.resize(((im_infos[i]["width"], im_infos[i]["height"])))
bbox = bbox.bbox
# Predict the class label using the scores
objects = torch.argmax(scores[:, start_index:], dim=1)
info_list.append(
{
"bbox": bbox.cpu().numpy(),
"num_boxes": num_boxes,
"objects": objects.cpu().numpy(),
"image_width": im_infos[i]["width"],
"image_height": im_infos[i]["height"],
"cls_prob": scores.cpu().numpy(),
}
)
return feat_list, info_list
def get_detectron_features(self, commands, img_dir):
img_tensor, im_scales, im_infos = [], [], []
for sample in commands:
img_path = os.path.join(img_dir, sample['img'])
im, im_scale, im_info = self._image_transform(img_path)
img_tensor.append(im)
im_scales.append(im_scale)
im_infos.append(im_info)
# Image dimensions should be divisible by 32, to allow convolutions
# in detector to work
current_img_list = to_image_list(img_tensor, size_divisible=32)
current_img_list = current_img_list.to("cuda")
proposals = self.get_batch_proposals(
current_img_list, im_scales, im_infos, commands
)
with torch.no_grad():
output = self.detection_model(current_img_list, proposals=proposals)
feat_list = self._process_feature_extraction(
output,
im_scales,
im_infos,
self.args.feature_name,
self.args.confidence_threshold,
)
return feat_list
def _chunks(self, array, chunk_size):
for i in range(0, len(array), chunk_size):
yield array[i : i + chunk_size]
def _save_feature(self, file_name, feature, info):
file_base_name = str(file_name).split(".")[0]
info["image_id"] = file_base_name
info["features"] = feature.cpu().numpy()
file_base_name = str(file_base_name) + ".npy"
np.save(os.path.join(self.args.output_folder, file_base_name), info)
def extract_features(self):
for split in ["train", "val", "test"]:
            self.t2c = Talk2Car(split=split,
                                root="/cw/liir/NoCsBack/testliir/thierry/3rd_place/data",
                                vocabulary="/cw/liir/NoCsBack/testliir/thierry/3rd_place/utils/vocabulary.txt",
                                num_rpns_per_image=32)
ix = 0
for chunk in self._chunks(list(self.t2c.data.values()), self.args.batch_size):
try:
t0 = time.time()
features, infos = self.get_detectron_features(chunk, self.t2c.img_dir)
for idx, c in enumerate(chunk):
self._save_feature(c["img"], features[idx], infos[idx])
t1 = time.time()
print("{}/{} [took {:.2f}s, expected remaining {:.2f}s]".format((ix+1)*self.args.batch_size, len(self.t2c),
t1 - t0, (len(self.t2c)//self.args.batch_size - (ix+1))*(t1-t0)))
ix+=1
except BaseException as ex:
print(ex)
exit()
# files = np.load(self.args.imdb_gt_file, allow_pickle=True)
# # files = sorted(files)
# # files = [files[i: i+1000] for i in range(0, len(files), 1000)][self.args.partition]
# for chunk in self._chunks(files, self.args.batch_size):
# try:
# features, infos = self.get_detectron_features(chunk)
# for idx, c in enumerate(chunk):
# self._save_feature(c["file_name"], features[idx], infos[idx])
# except BaseException:
# continue
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
if __name__ == "__main__":
feature_extractor = FeatureExtractor()
print("Number of params", count_parameters(feature_extractor))
#exit()
feature_extractor.extract_features()
| 38.347518 | 141 | 0.586739 |
3b1ff1caa13a6c1b75260133c709fbc6569489af | 8,223 | py | Python | oauthenticator/globus.py | meesam15/oauthenticator | 3ed131b9a825f1ee6a55a8d378b6b277f172dd0b | [
"BSD-3-Clause"
] | 1 | 2021-02-26T03:19:53.000Z | 2021-02-26T03:19:53.000Z | oauthenticator/globus.py | meesam15/oauthenticator | 3ed131b9a825f1ee6a55a8d378b6b277f172dd0b | [
"BSD-3-Clause"
] | 1 | 2021-06-25T15:17:10.000Z | 2021-06-25T15:17:10.000Z | oauthenticator/globus.py | kidig/oauthenticator | 75d8b9b5ee366e7094d28a4c569986f04e3ea802 | [
"BSD-3-Clause"
] | 1 | 2019-02-05T21:06:42.000Z | 2019-02-05T21:06:42.000Z | """
Custom Authenticator to use Globus OAuth2 with JupyterHub
"""
import os
import pickle
import base64
from tornado import gen, web
from tornado.auth import OAuth2Mixin
from tornado.web import HTTPError
from traitlets import List, Unicode, Bool
from jupyterhub.handlers import LogoutHandler
from jupyterhub.auth import LocalAuthenticator
from jupyterhub.utils import url_path_join
from .oauth2 import OAuthLoginHandler, OAuthenticator
try:
import globus_sdk
except ImportError:
    raise ImportError('globus_sdk is not installed; please see '
                      '"globus-requirements.txt" to use Globus OAuth.')
class GlobusMixin(OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = 'https://auth.globus.org/v2/oauth2/authorize'
class GlobusLoginHandler(OAuthLoginHandler, GlobusMixin):
pass
class GlobusLogoutHandler(LogoutHandler):
"""
Handle custom logout URLs and token revocation. If a custom logout url
is specified, the 'logout' button will log the user out of that identity
provider in addition to clearing the session with Jupyterhub, otherwise
only the Jupyterhub session is cleared.
"""
@gen.coroutine
def get(self):
user = self.get_current_user()
if user:
if self.authenticator.revoke_tokens_on_logout:
self.clear_tokens(user)
self.clear_login_cookie()
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
else:
super().get()
@gen.coroutine
def clear_tokens(self, user):
if not self.authenticator.revoke_tokens_on_logout:
return
state = yield user.get_auth_state()
if state:
self.authenticator.revoke_service_tokens(state.get('tokens'))
self.log.info('Logout: Revoked tokens for user "{}" services: {}'
.format(user.name, ','.join(state['tokens'].keys())))
state['tokens'] = ''
user.save_auth_state(state)
class GlobusOAuthenticator(OAuthenticator):
"""The Globus OAuthenticator handles both authorization and passing
transfer tokens to the spawner. """
login_service = 'Globus'
login_handler = GlobusLoginHandler
logout_handler = GlobusLogoutHandler
identity_provider = Unicode(help="""Restrict which institution a user
can use to login (GlobusID, University of Hogwarts, etc.). This should
be set in the app at developers.globus.org, but this acts as an additional
check to prevent unnecessary account creation.""").tag(config=True)
def _identity_provider_default(self):
return os.getenv('IDENTITY_PROVIDER', 'globusid.org')
exclude_tokens = List(
help="""Exclude tokens from being passed into user environments
when they start notebooks, Terminals, etc."""
).tag(config=True)
def _exclude_tokens_default(self):
return ['auth.globus.org']
def _scope_default(self):
return [
'openid',
'profile',
'urn:globus:auth:scope:transfer.api.globus.org:all'
]
allow_refresh_tokens = Bool(
help="""Allow users to have Refresh Tokens. If Refresh Tokens are not
allowed, users must use regular Access Tokens which will expire after
a set time. Set to False for increased security, True for increased
convenience."""
).tag(config=True)
def _allow_refresh_tokens_default(self):
return True
globus_local_endpoint = Unicode(help="""If Jupyterhub is also a Globus
endpoint, its endpoint id can be specified here.""").tag(config=True)
def _globus_local_endpoint_default(self):
return os.getenv('GLOBUS_LOCAL_ENDPOINT', '')
logout_redirect_url = \
Unicode(help="""URL for logging out.""").tag(config=True)
def _logout_redirect_url_default(self):
return os.getenv('LOGOUT_REDIRECT_URL', '')
revoke_tokens_on_logout = Bool(
help="""Revoke tokens so they cannot be used again. Single-user servers
MUST be restarted after logout in order to get a fresh working set of
tokens."""
).tag(config=True)
def _revoke_tokens_on_logout_default(self):
return False
@gen.coroutine
def pre_spawn_start(self, user, spawner):
"""Add tokens to the spawner whenever the spawner starts a notebook.
This will allow users to create a transfer client:
globus-sdk-python.readthedocs.io/en/stable/tutorial/#tutorial-step4
"""
spawner.environment['GLOBUS_LOCAL_ENDPOINT'] = \
self.globus_local_endpoint
state = yield user.get_auth_state()
if state:
globus_data = base64.b64encode(
pickle.dumps(state)
)
spawner.environment['GLOBUS_DATA'] = globus_data.decode('utf-8')
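    # Consumer-side sketch (not part of the original file): inside a spawned
    # notebook, a user could rebuild the token state roughly as
    #   import os, base64, pickle, globus_sdk
    #   state = pickle.loads(base64.b64decode(os.environ['GLOBUS_DATA']))
    #   token = state['tokens']['transfer.api.globus.org']['access_token']
    #   tc = globus_sdk.TransferClient(
    #       authorizer=globus_sdk.AccessTokenAuthorizer(token))
    # assuming the transfer scope was requested and not excluded above.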
def globus_portal_client(self):
return globus_sdk.ConfidentialAppAuthClient(
self.client_id,
self.client_secret)
@gen.coroutine
def authenticate(self, handler, data=None):
"""
Authenticate with globus.org. Usernames (and therefore Jupyterhub
accounts) will correspond to a Globus User ID, so foouser@globusid.org
will have the 'foouser' account in Jupyterhub.
"""
code = handler.get_argument("code")
redirect_uri = self.get_callback_url(self)
client = self.globus_portal_client()
client.oauth2_start_flow(
redirect_uri,
requested_scopes=' '.join(self.scope),
refresh_tokens=self.allow_refresh_tokens
)
        # Exchange the authorization code for tokens and decode the id_token
tokens = client.oauth2_exchange_code_for_tokens(code)
id_token = tokens.decode_id_token(client)
# It's possible for identity provider domains to be namespaced
# https://docs.globus.org/api/auth/specification/#identity_provider_namespaces # noqa
username, domain = id_token.get('preferred_username').split('@', 1)
if self.identity_provider and domain != self.identity_provider:
raise HTTPError(
403,
'This site is restricted to {} accounts. Please link your {}'
' account at {}.'.format(
self.identity_provider,
self.identity_provider,
'globus.org/app/account'
)
)
return {
'name': username,
'auth_state': {
'client_id': self.client_id,
'tokens': {
tok: v for tok, v in tokens.by_resource_server.items()
if tok not in self.exclude_tokens
},
}
}
def revoke_service_tokens(self, services):
"""Revoke live Globus access and refresh tokens. Revoking inert or
non-existent tokens does nothing. Services are defined by dicts
returned by tokens.by_resource_server, for example:
services = { 'transfer.api.globus.org': {'access_token': 'token'}, ...
<Additional services>...
}
"""
client = self.globus_portal_client()
for service_data in services.values():
client.oauth2_revoke_token(service_data['access_token'])
client.oauth2_revoke_token(service_data['refresh_token'])
def get_callback_url(self, handler=None):
"""
Getting the configured callback url
"""
if self.oauth_callback_url is None:
raise HTTPError(500,
'No callback url provided. '
'Please configure by adding '
'c.GlobusOAuthenticator.oauth_callback_url '
'to the config'
)
return self.oauth_callback_url
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_handlers(self, app):
return super().get_handlers(app) + [(r'/logout', self.logout_handler)]
class LocalGlobusOAuthenticator(LocalAuthenticator, GlobusOAuthenticator):
"""A version that mixes in local system user creation"""
pass
| 35.752174 | 93 | 0.642466 |