Dataset schema (one pipe-separated row per source file; ⌀ marks a column that may be null):

| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 3–1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3–972 |
| max_stars_repo_name | stringlengths | 6–130 |
| max_stars_repo_head_hexsha | stringlengths | 40–78 |
| max_stars_repo_licenses | sequencelengths | 1–10 |
| max_stars_count | int64 | 1–191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24, ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24, ⌀ |
| max_issues_repo_path | stringlengths | 3–972 |
| max_issues_repo_name | stringlengths | 6–130 |
| max_issues_repo_head_hexsha | stringlengths | 40–78 |
| max_issues_repo_licenses | sequencelengths | 1–10 |
| max_issues_count | int64 | 1–116k, ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24, ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24, ⌀ |
| max_forks_repo_path | stringlengths | 3–972 |
| max_forks_repo_name | stringlengths | 6–130 |
| max_forks_repo_head_hexsha | stringlengths | 40–78 |
| max_forks_repo_licenses | sequencelengths | 1–10 |
| max_forks_count | int64 | 1–105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24, ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24, ⌀ |
| content | stringlengths | 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |

The data rows that follow list these columns in this order, with the raw file text in the `content` column.
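As an illustration only, here is a minimal, hypothetical sketch of streaming rows that follow the schema above with the Hugging Face `datasets` library. The dataset id `some-org/some-code-dataset` is a placeholder, not the actual source of this dump; the column names are taken directly from the schema table.

```python
# Hypothetical sketch -- the dataset id below is a placeholder, not the real source of these rows.
from datasets import load_dataset

rows = load_dataset("some-org/some-code-dataset", split="train", streaming=True)

for row in rows:
    # Column names come straight from the schema table above.
    print(row["hexsha"], row["lang"], row["max_stars_repo_path"])
    print("stars:", row["max_stars_count"])       # nullable column (marked ⌀ above)
    print("file size (bytes):", row["size"])
    content = row["content"]                      # raw text of the source file
    print("first line:", content.splitlines()[0] if content else "<empty>")
    break  # inspect only the first row in this sketch
```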
db6290b8e8faaa6a4872ab53de8a80eed6f7d72f | 6,057 | py | Python | nettests/launch_nodes.py | harrywong/evt | 95985384619e0f5ff4021e8838d421ac4b4b946d | ["BSD-3-Clause"] | 1 | 2019-04-30T13:28:42.000Z | 2019-04-30T13:28:42.000Z | nettests/launch_nodes.py | harrywong/evt | 95985384619e0f5ff4021e8838d421ac4b4b946d | ["BSD-3-Clause"] | null | null | null | nettests/launch_nodes.py | harrywong/evt | 95985384619e0f5ff4021e8838d421ac4b4b946d | ["BSD-3-Clause"] | null | null | null |
import json
import os
import sys
import time
import click
import docker
class command():
"""docstring for command."""
def __init__(self, cmd):
self.content = cmd
def add_option(self, option):
self.content += ' '
self.content += option
def get_arguments(self):
return self.content
# free the container
def free_container(name, client):
i = -1
while(True):
try:
i += 1
container = client.containers.get(name+str(i))
container.stop()
container.remove()
print('free {}{} succeed'.format(name, i))
except docker.errors.NotFound:
if(i >= 10):
break
# free the dir
def free_the_dir(dir):
list = os.listdir(dir)
print('remove the files in {}'.format(dir))
for name in list:
print(name)
os.removedirs(os.path.join(dir, name))
if(not os.path.exists(dir)):
os.mkdir(dir, 0o755)
@click.group()
def run():
pass
@click.command()
@click.option('--config', help='the config of nodes', default='launch.config')
def create(config):
    f = open(config, 'r')
text = f.read()
f.close()
paras = json.loads(text)
producer_number = paras['producer_number'] # the number of the producer
nodes_number = paras['nodes_number'] # the number of nodes we run
evtd_port_http = paras['evtd_port_http'] # the begin port of nodes port,port+1 ....
evtd_port_p2p = paras['evtd_port_p2p'] # the begin port of nodes port,port+1 ....
evtd_dir = paras['evtd_dir'] # the data directory of the evtd
use_tmpfs = paras['use_tmpfs'] # use the tmpfs or not
tmpfs_size = paras['tmpfs_size'] # the memory usage per node
client = docker.from_env()
print('check and free the container before')
free_container('evtd_', client)
# network=client.networks.create("evt-net",driver="bridge")
# begin the nodes one by one
print('begin open the evtd')
for i in range(0, nodes_number):
# create files in evtd_dir
if(not os.path.exists(evtd_dir)):
os.mkdir(evtd_dir, 0o755)
file = os.path.join(evtd_dir, 'dir_'+str(i))
if(os.path.exists(file)):
            print("Warning: the directory was not freed beforehand")
else:
os.mkdir(file, 0o755)
# make the command
cmd = command('evtd.sh')
cmd.add_option('--delete-all-blocks')
if(i < producer_number):
cmd.add_option('--enable-stale-production')
cmd.add_option('--producer-name=evt')
else:
cmd.add_option('--producer-name=evt.'+str(i))
cmd.add_option('--http-server-address=evtd_'+str(i)+':'+str(8888+i))
cmd.add_option('--p2p-listen-endpoint=evtd_'+str(i)+':'+str(9876+i))
for j in range(0, nodes_number):
if(i == j):
continue
cmd.add_option(('--p2p-peer-address=evtd_'+str(j)+':'+str(9876+j)))
# run the image evtd in container
if(not use_tmpfs):
print('********evtd {} **************'.format(i))
print('name: evtd_{}'.format(i))
            print('network: evt-net')
print('http port: {} /tcp: {}'.format(evtd_port_http+i, 8888+i))
print('p2p port: {} /tcp: {}'.format(evtd_port_p2p+i, 9876+i))
print('mount location: {}'.format(file))
print('****************************')
container = client.containers.run(image='everitoken/evt:latest',
name='evtd_'+str(i),
command=cmd.get_arguments(),
network='evt-net',
ports={
str(evtd_port_http+i): 8888+i, str(evtd_port_p2p+i)+'/tcp': 9876+i},
detach=True,
volumes={
file: {'bind': '/opt/evtd/data', 'mode': 'rw'}}
)
else:
print('********evtd {} **************'.format(i))
print('name: evtd_{}'.format(i))
            print('network: evt-net')
print('http port: {} /tcp: {}'.format(evtd_port_http+i, 8888+i))
print('p2p port: {} /tcp: {}'.format(evtd_port_p2p+i, 9876+i))
print('tmpfs use size: {} M'.format(tmpfs_size))
print('****************************')
container = client.containers.run(image='everitoken/evt:latest',
name='evtd_'+str(i),
command=cmd.get_arguments(),
network='evt-net',
ports={
str(evtd_port_http+i): 8888+i, str(evtd_port_p2p+i)+'/tcp': 9876+i},
detach=True,
tmpfs={
'/opt/evtd/data': 'size='+str(tmpfs_size)+'M'}
#
)
# format with the click
@click.command()
@click.option('--config', help='the config of nodes', default='launch.config')
def free(config):
    f = open(config, 'r')
text = f.read()
f.close()
paras = json.loads(text)
free_dir = paras['free_dir'] # delete the directory of the evtd
evtd_dir = paras['evtd_dir'] # the data directory of the evtd
client = docker.from_env()
print('free the container')
free_container('evtd_', client)
if(free_dir):
print(evtd_dir)
free_the_dir(evtd_dir)
if __name__ == '__main__':
run.add_command(create)
run.add_command(free)
run()
| 36.487952 | 118 | 0.490507 |
d2e082acab7754c5d1f35daccedb3f8c04f6a7b3 | 5,693 | py | Python | scripts/main.py | harrivle/Mirai | ea2d4839f1f8b9f881798b819b2192ce2795bd5d | ["MIT"] | 37 | 2021-01-28T06:00:34.000Z | 2022-03-29T21:14:12.000Z | scripts/main.py | NkwamPhilip/Mirai | 70413de690da36c5878e2e6006711476e166bb1d | ["MIT"] | null | null | null | scripts/main.py | NkwamPhilip/Mirai | 70413de690da36c5878e2e6006711476e166bb1d | ["MIT"] | 14 | 2021-02-02T09:42:18.000Z | 2022-03-23T00:36:41.000Z |
import pickle
from os.path import dirname, realpath
import sys
import git
sys.path.append(dirname(dirname(realpath(__file__))))
import torch
import onconet.datasets.factory as dataset_factory
import onconet.models.factory as model_factory
from onconet.learn import train
import onconet.transformers.factory as transformer_factory
import onconet.visualize as visualize
import onconet.utils.parsing as parsing
import warnings
import onconet.learn.state_keeper as state
from onconet.utils.get_dataset_stats import get_dataset_stats
import onconet.utils.stats as stats
import pdb
import csv
#Constants
DATE_FORMAT_STR = "%Y-%m-%d:%H-%M-%S"
if __name__ == '__main__':
args = parsing.parse_args()
if args.ignore_warnings:
warnings.simplefilter('ignore')
repo = git.Repo(search_parent_directories=True)
commit = repo.head.object
args.commit = commit.hexsha
print("OncoNet main running from commit: \n\n{}\n{}author: {}, date: {}".format(
commit.hexsha, commit.message, commit.author, commit.committed_date))
if args.get_dataset_stats:
print("\nComputing image mean and std...")
args.img_mean, args.img_std = get_dataset_stats(args)
print('Mean: {}'.format(args.img_mean))
print('Std: {}'.format(args.img_std))
print("\nLoading data-augmentation scheme...")
transformers = transformer_factory.get_transformers(
args.image_transformers, args.tensor_transformers, args)
test_transformers = transformer_factory.get_transformers(
args.test_image_transformers, args.test_tensor_transformers, args)
# Load dataset and add dataset specific information to args
print("\nLoading data...")
train_data, dev_data, test_data = dataset_factory.get_dataset(args, transformers, test_transformers)
# Load model and add model specific information to args
if args.snapshot is None:
model = model_factory.get_model(args)
else:
model = model_factory.load_model(args.snapshot, args)
if args.replace_snapshot_pool:
non_trained_model = model_factory.get_model(args)
model._model.pool = non_trained_model._model.pool
model._model.args = non_trained_model._model.args
print(model)
# Load run parameters if resuming that run.
args.model_path = state.get_model_path(args)
print('Trained model will be saved to [%s]' % args.model_path)
if args.resume:
try:
state_keeper = state.StateKeeper(args)
model, optimizer_state, epoch, lr, epoch_stats = state_keeper.load()
args.optimizer_state = optimizer_state
args.current_epoch = epoch
args.lr = lr
args.epoch_stats = epoch_stats
except:
args.optimizer_state = None
args.current_epoch = None
args.lr = None
args.epoch_stats = None
print("\n Error loading previous state. \n Starting run from scratch.")
else:
print("\n Restarting run from scratch.")
print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
if attr not in ['optimizer_state', 'patient_to_partition_dict', 'path_to_hidden_dict', 'exam_to_year_dict', 'exam_to_device_dict']:
print("\t{}={}".format(attr.upper(), value))
save_path = args.results_path
print()
if args.train:
epoch_stats, model = train.train_model(train_data, dev_data, model, args)
args.epoch_stats = epoch_stats
if args.plot_losses:
visualize.viz_utils.plot_losses(epoch_stats)
print("Save train/dev results to {}".format(save_path))
args_dict = vars(args)
pickle.dump(args_dict, open(save_path, 'wb'))
print()
if args.dev:
print("-------------\nDev")
args.dev_stats = train.compute_threshold_and_dev_stats(dev_data, model, args)
print("Save dev results to {}".format(save_path))
args_dict = vars(args)
pickle.dump(args_dict, open(save_path, 'wb'))
if args.test:
print("-------------\nTest")
args.test_stats = train.eval_model(test_data, model, args)
print("Save test results to {}".format(save_path))
args_dict = vars(args)
pickle.dump(args_dict, open(save_path, 'wb'))
if (args.dev or args.test) and args.prediction_save_path is not None:
exams, probs = [], []
if args.dev:
exams.extend( args.dev_stats['exams'])
probs.extend( args.dev_stats['probs'])
if args.test:
exams.extend( args.test_stats['exams'])
probs.extend( args.test_stats['probs'])
legend = ['patient_exam_id']
if args.callibrator_snapshot is not None:
callibrator = pickle.load(open(args.callibrator_snapshot,'rb'))
for i in range(args.max_followup):
legend.append("{}_year_risk".format(i+1))
export = {}
with open(args.prediction_save_path,'w') as out_file:
writer = csv.DictWriter(out_file, fieldnames=legend)
writer.writeheader()
for exam, arr in zip(exams, probs):
export['patient_exam_id'] = exam
for i in range(args.max_followup):
key = "{}_year_risk".format(i+1)
raw_val = arr[i]
if args.callibrator_snapshot is not None:
val = callibrator[i].predict_proba([[raw_val]])[0,1]
else:
val = raw_val
export[key] = val
writer.writerow(export)
print("Exported predictions to {}".format(args.prediction_save_path))
| 39.262069 | 139 | 0.64711 |
5fd697d0a8702558d62ea13300b2ef20b5173732 | 1,126 | py | Python | app/admin/forms.py | chenke91/ihaveablog | 64000723589d3f5a074bd09f045cb5d6c3daf6dd | ["MIT"] | null | null | null | app/admin/forms.py | chenke91/ihaveablog | 64000723589d3f5a074bd09f045cb5d6c3daf6dd | ["MIT"] | null | null | null | app/admin/forms.py | chenke91/ihaveablog | 64000723589d3f5a074bd09f045cb5d6c3daf6dd | ["MIT"] | null | null | null |
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, TextAreaField, SelectField, FileField, SubmitField
from wtforms.validators import Required, Length, Email
from app import db
from app.models import Category
def get_category():
categories = db.session.query(Category.id, Category.name).all()
res = map(lambda x: (str(x[0]),x[1]), categories)
return list(res)
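# Translation note: the Chinese strings below are form labels and validator messages.
# 标题 = "Title", 栏目 = "Category", 摘要 = "Summary", 文章 = "Article body", 选择图片 = "Choose an image";
# 请输入标题 / 请输入摘要 / 请输入文章正文 = "please enter a title / summary / article body", 请上传图片 = "please upload an image".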
class EditBlogForm(Form):
title = StringField('标题', validators=[Required('请输入标题')])
category = SelectField('栏目', choices=get_category())
summary = TextAreaField('摘要', validators=[Required('请输入摘要'), Length(1, 600)])
blog_body = TextAreaField('文章', validators=[Required('请输入文章正文')])
submit = SubmitField('Submit')
class BlogForm(Form):
title = StringField('标题', validators=[Required('请输入标题')])
category = SelectField('栏目', choices=get_category())
avatars = FileField('选择图片', validators=[Required('请上传图片')])
summary = TextAreaField('摘要', validators=[Required('请输入摘要'), Length(1, 600)])
blog_body = TextAreaField('文章', validators=[Required('请输入文章正文')])
submit = SubmitField('Submit')
| 40.214286 | 96 | 0.71048 |
310beb7556e620e2e20fa143acb610fb6b182456 | 3,316 | py | Python | app/app/settings.py | Moudreaux/recipe-app-api | e70489b9afa0129b8d2560961ca4aea4f14bc11f | ["MIT"] | null | null | null | app/app/settings.py | Moudreaux/recipe-app-api | e70489b9afa0129b8d2560961ca4aea4f14bc11f | ["MIT"] | null | null | null | app/app/settings.py | Moudreaux/recipe-app-api | e70489b9afa0129b8d2560961ca4aea4f14bc11f | ["MIT"] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.11.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_mu2j0g9=t$gvvz)%oh#-z0ctt%9@9&+dew0lho_$ctph8!ig3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| 25.121212 | 91 | 0.686671 |
917729824d43f7037894f31d16d4b0fbcbb01d54 | 1,505 | py | Python | cumulusci/tasks/github/tag.py | jdominiczak/CumulusCI | f706c1906f9eb6d604c571a9dd16f5d0ed38599f | ["BSD-3-Clause"] | 1 | 2020-12-04T10:29:31.000Z | 2020-12-04T10:29:31.000Z | cumulusci/tasks/github/tag.py | jdominiczak/CumulusCI | f706c1906f9eb6d604c571a9dd16f5d0ed38599f | ["BSD-3-Clause"] | 157 | 2021-07-07T19:06:58.000Z | 2022-03-30T19:08:27.000Z | cumulusci/tasks/github/tag.py | jdominiczak/CumulusCI | f706c1906f9eb6d604c571a9dd16f5d0ed38599f | ["BSD-3-Clause"] | null | null | null |
from datetime import datetime
import github3.exceptions
from cumulusci.core.exceptions import GithubException
from cumulusci.tasks.github.base import BaseGithubTask
class CloneTag(BaseGithubTask):
task_options = {
"src_tag": {
"description": "The source tag to clone. Ex: beta/1.0-Beta_2",
"required": True,
},
"tag": {
"description": "The new tag to create by cloning the src tag. Ex: release/1.0",
"required": True,
},
}
def _run_task(self):
repo = self.get_repo()
ref = repo.ref("tags/{}".format(self.options["src_tag"]))
try:
src_tag = repo.tag(ref.object.sha)
except github3.exceptions.NotFoundError:
message = "Tag {} not found".format(self.options["src_tag"])
self.logger.error(message)
raise GithubException(message)
tag = repo.create_tag(
tag=self.options["tag"],
message="Cloned from {}".format(self.options["src_tag"]),
sha=src_tag.sha,
obj_type="commit",
tagger={
"name": self.github_config.username,
"email": self.github_config.email,
"date": "{}Z".format(datetime.utcnow().isoformat()),
},
)
self.logger.info(
"Tag {} created by cloning {}".format(
self.options["tag"], self.options["src_tag"]
)
)
return tag
| 30.1 | 92 | 0.546844 |
8a90634e34c67ada1ddddb90b45b4a77e096b0f2 | 1,971 | py | Python | bin/install.py | JulienPalard/cloudmesh | 1759b88daef3a13917492d028fdabe08f03ca996 | ["Apache-2.0"] | null | null | null | bin/install.py | JulienPalard/cloudmesh | 1759b88daef3a13917492d028fdabe08f03ca996 | ["Apache-2.0"] | 4 | 2021-06-08T20:20:08.000Z | 2022-03-11T23:30:22.000Z | bin/install.py | JulienPalard/cloudmesh | 1759b88daef3a13917492d028fdabe08f03ca996 | ["Apache-2.0"] | null | null | null |
#! /usr/bin/env python
"""
Usage:
cm-install -h | --help
cm-install --version
cm-install create OS
cm-install list
cm-install veewee
Arguments:
OS The operating system. [default: ubuntu]
Options:
Description:
start the install shell with
cm-install
cm-install> vagrant create ubuntu
creates an ubuntu 14.04 image with veewee and includes it in the base box list
cm-install> vagrant list
lists the base boxes in vagrant
"""
from docopt import docopt
import os
try:
from sh import ls
except:
os.system("pip install sh")
from sh import vagrant
def not_implemented():
print "ERROR: not yet implemented"
def get_boxes(kind):
lines = vagrant("box", "list")
boxes = []
for line in lines:
(OS, KIND) = line.split("(")
OS = OS.strip()
(KIND, rest) = KIND.split(")")
if kind == KIND:
boxes.append(OS)
return boxes
def list_boxes():
# print vagrant("box","list")
print get_boxes("virtualbox")
def exec_script(name):
    if name == "veewee":
os.system("./bin/install-veewee.sh")
    elif name == "ubuntu64":
os.system("./bin/install-ubuntu64.sh")
def cm_install_command(arguments):
print arguments
if arguments["create"]:
print "cm-install create"
operating_system = arguments['OS']
boxes = get_boxes("virtualbox")
if operating_system not in boxes:
print "The OS '%s' was not found in vagrant" % (operating_system)
            exec_script('ubuntu64')
else:
print "The OS '%s' was found in vagrant" % (operating_system)
exec_script('ubuntu64')
elif arguments["veewee"]:
exec_script('veewee')
elif arguments["list"]:
# cm-install vagrant list
print "list"
list_boxes()
if __name__ == '__main__':
arguments = docopt(__doc__)
cm_install_command(arguments)
| 20.319588 | 85 | 0.616438 |
0a524cb2046fc91eddfbf76c9538b210b0c97002 | 242 | py | Python | manage.py | Gomax-07/news07 | 063124b818543d26cb6e1e7fddf7bf81d9c59057 | ["MIT"] | null | null | null | manage.py | Gomax-07/news07 | 063124b818543d26cb6e1e7fddf7bf81d9c59057 | ["MIT"] | null | null | null | manage.py | Gomax-07/news07 | 063124b818543d26cb6e1e7fddf7bf81d9c59057 | ["MIT"] | null | null | null |
from app import create_app
from flask_script import Manager,Server
#creating app instance
app = create_app('development')
manager = Manager(app)
manager.add_command('server', Server)
if __name__ == '__main__':
manager.run()
| 17.285714 | 39 | 0.727273 |
fc3a7b45bd26c74e52e47af4a918eaca1f512c2d | 1,268 | py | Python | populationsim/tests/test_tracing.py | psrc/populationsim | 91615025726175a99c94963188ac38c045570d26 | ["BSD-3-Clause"] | 1 | 2019-06-12T21:35:39.000Z | 2019-06-12T21:35:39.000Z | populationsim/tests/test_tracing.py | BayAreaMetro/populationsim | 8307c5a53a4d84994a224058a201b8c4f42543b8 | ["BSD-3-Clause"] | 4 | 2018-11-06T20:54:46.000Z | 2018-11-06T21:01:52.000Z | populationsim/tests/test_tracing.py | BayAreaMetro/populationsim | 8307c5a53a4d84994a224058a201b8c4f42543b8 | ["BSD-3-Clause"] | null | null | null |
# ActivitySim
# See full license in LICENSE.txt.
import os.path
import logging
import pytest
import orca
import pandas as pd
import activitysim.core.tracing as tracing
def add_canonical_dirs():
configs_dir = os.path.join(os.path.dirname(__file__), 'configs')
orca.add_injectable("configs_dir", configs_dir)
output_dir = os.path.join(os.path.dirname(__file__), 'output')
orca.add_injectable("output_dir", output_dir)
def test_config_logger(capsys):
add_canonical_dirs()
tracing.config_logger()
logger = logging.getLogger('popsim')
file_handlers = [h for h in logger.handlers if type(h) is logging.FileHandler]
assert len(file_handlers) == 1
asim_logger_baseFilename = file_handlers[0].baseFilename
print "handlers:", logger.handlers
logger.info('test_config_logger')
logger.info('log_info')
logger.warn('log_warn1')
out, err = capsys.readouterr()
# don't consume output
print out
assert "could not find conf file" not in out
assert 'log_warn1' in out
assert 'log_info' not in out
with open(asim_logger_baseFilename, 'r') as content_file:
content = content_file.read()
print content
assert 'log_warn1' in content
assert 'log_info' in content
| 22.642857 | 82 | 0.712145 |
40e8c588c248af2891aca527b6ac7af2360a9834 | 4,174 | py | Python | Apache-Beam-Dataflow-Streaming-Pipeline/Dataflow-Streaming-Pubsub-to-BQTable.py | vibwipro/GCP-Python | f97741efe0b0a96d2c69279d3cf6c404f4f0ca20 | ["MIT"] | null | null | null | Apache-Beam-Dataflow-Streaming-Pipeline/Dataflow-Streaming-Pubsub-to-BQTable.py | vibwipro/GCP-Python | f97741efe0b0a96d2c69279d3cf6c404f4f0ca20 | ["MIT"] | null | null | null | Apache-Beam-Dataflow-Streaming-Pipeline/Dataflow-Streaming-Pubsub-to-BQTable.py | vibwipro/GCP-Python | f97741efe0b0a96d2c69279d3cf6c404f4f0ca20 | ["MIT"] | null | null | null |
#------------Import Lib-----------------------#
import apache_beam as beam
from apache_beam import window
from apache_beam.options.pipeline_options import PipelineOptions, StandardOptions
import os, sys, time
import argparse
import logging
from apache_beam.options.pipeline_options import SetupOptions
from datetime import datetime
#------------Set up BQ parameters-----------------------#
# Replace with Project Id
project = 'xxxxxxxxxxx'
Pubsub_subscription='projects/xxxxxxxxxxx/subscriptions/Pubsubdemo_subscription'
#------------Splitting Of Records----------------------#
class Transaction_ECOM(beam.DoFn):
def process(self, element):
logging.info(element)
result = json.loads(element)
data_bkt = result.get('_bkt','null')
data_cd=result.get('_cd','null')
data_indextime=result.get('_indextime','0')
data_kv=result.get('_kv','null')
data_raw=result['_raw']
data_raw1=data_raw.replace("\n", "")
data_serial=result.get('_serial','null')
data_si = str(result.get('_si','null'))
data_sourcetype =result.get('_sourcetype','null')
data_subsecond = result.get('_subsecond','null')
data_time=result.get('_time','null')
data_host=result.get('host','null')
data_index=result.get('index','null')
data_linecount=result.get('linecount','null')
data_source=result.get('source','null')
data_sourcetype1=result.get('sourcetype','null')
data_splunk_server=result.get('splunk_server','null')
return [{"datetime_indextime": time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(int(data_indextime))), "_bkt": data_bkt, "_cd": data_cd, "_indextime": data_indextime, "_kv": data_kv, "_raw": data_raw1, "_serial": data_serial, "_si": data_si, "_sourcetype": data_sourcetype, "_subsecond": data_subsecond, "_time": data_time, "host": data_host, "index": data_index, "linecount": data_linecount, "source": data_source, "sourcetype": data_sourcetype1, "splunk_server": data_splunk_server}]
def run(argv=None, save_main_session=True):
parser = argparse.ArgumentParser()
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args, streaming=True)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
p1 = beam.Pipeline(options=pipeline_options)
data_loading = (
p1
| "Read Pub/Sub Messages" >> beam.io.ReadFromPubSub(subscription=Pubsub_subscription)
)
project_id = "xxxxxxxxxxx"
dataset_id = 'test123'
table_schema_ECOM = ('datetime_indextime:DATETIME, _bkt:STRING, _cd:STRING, _indextime:STRING, _kv:STRING, _raw:STRING, _serial:STRING, _si:STRING, _sourcetype:STRING, _subsecond:STRING, _time:STRING, host:STRING, index:STRING, linecount:STRING, source:STRING, sourcetype:STRING, splunk_server:STRING')
# Persist to BigQuery
# WriteToBigQuery accepts the data as list of JSON objects
#---------------------Index = ITF----------------------------------------------------------------------------------------------------------------------
result = (
data_loading
| 'Clean-ITF' >> beam.ParDo(Transaction_ECOM())
| 'Write-ITF' >> beam.io.WriteToBigQuery(
table='CFF_ABC',
dataset=dataset_id,
project=project_id,
schema=table_schema_ECOM,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
))
result = p1.run()
result.wait_until_finish()
if __name__ == '__main__':
path_service_account = '/home/vibhg/Splunk/CFF/xxxxxxxxxxx-abcder125.json'
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = path_service_account
run()
| 45.369565 | 500 | 0.602779 |
bc56d7e072ed6fb255c5d1193466c747235867fc | 3,093 | py | Python | isaactest/tests/numeric_q_incorrect_value.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | ["MIT"] | null | null | null | isaactest/tests/numeric_q_incorrect_value.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | ["MIT"] | 1 | 2016-01-15T11:28:06.000Z | 2016-01-25T17:09:18.000Z | isaactest/tests/numeric_q_incorrect_value.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | ["MIT"] | 1 | 2019-05-14T16:53:49.000Z | 2019-05-14T16:53:49.000Z |
import time
from ..utils.log import log, INFO, ERROR, PASS
from ..utils.isaac import answer_numeric_q, open_accordion_section
from ..utils.i_selenium import assert_tab, image_div
from ..utils.i_selenium import wait_for_xpath_element
from ..tests import TestWithDependency
from selenium.common.exceptions import TimeoutException, NoSuchElementException
__all__ = ["numeric_q_incorrect_value"]
#####
# Test : Numeric Questions Correct Unit, Incorrect Value
#####
@TestWithDependency("NUMERIC_Q_INCORRECT_VALUE", ["NUMERIC_Q_ANSWER_CHANGE"])
def numeric_q_incorrect_value(driver, ISAAC_WEB, WAIT_DUR, **kwargs):
"""Test numeric question behaviour on incorrect value.
- 'driver' should be a Selenium WebDriver.
- 'ISAAC_WEB' is the string URL of the Isaac website to be tested.
- 'WAIT_DUR' is the time in seconds to wait for JavaScript to run/load.
"""
assert_tab(driver, ISAAC_WEB + "/questions/_regression_test_")
time.sleep(WAIT_DUR)
try:
open_accordion_section(driver, 3)
num_question = driver.find_element_by_xpath("//div[@ng-switch-when='isaacNumericQuestion']")
except NoSuchElementException:
log(ERROR, "Can't find the numeric question; can't continue!")
return False
log(INFO, "Attempt to enter unknown incorrect value, to correct sig figs and correct units.")
if not answer_numeric_q(num_question, "4.33", "\units{ m\,s^{-1} }", wait_dur=WAIT_DUR):
log(ERROR, "Couldn't answer Numeric Question; can't continue!")
return False
time.sleep(WAIT_DUR)
try:
wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//h2[text()='Incorrect']")
log(INFO, "An 'Incorrect' message was displayed as expected.")
wait_for_xpath_element(driver, "(//div[@ng-switch-when='isaacNumericQuestion']//p[text()='Check your working.'])[1]")
log(INFO, "The 'Check your working.' message was correctly shown.")
wait_for_xpath_element(driver, "//div[@ng-switch-when='isaacNumericQuestion']//h5[text()='Please try again.']")
log(INFO, "The 'Please try again.' message was correctly shown.")
bg_colour = num_question.find_element_by_xpath("(.//div[@class='ru-answer-block-panel'])[1]").value_of_css_property('background-color')
assert bg_colour in ['#be4c4c', 'rgba(190, 76, 76, 1)', 'rgb(190, 76, 76)']
log(INFO, "Red highlighting shown around value box.")
log(INFO, "Avoid rate limiting: wait 1 minute.")
time.sleep(60)
log(PASS, "Numeric Question 'correct unit, incorrect value' behavior as expected.")
return True
except TimeoutException:
image_div(driver, "ERROR_numeric_q_incorrect_value")
log(ERROR, "The messages shown for an incorrect value were not all displayed; see 'ERROR_numeric_q_incorrect_value.png'!")
return False
except AssertionError:
image_div(driver, "ERROR_numeric_q_incorrect_value")
log(ERROR, "The units box was not highlighted red; see 'ERROR_numeric_q_incorrect_value.png'!")
return False
| 51.55 | 143 | 0.705787 |
42d86b89d7b46dbaaeeaa6ec08dbcfca55d674cf | 755 | py | Python | np/reference/ch2code/record.py | focusunsink/study_python | 322326642db54df8725793d70a95d21ac40b6507 | ["MIT"] | null | null | null | np/reference/ch2code/record.py | focusunsink/study_python | 322326642db54df8725793d70a95d21ac40b6507 | ["MIT"] | null | null | null | np/reference/ch2code/record.py | focusunsink/study_python | 322326642db54df8725793d70a95d21ac40b6507 | ["MIT"] | null | null | null |
import numpy as np
# Chapter 2 Beginning with NumPy fundamentals
#
# Demonstrates the NumPy record data type.
#
# Run from the commandline with
#
# python record.py
print "In: t = dtype([('name', numpy.str_, 40), ('numitems', numpy.int32), ('price', numpy.float32)])"
t = np.dtype([('name', np.str_, 40), ('numitems', np.int32), ('price', np.float32)])
print t
#Out: dtype([('name', '|S40'), ('numitems', '<i4'), ('price', '<f4')])
print "In: t['name']"
print t['name']
#Out: dtype('|S40')
print "In: itemz = array([('Meaning of life DVD', 42, 3.14), ('Butter', 13, 2.72)], dtype=t)"
itemz = np.array([('Meaning of life DVD', 42, 3.14), ('Butter', 13, 2.72)], dtype=t)
print "In: itemz[1]"
print itemz[1]
#Out: ('Butter', 13, 2.7200000286102295)
| 26.964286 | 102 | 0.617219 |
7844dc96f8ba2da02aa3c07830801e0725f49dea | 599 | py | Python | leonardo_module_gdpr/__init__.py | dresl/leonardo-module-gdpr | 8b5c74d5709dd96d7b45e2f91f90166237cc503b | ["BSD-3-Clause"] | 1 | 2019-01-27T23:42:06.000Z | 2019-01-27T23:42:06.000Z | leonardo_module_gdpr/__init__.py | dresl/leonardo-module-gdpr | 8b5c74d5709dd96d7b45e2f91f90166237cc503b | ["BSD-3-Clause"] | null | null | null | leonardo_module_gdpr/__init__.py | dresl/leonardo-module-gdpr | 8b5c74d5709dd96d7b45e2f91f90166237cc503b | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
default_app_config = 'leonardo_module_gdpr.Config'
class Default(object):
optgroup = 'GDPR'
apps = [
'leonardo_module_gdpr'
]
widgets = [
'leonardo_module_gdpr.widget.gdpr.models.GDPRWidget'
]
config = {
'GOOGLE_ANALYTICS_INFO':
(True, u"Display google analytics info"),
}
public = True
class Config(AppConfig, Default):
name = 'leonardo_module_gdpr'
verbose_name = u"GDPR module"
default = Default()
| 17.617647 | 60 | 0.657763 |
b2b5e41fe792700a9109d11518495711c718ee88 | 5,131 | py | Python | src/infi/storagemodel/vendor/infinidat/predicates/__init__.py | Infinidat/infi.storagemodel | 81740970b5b1c0a691472f2e360d3a6e5c4d0875 | ["Python-2.0", "BSD-3-Clause"] | 6 | 2015-07-29T11:22:36.000Z | 2019-01-22T19:07:42.000Z | src/infi/storagemodel/vendor/infinidat/predicates/__init__.py | Infinidat/infi.storagemodel | 81740970b5b1c0a691472f2e360d3a6e5c4d0875 | ["Python-2.0", "BSD-3-Clause"] | null | null | null | src/infi/storagemodel/vendor/infinidat/predicates/__init__.py | Infinidat/infi.storagemodel | 81740970b5b1c0a691472f2e360d3a6e5c4d0875 | ["Python-2.0", "BSD-3-Clause"] | 3 | 2015-01-05T13:55:38.000Z | 2018-07-07T05:05:36.000Z |
from infi.storagemodel.predicates import FiberChannelMappingExists, FiberChannelMappingNotExists
from logging import getLogger
log = getLogger(__name__)
def compare_device_system_and_id(device, system_serial, volume_id):
''' checks if given device is from a specific volume from a specific system '''
from infi.storagemodel.base.multipath import MultipathBlockDevice
from infi.storagemodel.vendor.infinidat.infinibox.connectivity import get_system_serial_from_path
vendor = device.get_vendor()
log_msg = "checking if device {} from system serial {} and volume id {} is from system serial {} with volume id {}"
log.debug(log_msg.format(device.get_display_name(), vendor.get_system_serial(), vendor.get_volume_id(), system_serial, volume_id))
if vendor.get_replication_type() == 'ACTIVE_ACTIVE' and isinstance(device, MultipathBlockDevice):
replication_mapping = vendor.get_replication_mapping()
log.debug("device is A/A replicated. mapping={}".format(replication_mapping))
if (system_serial in replication_mapping and
replication_mapping[system_serial].id == volume_id):
# device is replicated to system_serial with volume_id but may not be mapped to the host
return any(get_system_serial_from_path(path) == system_serial
for path in device.get_paths())
# if the device is single-path or not under A/A replication it's ok to check by SCSI inquiry
# because we'll always inquire the same, single system
elif (volume_id == vendor.get_volume_id() and
system_serial == vendor.get_system_serial()):
return True
return False
class InfinidatVolumeExists(object):
"""A predicate that checks if an Infinidat volume exists"""
def __init__(self, system_serial, volume_id):
self.system_serial = system_serial
self.volume_id = volume_id
def __call__(self):
from ..shortcuts import get_infinidat_block_devices
from infi.instruct.errors import InstructError
from infi.asi.errors import AsiException
devices_to_query = get_infinidat_block_devices()
log.debug("Looking for Infinidat volume id {} from system id {}".format(self.volume_id, self.system_serial))
for device in devices_to_query:
device.get_scsi_test_unit_ready()
try:
# As some vendors seem to be inconsistent with the designators passed within the pages, using
# vendor-specifc pages seems more safe:
if 0xc6 not in device.get_scsi_inquiry_pages():
log.debug("No vendor-specific page 0xc6 for device {!r}, returning False now as this should be fixed by rescan".format(device))
return False
except (AsiException, InstructError):
log.exception("failed to identify INFINIDAT volume, returning False now as this should be fixed by rescan")
return False
return any(compare_device_system_and_id(device, self.system_serial, self.volume_id)
for device in devices_to_query)
def __repr__(self):
return "<{}(system_serial={!r}, volume_id={!r})>".format(self.__class__.__name__, self.system_serial, self.volume_id)
class InfinidatVolumeDoesNotExist(InfinidatVolumeExists):
"""A predicate that checks if an Infinidat volume does not exist"""
def __call__(self):
return not super(InfinidatVolumeDoesNotExist, self).__call__()
class FiberChannelMappingExistsUsingLinuxSG(FiberChannelMappingExists):
def _get_chain_of_devices(self, model):
from itertools import chain
return chain(model.get_scsi().get_all_linux_scsi_generic_disk_devices(),
model.get_scsi().get_all_storage_controller_devices())
def __repr__(self):
return "<{}: {!r}>".format(self.__class__.__name__, self.connectivity)
class FiberChannelMappingNotExistsUsingLinuxSG(FiberChannelMappingExistsUsingLinuxSG):
def __call__(self):
return not super(FiberChannelMappingNotExistsUsingLinuxSG, self).__call__()
def get_predicate_for_checking_non_zero_host_id(system_serial, cluster_id=False):
def all_storage_devices_on_logical_unit_0_of_specific_box_show_non_zero_host_id():
from infi.storagemodel.vendor.infinidat.shortcuts import get_infinidat_block_devices_and_controllers__mapped_to_lun0
from infi.storagemodel.errors import RescanIsNeeded
devices = []
try:
devices.extend(get_infinidat_block_devices_and_controllers__mapped_to_lun0())
except RescanIsNeeded:
pass
for device in devices:
if cluster_id:
if device.get_vendor().get_system_serial() == system_serial and device.get_vendor().get_cluster_id() == 0:
return False
else:
if device.get_vendor().get_system_serial() == system_serial and device.get_vendor().get_host_id() == 0:
return False
return True
return all_storage_devices_on_logical_unit_0_of_specific_box_show_non_zero_host_id
| 50.303922 | 147 | 0.714286 |
677b7b89f53764838bbfe32f803526c0d0b7009f | 2,225 | py | Python | test/visuals/runner.py | ruohoruotsi/coldtype | 13993e5a4fa3f99c6800fed2496bd5a374e4f53f | ["Apache-2.0"] | null | null | null | test/visuals/runner.py | ruohoruotsi/coldtype | 13993e5a4fa3f99c6800fed2496bd5a374e4f53f | ["Apache-2.0"] | null | null | null | test/visuals/runner.py | ruohoruotsi/coldtype | 13993e5a4fa3f99c6800fed2496bd5a374e4f53f | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
from coldtype import *
from coldtype.renderer import Renderer
import glfw
all_tests = []
sources = list(Path("test/visuals").glob("test_*.py"))
sources.extend(list(Path("test/visuals").glob("test_*.md")))
for p in sources:
if not p.name.startswith("_"):
all_tests.append(p)
all_tests = sorted(all_tests)
class TestRunner(Renderer):
def before_start(self):
if self.args.test:
self.test_index = all_tests.index(Path(self.args.test))
else:
self.test_index = 0
self.load_test(0, rerender=False)
def on_message(self, message, action):
if action == "next_test":
self.load_test(+1)
elif action == "prev_test":
self.load_test(-1)
else:
super().on_message(message, action)
def shortcuts(self):
xs = super().shortcuts()
xs["prev_test"] = [[[glfw.MOD_SUPER], glfw.KEY_B]]
xs["next_test"] = [[[], glfw.KEY_N]]
return xs
def shortcut_to_action(self, shortcut):
if shortcut == "prev_test":
return self.load_test(-1)
elif shortcut == "next_test":
return self.load_test(+1)
else:
return super().shortcut_to_action(shortcut)
def load_test(self, inc, rerender=True):
self.test_index = cycle_idx(all_tests, self.test_index + inc)
test_path = all_tests[self.test_index]
print("---" * 20)
print(">>>", test_path)
self.reset_filepath(str(test_path))
if rerender:
self.reload_and_render(Action.PreviewStoryboard)
self.set_title(str(test_path))
return -1
def restart(self):
print("----------------------------")
args = sys.argv
test_path = str(all_tests[self.test_index])
if len(args) > 1:
args[-1] = test_path
else:
args.append(test_path)
print(sys.executable, ["-m"]+args)
os.execl(sys.executable, *(["-m"]+args))
def main():
pargs, parser = Renderer.Argparser(name="pb.py", file=False, nargs=[["test", None]])
TestRunner(parser).main()
if __name__ == "__main__":
main() | 29.276316 | 88 | 0.576629 |
bc0fc170ae765be681aadab73297915b8fbe2bc4 | 3,799 | py | Python | cloudvolume/provenance.py | SridharJagannathan/cloud-volume | ae4e5d8f245aacf451404e91f75e6da5182ac090 | ["BSD-3-Clause"] | 94 | 2017-09-03T16:18:34.000Z | 2022-03-31T14:49:14.000Z | cloudvolume/provenance.py | SridharJagannathan/cloud-volume | ae4e5d8f245aacf451404e91f75e6da5182ac090 | ["BSD-3-Clause"] | 378 | 2017-08-30T17:46:45.000Z | 2022-03-31T00:15:04.000Z | cloudvolume/provenance.py | SridharJagannathan/cloud-volume | ae4e5d8f245aacf451404e91f75e6da5182ac090 | ["BSD-3-Clause"] | 40 | 2018-03-01T09:12:02.000Z | 2022-02-08T17:57:37.000Z |
import python_jsonschema_objects as pjs
import json
import json5
__all__ = [ 'DatasetProvenance', 'DataLayerProvenance' ]
dataset_provenance_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Dataset Provenance",
"description": "Represents a dataset and its derived data layers.",
"required": [
"dataset_name", "dataset_description",
"organism", "imaged_date", "imaged_by",
"owners"
],
"properties": {
'dataset_name': { 'type': 'string' },
'dataset_description': { 'type': 'string' },
'organism': {
'type': 'string',
'description': 'Species, sex, strain identifier',
},
'imaged_date': { 'type': 'string' },
'imaged_by': { 'type': 'string' },
'references': { # e.g. dois, urls, titles
"type": "array",
"items": {
"type": "string"
},
"minItems": 0,
"uniqueItems": True, # e.g. email addresses
},
'owners': {
"type": "array",
"items": {
"type": "string"
},
"minItems": 1,
"uniqueItems": True, # e.g. email addresses
}
}
}
builder = pjs.ObjectBuilder(dataset_provenance_schema)
classes = builder.build_classes()
DatasetProvenance = classes.DatasetProvenance
layer_provenance_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Data Layer Provenance",
"description": "Represents a data layer within a dataset. e.g. image, segmentation, etc",
"required": [
'description', 'sources',
'processing', 'owners'
],
"properties": {
'description': { 'type': 'string' },
'sources': { # e.g. [ 'gs://neuroglancer/pinky40_v11/image'
"type": "array",
"items": {
"type": "string"
},
"minItems": 0,
"uniqueItems": True,
},
'processing': {
"type": "array",
"items": {
"type": "object"
},
"minItems": 0,
},
# e.g. processing = [
# { 'method': 'inceptionnet', 'by': 'example@princeton.edu' },
# { 'method': 'downsample', 'by': 'example2@princeton.edu', 'description': 'demo of countless downsampling' }
# { 'method': 'meshing', 'by': 'example2@princeton.edu', 'description': '512x512x512 mip 3 simplification factor 30' }
# ]
'owners': {
"type": "array",
"items": {
"type": "string"
},
"minItems": 0,
"uniqueItems": True,
},
}
}
builder = pjs.ObjectBuilder(layer_provenance_schema)
classes = builder.build_classes()
DataLayerProvenanceValidation = classes.DataLayerProvenance
class DataLayerProvenance(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
if 'description' not in self:
self['description'] = ''
if 'owners' not in self:
self['owners'] = []
if 'processing' not in self:
self['processing'] = []
if 'sources' not in self:
self['sources'] = ''
def validate(self):
DataLayerProvenanceValidation(**self).validate()
def serialize(self):
return json.dumps(self)
def from_json(self, data):
data = json5.loads(data)
self.update(data)
return self
@classmethod
def create(cls, mydict):
return DatasetProvenance(**mydict)
@property
def description(self):
return self['description']
@description.setter
def description(self, val):
self['description'] = val
@property
def owners(self):
return self['owners']
@owners.setter
def owners(self, val):
self['owners'] = val
@property
def processing(self):
return self['processing']
@processing.setter
def processing(self, val):
self['processing'] = val
@property
def sources(self):
return self['sources']
@sources.setter
def sources(self, val):
self['sources'] = val
| 24.044304 | 125 | 0.59621 |
f14ab93ef7821278c1d5db3574d58d8956117da7 | 1,015 | py | Python | usuarios/migrations/0016_auto_20180201_2122.py | WFPColombia/nutrifami-users | 9441aada0f185442f9b14311f78111eb8d377463 | ["MIT"] | null | null | null | usuarios/migrations/0016_auto_20180201_2122.py | WFPColombia/nutrifami-users | 9441aada0f185442f9b14311f78111eb8d377463 | ["MIT"] | null | null | null | usuarios/migrations/0016_auto_20180201_2122.py | WFPColombia/nutrifami-users | 9441aada0f185442f9b14311f78111eb8d377463 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-01 21:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('usuarios', '0015_auto_20180123_2350'),
]
operations = [
migrations.AlterModelOptions(
name='trainee',
options={'verbose_name_plural': 'Trainees'},
),
migrations.AlterModelOptions(
name='traineeadvance',
options={'verbose_name_plural': 'Trainees Advances'},
),
migrations.AddField(
model_name='trainee',
name='date',
field=models.DateTimeField(
auto_now_add=True, default='2018-02-01'),
preserve_default=False,
),
migrations.AlterField(
model_name='trainee',
name='name',
field=models.CharField(default=1, max_length=45),
preserve_default=False,
),
]
| 27.432432 | 65 | 0.576355 |
38501c0d0171c698b38f44caf2693de7e572b121 | 25,820 | py | Python | .venv/lib/python3.8/site-packages/yapf/yapflib/unwrapped_line.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | ["MIT"] | 5 | 2018-07-02T19:10:39.000Z | 2021-09-27T04:05:10.000Z | .venv/lib/python3.8/site-packages/yapf/yapflib/unwrapped_line.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | ["MIT"] | 3 | 2018-06-10T06:28:06.000Z | 2021-09-24T13:54:19.000Z | .venv/lib/python3.8/site-packages/yapf/yapflib/unwrapped_line.py | eo1989/VectorBTanalysis | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | ["MIT"] | 1 | 2021-12-25T16:03:43.000Z | 2021-12-25T16:03:43.000Z |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UnwrappedLine primitive for formatting.
An unwrapped line is the containing data structure produced by the parser. It
collects all nodes (stored in FormatToken objects) that could appear on a
single line if there were no line length restrictions. It's then used by the
parser to perform the wrapping required to comply with the style guide.
"""
from yapf.yapflib import format_token
from yapf.yapflib import py3compat
from yapf.yapflib import pytree_utils
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from lib2to3.fixer_util import syms as python_symbols
class UnwrappedLine(object):
"""Represents a single unwrapped line in the output.
Attributes:
depth: indentation depth of this line. This is just a numeric value used to
distinguish lines that are more deeply nested than others. It is not the
actual amount of spaces, which is style-dependent.
"""
def __init__(self, depth, tokens=None):
"""Constructor.
    Creates a new unwrapped line with the given depth and an initial list of tokens.
Constructs the doubly-linked lists for format tokens using their built-in
next_token and previous_token attributes.
Arguments:
depth: indentation depth of this line
tokens: initial list of tokens
"""
self.depth = depth
self._tokens = tokens or []
self.disable = False
if self._tokens:
# Set up a doubly linked list.
for index, tok in enumerate(self._tokens[1:]):
# Note, 'index' is the index to the previous token.
tok.previous_token = self._tokens[index]
self._tokens[index].next_token = tok
def CalculateFormattingInformation(self):
"""Calculate the split penalty and total length for the tokens."""
# Say that the first token in the line should have a space before it. This
# means only that if this unwrapped line is joined with a predecessor line,
# then there will be a space between them.
self.first.spaces_required_before = 1
self.first.total_length = len(self.first.value)
prev_token = self.first
prev_length = self.first.total_length
for token in self._tokens[1:]:
if (token.spaces_required_before == 0 and
_SpaceRequiredBetween(prev_token, token, self.disable)):
token.spaces_required_before = 1
tok_len = len(token.value) if not token.is_pseudo_paren else 0
spaces_required_before = token.spaces_required_before
if isinstance(spaces_required_before, list):
assert token.is_comment, token
# If here, we are looking at a comment token that appears on a line
# with other tokens (but because it is a comment, it is always the last
# token). Rather than specifying the actual number of spaces here,
# hard code a value of 0 and then set it later. This logic only works
# because this comment token is guaranteed to be the last token in the
# list.
spaces_required_before = 0
token.total_length = prev_length + tok_len + spaces_required_before
# The split penalty has to be computed before {must|can}_break_before,
# because these may use it for their decision.
token.split_penalty += _SplitPenalty(prev_token, token)
token.must_break_before = _MustBreakBefore(prev_token, token)
token.can_break_before = (
token.must_break_before or _CanBreakBefore(prev_token, token))
prev_length = token.total_length
prev_token = token
def Split(self):
"""Split the line at semicolons."""
if not self.has_semicolon or self.disable:
return [self]
uwlines = []
uwline = UnwrappedLine(self.depth)
for tok in self._tokens:
if tok.value == ';':
uwlines.append(uwline)
uwline = UnwrappedLine(self.depth)
else:
uwline.AppendToken(tok)
if uwline.tokens:
uwlines.append(uwline)
for uwline in uwlines:
pytree_utils.SetNodeAnnotation(uwline.first.node,
pytree_utils.Annotation.MUST_SPLIT, True)
uwline.first.previous_token = None
uwline.last.next_token = None
return uwlines
############################################################################
# Token Access and Manipulation Methods #
############################################################################
def AppendToken(self, token):
"""Append a new FormatToken to the tokens contained in this line."""
if self._tokens:
token.previous_token = self.last
self.last.next_token = token
self._tokens.append(token)
def AppendNode(self, node):
"""Convenience method to append a pytree node directly.
Wraps the node with a FormatToken.
Arguments:
node: the node to append
"""
self.AppendToken(format_token.FormatToken(node))
@property
def first(self):
"""Returns the first non-whitespace token."""
return self._tokens[0]
@property
def last(self):
"""Returns the last non-whitespace token."""
return self._tokens[-1]
############################################################################
# Token -> String Methods #
############################################################################
def AsCode(self, indent_per_depth=2):
"""Return a "code" representation of this line.
The code representation shows how the line would be printed out as code.
TODO(eliben): for now this is rudimentary for debugging - once we add
formatting capabilities, this method will have other uses (not all tokens
have spaces around them, for example).
Arguments:
      indent_per_depth: how many spaces to indent per depth level.
Returns:
A string representing the line as code.
"""
indent = ' ' * indent_per_depth * self.depth
tokens_str = ' '.join(tok.value for tok in self._tokens)
return indent + tokens_str
def __str__(self): # pragma: no cover
return self.AsCode()
def __repr__(self): # pragma: no cover
tokens_repr = ','.join(
['{0}({1!r})'.format(tok.name, tok.value) for tok in self._tokens])
return 'UnwrappedLine(depth={0}, tokens=[{1}])'.format(
self.depth, tokens_repr)
############################################################################
# Properties #
############################################################################
@property
def tokens(self):
"""Access the tokens contained within this line.
The caller must not modify the tokens list returned by this method.
Returns:
List of tokens in this line.
"""
return self._tokens
@property
def lineno(self):
"""Return the line number of this unwrapped line.
Returns:
The line number of the first token in this unwrapped line.
"""
return self.first.lineno
@property
def is_comment(self):
return self.first.is_comment
@property
def has_semicolon(self):
return any(tok.value == ';' for tok in self._tokens)
def _IsIdNumberStringToken(tok):
return tok.is_keyword or tok.is_name or tok.is_number or tok.is_string
def _IsUnaryOperator(tok):
return format_token.Subtype.UNARY_OPERATOR in tok.subtypes
def _HasPrecedence(tok):
"""Whether a binary operation has precedence within its context."""
node = tok.node
# We let ancestor be the statement surrounding the operation that tok is the
# operator in.
ancestor = node.parent.parent
while ancestor is not None:
# Search through the ancestor nodes in the parse tree for operators with
# lower precedence.
predecessor_type = pytree_utils.NodeName(ancestor)
if predecessor_type in ['arith_expr', 'term']:
# An ancestor "arith_expr" or "term" means we have found an operator
# with lower precedence than our tok.
return True
if predecessor_type != 'atom':
# We understand the context to look for precedence within as an
# arbitrary nesting of "arith_expr", "term", and "atom" nodes. If we
# leave this context we have not found a lower precedence operator.
return False
# Under normal usage we expect a complete parse tree to be available and
# we will return before we get an AttributeError from the root.
ancestor = ancestor.parent
def _PriorityIndicatingNoSpace(tok):
"""Whether to remove spaces around an operator due to precedence."""
if not tok.is_arithmetic_op or not tok.is_simple_expr:
# Limit space removal to highest priority arithmetic operators
return False
return _HasPrecedence(tok)
def _IsSubscriptColonAndValuePair(token1, token2):
return (token1.is_number or token1.is_name) and token2.is_subscript_colon
def _SpaceRequiredBetween(left, right, is_line_disabled):
"""Return True if a space is required between the left and right token."""
lval = left.value
rval = right.value
if (left.is_pseudo_paren and _IsIdNumberStringToken(right) and
left.previous_token and _IsIdNumberStringToken(left.previous_token)):
# Space between keyword... tokens and pseudo parens.
return True
if left.is_pseudo_paren or right.is_pseudo_paren:
# There should be a space after the ':' in a dictionary.
if left.OpensScope():
return True
# The closing pseudo-paren shouldn't affect spacing.
return False
if left.is_continuation or right.is_continuation:
# The continuation node's value has all of the spaces it needs.
return False
if right.name in pytree_utils.NONSEMANTIC_TOKENS:
# No space before a non-semantic token.
return False
if _IsIdNumberStringToken(left) and _IsIdNumberStringToken(right):
# Spaces between keyword, string, number, and identifier tokens.
return True
if lval == ',' and rval == ':':
# We do want a space between a comma and colon.
return True
if style.Get('SPACE_INSIDE_BRACKETS'):
# Supersede the "no space before a colon or comma" check.
if lval in pytree_utils.OPENING_BRACKETS and rval == ':':
return True
if rval in pytree_utils.CLOSING_BRACKETS and lval == ':':
return True
if (style.Get('SPACES_AROUND_SUBSCRIPT_COLON') and
(_IsSubscriptColonAndValuePair(left, right) or
_IsSubscriptColonAndValuePair(right, left))):
# Supersede the "never want a space before a colon or comma" check.
return True
if rval in ':,':
# Otherwise, we never want a space before a colon or comma.
return False
if lval == ',' and rval in ']})':
# Add a space between ending ',' and closing bracket if requested.
return style.Get('SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET')
if lval == ',':
# We want a space after a comma.
return True
if lval == 'from' and rval == '.':
# Space before the '.' in an import statement.
return True
if lval == '.' and rval == 'import':
# Space after the '.' in an import statement.
return True
if (lval == '=' and rval == '.' and
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN not in left.subtypes):
# Space between equal and '.' as in "X = ...".
return True
if ((right.is_keyword or right.is_name) and
(left.is_keyword or left.is_name)):
# Don't merge two keywords/identifiers.
return True
if (format_token.Subtype.SUBSCRIPT_COLON in left.subtypes or
format_token.Subtype.SUBSCRIPT_COLON in right.subtypes):
# A subscript shouldn't have spaces separating its colons.
return False
if (format_token.Subtype.TYPED_NAME in left.subtypes or
format_token.Subtype.TYPED_NAME in right.subtypes):
# A typed argument should have a space after the colon.
return True
if left.is_string:
if (rval == '=' and format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST
in right.subtypes):
# If there is a type hint, then we don't want to add a space between the
# equal sign and the hint.
return False
if rval not in '[)]}.' and not right.is_binary_op:
# A string followed by something other than a subscript, closing bracket,
# dot, or a binary op should have a space after it.
return True
if rval in pytree_utils.CLOSING_BRACKETS:
# A string followed by closing brackets should have a space after it
# depending on SPACE_INSIDE_BRACKETS. A string followed by opening
# brackets, however, should not.
return style.Get('SPACE_INSIDE_BRACKETS')
if format_token.Subtype.SUBSCRIPT_BRACKET in right.subtypes:
# It's legal to do this in Python: 'hello'[a]
return False
if left.is_binary_op and lval != '**' and _IsUnaryOperator(right):
# Space between the binary operator and the unary operator.
return True
if left.is_keyword and _IsUnaryOperator(right):
# Handle things like "not -3 < x".
return True
if _IsUnaryOperator(left) and _IsUnaryOperator(right):
# No space between two unary operators.
return False
if left.is_binary_op or right.is_binary_op:
if lval == '**' or rval == '**':
# Space around the "power" operator.
return style.Get('SPACES_AROUND_POWER_OPERATOR')
# Enforce spaces around binary operators except the blacklisted ones.
blacklist = style.Get('NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS')
if lval in blacklist or rval in blacklist:
return False
if style.Get('ARITHMETIC_PRECEDENCE_INDICATION'):
if _PriorityIndicatingNoSpace(left) or _PriorityIndicatingNoSpace(right):
return False
else:
return True
else:
return True
if (_IsUnaryOperator(left) and lval != 'not' and
(right.is_name or right.is_number or rval == '(')):
# The previous token was a unary op. No space is desired between it and
# the current token.
return False
if (format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in left.subtypes and
format_token.Subtype.TYPED_NAME not in right.subtypes):
# A named argument or default parameter shouldn't have spaces around it.
return style.Get('SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN')
if (format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in right.subtypes and
format_token.Subtype.TYPED_NAME not in left.subtypes):
# A named argument or default parameter shouldn't have spaces around it.
return style.Get('SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN')
if (format_token.Subtype.VARARGS_LIST in left.subtypes or
format_token.Subtype.VARARGS_LIST in right.subtypes):
return False
if (format_token.Subtype.VARARGS_STAR in left.subtypes or
format_token.Subtype.KWARGS_STAR_STAR in left.subtypes):
# Don't add a space after a vararg's star or a keyword's star-star.
return False
if lval == '@' and format_token.Subtype.DECORATOR in left.subtypes:
# Decorators shouldn't be separated from the 'at' sign.
return False
if left.is_keyword and rval == '.':
# Add space between keywords and dots.
return lval != 'None' and lval != 'print'
if lval == '.' and right.is_keyword:
# Add space between keywords and dots.
return rval != 'None' and rval != 'print'
if lval == '.' or rval == '.':
# Don't place spaces between dots.
return False
if ((lval == '(' and rval == ')') or (lval == '[' and rval == ']') or
(lval == '{' and rval == '}')):
# Empty objects shouldn't be separated by spaces.
return False
if not is_line_disabled and (left.OpensScope() or right.ClosesScope()):
if (style.GetOrDefault('SPACES_AROUND_DICT_DELIMITERS', False) and (
(lval == '{' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or
(rval == '}' and
_IsDictListTupleDelimiterTok(right, is_opening=False)))):
return True
if (style.GetOrDefault('SPACES_AROUND_LIST_DELIMITERS', False) and (
(lval == '[' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or
(rval == ']' and
_IsDictListTupleDelimiterTok(right, is_opening=False)))):
return True
if (style.GetOrDefault('SPACES_AROUND_TUPLE_DELIMITERS', False) and (
(lval == '(' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or
(rval == ')' and
_IsDictListTupleDelimiterTok(right, is_opening=False)))):
return True
if (lval in pytree_utils.OPENING_BRACKETS and
rval in pytree_utils.OPENING_BRACKETS):
# Nested objects' opening brackets shouldn't be separated, unless enabled
# by SPACE_INSIDE_BRACKETS.
return style.Get('SPACE_INSIDE_BRACKETS')
if (lval in pytree_utils.CLOSING_BRACKETS and
rval in pytree_utils.CLOSING_BRACKETS):
# Nested objects' closing brackets shouldn't be separated, unless enabled
# by SPACE_INSIDE_BRACKETS.
return style.Get('SPACE_INSIDE_BRACKETS')
if lval in pytree_utils.CLOSING_BRACKETS and rval in '([':
# A call, set, dictionary, or subscript that has a call or subscript after
# it shouldn't have a space between them.
return False
if lval in pytree_utils.OPENING_BRACKETS and _IsIdNumberStringToken(right):
# Don't separate the opening bracket from the first item, unless enabled
# by SPACE_INSIDE_BRACKETS.
return style.Get('SPACE_INSIDE_BRACKETS')
if left.is_name and rval in '([':
# Don't separate a call or array access from the name.
return False
if rval in pytree_utils.CLOSING_BRACKETS:
# Don't separate the closing bracket from the last item, unless enabled
# by SPACE_INSIDE_BRACKETS.
# FIXME(morbo): This might be too permissive.
return style.Get('SPACE_INSIDE_BRACKETS')
if lval == 'print' and rval == '(':
# Special support for the 'print' function.
return False
if lval in pytree_utils.OPENING_BRACKETS and _IsUnaryOperator(right):
# Don't separate a unary operator from the opening bracket, unless enabled
# by SPACE_INSIDE_BRACKETS.
return style.Get('SPACE_INSIDE_BRACKETS')
if (lval in pytree_utils.OPENING_BRACKETS and
(format_token.Subtype.VARARGS_STAR in right.subtypes or
format_token.Subtype.KWARGS_STAR_STAR in right.subtypes)):
# Don't separate a '*' or '**' from the opening bracket, unless enabled
# by SPACE_INSIDE_BRACKETS.
return style.Get('SPACE_INSIDE_BRACKETS')
if rval == ';':
# Avoid spaces before a semicolon. (Why is there a semicolon?!)
return False
if lval == '(' and rval == 'await':
# Special support for the 'await' keyword. Don't separate the 'await'
# keyword from an opening paren, unless enabled by SPACE_INSIDE_BRACKETS.
return style.Get('SPACE_INSIDE_BRACKETS')
return True
def _MustBreakBefore(prev_token, cur_token):
"""Return True if a line break is required before the current token."""
if prev_token.is_comment or (prev_token.previous_token and
prev_token.is_pseudo_paren and
prev_token.previous_token.is_comment):
# Must break if the previous token was a comment.
return True
if (cur_token.is_string and prev_token.is_string and
IsSurroundedByBrackets(cur_token)):
# We want consecutive strings to be on separate lines. This is a
# reasonable assumption, because otherwise they should have written them
# all on the same line, or with a '+'.
return True
return pytree_utils.GetNodeAnnotation(
cur_token.node, pytree_utils.Annotation.MUST_SPLIT, default=False)
def _CanBreakBefore(prev_token, cur_token):
"""Return True if a line break may occur before the current token."""
pval = prev_token.value
cval = cur_token.value
if py3compat.PY3:
if pval == 'yield' and cval == 'from':
# Don't break before a yield argument.
return False
if pval in {'async', 'await'} and cval in {'def', 'with', 'for'}:
      # Don't break after async/await keywords.
return False
if cur_token.split_penalty >= split_penalty.UNBREAKABLE:
return False
if pval == '@':
# Don't break right after the beginning of a decorator.
return False
if cval == ':':
# Don't break before the start of a block of code.
return False
if cval == ',':
# Don't break before a comma.
return False
if prev_token.is_name and cval == '(':
# Don't break in the middle of a function definition or call.
return False
if prev_token.is_name and cval == '[':
# Don't break in the middle of an array dereference.
return False
if cur_token.is_comment and prev_token.lineno == cur_token.lineno:
# Don't break a comment at the end of the line.
return False
if format_token.Subtype.UNARY_OPERATOR in prev_token.subtypes:
# Don't break after a unary token.
return False
if not style.Get('ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS'):
if (format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes or
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes):
return False
return True
def IsSurroundedByBrackets(tok):
"""Return True if the token is surrounded by brackets."""
paren_count = 0
brace_count = 0
sq_bracket_count = 0
previous_token = tok.previous_token
while previous_token:
if previous_token.value == ')':
paren_count -= 1
elif previous_token.value == '}':
brace_count -= 1
elif previous_token.value == ']':
sq_bracket_count -= 1
if previous_token.value == '(':
if paren_count == 0:
return previous_token
paren_count += 1
elif previous_token.value == '{':
if brace_count == 0:
return previous_token
brace_count += 1
elif previous_token.value == '[':
if sq_bracket_count == 0:
return previous_token
sq_bracket_count += 1
previous_token = previous_token.previous_token
return None
def _IsDictListTupleDelimiterTok(tok, is_opening):
assert tok
if tok.matching_bracket is None:
return False
if is_opening:
open_tok = tok
close_tok = tok.matching_bracket
else:
open_tok = tok.matching_bracket
close_tok = tok
# There must be something in between the tokens
if open_tok.next_token == close_tok:
return False
assert open_tok.next_token.node
assert open_tok.next_token.node.parent
return open_tok.next_token.node.parent.type in [
python_symbols.dictsetmaker,
python_symbols.listmaker,
python_symbols.testlist_gexp,
]
_LOGICAL_OPERATORS = frozenset({'and', 'or'})
_BITWISE_OPERATORS = frozenset({'&', '|', '^'})
_ARITHMETIC_OPERATORS = frozenset({'+', '-', '*', '/', '%', '//', '@'})
def _SplitPenalty(prev_token, cur_token):
"""Return the penalty for breaking the line before the current token."""
pval = prev_token.value
cval = cur_token.value
if pval == 'not':
return split_penalty.UNBREAKABLE
if cur_token.node_split_penalty > 0:
return cur_token.node_split_penalty
if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'):
# Prefer to split before 'and' and 'or'.
if pval in _LOGICAL_OPERATORS:
return style.Get('SPLIT_PENALTY_LOGICAL_OPERATOR')
if cval in _LOGICAL_OPERATORS:
return 0
else:
# Prefer to split after 'and' and 'or'.
if pval in _LOGICAL_OPERATORS:
return 0
if cval in _LOGICAL_OPERATORS:
return style.Get('SPLIT_PENALTY_LOGICAL_OPERATOR')
if style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'):
# Prefer to split before '&', '|', and '^'.
if pval in _BITWISE_OPERATORS:
return style.Get('SPLIT_PENALTY_BITWISE_OPERATOR')
if cval in _BITWISE_OPERATORS:
return 0
else:
# Prefer to split after '&', '|', and '^'.
if pval in _BITWISE_OPERATORS:
return 0
if cval in _BITWISE_OPERATORS:
return style.Get('SPLIT_PENALTY_BITWISE_OPERATOR')
if (format_token.Subtype.COMP_FOR in cur_token.subtypes or
format_token.Subtype.COMP_IF in cur_token.subtypes):
# We don't mind breaking before the 'for' or 'if' of a list comprehension.
return 0
if format_token.Subtype.UNARY_OPERATOR in prev_token.subtypes:
# Try not to break after a unary operator.
return style.Get('SPLIT_PENALTY_AFTER_UNARY_OPERATOR')
if pval == ',':
# Breaking after a comma is fine, if need be.
return 0
if pval == '**' or cval == '**':
return split_penalty.STRONGLY_CONNECTED
if (format_token.Subtype.VARARGS_STAR in prev_token.subtypes or
format_token.Subtype.KWARGS_STAR_STAR in prev_token.subtypes):
# Don't split after a varargs * or kwargs **.
return split_penalty.UNBREAKABLE
if prev_token.OpensScope() and cval != '(':
    # Slightly penalize splitting right after an opening bracket.
return style.Get('SPLIT_PENALTY_AFTER_OPENING_BRACKET')
if cval == ':':
# Don't split before a colon.
return split_penalty.UNBREAKABLE
if cval == '=':
# Don't split before an assignment.
return split_penalty.UNBREAKABLE
if (format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes or
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes):
    # Don't break before or after a default or named assignment.
return split_penalty.UNBREAKABLE
if cval == '==':
# We would rather not split before an equality operator.
return split_penalty.STRONGLY_CONNECTED
if cur_token.ClosesScope():
# Give a slight penalty for splitting before the closing scope.
return 100
return 0
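# --- Editor-added note (illustrative, not part of the original module) ---
# Taken together, _PriorityIndicatingNoSpace() and the ARITHMETIC_PRECEDENCE_INDICATION
# style option let the spacing pass drop the spaces only around higher-precedence
# arithmetic operators, e.g. reformatting `a = b + c * d` as `a = b + c*d`.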
| 38.308605 | 80 | 0.686406 |
002fefcc9bfa81253af761d811a8f270e64827ca | 449 | py | Python | setup.py | Chive/divio-django-jet | 61526a4146f2535a7c71dd848be398d4755c08f2 | [
"BSD-2-Clause"
] | null | null | null | setup.py | Chive/divio-django-jet | 61526a4146f2535a7c71dd848be398d4755c08f2 | [
"BSD-2-Clause"
] | null | null | null | setup.py | Chive/divio-django-jet | 61526a4146f2535a7c71dd848be398d4755c08f2 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from divio_django_jet import __version__
setup(
name='divio-django-jet',
version=__version__,
description=open('README.rst').read(),
author='Divio AG',
author_email='info@divio.com',
packages=find_packages(),
platforms=['OS Independent'],
install_requires=[
'django-jet==1.0.4',
],
include_package_data=True,
zip_safe=False,
)
| 22.45 | 43 | 0.665924 |
c0b33e9a04a493e27f702d90145a7ac134b36c85 | 5,755 | py | Python | autogoal/contrib/__init__.py | gmijenes/autogoal | 916b0eb4d1aa1a222d0ff1b0f6f202bf56458ef5 | [
"MIT"
] | null | null | null | autogoal/contrib/__init__.py | gmijenes/autogoal | 916b0eb4d1aa1a222d0ff1b0f6f202bf56458ef5 | [
"MIT"
] | null | null | null | autogoal/contrib/__init__.py | gmijenes/autogoal | 916b0eb4d1aa1a222d0ff1b0f6f202bf56458ef5 | [
"MIT"
] | null | null | null | def find_classes(include=None, exclude=None, modules=None, input=None, output=None):
import inspect
import re
result = []
if include:
include = f".*({include}).*"
else:
include = r".*"
if exclude:
exclude = f".*({exclude}).*"
if input:
input = f".*({input}).*"
if output:
output = f".*({output}).*"
if modules is None:
modules = []
try:
from autogoal.contrib import sklearn
modules.append(sklearn)
except ImportError as e:
pass
try:
from autogoal.contrib import nltk
modules.append(nltk)
except ImportError as e:
pass
try:
from autogoal.contrib import gensim
modules.append(gensim)
except ImportError as e:
pass
try:
from autogoal.contrib import keras
modules.append(keras)
except ImportError as e:
pass
try:
from autogoal.contrib import transformers
modules.append(transformers)
except ImportError as e:
pass
try:
from autogoal.contrib import spacy
modules.append(spacy)
except ImportError as e:
pass
try:
from autogoal.contrib import wikipedia
modules.append(wikipedia)
except ImportError as e:
pass
from autogoal.contrib import wrappers
modules.append(wrappers)
from autogoal.contrib import regex
modules.append(regex)
for module in modules:
for _, cls in inspect.getmembers(module, inspect.isclass):
if not hasattr(cls, "run"):
continue
if cls.__name__.startswith("_"):
continue
if not re.match(include, repr(cls)):
continue
if exclude is not None and re.match(exclude, repr(cls)):
continue
if not cls.__module__.startswith("autogoal.contrib"):
continue
sig = inspect.signature(cls.run)
if input and not re.match(input, str(sig.parameters["input"].annotation)):
continue
if output and not re.match(output, str(sig.return_annotation)):
continue
result.append(cls)
return result
import enum
class ContribStatus(enum.Enum):
RequiresDependency = enum.auto()
RequiresDownload = enum.auto()
Ready = enum.auto()
def status():
status = {}
modules = []
try:
from autogoal.contrib import sklearn
modules.append(sklearn)
except ImportError as e:
status["autogoal.contrib.sklearn"] = ContribStatus.RequiresDependency
try:
from autogoal.contrib import nltk
modules.append(nltk)
except ImportError as e:
status["autogoal.contrib.nltk"] = ContribStatus.RequiresDependency
try:
from autogoal.contrib import gensim
modules.append(gensim)
except ImportError as e:
status["autogoal.contrib.gensim"] = ContribStatus.RequiresDependency
try:
from autogoal.contrib import keras
modules.append(keras)
except ImportError as e:
status["autogoal.contrib.keras"] = ContribStatus.RequiresDependency
try:
from autogoal.contrib import transformers
modules.append(transformers)
except ImportError as e:
status["autogoal.contrib.transformers"] = ContribStatus.RequiresDependency
try:
from autogoal.contrib import spacy
modules.append(spacy)
except ImportError as e:
status["autogoal.contrib.spacy"] = ContribStatus.RequiresDependency
try:
from autogoal.contrib import wikipedia
modules.append(wikipedia)
except ImportError as e:
status["autogoal.contrib.wikipedia"] = ContribStatus.RequiresDependency
modules.sort(key=lambda m: m.__name__)
for module in modules:
if hasattr(module, "status"):
status[module.__name__] = module.status()
else:
status[module.__name__] = ContribStatus.Ready
return status
def download(contrib: str):
modules = {}
try:
from autogoal.contrib import sklearn
modules["sklearn"] = sklearn
except ImportError as e:
pass
try:
from autogoal.contrib import nltk
modules["nltk"] = nltk
except ImportError as e:
pass
try:
from autogoal.contrib import gensim
modules["gensim"] = gensim
except ImportError as e:
pass
try:
from autogoal.contrib import keras
modules["keras"] = keras
except ImportError as e:
pass
try:
from autogoal.contrib import transformers
modules["transformers"] = transformers
except ImportError as e:
pass
try:
from autogoal.contrib import spacy
modules["spacy"] = spacy
except ImportError as e:
pass
try:
from autogoal.contrib import wikipedia
modules["wikipedia"] = wikipedia
except ImportError as e:
pass
if contrib not in modules:
raise ValueError(f"Contrib `{contrib}` cannot be imported.")
contrib = modules[contrib]
if not hasattr(contrib, "download"):
return False
return contrib.download()
__all__ = ["find_classes", "status", "download"]
| 23.205645 | 87 | 0.56907 |
fbe047c31eac8f1fff8528a952f537cc6a2c89b6 | 3,473 | py | Python | letterparser/zip_lib.py | elifesciences/decision-letter-parser | afe2b72a7d8697e5e8cfec96c531d7bef843908d | [
"MIT"
] | null | null | null | letterparser/zip_lib.py | elifesciences/decision-letter-parser | afe2b72a7d8697e5e8cfec96c531d7bef843908d | [
"MIT"
] | 52 | 2018-11-29T01:45:48.000Z | 2022-02-22T18:05:22.000Z | letterparser/zip_lib.py | elifesciences/decision-letter-parser | afe2b72a7d8697e5e8cfec96c531d7bef843908d | [
"MIT"
] | null | null | null | # coding=utf-8
import zipfile
import os
import shutil
from letterparser import utils
def profile_zip(file_name):
"""open the zip and get zip file info based on the filename"""
zip_docx_info = None
zip_asset_infos = []
with zipfile.ZipFile(file_name, "r") as open_zipfile:
for zipfile_info in open_zipfile.infolist():
# ignore files in subfolders like __MACOSX
zipfile_file = zipfile_info.filename
if "/" in zipfile_file:
continue
if zipfile_file.endswith(".docx"):
zip_docx_info = zipfile_info
else:
# assume figure or video file
zip_asset_infos.append(zipfile_info)
# sort by file name
zip_asset_infos = sorted(zip_asset_infos, key=lambda asset: asset.filename)
return zip_docx_info, zip_asset_infos
def unzip_file(open_zipfile, zip_file_info, output_path):
"read the zip_file_info from the open_zipfile and write to output_path"
with open_zipfile.open(zip_file_info) as zip_content:
with open(output_path, "wb") as output_file:
output_file.write(zip_content.read())
def unzip_zip(file_name, temp_dir):
"unzip certain files and return the local paths"
docx_file_name = None
asset_file_names = []
zip_docx_info, zip_asset_infos = profile_zip(file_name)
# extract the files
with zipfile.ZipFile(file_name, "r") as open_zipfile:
if zip_docx_info:
docx_file_name = os.path.join(temp_dir, zip_docx_info.filename)
unzip_file(open_zipfile, zip_docx_info, docx_file_name)
for zip_asset_info in zip_asset_infos:
asset_file_name = os.path.join(temp_dir, zip_asset_info.filename)
unzip_file(open_zipfile, zip_asset_info, asset_file_name)
asset_file_names.append(asset_file_name)
return docx_file_name, asset_file_names
def fix_complex_scripts_styles(file_name, temp_dir="tmp"):
"""copy the docx file and fix complex scripts style tags"""
new_zip_file_name = os.path.join(temp_dir, "temp.docx")
new_file_name = os.path.join(temp_dir, utils.get_file_name_file(file_name))
# create a new zip file with altered word/document.xml file contents
with zipfile.ZipFile(
file_name, "r", zipfile.ZIP_DEFLATED, allowZip64=True
) as open_zip:
with zipfile.ZipFile(
new_zip_file_name, "w", zipfile.ZIP_DEFLATED, allowZip64=True
) as new_open_zip:
complex_scripts_styles_rewrite(open_zip, new_open_zip)
# copy the new zip overtop of existing docx, if present
shutil.move(new_zip_file_name, new_file_name)
return new_file_name
def complex_scripts_styles_rewrite(from_zip, to_zip):
"""
given two open zipfile.Zipfile objects from docx files,
read items from from_zip,
write them to to_zip and remove complex script styles from the document.xml file
"""
for zip_file_name in from_zip.namelist():
if zip_file_name == "word/document.xml":
with from_zip.open(zip_file_name) as open_file:
document_xml = open_file.read()
document_xml = utils.remove_complex_scripts_styles(document_xml)
# write the altered string to the new zip file
to_zip.writestr(zip_file_name, document_xml)
else:
# copy the file into the new zip
to_zip.writestr(zip_file_name, from_zip.read(zip_file_name))
| 40.383721 | 84 | 0.691621 |
82909e1be62adc8dea3ac17adaabdfeba5896eca | 592 | py | Python | LAB1/roof14.py | Anastasia-Paliy/VisualGeometry | 3c7c08f126e7f64793402f0e3e8f07f9bb653505 | [
"MIT"
] | null | null | null | LAB1/roof14.py | Anastasia-Paliy/VisualGeometry | 3c7c08f126e7f64793402f0e3e8f07f9bb653505 | [
"MIT"
] | null | null | null | LAB1/roof14.py | Anastasia-Paliy/VisualGeometry | 3c7c08f126e7f64793402f0e3e8f07f9bb653505 | [
"MIT"
] | null | null | null | import random
from PIL import Image, ImageDraw
image = Image.open("roof.jpg")
draw = ImageDraw.Draw(image)
width = image.size[0]
height = image.size[1]
pix = image.load()
for x in range(0, width, 100):
for y in range(height // 4, 3 * height // 4):
for i in range(50):
draw.point((x + i, y), (128, 255, 0))
draw.ellipse((width // 2 - height // 4,
height // 4,
width // 2 + height // 4,
3 * height // 4),
(255, 128, 0),
(255, 128, 0))
image.show()
del draw
| 25.73913 | 61 | 0.481419 |
51f90c8a57c06911ec23a565c4a9752a1182a7e1 | 7,378 | py | Python | src/database.py | rwarnking/image-sorter | 821ec2b89126ea95d6d801a995d05dfa2205918d | [
"MIT"
] | null | null | null | src/database.py | rwarnking/image-sorter | 821ec2b89126ea95d6d801a995d05dfa2205918d | [
"MIT"
] | 21 | 2021-03-22T15:14:17.000Z | 2021-03-31T15:32:34.000Z | src/database.py | rwarnking/image-sorter | 821ec2b89126ea95d6d801a995d05dfa2205918d | [
"MIT"
] | null | null | null | import json
import sqlite3
class Database:
def __init__(self, path="database.db"):
self.conn = sqlite3.connect(path)
self.conn.execute(
"CREATE TABLE IF NOT EXISTS events (title STRING, s_year INT, s_month INT, \
s_day INT, e_year INT, e_month INT, e_day INTT)"
)
self.conn.execute(
"CREATE TABLE IF NOT EXISTS artists (name STRING, make STRING, model STRING)"
)
self.conn.commit()
def clean_all(self):
self.clean_events()
self.clean_artists()
self.conn.execute("DROP TABLE IF EXISTS events")
self.conn.execute("DROP TABLE IF EXISTS artists")
self.conn.execute(
"CREATE TABLE IF NOT EXISTS events (title STRING, s_year INT, s_month INT, \
                s_day INT, e_year INT, e_month INT, e_day INT)"
)
self.conn.execute(
"CREATE TABLE IF NOT EXISTS artists (name STRING, make STRING, model STRING)"
)
print("All table entrys were deleted.")
    #####################
    # General functions #
    #####################
def has_elem(self, table, attr, var):
query = f"SELECT * FROM {table} WHERE {attr}=?"
cur = self.conn.execute(query, (var,))
result = cur.fetchall()
cur.close()
return len(result) > 0
def delete_one(self, table, attr, var):
if self.has_elem(table, attr, var):
query = f"DELETE FROM {table} WHERE {attr}=?"
self.conn.execute(query, (var,))
self.conn.commit()
print("From table " + table + ", " + var + " was deleted.")
else:
print("In table " + table + ", " + var + " was not found.")
def get_all_from_table(self, table):
cur = self.conn.execute(f"SELECT * FROM {table}")
result = cur.fetchall()
cur.close()
return result
def print_table(self, table):
cur = self.conn.execute(f"SELECT * FROM {table}")
result = cur.fetchall()
cur.close()
for r in result:
print(r)
#################
# Event related #
#################
def insert_event_from_date(self, title, start_date, end_date):
# TODO
if end_date < start_date:
print("Could not add Event: end date < start date!")
return
if title == "":
print("Could not add Event: Missing titel!")
return
self.insert_event(
title,
start_date.year,
start_date.month,
start_date.day,
end_date.year,
end_date.month,
end_date.day,
)
def insert_event(self, title, s_year, s_month, s_day, e_year, e_month, e_day):
if not self.has_elem("events", "title", title):
title = title.replace(" ", "")
self.conn.execute(
"INSERT INTO events (title, s_year, s_month, s_day, e_year, e_month, e_day) \
VALUES (?, ?, ?, ?, ?, ?, ?)",
(title, s_year, s_month, s_day, e_year, e_month, e_day),
)
self.conn.commit()
print("Event " + title + " was added.")
else:
print("Event " + title + " was already there, could NOT add.")
def insert_events(self, file):
with open(file) as json_file:
data = json.load(json_file)
for event in data["events"]:
self.insert_event(
event["title"],
event["start"]["year"],
event["start"]["month"],
event["start"]["day"],
event["end"]["year"],
event["end"]["month"],
event["end"]["day"],
)
def delete_event(self, title):
title = title.replace(" ", "")
self.delete_one("events", "title", title)
def clean_events(self):
self.conn.execute("DROP TABLE IF EXISTS events")
self.conn.execute(
"CREATE TABLE IF NOT EXISTS events (title STRING, s_year INT, s_month INT, \
                s_day INT, e_year INT, e_month INT, e_day INT)"
)
print("All event entrys were deleted.")
def save_events(self, file):
data = self.get_all_from_table("events")
json_data = {"events": []}
for elem in data:
json_data["events"].append(
{
"title": elem[0],
"start": {
"year": elem[1],
"month": elem[2],
"day": elem[3],
},
"end": {
"year": elem[4],
"month": elem[5],
"day": elem[6],
},
}
)
with open(file, "w") as outfile:
json.dump(json_data, outfile, indent=4)
print("Events were saved to file " + file + ".")
def get_event(self, year, month, day):
cur = self.conn.execute(
"SELECT title FROM events WHERE s_year<=? AND s_month<=? AND s_day<=? AND e_year>=? \
AND e_month>=? AND e_day>=?",
(year, month, day, year, month, day),
)
result = cur.fetchall()
cur.close()
return result
def print_events(self):
self.print_table("events")
##################
# Artist related #
##################
def insert_artist(self, name, make, model):
if not self.has_elem("artists", "name", name):
self.conn.execute(
"INSERT INTO artists (name, make, model) VALUES (?, ?, ?)", (name, make, model)
)
self.conn.commit()
print("Artist " + name + " was added.")
else:
print("Artist " + name + " was already there, could NOT add.")
def insert_artists(self, file):
with open(file) as json_file:
data = json.load(json_file)
for artist in data["artists"]:
self.insert_artist(
artist["name"],
artist["make"],
artist["model"],
)
def delete_artist(self, name):
self.delete_one("artists", "name", name)
def clean_artists(self):
self.conn.execute("DROP TABLE IF EXISTS artists")
self.conn.execute(
"CREATE TABLE IF NOT EXISTS artists (name STRING, make STRING, model STRING)"
)
print("All artist entrys were deleted.")
def save_artists(self, file):
data = self.get_all_from_table("artists")
json_data = {"artists": []}
for elem in data:
json_data["artists"].append(
{
"name": elem[0],
"make": elem[1],
"model": elem[2],
}
)
with open(file, "w") as outfile:
json.dump(json_data, outfile, indent=4)
print("Artists were saved to file " + file + ".")
def get_artist(self, make, model):
cur = self.conn.execute("SELECT name FROM artists WHERE make=? AND model=?", (make, model))
result = cur.fetchall()
cur.close()
return result
def print_artists(self):
self.print_table("artists")
| 32.646018 | 99 | 0.495798 |
4dad85d74272c16d4adbb3ebd62f0de6bf94a5bd | 3,222 | py | Python | models/modules/sparse_ops.py | yanivbl6/convNet.pytorch | a75807611991d867e752bf25eaf8c311a416d8b5 | [
"MIT"
] | null | null | null | models/modules/sparse_ops.py | yanivbl6/convNet.pytorch | a75807611991d867e752bf25eaf8c311a416d8b5 | [
"MIT"
] | null | null | null | models/modules/sparse_ops.py | yanivbl6/convNet.pytorch | a75807611991d867e752bf25eaf8c311a416d8b5 | [
"MIT"
] | null | null | null | import torch
from torch import autograd, nn
import torch.nn.functional as F
from itertools import repeat
from torch._six import container_abcs
class Sparse(autograd.Function):
"""" Prune the unimprotant weight for the forwards phase but pass the gradient to dense weight using SR-STE in the backwards phase"""
@staticmethod
def forward(ctx, weight, N, M, decay = 0.0002):
ctx.save_for_backward(weight)
output = weight.clone()
length = weight.numel()
group = int(length/M)
weight_temp = weight.detach().abs().reshape(group, M)
index = torch.argsort(weight_temp, dim=1)[:, :int(M-N)]
w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
w_b = w_b.scatter_(dim=1, index=index, value=0).reshape(weight.shape)
ctx.mask = w_b
ctx.decay = decay
return output*w_b
@staticmethod
def backward(ctx, grad_output):
weight, = ctx.saved_tensors
return grad_output + ctx.decay * (1-ctx.mask) * weight, None, None
class Sparse_NHWC(autograd.Function):
"""" Prune the unimprotant edges for the forwards phase but pass the gradient to dense weight using STE in the backwards phase"""
@staticmethod
def forward(ctx, weight, N, M, decay = 0.0002):
ctx.save_for_backward(weight)
output = weight.clone()
length = weight.numel()
group = int(length/M)
weight_temp = weight.detach().abs().permute(0,2,3,1).reshape(group, M)
index = torch.argsort(weight_temp, dim=1)[:, :int(M-N)]
w_b = torch.ones(weight_temp.shape, device=weight_temp.device)
w_b = w_b.scatter_(dim=1, index=index, value=0).reshape(weight.permute(0,2,3,1).shape)
w_b = w_b.permute(0,3,1,2)
ctx.mask = w_b
ctx.decay = decay
return output*w_b
@staticmethod
def backward(ctx, grad_output):
weight, = ctx.saved_tensors
return grad_output + ctx.decay * (1-ctx.mask) * weight, None, None
class SparseConv(nn.Conv2d):
"""" implement N:M sparse convolution layer """
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', N=2, M=4, **kwargs):
self.N = N
self.M = M
super(SparseConv, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, **kwargs)
def get_sparse_weights(self):
return Sparse_NHWC.apply(self.weight, self.N, self.M)
def forward(self, x):
w = self.get_sparse_weights()
x = F.conv2d(
x, w, self.bias, self.stride, self.padding, self.dilation, self.groups
)
return x
class SparseLinear(nn.Linear):
def __init__(self, in_features: int, out_features: int, bias: bool = True, N=2, M=2, decay = 0.0002, **kwargs):
self.N = N
self.M = M
        super(SparseLinear, self).__init__(in_features, out_features, bias=bias)
def get_sparse_weights(self):
return Sparse.apply(self.weight, self.N, self.M)
def forward(self, x):
w = self.get_sparse_weights()
x = F.linear(x, w)
return x
| 30.11215 | 159 | 0.639665 |
8c502a99fad3b1cfd8a5f6a18484b8dff0f03826 | 1,559 | py | Python | src/features/build_features.py | jindongyang94/sample_luigipipeline | 397afd6debc546ea4c9760b475aca4806927e091 | [
"FTL"
] | null | null | null | src/features/build_features.py | jindongyang94/sample_luigipipeline | 397afd6debc546ea4c9760b475aca4806927e091 | [
"FTL"
] | null | null | null | src/features/build_features.py | jindongyang94/sample_luigipipeline | 397afd6debc546ea4c9760b475aca4806927e091 | [
"FTL"
] | null | null | null | """
author: atreya
desc:
"""
import datetime
import pandas
class FeatureBuilder():
def __init__(self, dataframe,training=True):
self.dataframe = dataframe
self.continuous_features = ["CompetitionDistance","Date"]
self.categorical_features = ["StoreType"]
self.target = ["Sales"]
self.training=training
def featurize(self):
print "Subsetting Dataframe"
self.subset_dataframe()
print "Feature Engineering"
dataframe = self.feature_engineering(self.dataframe)
self.handle_missing_value()
dataframe = pandas.get_dummies(dataframe)
print dataframe.head()
return dataframe
def feature_engineering(self, dataframe=None):
date1 = [datetime.datetime.strptime(i, '%Y-%m-%d') for i in dataframe.Date]
dataframe['Day'] = [i.day for i in date1]
self.continuous_features.append("Day")
dataframe['Month'] = [i.month for i in date1]
self.continuous_features.append("Month")
dataframe['Year'] = [i.year for i in date1]
self.continuous_features.append("Year")
dataframe.drop("Date",axis=1,inplace=True)
return dataframe
def subset_dataframe(self):
if self.training:
self.dataframe = self.dataframe[self.continuous_features+self.categorical_features+self.target]
else:
self.dataframe = self.dataframe[self.continuous_features + self.categorical_features]
def handle_missing_value(self):
self.dataframe.fillna(0, inplace=True)
| 33.170213 | 107 | 0.662604 |
5abaf9161f0664aff104b33c3aed04faf4806040 | 1,280 | py | Python | jrnl/plugins/json_exporter.py | jprof/jrnl | 04811405dc0007dcf1184b885b89bc5c965ddaac | [
"MIT"
] | 2 | 2017-12-08T19:06:55.000Z | 2017-12-09T00:45:20.000Z | jrnl/plugins/json_exporter.py | jprof/jrnl | 04811405dc0007dcf1184b885b89bc5c965ddaac | [
"MIT"
] | 2 | 2017-12-24T01:37:08.000Z | 2021-05-08T02:23:30.000Z | jrnl/plugins/json_exporter.py | jprof/jrnl | 04811405dc0007dcf1184b885b89bc5c965ddaac | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
from .text_exporter import TextExporter
import json
from .util import get_tags_count
class JSONExporter(TextExporter):
"""This Exporter can convert entries and journals into json."""
names = ["json"]
extension = "json"
@classmethod
def entry_to_dict(cls, entry):
entry_dict = {
'title': entry.title,
'body': entry.body,
'date': entry.date.strftime("%Y-%m-%d"),
'time': entry.date.strftime("%H:%M"),
'starred': entry.starred
}
if hasattr(entry, "uuid"):
entry_dict['uuid'] = entry.uuid
return entry_dict
@classmethod
def export_entry(cls, entry):
"""Returns a json representation of a single entry."""
return json.dumps(cls.entry_to_dict(entry), indent=2) + "\n"
@classmethod
def export_journal(cls, journal):
"""Returns a json representation of an entire journal."""
tags = get_tags_count(journal)
result = {
"tags": dict((tag, count) for count, tag in tags),
"entries": [cls.entry_to_dict(e) for e in journal.entries]
}
return json.dumps(result, indent=2)
| 30.47619 | 70 | 0.610156 |
27ccf399a1acb58595da8c9680252ffaedd18a8b | 14,332 | py | Python | test/functional/mempool_accept.py | TheBurningSavage/TheBurningSavage | dfc00e6c5acc192b4d8a6e8a8ded2efb1252c861 | [
"MIT"
] | 1 | 2021-04-23T20:46:00.000Z | 2021-04-23T20:46:00.000Z | test/functional/mempool_accept.py | TheBurningSavage/TheBurningSavage | dfc00e6c5acc192b4d8a6e8a8ded2efb1252c861 | [
"MIT"
] | null | null | null | test/functional/mempool_accept.py | TheBurningSavage/TheBurningSavage | dfc00e6c5acc192b4d8a6e8a8ded2efb1252c861 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The TheBurningSavage Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from io import BytesIO
from test_framework.test_framework import TheBurningSavageTestFramework
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxOut,
MAX_BLOCK_BASE_SIZE,
)
from test_framework.script import (
hash160,
CScript,
OP_0,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
bytes_to_hex_str,
hex_str_to_bytes,
wait_until,
)
class MempoolAcceptanceTest(TheBurningSavageTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex',
'-reindex', # Need reindex for txindex
'-acceptnonstdtxn=0', # Try to mimic main-net
]] * self.num_nodes
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
wait_until(lambda: node.getblockcount() == 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain exactly one raw transaction for now', lambda: node.testmempoolaccept(rawtxs=['ff00baar', 'ff22']))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = node.listunspent()[0] # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, allowhighfees=True)
node.generate(1)
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': '18: txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = 0.00000700
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): 0.3 - fee}],
))['hex']
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size = 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': '18: txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=bytes_to_hex_str(tx.serialize()), allowhighfees=True)
# take original raw_tx_0
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '18: txn-mempool-conflict'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
allowhighfees=True,
)
self.log.info('A transaction with missing inputs, that never existed')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, allowhighfees=True)
# Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, allowhighfees=True)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
self.log.info('Create a signed "reference" tx for later use')
raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': txid_spend_both, 'vout': 0}],
outputs=[{node.getnewaddress(): 0.05}],
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
# Reference tx should be valid on itself
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with no outputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = []
# Skip re-signing the transaction for context independent checks from now on
# tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex'])))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-empty'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A really large transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * (MAX_BLOCK_BASE_SIZE // len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-oversize'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with negative output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue *= -1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-negative'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue = 21000000 * COIN + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-toolarge'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with too large sum of output values')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = [tx.vout[0]] * 2
tx.vout[0].nValue = 21000000 * COIN
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-txouttotal-toolarge'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with duplicate inputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-inputs-duplicate'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent)))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: coinbase'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('Some nonstandard transactions')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.nVersion = 3 # A version currently non-standard
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: version'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptpubkey'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptsig-not-pushonly'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: tx-size'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0] = output_p2sh_burn
tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: dust'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
tx.vout = [tx.vout[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: multi-op-return'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A timelocked transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored
tx.nLockTime = node.getblockcount() + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-final'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-BIP68-final'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
allowhighfees=True,
)
if __name__ == '__main__':
MempoolAcceptanceTest().main()
| 48.914676 | 154 | 0.644781 |
9a267e59d815cf9e30da2c1132ff8f1188d066ab | 6,601 | py | Python | tests/unittest/benchmark_runner/common/template_operations/golden_files.py | bbenshab/benchmark-runner | 6f541c03c9dca5ee775de93ce62cbeffbc3b10fc | [
"Apache-2.0"
] | null | null | null | tests/unittest/benchmark_runner/common/template_operations/golden_files.py | bbenshab/benchmark-runner | 6f541c03c9dca5ee775de93ce62cbeffbc3b10fc | [
"Apache-2.0"
] | null | null | null | tests/unittest/benchmark_runner/common/template_operations/golden_files.py | bbenshab/benchmark-runner | 6f541c03c9dca5ee775de93ce62cbeffbc3b10fc | [
"Apache-2.0"
] | null | null | null |
import difflib
import filecmp
import os
import shutil
import tempfile
from benchmark_runner.main.environment_variables import environment_variables
from benchmark_runner.common.template_operations.template_operations import TemplateOperations
from tests.unittest.benchmark_runner.common.template_operations.golden_files_exceptions import GoldenFileCheckFailed
class GoldenFiles:
"""Generate golden files for regression testing"""
def __init__(self):
self.__file_path = os.path.join(f'{os.path.dirname(os.path.realpath(__file__))}', 'golden_files')
environment_variables.environment_variables_dict['system_metrics'] = 'True'
environment_variables.environment_variables_dict['elasticsearch'] = 'elasticsearch.example.com'
environment_variables.environment_variables_dict['elasticsearch_port'] = '9999'
        environment_variables.environment_variables_dict['elasticsearch_url'] = 'http://elasticsearch.example.com:9999'
environment_variables.environment_variables_dict['pin'] = 'True'
environment_variables.environment_variables_dict['pin_node1'] = 'pin-node-1'
environment_variables.environment_variables_dict['pin_node2'] = 'pin-node-2'
environment_variables.environment_variables_dict['prom_token_override'] = 'fake_prom_token'
environment_variables.environment_variables_dict['uuid'] = 'deadbeef-0123-3210-cdef-01234567890abcdef'
environment_variables.environment_variables_dict['trunc_uuid'] = environment_variables.environment_variables_dict['uuid'].split('-')[0]
def __clear_directory_yaml(self, dir):
if os.path.isdir(dir):
for file in os.listdir(dir):
if file.endswith('.yaml'):
os.remove(os.path.join(dir, file))
def __generate_yaml_dir_name(self, run_type: str, workload: str, odf_pvc: str, dest: str=None):
if dest is None:
dest = self.__file_path
return os.path.join(dest, f'{run_type}_{workload}_ODF_PVC_{odf_pvc}')
def __copy_yaml_files_to_dir(self, src: str, dest: str):
if os.path.isdir(dest):
shutil.rmtree(dest)
os.mkdir(dest)
if os.path.isdir(src):
for file in os.listdir(src):
if file.endswith('.yaml'):
with open(os.path.join(src, file), 'r') as r:
with open(os.path.join(dest, file), 'w') as w:
w.write(r.read())
def __generate_golden_yaml_files__(self, dest: str=None):
if not dest:
dest = self.__file_path
if os.path.isdir(dest):
shutil.rmtree(dest)
os.mkdir(dest)
for odf_pvc in 'True', 'False':
environment_variables.environment_variables_dict['odf_pvc'] = odf_pvc
for run_type in environment_variables.run_types_list:
environment_variables.environment_variables_dict['run_type'] = run_type
for workload in environment_variables.workloads_list:
environment_variables.environment_variables_dict['namespace'] = environment_variables.get_workload_namespace(workload)
template = TemplateOperations(workload)
srcdir = template.get_current_run_path()
self.__clear_directory_yaml(srcdir)
destdir = self.__generate_yaml_dir_name(run_type=run_type, workload=workload, odf_pvc=odf_pvc, dest=dest)
template.generate_yamls()
self.__copy_yaml_files_to_dir(src=srcdir, dest=destdir)
self.__clear_directory_yaml(srcdir)
# From https://stackoverflow.com/questions/4187564/recursively-compare-two-directories-to-ensure-they-have-the-same-files-and-subdi
def __compare_tree__(self, root1, root2, subdir: str):
"""
Compare two directories recursively. Files in each directory are
considered to be equal if their names and contents are equal.
        @param root1: root path of the first directory tree
        @param root2: root path of the second directory tree
        @param subdir: subdirectory, relative to both roots, that is compared
@return: [left_only, right_only, diff_files, funny_files]
"""
def canon_list(subdir: str, files: list):
return list(map(lambda f: os.path.join(subdir, f), files))
dir1 = os.path.join(root1, subdir)
dir2 = os.path.join(root2, subdir)
dirs_cmp = filecmp.dircmp(dir1, dir2)
left_only = canon_list(subdir, dirs_cmp.left_only)
right_only = canon_list(subdir, dirs_cmp.right_only)
diff_files = []
funny_files = []
(_, mismatch, errors) = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files, shallow=False)
diff_files.extend(canon_list(subdir, mismatch))
funny_files.extend(canon_list(subdir, errors))
for common_dir in dirs_cmp.common_dirs:
new_subdir = os.path.join(subdir, common_dir)
(sub_left_only, sub_right_only, sub_diff_files, sub_funny_files) = self.__compare_tree__(root1, root2, new_subdir)
left_only.extend(sub_left_only)
right_only.extend(sub_right_only)
diff_files.extend(sub_diff_files)
funny_files.extend(sub_funny_files)
return (left_only, right_only, diff_files, funny_files)
def __compare_golden_yaml_files__(self):
with tempfile.TemporaryDirectory() as tmpdir:
self.__generate_golden_yaml_files__(dest=tmpdir)
(left_only, right_only, diff_files, funny_files) = self.__compare_tree__(self.__file_path, tmpdir, '.')
for filename in diff_files:
print(f'\n{filename}:')
golden_file = os.path.join(self.__file_path, filename)
comparison_file = os.path.join(tmpdir, filename)
with open(golden_file) as golden:
golden_text = golden.readlines()
with open(comparison_file) as comparison:
comparison_text = comparison.readlines()
for line in difflib.unified_diff(golden_text, comparison_text, fromfile=golden_file, tofile=comparison_file, lineterm=''):
print(line, end='')
if len(left_only) > 0 or len(right_only) > 0 or len(diff_files) > 0 or len(funny_files) > 0:
raise GoldenFileCheckFailed(miscompare=diff_files, missing=left_only, unexpected=right_only, cannot_compare=funny_files)
return True
def generate_golden_files(self):
self.__generate_golden_yaml_files__()
def compare_golden_files(self):
return self.__compare_golden_yaml_files__()
| 51.170543 | 143 | 0.675049 |
1303ffdd964d03df83f636553af54b264ca69e0d | 987 | py | Python | public_tool/solve_on_outlier.py | mapicccy/Stock-Market-Trend-Analysis-Using-HMM-LSTM | 5be0f294208d727d5a3e28b0e99496d056102b4c | [
"MIT"
] | 147 | 2018-08-27T09:05:07.000Z | 2022-03-27T13:06:16.000Z | public_tool/solve_on_outlier.py | mapicccy/Stock-Market-Trend-Analysis-Using-HMM-LSTM | 5be0f294208d727d5a3e28b0e99496d056102b4c | [
"MIT"
] | null | null | null | public_tool/solve_on_outlier.py | mapicccy/Stock-Market-Trend-Analysis-Using-HMM-LSTM | 5be0f294208d727d5a3e28b0e99496d056102b4c | [
"MIT"
] | 70 | 2018-09-10T03:12:54.000Z | 2022-03-26T06:46:48.000Z | from public_tool.form_index import form_index
import numpy as np
from dataset_code.process_on_raw_data import fill_na
def solve_on_outlier(dataset, lengths):
"""
find the outlier data, and replace then by fill_na function
input:
dataset, array
lengths, list, record the length of chains
output:
dataset, array
"""
    n = 3  # values that differ from the mean by more than n standard deviations are treated as outliers
result = np.zeros(dataset.shape)
for i in range(len(lengths)):
begin_index, end_index = form_index(lengths, i)
for j in range(dataset.shape[1]):
temp = dataset[begin_index:end_index, j].copy()
if max(temp) > 4.5:
flag = 1
mean = np.mean(temp)
std = np.std(temp)
temp[np.logical_or(temp >= mean+n*std, temp <= mean-n*std)] = np.mean(temp)
# temp = fill_na(temp, 100)
result[begin_index:end_index, j] = temp
return result
| 31.83871 | 88 | 0.590679 |
f3989a04d403db4c62c0a70222e2fcd630d86e31 | 4,498 | py | Python | DRS-Calculate/component/analyse_weigh_ratio.py | monettoCoffee/DormRecommendSys | d5cf10827729f75056da20de0100bcf5c1414d41 | [
"Apache-2.0"
] | 1 | 2020-01-16T05:11:02.000Z | 2020-01-16T05:11:02.000Z | DRS-Calculate/component/analyse_weigh_ratio.py | Happy-hacker0/DormRecommendSys | a3d9c356afd70f1a49aa7eb632d0d8970f7da135 | [
"Apache-2.0"
] | null | null | null | DRS-Calculate/component/analyse_weigh_ratio.py | Happy-hacker0/DormRecommendSys | a3d9c356afd70f1a49aa7eb632d0d8970f7da135 | [
"Apache-2.0"
] | 2 | 2020-01-11T03:04:00.000Z | 2020-01-16T05:16:22.000Z | # coding=utf-8
from weigh.cosine_similarity import CosineSimilarity
import component.section_calculator as section_calculator
# Analyse the average similarity of the choice vectors
def direct_analyse(all_cluster_vector, question_info):
weight_ratio = {}
analyse_one_chosen_weight_ratio(all_cluster_vector, question_info, weight_ratio)
analyse_multi_chosen_weight_ratio(all_cluster_vector, question_info, weight_ratio)
analyse_section_chosen_weight_ratio(all_cluster_vector, question_info, weight_ratio)
for qid in weight_ratio:
weight_ratio[qid] = 1 - weight_ratio[qid]
return weight_ratio
def get_auto_analyse_question_qid(example_all_cluster_vector_element, question_info, chosen_key):
chosen_qid_list = []
for person_one_chosen_qid in example_all_cluster_vector_element[chosen_key]:
if question_info[person_one_chosen_qid]["auto_weight"] == 1:
chosen_qid_list.append(person_one_chosen_qid)
return chosen_qid_list
def analyse_one_chosen_weight_ratio(all_cluster_vector, question_info, weight_ratio):
one_chosen_qid_list = get_auto_analyse_question_qid(all_cluster_vector[0], question_info, "one_chosen")
for one_chosen_question_qid in one_chosen_qid_list:
one_chosen_qid_radio_max_number = -1
one_chosen_question_radio_dict = weight_ratio.get(one_chosen_question_qid, None)
if not one_chosen_question_radio_dict:
one_chosen_question_radio_dict = {}
weight_ratio[one_chosen_question_qid] = one_chosen_question_radio_dict
for cluster_vector in all_cluster_vector:
radio_index = cluster_vector["one_chosen"][one_chosen_question_qid]
radio_choice_times = one_chosen_question_radio_dict.get(radio_index, None)
if not radio_choice_times:
radio_choice_times = 0
radio_choice_times += 1
one_chosen_question_radio_dict[radio_index] = radio_choice_times
one_chosen_qid_radio_max_number = max(one_chosen_qid_radio_max_number, radio_choice_times)
weight_ratio[one_chosen_question_qid] = one_chosen_qid_radio_max_number / len(all_cluster_vector)
def analyse_multi_chosen_weight_ratio(all_cluster_vector, question_info, weight_ratio):
multi_chosen_qid_list = get_auto_analyse_question_qid(all_cluster_vector[0], question_info, "multi_chosen")
for multi_chosen_question_qid in multi_chosen_qid_list:
for cluster_vector in all_cluster_vector:
chosen_list = weight_ratio.get(multi_chosen_question_qid, None)
if not chosen_list:
chosen_list = []
weight_ratio[multi_chosen_question_qid] = chosen_list
chosen_list.append(cluster_vector["multi_chosen"][multi_chosen_question_qid])
for multi_chosen_question_qid in multi_chosen_qid_list:
distance = 0
calculate_count = 0
all_qid_chosen_vector = weight_ratio[multi_chosen_question_qid]
chosen_outside_index = len(all_qid_chosen_vector) - 1
while chosen_outside_index > 0:
chosen_inside_index = chosen_outside_index - 1
while chosen_inside_index > -1:
calculate_count += 1
distance += CosineSimilarity.distance(all_qid_chosen_vector[chosen_inside_index][0],
all_qid_chosen_vector[chosen_outside_index][0])
chosen_inside_index -= 1
chosen_outside_index -= 1
weight_ratio[multi_chosen_question_qid] = distance / calculate_count
def analyse_section_chosen_weight_ratio(all_cluster_vector, question_info, weight_ratio):
section_chosen_qid_list = get_auto_analyse_question_qid(all_cluster_vector[0], question_info, "section_chosen")
cluster_outside_index = len(all_cluster_vector) - 1
while cluster_outside_index > 0:
cluster_outside_section_chosen = all_cluster_vector[cluster_outside_index]["section_chosen"]
cluster_inside_index = cluster_outside_index - 1
while cluster_inside_index > -1:
cluster_inside_section_chosen = all_cluster_vector[cluster_inside_index]["section_chosen"]
for section_chosen_question_qid in section_chosen_qid_list:
section_calculator.time_section_coincide_calculate(
cluster_outside_section_chosen[section_chosen_question_qid],
cluster_inside_section_chosen[section_chosen_question_qid])
cluster_inside_index -= 1
cluster_outside_index -= 1
| 51.701149 | 115 | 0.747666 |
583bb6aed3f54c1165960fdc3233b6ff61225a98 | 357 | py | Python | projects/Digital-Clock-GUI/code.py | Kranthi-Guribilli/Python-Projects | 73059ba06079c14b022b0c80fbc6d031cddfbecb | [
"MIT"
] | 54 | 2021-11-03T08:54:50.000Z | 2022-01-09T19:16:52.000Z | projects/Digital-Clock-GUI/code.py | LeoLivs/Python-Projects | b681deba7220278ea8e37ec2865f8e1fb8ad4755 | [
"MIT"
] | 13 | 2021-10-31T05:01:01.000Z | 2022-01-08T13:48:07.000Z | projects/Digital-Clock-GUI/code.py | LeoLivs/Python-Projects | b681deba7220278ea8e37ec2865f8e1fb8ad4755 | [
"MIT"
] | 14 | 2021-10-30T20:17:50.000Z | 2022-01-09T14:15:13.000Z | from tkinter import *
from tkinter.ttk import *
from time import strftime
root = Tk()
root.title("Clock")
def time():
string = strftime("%H:%M:%S %p")
label.config(text=string)
label.after(100, time)
label = Label(root, font=("JetBrains Mono", 80), background="white", foreground="black")
label.pack(anchor="center")
time()
mainloop()
| 17 | 91 | 0.672269 |
cf3c4a11c49b31036af305c8fced93f927003dd4 | 3,124 | py | Python | tools/debugging/matrix/load_with_generate_messages.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | null | null | null | tools/debugging/matrix/load_with_generate_messages.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | 69 | 2020-07-21T05:49:21.000Z | 2022-03-08T18:09:44.000Z | tools/debugging/matrix/load_with_generate_messages.py | luehrsFred/raiden | a1b118ebe14badb1acd0744b2d7f2b39f8ba5313 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from gevent import monkey # isort:skip
monkey.patch_all() # isort:skip
import argparse
import os
import time
from dataclasses import dataclass
from typing import Iterator, List
from raiden.utils.nursery import Janitor, Nursery
CWD = os.path.dirname(os.path.abspath(__file__))
GENERATE_MESSAGES_SCRIPT = os.path.join(CWD, "generate_messages.py")
@dataclass
class Config:
logdir: str
sender_matrix_server_url: str
receiver_matrix_server_url: str
target_qty_of_chat_rooms: int
qty_of_new_rooms_per_iteration: int
concurrent_messages_per_room: int
wait_before_next_iteration: float
def batch_size(target: int, step: int) -> Iterator[int]:
iterations = target // step
for _ in range(iterations):
yield step
rest = target % step
if rest:
yield rest
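# Example: batch_size(25, 10) yields 10, 10, 5 (full batches first, then the remainder).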
def run(config: Config, nursery: Nursery) -> None:
for i, qty_of_rooms in enumerate(
batch_size(config.target_qty_of_chat_rooms, config.qty_of_new_rooms_per_iteration)
):
log_file = os.path.join(config.logdir, str(i))
script_args: List[str] = [
GENERATE_MESSAGES_SCRIPT,
"--concurrent-messages",
str(config.concurrent_messages_per_room),
"--chat-rooms",
str(qty_of_rooms),
log_file,
config.sender_matrix_server_url,
config.receiver_matrix_server_url,
]
nursery.exec_under_watch(script_args)
time.sleep(config.wait_before_next_iteration)
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--wait-before-next-iteration", type=int, default=60)
parser.add_argument("target_qty_of_chat_rooms", type=int, default=500)
parser.add_argument("qty_of_new_rooms_per_iteration", type=int, default=10)
parser.add_argument("concurrent_messages_per_room", type=int, default=50)
parser.add_argument("logdir", help="Directory used to save the script logs.")
parser.add_argument("server", help="Matrix server used by the sender user.")
parser.add_argument(
"server2",
help=(
"If provided, the server used by the receiever, otherwise the same "
"server as the sender is used."
),
default=None,
nargs="?",
)
args = parser.parse_args()
logdir = args.logdir
os.makedirs(logdir, exist_ok=True)
sender_matrix_server_url = args.server
receiver_matrix_server_url = args.server2 or args.server
config = Config(
logdir=logdir,
sender_matrix_server_url=sender_matrix_server_url,
receiver_matrix_server_url=receiver_matrix_server_url,
target_qty_of_chat_rooms=args.target_qty_of_chat_rooms,
qty_of_new_rooms_per_iteration=args.qty_of_new_rooms_per_iteration,
concurrent_messages_per_room=args.concurrent_messages_per_room,
wait_before_next_iteration=args.wait_before_next_iteration,
)
with Janitor() as nursery:
nursery.spawn_under_watch(run, config, nursery)
nursery.wait(timeout=None)
if __name__ == "__main__":
main()
| 30.330097 | 90 | 0.700384 |
319e456d46ea84cb7d42c5f77b036366de327390 | 152 | py | Python | yc199/730.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | yc199/730.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | yc199/730.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | S = input()
d = {}
for c in S:
d.setdefault(c, 0)
d[c] += 1
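# accept only if every character of S occurs exactly once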
if all(map(lambda x: x == 1, d.values())):
print('YES')
else:
print('NO')
| 12.666667 | 42 | 0.480263 |
663bfb7d16ff6a6250ffa8ec27bcb7d5e409cee7 | 249 | py | Python | flapp/ext/cli/__init__.py | ernane/flapp | b31eff9accce5d150a3b852dfb8c5394ed9f589a | [
"MIT"
] | null | null | null | flapp/ext/cli/__init__.py | ernane/flapp | b31eff9accce5d150a3b852dfb8c5394ed9f589a | [
"MIT"
] | 1 | 2020-08-30T20:48:05.000Z | 2020-08-30T20:48:57.000Z | flapp/ext/cli/__init__.py | ernane/flapp | b31eff9accce5d150a3b852dfb8c5394ed9f589a | [
"MIT"
] | null | null | null | from flapp.ext.db.commands import create_db, drop_db, populate_db
def init_app(app):
app.cli.add_command(app.cli.command()(create_db))
app.cli.add_command(app.cli.command()(drop_db))
app.cli.add_command(app.cli.command()(populate_db))
| 31.125 | 65 | 0.746988 |
b3b68a51645515acb249a0f8e9db72155f96984a | 8,185 | py | Python | lib/python3.8/site-packages/ansible_collections/community/general/plugins/connection/lxc.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | 5 | 2020-12-16T21:42:09.000Z | 2022-03-28T16:04:32.000Z | .ansible/collections/ansible_collections/community/general/plugins/connection/lxc.py | chronicc/proving-ground | 3e392122a05fb8383a3700954baebb0df330e9e3 | [
"MIT"
] | null | null | null | .ansible/collections/ansible_collections/community/general/plugins/connection/lxc.py | chronicc/proving-ground | 3e392122a05fb8383a3700954baebb0df330e9e3 | [
"MIT"
] | null | null | null | # (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
connection: lxc
short_description: Run tasks in lxc containers via lxc python library
description:
- Run commands or put/fetch files to an existing lxc container using lxc python library
options:
remote_addr:
description:
- Container identifier
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_lxc_host
executable:
default: /bin/sh
description:
- Shell executable
vars:
- name: ansible_executable
- name: ansible_lxc_executable
'''
import os
import shutil
import traceback
import select
import fcntl
import errno
HAS_LIBLXC = False
try:
import lxc as _lxc
HAS_LIBLXC = True
except ImportError:
pass
from ansible import constants as C
from ansible import errors
from ansible.module_utils._text import to_bytes, to_native
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
''' Local lxc based connections '''
transport = 'community.general.lxc'
has_pipelining = True
default_user = 'root'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.container_name = self._play_context.remote_addr
self.container = None
def _connect(self):
''' connect to the lxc; nothing to do here '''
super(Connection, self)._connect()
if not HAS_LIBLXC:
msg = "lxc bindings for python2 are not installed"
raise errors.AnsibleError(msg)
if self.container:
return
self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
self.container = _lxc.Container(self.container_name)
if self.container.state == "STOPPED":
raise errors.AnsibleError("%s is not running" % self.container_name)
def _communicate(self, pid, in_data, stdin, stdout, stderr):
buf = {stdout: [], stderr: []}
read_fds = [stdout, stderr]
if in_data:
write_fds = [stdin]
else:
write_fds = []
while len(read_fds) > 0 or len(write_fds) > 0:
try:
ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
for fd in ready_writes:
in_data = in_data[os.write(fd, in_data):]
if len(in_data) == 0:
write_fds.remove(fd)
for fd in ready_reads:
data = os.read(fd, 32768)
if not data:
read_fds.remove(fd)
buf[fd].append(data)
(pid, returncode) = os.waitpid(pid, 0)
return returncode, b"".join(buf[stdout]), b"".join(buf[stderr])
def _set_nonblocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
return fd
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command on the chroot '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
# python2-lxc needs bytes. python3-lxc needs text.
executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]
read_stdout, write_stdout = None, None
read_stderr, write_stderr = None, None
read_stdin, write_stdin = None, None
try:
read_stdout, write_stdout = os.pipe()
read_stderr, write_stderr = os.pipe()
kwargs = {
'stdout': self._set_nonblocking(write_stdout),
'stderr': self._set_nonblocking(write_stderr),
'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV
}
if in_data:
read_stdin, write_stdin = os.pipe()
kwargs['stdin'] = self._set_nonblocking(read_stdin)
self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
if pid == -1:
msg = "failed to attach to container %s" % self.container_name
raise errors.AnsibleError(msg)
write_stdout = os.close(write_stdout)
write_stderr = os.close(write_stderr)
if read_stdin:
read_stdin = os.close(read_stdin)
return self._communicate(pid,
in_data,
write_stdin,
read_stdout,
read_stderr)
finally:
fds = [read_stdout,
write_stdout,
read_stderr,
write_stderr,
read_stdin,
write_stdin]
for fd in fds:
if fd:
os.close(fd)
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
super(Connection, self).put_file(in_path, out_path)
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_path = to_bytes(out_path, errors='surrogate_or_strict')
if not os.path.exists(in_path):
msg = "file or module does not exist: %s" % in_path
raise errors.AnsibleFileNotFound(msg)
try:
src_file = open(in_path, "rb")
except IOError:
traceback.print_exc()
raise errors.AnsibleError("failed to open input file to %s" % in_path)
try:
def write_file(args):
with open(out_path, 'wb+') as dst_file:
shutil.copyfileobj(src_file, dst_file)
try:
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
msg = "failed to transfer file to %s" % out_path
raise errors.AnsibleError(msg)
finally:
src_file.close()
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
super(Connection, self).fetch_file(in_path, out_path)
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_path = to_bytes(out_path, errors='surrogate_or_strict')
try:
dst_file = open(out_path, "wb")
except IOError:
traceback.print_exc()
msg = "failed to open output file %s" % out_path
raise errors.AnsibleError(msg)
try:
def write_file(args):
try:
with open(in_path, 'rb') as src_file:
shutil.copyfileobj(src_file, dst_file)
finally:
# this is needed in the lxc child process
# to flush internal python buffers
dst_file.close()
try:
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
msg = "failed to transfer file from %s to %s" % (in_path, out_path)
raise errors.AnsibleError(msg)
finally:
dst_file.close()
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
| 35.742358 | 95 | 0.575809 |
4a10291c430e69e2696c7a5be61e579fe8f8b483 | 4,176 | py | Python | glomeruli_segmentation/api_interface.py | theodore-evans/hubmap-kidney-segmentation | 7842655d49080be28682a9cf0394bcbe53172ddc | [
"MIT"
] | null | null | null | glomeruli_segmentation/api_interface.py | theodore-evans/hubmap-kidney-segmentation | 7842655d49080be28682a9cf0394bcbe53172ddc | [
"MIT"
] | null | null | null | glomeruli_segmentation/api_interface.py | theodore-evans/hubmap-kidney-segmentation | 7842655d49080be28682a9cf0394bcbe53172ddc | [
"MIT"
] | null | null | null | from io import BytesIO
from logging import Logger
from typing import Tuple, Type, Union
import desert
from aiohttp import ClientSession, TCPConnector
from marshmallow import EXCLUDE
from PIL import Image
from glomeruli_segmentation.aiohttp_hooks import get_logging_hooks
from glomeruli_segmentation.data_classes import Rect, Tile, Wsi
from glomeruli_segmentation.logging_tools import get_logger
API_VERSION = "v0"
class LoggingClientSession(ClientSession):
    def __init__(self, logger: Logger, get_hooks: Tuple = get_logging_hooks, **kwargs):
super().__init__(**kwargs)
self.hooks = {"response": [get_hook(logger) for get_hook in get_hooks]}
async def _request(self, method, str_or_url, **kwargs):
r = await super()._request(method, str_or_url, **kwargs)
for hook in self.hooks["response"]:
await hook(r)
return r
# TODO: add methods for getting from /configuration and post/putting to /failure
class ApiInterface:
def __init__(self, api_url: str, job_id: str, headers: dict, logger: Logger = get_logger()):
self.logger = logger
self.api_url = api_url
self.job_id = job_id
self.headers = headers
self.session: LoggingClientSession = None
async def __aenter__(self):
self.session = LoggingClientSession(
connector=TCPConnector(keepalive_timeout=5, ssl=False, limit=10),
headers=self.headers,
logger=self.logger,
)
await self.session.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.session:
return await self.session.__aexit__(exc_type, exc_val, exc_tb)
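    # Intended usage (sketch): `async with ApiInterface(url, job_id, headers) as api: ...`,
    # so the underlying aiohttp session is opened and closed together with the interface.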
async def check_alive(self) -> dict:
"""
check if API is alive
"""
url = f"{self.api_url}/alive"
r = await self.session.get(url)
return await r.json()
async def get_input(self, key: str, input_type: Type) -> Union[Rect, Wsi]:
"""
fetch an input from API
"""
url = f"{self.api_url}/{API_VERSION}/{self.job_id}/inputs/{key}"
r = await self.session.get(url)
schema = desert.schema(input_type, meta={"unknown": EXCLUDE})
response = await r.json()
return schema.load(response)
async def post_output(self, key: str, data: dict) -> dict:
"""
post output data by key as defined in EAD
"""
url = f"{self.api_url}/{API_VERSION}/{self.job_id}/outputs/{key}"
r = await self.session.post(url, json=data)
return await r.json()
async def post_items_to_collection(self, collection: dict, items: list) -> dict:
"""
add items to an existing output collection
Parameters:
items: list of data elements
"""
url = f"{self.api_url}/{API_VERSION}/{self.job_id}/collections/{collection['id']}/items"
r = await self.session.post(url, json={"items": items})
items = await r.json()
return items["items"]
async def get_wsi_tile(self, slide: Wsi, rect: Rect) -> Tile:
"""
get a WSI tile on level 0
Parameters:
            slide: Wsi object with WSI id (and meta data)
            rect: Rect describing the tile position
"""
x, y = rect.upper_left
url = f"{self.api_url}/{API_VERSION}/{self.job_id}/regions/{slide.id}/level/{rect.level}/start/{x}/{y}/size/{rect.width}/{rect.height}"
r = await self.session.get(url)
content = await r.read()
return Tile(image=Image.open(BytesIO(content)), rect=rect)
async def put_finalize(self) -> dict:
"""
finalize job, such that no more data can be added and to inform EMPAIA infrastructure about job state
"""
url = f"{self.api_url}/{API_VERSION}/{self.job_id}/finalize"
r = await self.session.put(url)
return await r.json()
async def put_failure(self, message: str) -> dict:
url = f"{self.api_url}/{API_VERSION}/{self.job_id}/failure"
r = await self.session.put(url, json={"user_message": message.replace('"', "'")})
return await r.json()
| 34.8 | 143 | 0.633142 |
76c317dbbe83727d9f87ff338b8c386e0e7f70c9 | 3,154 | py | Python | tests/test_read_edges.py | genimind/graphene | 51d571fbbca19b3675b5f870c00c059912cd4f38 | [
"Apache-2.0"
] | null | null | null | tests/test_read_edges.py | genimind/graphene | 51d571fbbca19b3675b5f870c00c059912cd4f38 | [
"Apache-2.0"
] | null | null | null | tests/test_read_edges.py | genimind/graphene | 51d571fbbca19b3675b5f870c00c059912cd4f38 | [
"Apache-2.0"
] | null | null | null | import os
import unittest
import json
import networkx as nx
from graphene import graphgen
node_mapper_filename = './node_mapper.json'
edge_mapper_filename = './edge_mapper.json'
data_filename = './test_data.txt'
class TestReadEdges(unittest.TestCase):
def setUp(self):
self.node_mapper = None
self.edge_mapper = None
self.data = []
with open(node_mapper_filename) as f:
self.node_mapper = json.load(f)
with open(edge_mapper_filename) as f:
self.edge_mapper = json.load(f)
with open(data_filename) as f:
for item in f:
self.data.append(json.loads(item))
def test_genEdges(self):
g = nx.Graph()
g = graphgen.create_graph(g,
graph_mapper = self.edge_mapper,
data_provider = self.data, add_type_to_key = True)
# print('\nNODES1:', list(g.nodes(data = True)))
self.assertEqual(nx.number_of_nodes(g), 15)
self.assertEqual(nx.number_of_edges(g), 10)
# get node with key.
key1 = ('TypeA', 'a_val2_1')
key2 = ('TypeB', 'b_val2_1', 'b_val1_1')
key3 = ('TypeB', 'b_val2_3', 'b_val1_3')
key4 = ('TypeC', 'c_val1_3')
self.assertTrue(key1 in g.nodes)
self.assertTrue(key2 in g.nodes)
# print(g.node[key1])
# print(g.node[key2])
        # check edges with data
self.assertTrue(g.has_edge(key1, key2))
edge_data = g.get_edge_data(key1, key2)
self.assertTrue(edge_data != {})
# print('e1:', edge_data)
self.assertTrue(g.has_edge(key3, key4))
edge_data = g.get_edge_data(key3, key4)
self.assertTrue(edge_data != {})
# print('e2:', edge_data)
key5 = ('TypeC', 'ABCDEF') # invalid node key
self.assertFalse(key5 in g)
self.assertFalse(g.has_edge(key2, key5))
def test_genNodesAndEdges(self):
g = nx.Graph()
g = graphgen.create_graph(g,
graph_mapper = self.node_mapper,
data_provider = self.data, add_type_to_key = True)
g = graphgen.create_graph(g,
graph_mapper= self.edge_mapper,
data_provider= self.data, add_type_to_key= True)
# print('\nNODES2:', list(g.nodes(data = True)))
self.assertEqual(nx.number_of_nodes(g), 15)
self.assertEqual(nx.number_of_edges(g), 10)
# locate an edge
key1 = ('TypeA', 'a_val2_3')
key2 = ('TypeB', 'b_val2_3', 'b_val1_3')
self.assertTrue(g.has_node(key1))
self.assertTrue(g.has_node(key2))
self.assertTrue(key2 in g)
self.assertTrue(g.has_edge(key1, key2))
# check node data
node_data = g.nodes[key1]
self.assertTrue(node_data != {})
# print('node_data:', node_data)
# check edge data
edge_data = g.get_edge_data(key1, key2)
self.assertTrue(edge_data != {})
# the graph is bidirectional
self.assertTrue(g.has_edge(key2, key1))
# print('edge:', g.edges[(key2, key1)])
# if __name__ == '__main__':
# unittest.main()
| 36.252874 | 71 | 0.588776 |
76cbef243ec37973752412aaebf5b21a477af1b0 | 4,018 | py | Python | sector/protocols/pool_protocol.py | bithadder/sector-blockchain | ce63d162cd8c0c7c85ae64d6d6e8bede0a8675e6 | [
"Apache-2.0"
] | 13 | 2021-07-06T12:45:25.000Z | 2021-09-10T22:24:52.000Z | sector/protocols/pool_protocol.py | bithadder/sector-blockchain | ce63d162cd8c0c7c85ae64d6d6e8bede0a8675e6 | [
"Apache-2.0"
] | null | null | null | sector/protocols/pool_protocol.py | bithadder/sector-blockchain | ce63d162cd8c0c7c85ae64d6d6e8bede0a8675e6 | [
"Apache-2.0"
] | 6 | 2021-07-06T01:14:53.000Z | 2021-07-18T05:33:02.000Z | from dataclasses import dataclass
from enum import Enum
import time
from typing import Optional
from blspy import G1Element, G2Element
from sector.types.blockchain_format.proof_of_space import ProofOfSpace
from sector.types.blockchain_format.sized_bytes import bytes32
from sector.util.ints import uint8, uint16, uint32, uint64
from sector.util.streamable import Streamable, streamable
POOL_PROTOCOL_VERSION = uint8(1)
class PoolErrorCode(Enum):
REVERTED_SIGNAGE_POINT = 1
TOO_LATE = 2
NOT_FOUND = 3
INVALID_PROOF = 4
PROOF_NOT_GOOD_ENOUGH = 5
INVALID_DIFFICULTY = 6
INVALID_SIGNATURE = 7
SERVER_EXCEPTION = 8
INVALID_P2_SINGLETON_PUZZLE_HASH = 9
FARMER_NOT_KNOWN = 10
FARMER_ALREADY_KNOWN = 11
INVALID_AUTHENTICATION_TOKEN = 12
INVALID_PAYOUT_INSTRUCTIONS = 13
INVALID_SINGLETON = 14
DELAY_TIME_TOO_SHORT = 15
REQUEST_FAILED = 16
# Used to verify GET /farmer and GET /login
@dataclass(frozen=True)
@streamable
class AuthenticationPayload(Streamable):
method_name: str
launcher_id: bytes32
target_puzzle_hash: bytes32
authentication_token: uint64
# GET /pool_info
@dataclass(frozen=True)
@streamable
class GetPoolInfoResponse(Streamable):
name: str
logo_url: str
minimum_difficulty: uint64
relative_lock_height: uint32
protocol_version: uint8
fee: str
description: str
target_puzzle_hash: bytes32
authentication_token_timeout: uint8
# POST /partial
@dataclass(frozen=True)
@streamable
class PostPartialPayload(Streamable):
launcher_id: bytes32
authentication_token: uint64
proof_of_space: ProofOfSpace
sp_hash: bytes32
end_of_sub_slot: bool
harvester_id: bytes32
@dataclass(frozen=True)
@streamable
class PostPartialRequest(Streamable):
payload: PostPartialPayload
aggregate_signature: G2Element
# Response in success case
@dataclass(frozen=True)
@streamable
class PostPartialResponse(Streamable):
new_difficulty: uint64
# GET /farmer
# Response in success case
@dataclass(frozen=True)
@streamable
class GetFarmerResponse(Streamable):
authentication_public_key: G1Element
payout_instructions: str
current_difficulty: uint64
current_points: uint64
# POST /farmer
@dataclass(frozen=True)
@streamable
class PostFarmerPayload(Streamable):
launcher_id: bytes32
authentication_token: uint64
authentication_public_key: G1Element
payout_instructions: str
suggested_difficulty: Optional[uint64]
@dataclass(frozen=True)
@streamable
class PostFarmerRequest(Streamable):
payload: PostFarmerPayload
signature: G2Element
# Response in success case
@dataclass(frozen=True)
@streamable
class PostFarmerResponse(Streamable):
welcome_message: str
# PUT /farmer
@dataclass(frozen=True)
@streamable
class PutFarmerPayload(Streamable):
launcher_id: bytes32
authentication_token: uint64
authentication_public_key: Optional[G1Element]
payout_instructions: Optional[str]
suggested_difficulty: Optional[uint64]
@dataclass(frozen=True)
@streamable
class PutFarmerRequest(Streamable):
payload: PutFarmerPayload
signature: G2Element
# Response in success case
@dataclass(frozen=True)
@streamable
class PutFarmerResponse(Streamable):
authentication_public_key: Optional[bool]
payout_instructions: Optional[bool]
suggested_difficulty: Optional[bool]
# Misc
# Response in error case for all endpoints of the pool protocol
@dataclass(frozen=True)
@streamable
class ErrorResponse(Streamable):
error_code: uint16
error_message: Optional[str]
# Get the current authentication token according to "Farmer authentication" in SPECIFICATION.md
def get_current_authentication_token(timeout: uint8) -> uint64:
return uint64(int(int(time.time() / 60) / timeout))
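# Example: with timeout=5, minutes since the epoch are grouped into 5-minute windows,
# so the returned token only changes every five minutes.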
# Validate a given authentication token against our local time
def validate_authentication_token(token: uint64, timeout: uint8):
return abs(token - get_current_authentication_token(timeout)) <= timeout | 22.96 | 91 | 0.777501 |
5306769155050ba032364650daeecaa1581e609b | 4,209 | py | Python | build/android/pylib/base_test_sharder.py | junmin-zhu/chromium-rivertrail | eb1a57aca71fe68d96e48af8998dcfbe45171ee1 | [
"BSD-3-Clause"
] | 5 | 2018-03-10T13:08:42.000Z | 2021-07-26T15:02:11.000Z | build/android/pylib/base_test_sharder.py | sanyaade-mobiledev/chromium.src | d496dfeebb0f282468827654c2b3769b3378c087 | [
"BSD-3-Clause"
] | 1 | 2015-07-21T08:02:01.000Z | 2015-07-21T08:02:01.000Z | build/android/pylib/base_test_sharder.py | jianglong0156/chromium.src | d496dfeebb0f282468827654c2b3769b3378c087 | [
"BSD-3-Clause"
] | 6 | 2016-11-14T10:13:35.000Z | 2021-01-23T15:29:53.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import android_commands
import logging
import multiprocessing
from test_result import TestResults
def _ShardedTestRunnable(test):
"""Standalone function needed by multiprocessing.Pool."""
log_format = '[' + test.device + '] # %(asctime)-15s: %(message)s'
if logging.getLogger().handlers:
logging.getLogger().handlers[0].setFormatter(logging.Formatter(log_format))
else:
logging.basicConfig(format=log_format)
  # Handle SystemExit here since Python has a bug that exits the current process
try:
return test.Run()
except SystemExit:
return TestResults()
def SetTestsContainer(tests_container):
"""Sets tests container.
multiprocessing.Queue can't be pickled across processes, so we need to set
this as a 'global', per process, via multiprocessing.Pool.
"""
BaseTestSharder.tests_container = tests_container
class BaseTestSharder(object):
"""Base class for sharding tests across multiple devices.
Args:
attached_devices: A list of attached devices.
"""
# See more in SetTestsContainer.
tests_container = None
def __init__(self, attached_devices):
self.attached_devices = attached_devices
self.retries = 1
self.tests = []
def CreateShardedTestRunner(self, device, index):
"""Factory function to create a suite-specific test runner.
Args:
device: Device serial where this shard will run
index: Index of this device in the pool.
Returns:
An object of BaseTestRunner type (that can provide a "Run()" method).
"""
pass
def SetupSharding(self, tests):
"""Called before starting the shards."""
pass
def OnTestsCompleted(self, test_runners, test_results):
"""Notifies that we completed the tests."""
pass
def RunShardedTests(self):
"""Runs the tests in all connected devices.
Returns:
A TestResults object.
"""
logging.warning('*' * 80)
logging.warning('Sharding in ' + str(len(self.attached_devices)) +
' devices.')
logging.warning('Note that the output is not synchronized.')
logging.warning('Look for the "Final result" banner in the end.')
logging.warning('*' * 80)
final_results = TestResults()
for retry in xrange(self.retries):
logging.warning('Try %d of %d', retry + 1, self.retries)
self.SetupSharding(self.tests)
test_runners = []
for index, device in enumerate(self.attached_devices):
logging.warning('*' * 80)
logging.warning('Creating shard %d for %s', index, device)
logging.warning('*' * 80)
test_runner = self.CreateShardedTestRunner(device, index)
test_runners += [test_runner]
logging.warning('Starting...')
pool = multiprocessing.Pool(len(self.attached_devices),
SetTestsContainer,
[BaseTestSharder.tests_container])
# map can't handle KeyboardInterrupt exception. It's a python bug.
# So use map_async instead.
async_results = pool.map_async(_ShardedTestRunnable, test_runners)
results_lists = async_results.get(999999)
test_results = TestResults.FromTestResults(results_lists)
# Re-check the attached devices for some devices may
# become offline
retry_devices = set(android_commands.GetAttachedDevices())
# Remove devices that had exceptions.
retry_devices -= TestResults.DeviceExceptions(results_lists)
# Retry on devices that didn't have any exception.
self.attached_devices = list(retry_devices)
if (retry == self.retries - 1 or
len(self.attached_devices) == 0):
all_passed = final_results.ok + test_results.ok
final_results = test_results
final_results.ok = all_passed
break
else:
final_results.ok += test_results.ok
self.tests = []
for t in test_results.GetAllBroken():
self.tests += [t.name]
if not self.tests:
break
self.OnTestsCompleted(test_runners, final_results)
return final_results
| 33.943548 | 79 | 0.680209 |
6b4657d6104da5a8c7933b5450c48d240c01eec3 | 718 | py | Python | incubation-python/screenmanager/screen_manager.py | yk0242/incubation | f714ed8172aa290d3f13ff8b7f09f888a5b33640 | [
"MIT"
] | 1 | 2018-01-15T09:21:29.000Z | 2018-01-15T09:21:29.000Z | incubation-python/screenmanager/screen_manager.py | FredCoder/incubation | 93e2aba555e3d3a9c71739201f2ea0eb475c31dd | [
"MIT"
] | 19 | 2015-04-14T15:41:58.000Z | 2017-09-23T08:08:31.000Z | incubation-python/screenmanager/screen_manager.py | FredCoder/incubation | 93e2aba555e3d3a9c71739201f2ea0eb475c31dd | [
"MIT"
] | 2 | 2016-01-05T09:21:28.000Z | 2019-03-20T11:41:45.000Z | # -*- coding: utf-8 -*-
# test using kivy to transition between different screens
# cf https://kivy.org/docs/api-kivy.uix.screenmanager.html
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, NoTransition
# --- import screens for tops of subsystems to be used here ---
import b_system_screen
import shutdown_screen
# ---
# --- add screens to sm, the first being the top screen ---
sm = ScreenManager(transition=NoTransition())
sm.add_widget(b_system_screen.SystemScreen(name="sys-top"))
sm.add_widget(shutdown_screen.ShutdownScreen(name="sys-shutdown"))
# ---
class ShutdownScreenApp(App):
def build(self):
return sm
if __name__ == "__main__":
ShutdownScreenApp().run()
| 25.642857 | 66 | 0.735376 |
e77d2d15b328cdfd106ff6ac61f7933b60f2e580 | 12,506 | py | Python | object_detection_metrics/category.py | tetutaro/object_detection_metrics | b0b003ff3752bfd98b24614bb485cee652a1e863 | [
"MIT"
] | 2 | 2021-03-21T10:57:36.000Z | 2021-06-09T11:34:59.000Z | object_detection_metrics/category.py | tetutaro/object_detection_metrics | b0b003ff3752bfd98b24614bb485cee652a1e863 | [
"MIT"
] | null | null | null | object_detection_metrics/category.py | tetutaro/object_detection_metrics | b0b003ff3752bfd98b24614bb485cee652a1e863 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import annotations
from collections import defaultdict
import numpy as np
class Category(object):
'''the basis of evaluation per category id
Attributes:
category_id (int): category id of detected object
n_true (int): the number of ground truth bounding boxes
n_pred (int): the number of predicted bounding boxes
        n_img (int): the number of images which have a bounding box
of the category id (either ground truth or prediction)
tps (Dict[int, List[np.ndarray]]):
True-Positives (True-Positives and confidence score)
per IoU threshold.
            the keys are int(threshold * 100);
            the key 100 means IoU=0.50:0.95:0.05
aps (Dict[int, float]):
Average Precisions per IoU threshold.
            the keys are int(threshold * 100);
            the key 100 means IoU=0.50:0.95:0.05
'''
def __init__(
self: Category,
category_id: int,
) -> None:
'''initialize function of Category
Args:
category_id (int): category id of detected object
'''
# store given values
self.category_id = category_id
# initialize internal attributes
self.n_true = 0
self.n_pred = 0
self.n_img = 0
self.tps = defaultdict(list)
self.aps = defaultdict(float)
return
@staticmethod
def calc_iou(true: np.ndarray, pred: np.ndarray) -> np.ndarray:
'''calculate IoU (Intersection of Union)
* calc IoU of bounding boxes (ground truth and prediction) at once
* If number of prediction is N and number of ground truth is M,
this function produces N x M matrix (IoU matrix)
* bounding boxes must be written as (min_x, min_y, width, height)
Args:
true (np.ndarray): bounding boxes of ground truth (M x 4)
pred (np.ndarray): bounding boxes of prediction (N x 4)
Returns:
np.ndarray: IoU matrix (N x M)
'''
assert len(true.shape) == len(pred.shape) == 2
assert true.shape[1] == pred.shape[1] == 4
# convert xywh -> xyxy
true_xyxy = true.copy()
true_xyxy[:, 2:4] += true[:, 0:2]
pred_xyxy = pred.copy()
pred_xyxy[:, 2:4] += pred[:, 0:2]
        # expand bounding boxes to N x M x 4
ex_like = np.zeros((pred.shape[0], true.shape[0], pred.shape[1]))
ex_true = np.full_like(
ex_like, true_xyxy[np.newaxis, :, :], dtype=np.float
)
ex_pred = np.full_like(
ex_like, pred_xyxy[:, np.newaxis, :], dtype=np.float
)
        # calc the area of bounding boxes
area_true = (
ex_true[:, :, 2] - ex_true[:, :, 0]
) * (
ex_true[:, :, 3] - ex_true[:, :, 1]
)
area_pred = (
ex_pred[:, :, 2] - ex_pred[:, :, 0]
) * (
ex_pred[:, :, 3] - ex_pred[:, :, 1]
)
# calc intersections between ground truths and predictions
left_ups = np.maximum(ex_true[:, :, :2], ex_pred[:, :, :2])
right_downs = np.minimum(ex_true[:, :, 2:], ex_pred[:, :, 2:])
intersections = np.maximum(right_downs - left_ups, 0.0)
# calc area of intersection and union
area_inter = intersections[:, :, 0] * intersections[:, :, 1]
area_union = area_true + area_pred - area_inter
# calc IoU and return it
return np.maximum(
1.0 * area_inter / area_union,
np.finfo(np.float).eps
)
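    # Worked example: a ground truth (0, 0, 10, 10) and a prediction (5, 0, 10, 10) in
    # xywh overlap on a 5 x 10 strip, so calc_iou returns 50 / (100 + 100 - 50) = 1/3.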
@staticmethod
def calc_tp(iou: np.ndarray, threshold: float) -> np.ndarray:
'''calculate True-Positives from IoU matrix
* iou matrix represents IoU between ground truths and predictions
* the shape of iou matrix (N x M) shows
# of prediction is N and # of ground truth is M
* predictions of iou matrix has been sorted
in descending order of confidence score
* this function produces N x 1 matrix and its values are 0 or 1
* 1 means that its prediction is True-Positive
and 0 means that its prediction is False-Positive
* threshold is the minimum IoU that
the prediction considers to be true
* the ground truth is assigned from the prediction
which has higher confidence score
* the ground truth which has the highest IoU (and >= threshold)
among all (the rest) ground truths is assigned to the prediction
* the ground truth once assigned to the prediction is not assigned
to other predictions
* therefore, sum of # of True-Positives is less than or equal to
# of ground truths
Args:
iou (np.ndarray): IoU matrix (N x M)
threshold (float): IoU threshold
Returns:
np.ndarray: True-Positives (N x 1)
'''
masked = np.where(iou >= threshold, iou, 0)
for i in range(iou.shape[0]):
if masked[i, :].max() <= 0:
continue
ind = np.argmax(masked[i, :])
masked[i, :] = 0
masked[:, ind] = 0
masked[i, ind] = 1
return np.where(masked > 0, 1, 0).sum(axis=1)[:, np.newaxis]
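    # Worked example: with iou = [[0.6, 0.2], [0.55, 0.1]] and threshold 0.5, the first
    # (higher-confidence) prediction claims ground truth 0 and the second stays unmatched,
    # so the result is [[1], [0]].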
def append(
self: Category,
true: np.ndarray,
pred: np.ndarray
) -> None:
'''calc the basis of evaluation for each category id
* store # of ground truths, predictions
* calc IoU between ground truths and predictions
* calc True-Positives for each IoU threshold
* store it to self.tps
        * bounding boxes of prediction are sorted
in descending order of confidence score
at Prediction.to_ndarray()
Args:
true (np.ndarray): bounding boxes of ground truth (M x 4)
pred (np.ndarray):
bounding boxes of prediction (N x 5)
(pred[:, 4] is a list of confidence scores)
'''
assert true.shape[0] > 0 or pred.shape[0] > 0
# store # of ground truths, predictions
self.n_true += true.shape[0]
self.n_pred += pred.shape[0]
self.n_img += 1
# if # of predictions == 0, just count up # of ground truth
if pred.shape[0] == 0:
return
# if # of ground truth == 0, all predictions are False-Positive
if true.shape[0] == 0:
for i in range(10):
th_ind = 50 + (i * 5)
tp = np.zeros((pred.shape[0], 1), dtype=np.int)
tp = np.concatenate([tp, pred[:, 4:5]], axis=1)
self.tps[th_ind].append(tp)
return
# calc IoU between ground truths and predictions
iou = self.calc_iou(true=true[:, :4], pred=pred[:, :4])
# calc True-Positives for each IoU threshold
for i in range(10):
threshold = 0.5 + (i * 0.05)
th_ind = 50 + (i * 5)
# calc True-Positives at the IoU threshold
tp = self.calc_tp(iou=iou, threshold=threshold)
ntp = tp.sum()
assert ntp <= true.shape[0]
# unite True-Positives and confidence score
tp = np.concatenate([tp, pred[:, 4:5]], axis=1)
# store it
self.tps[th_ind].append(tp)
return
@staticmethod
def calc_auc(x: np.ndarray, y: np.ndarray) -> float:
'''calculate the area of interpolated curve
Args:
x (np.ndarray):
x-axis of interpolated curve.
to calc Average Precision, x is Recall.
to calc Average Recall, x is Precision.
y (np.ndarray):
y-axis of interpolated curve.
to calc Average Precision, y is Precision.
to calc Average Recall, y is Recall.
'''
area_points = list()
tmp_points = list(zip(x, y))
key_point = tmp_points[0]
# select the points to calc the area(area_points) from all points
# == interpolating the Precision-Recall/Recall-Precision curve
if len(tmp_points) == 1:
area_points.append(key_point)
else:
for i, tmp_point in enumerate(tmp_points[1:]):
if tmp_point[1] > key_point[1]:
# tmp_y > key_y
if tmp_point[0] < key_point[0]:
# tmp_x < key_x and tmp_y > key_y
# add key_point
area_points.append(key_point)
# update
key_point = tmp_point
if i == len(tmp_points) - 2:
# the last tmp_point
# add key_point
area_points.append(key_point)
# calc the area under the interpolated curve
auc = 0
base_x = 0
for area_point in area_points[::-1]:
auc += (area_point[0] - base_x) * area_point[1]
base_x = area_point[0]
return auc
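    # Worked example: points passed in descending-x order, e.g. (1.0, 0.4) then (0.5, 1.0),
    # interpolate to an area of 0.5 * 1.0 + 0.5 * 0.4 = 0.7.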
def calc_ap(self: Category, tps: np.ndarray) -> float:
'''calculate Average Precision
Args:
tps (np.ndarray):
True-Positives and confidence score (N x 2).
tps[:, 0] is True-Positive.
tps[:, 1] is confidence score.
it is just a concatenated value from multiple images
and so it has not been sorted by confidence score.
Returns:
float: Average Precision
'''
# sort in descending order of confidence score
tps = tps[np.argsort(tps[:, 1])[::-1]]
# calc accumulated True-Positives
acc_tp = np.cumsum(tps[:, 0])
# calc Precision and Recall
precision = acc_tp / np.array(list(range(1, self.n_pred + 1)))
recall = acc_tp / self.n_true
# calc Average Precision and Average Recall
ap = self.calc_auc(x=recall[::-1], y=precision[::-1])
return ap
def accumulate(self: Category) -> None:
'''calc Average Precision of each IoU threshold for each category id
* calc Average Precisions for each IoU threshold
* store it to self.aps
* calc Average Precision of IoU=0.50:0.95:0.05
* store it to self.aps, too
'''
# if # of ground truths == 0, Average Precision is 0 obviously
if self.n_true == 0:
for i in range(10):
th_ind = 50 + (i * 5)
self.aps[th_ind] = 0.0
self.aps[100] = 0.0
return
aps_all = list()
# calc Average Precision for each IoU threshold
for i in range(10):
# get tps(True-Positives and confidence score)
th_ind = 50 + (i * 5)
tps = self.tps[th_ind]
# if # of predictions == 0, Average Precision is 0 obviously
if len(tps) == 0:
self.aps[th_ind] = 0.0
aps_all.append(0.0)
continue
# unite tps(True-Positives and confidence score) of all images
tps = np.concatenate(tps, axis=0)
# calc Average Precision
ap = self.calc_ap(tps=tps)
# store it
self.aps[th_ind] = ap
aps_all.append(ap)
# calc Average Precision of IoU=0.50:0.95:0.05 and store it
self.aps[100] = np.array(aps_all).mean()
return
class CategoryTotal(Category):
'''class to calc micro mean Average Precision
Attributes:
n_true (int): the number of ground truth bounding boxes
n_pred (int): the number of predicted bounding boxes
n_img (int): the number of images
tps (Dict[int, List[np.ndarray]]):
True-Positives (True-Positives and confidence score)
per IoU threshold.
            the keys are int(threshold * 100);
            the key 100 means IoU=0.50:0.95:0.05
aps (Dict[int, float]):
micro mean Average Precisions per IoU threshold.
            the keys are int(threshold * 100);
            the key 100 means IoU=0.50:0.95:0.05
'''
def __init__(self: CategoryTotal):
'''initialize function of TotalCategories class
'''
self.n_true = 0
self.n_pred = 0
self.n_img = 0
self.tps = defaultdict(list)
self.aps = defaultdict(float)
return
| 38.361963 | 76 | 0.551655 |
c5be495fc1db83f6003233042814e93e6d6f115f | 2,609 | py | Python | ppfa/selenium_tests/management/commands/runtests.py | Goldcap/django-selenium-testing | ad6fc09eb4fd8a54d0532c4478add0753ead1d96 | [
"MIT"
] | null | null | null | ppfa/selenium_tests/management/commands/runtests.py | Goldcap/django-selenium-testing | ad6fc09eb4fd8a54d0532c4478add0753ead1d96 | [
"MIT"
] | null | null | null | ppfa/selenium_tests/management/commands/runtests.py | Goldcap/django-selenium-testing | ad6fc09eb4fd8a54d0532c4478add0753ead1d96 | [
"MIT"
] | null | null | null |
import sys
import os.path
import importlib
from inspect import getmembers, isclass
from collections import defaultdict
from optparse import make_option
from django.utils import timezone
from django.core.management.base import BaseCommand, CommandError
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db.models.base import ModelBase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from operis.log import log
from selenium_tests.models import PpfaTest, PpfaTestRun
#This command takes an input table of artifacts, of a specific format,
#And ensures that image attachments for each artifact in the table are created
#Then sets those images up to be parsed by the IKGEN.py
class Command(BaseCommand):
help = 'Selenium Test Runner'
logger = None
"""
option_list = BaseCommand.option_list + (
make_option('--regenerate',
action='store_true',
dest='regenerate',
default=False,
help='Wipe Prior instances'),
)
"""
def handle(self, *args, **options):
self.logger = log( self )
self.logger.log("Starting Tests for %s",["Python"],"info")
tests = PpfaTest.objects.all()
for test in tests:
self.logger.log("Found Test %s",[test.name],"debug")
run = PpfaTestRun.objects.create(ppfa_test=test,
date_created=timezone.now()
)
thetest = "selenium_tests.tests.%s" % test.location
module = importlib.import_module(thetest)
for name, obj in getmembers(module, lambda member: isclass(member) and member.__module__ == thetest):
self.logger.log("Found Test Class %s",[name],"notice")
try:
aclass = getattr(module,name)
object = aclass()
object.logger = self.logger
object.testObject = test
object.runObject = run
object.test()
except:
pass
self.logger.log("Had Errors: %s",[len(object.errors)],"notice")
if (len(object.errors) == 0):
test.status = True
run.status = True
run.save()
test.last_run = run.date_created
test.save()
#self.logger.log("No Test Found in %s",[name],"error") | 35.739726 | 113 | 0.576466 |
ceea16883e15eaaf54c1f69452cbe348041c8964 | 13,113 | py | Python | dataloader/pc_dataset.py | msc9533/Cylinder3D | 4441da83df3e897dd78a89383684c87f9644ed8d | [
"Apache-2.0"
] | 2 | 2021-02-15T12:25:33.000Z | 2021-04-13T13:11:38.000Z | dataloader/pc_dataset.py | msc9533/Cylinder3D | 4441da83df3e897dd78a89383684c87f9644ed8d | [
"Apache-2.0"
] | 1 | 2022-03-11T10:27:20.000Z | 2022-03-11T11:52:29.000Z | dataloader/pc_dataset.py | msc9533/Cylinder3D | 4441da83df3e897dd78a89383684c87f9644ed8d | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# author: Xinge
# @file: pc_dataset.py
import os
import numpy as np
from torch.utils import data
import yaml
import pickle
REGISTERED_PC_DATASET_CLASSES = {}
def register_dataset(cls, name=None):
global REGISTERED_PC_DATASET_CLASSES
if name is None:
name = cls.__name__
assert name not in REGISTERED_PC_DATASET_CLASSES, f"exist class: {REGISTERED_PC_DATASET_CLASSES}"
REGISTERED_PC_DATASET_CLASSES[name] = cls
return cls
def get_pc_model_class(name):
global REGISTERED_PC_DATASET_CLASSES
assert name in REGISTERED_PC_DATASET_CLASSES, f"available class: {REGISTERED_PC_DATASET_CLASSES}"
return REGISTERED_PC_DATASET_CLASSES[name]
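# Usage sketch: the dataset classes below register themselves via @register_dataset,
# so e.g. get_pc_model_class("SemKITTI_sk") returns the SemKITTI_sk class.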
@register_dataset
class SemKITTI_demo(data.Dataset):
def __init__(self, data_path, imageset='demo',
return_ref=True, label_mapping="semantic-kitti.yaml", demo_label_path=None):
with open(label_mapping, 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
self.learning_map = semkittiyaml['learning_map']
self.imageset = imageset
self.return_ref = return_ref
self.im_idx = []
self.im_idx += absoluteFilePaths(data_path)
self.label_idx = []
if self.imageset == 'val':
print(demo_label_path)
self.label_idx += absoluteFilePaths(demo_label_path)
def __len__(self):
'Denotes the total number of samples'
return len(self.im_idx)
def __getitem__(self, index):
raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
if self.imageset == 'demo':
annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
elif self.imageset == 'val':
annotated_data = np.fromfile(self.label_idx[index], dtype=np.uint32).reshape((-1, 1))
annotated_data = annotated_data & 0xFFFF # delete high 16 digits binary
annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
data_tuple = (raw_data[:, :3], annotated_data.astype(np.uint8))
if self.return_ref:
data_tuple += (raw_data[:, 3],)
return data_tuple
@register_dataset
class SemKITTI_sk(data.Dataset):
def __init__(self, data_path, imageset='train',
return_ref=False, label_mapping="semantic-kitti.yaml", nusc=None):
self.return_ref = return_ref
with open(label_mapping, 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
self.learning_map = semkittiyaml['learning_map']
self.imageset = imageset
if imageset == 'train':
split = semkittiyaml['split']['train']
elif imageset == 'val':
split = semkittiyaml['split']['valid']
elif imageset == 'test':
split = semkittiyaml['split']['test']
else:
raise Exception('Split must be train/val/test')
self.im_idx = []
for i_folder in split:
self.im_idx += absoluteFilePaths('/'.join([data_path, str(i_folder).zfill(2), 'velodyne']))
def __len__(self):
'Denotes the total number of samples'
return len(self.im_idx)
def __getitem__(self, index):
raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
if self.imageset == 'test':
annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
else:
annotated_data = np.fromfile(self.im_idx[index].replace('velodyne', 'labels')[:-3] + 'label',
dtype=np.uint32).reshape((-1, 1))
annotated_data = annotated_data & 0xFFFF # delete high 16 digits binary
annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
data_tuple = (raw_data[:, :3], annotated_data.astype(np.uint8))
if self.return_ref:
data_tuple += (raw_data[:, 3],)
return data_tuple
@register_dataset
class SemKITTI_nusc(data.Dataset):
def __init__(self, data_path, imageset='train',
return_ref=False, label_mapping="nuscenes.yaml", nusc=None):
self.return_ref = return_ref
with open(imageset, 'rb') as f:
data = pickle.load(f)
with open(label_mapping, 'r') as stream:
nuscenesyaml = yaml.safe_load(stream)
self.learning_map = nuscenesyaml['learning_map']
self.nusc_infos = data['infos']
self.data_path = data_path
self.nusc = nusc
def __len__(self):
'Denotes the total number of samples'
return len(self.nusc_infos)
def __getitem__(self, index):
info = self.nusc_infos[index]
lidar_path = info['lidar_path'][16:]
lidar_sd_token = self.nusc.get('sample', info['token'])['data']['LIDAR_TOP']
lidarseg_labels_filename = os.path.join(self.nusc.dataroot,
self.nusc.get('lidarseg', lidar_sd_token)['filename'])
points_label = np.fromfile(lidarseg_labels_filename, dtype=np.uint8).reshape([-1, 1])
points_label = np.vectorize(self.learning_map.__getitem__)(points_label)
points = np.fromfile(os.path.join(self.data_path, lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])
data_tuple = (points[:, :3], points_label.astype(np.uint8))
if self.return_ref:
data_tuple += (points[:, 3],)
return data_tuple
def absoluteFilePaths(directory):
for dirpath, _, filenames in os.walk(directory):
filenames.sort()
for f in filenames:
yield os.path.abspath(os.path.join(dirpath, f))
def SemKITTI2train(label):
if isinstance(label, list):
return [SemKITTI2train_single(a) for a in label]
else:
return SemKITTI2train_single(label)
def SemKITTI2train_single(label):
remove_ind = label == 0
label -= 1
label[remove_ind] = 255
return label
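# Shifts all labels down by one and maps the original class 0 (unlabeled) to 255,
# presumably the ignore index used during training.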
from os.path import join
@register_dataset
class SemKITTI_sk_multiscan(data.Dataset):
def __init__(self, data_path, imageset='train',return_ref=False, label_mapping="semantic-kitti-multiscan.yaml"):
self.return_ref = return_ref
with open(label_mapping, 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
self.learning_map = semkittiyaml['learning_map']
self.imageset = imageset
self.data_path = data_path
if imageset == 'train':
split = semkittiyaml['split']['train']
elif imageset == 'val':
split = semkittiyaml['split']['valid']
elif imageset == 'test':
split = semkittiyaml['split']['test']
else:
raise Exception('Split must be train/val/test')
multiscan = 2 # additional two frames are fused with target-frame. Hence, 3 point clouds in total
self.multiscan = multiscan
self.im_idx = []
self.calibrations = []
self.times = []
self.poses = []
self.load_calib_poses()
for i_folder in split:
self.im_idx += absoluteFilePaths('/'.join([data_path, str(i_folder).zfill(2), 'velodyne']))
def __len__(self):
'Denotes the total number of samples'
return len(self.im_idx)
def load_calib_poses(self):
"""
load calib poses and times.
"""
###########
# Load data
###########
self.calibrations = []
self.times = []
self.poses = []
for seq in range(0, 22):
seq_folder = join(self.data_path, str(seq).zfill(2))
# Read Calib
self.calibrations.append(self.parse_calibration(join(seq_folder, "calib.txt")))
# Read times
self.times.append(np.loadtxt(join(seq_folder, 'times.txt'), dtype=np.float32))
# Read poses
poses_f64 = self.parse_poses(join(seq_folder, 'poses.txt'), self.calibrations[-1])
self.poses.append([pose.astype(np.float32) for pose in poses_f64])
def parse_calibration(self, filename):
""" read calibration file with given filename
Returns
-------
dict
Calibration matrices as 4x4 numpy arrays.
"""
calib = {}
calib_file = open(filename)
for line in calib_file:
key, content = line.strip().split(":")
values = [float(v) for v in content.strip().split()]
pose = np.zeros((4, 4))
pose[0, 0:4] = values[0:4]
pose[1, 0:4] = values[4:8]
pose[2, 0:4] = values[8:12]
pose[3, 3] = 1.0
calib[key] = pose
calib_file.close()
return calib
def parse_poses(self, filename, calibration):
""" read poses file with per-scan poses from given filename
Returns
-------
list
list of poses as 4x4 numpy arrays.
"""
file = open(filename)
poses = []
Tr = calibration["Tr"]
Tr_inv = np.linalg.inv(Tr)
for line in file:
values = [float(v) for v in line.strip().split()]
pose = np.zeros((4, 4))
pose[0, 0:4] = values[0:4]
pose[1, 0:4] = values[4:8]
pose[2, 0:4] = values[8:12]
pose[3, 3] = 1.0
poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr)))
return poses
def fuse_multi_scan(self, points, pose0, pose):
# pose = poses[0][idx]
hpoints = np.hstack((points[:, :3], np.ones_like(points[:, :1])))
# new_points = hpoints.dot(pose.T)
new_points = np.sum(np.expand_dims(hpoints, 2) * pose.T, axis=1)
new_points = new_points[:, :3]
new_coords = new_points - pose0[:3, 3]
# new_coords = new_coords.dot(pose0[:3, :3])
new_coords = np.sum(np.expand_dims(new_coords, 2) * pose0[:3, :3], axis=1)
new_coords = np.hstack((new_coords, points[:, 3:]))
return new_coords
def __getitem__(self, index):
raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
origin_len = len(raw_data)
if self.imageset == 'test':
annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
else:
annotated_data = np.fromfile(self.im_idx[index].replace('velodyne', 'labels')[:-3] + 'label',
dtype=np.int32).reshape((-1, 1))
annotated_data = annotated_data & 0xFFFF # delete high 16 digits binary
# annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
number_idx = int(self.im_idx[index][-10:-4])
dir_idx = int(self.im_idx[index][-22:-20])
pose0 = self.poses[dir_idx][number_idx]
if number_idx - self.multiscan >= 0:
for fuse_idx in range(self.multiscan):
plus_idx = fuse_idx + 1
pose = self.poses[dir_idx][number_idx - plus_idx]
newpath2 = self.im_idx[index][:-10] + str(number_idx - plus_idx).zfill(6) + self.im_idx[index][-4:]
raw_data2 = np.fromfile(newpath2, dtype=np.float32).reshape((-1, 4))
if self.imageset == 'test':
annotated_data2 = np.expand_dims(np.zeros_like(raw_data2[:, 0], dtype=int), axis=1)
else:
annotated_data2 = np.fromfile(newpath2.replace('velodyne', 'labels')[:-3] + 'label',
dtype=np.int32).reshape((-1, 1))
annotated_data2 = annotated_data2 & 0xFFFF # delete high 16 digits binary
raw_data2 = self.fuse_multi_scan(raw_data2, pose0, pose)
if len(raw_data2) != 0:
raw_data = np.concatenate((raw_data, raw_data2), 0)
annotated_data = np.concatenate((annotated_data, annotated_data2), 0)
annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
data_tuple = (raw_data[:, :3], annotated_data.astype(np.uint8))
if self.return_ref:
data_tuple += (raw_data[:, 3], origin_len) # origin_len is used to indicate the length of target-scan
return data_tuple
# load Semantic KITTI class info
def get_SemKITTI_label_name(label_mapping):
with open(label_mapping, 'r') as stream:
semkittiyaml = yaml.safe_load(stream)
SemKITTI_label_name = dict()
for i in sorted(list(semkittiyaml['learning_map'].keys()))[::-1]:
SemKITTI_label_name[semkittiyaml['learning_map'][i]] = semkittiyaml['labels'][i]
return SemKITTI_label_name
def get_nuScenes_label_name(label_mapping):
with open(label_mapping, 'r') as stream:
nuScenesyaml = yaml.safe_load(stream)
nuScenes_label_name = dict()
for i in sorted(list(nuScenesyaml['learning_map'].keys()))[::-1]:
val_ = nuScenesyaml['learning_map'][i]
nuScenes_label_name[val_] = nuScenesyaml['labels_16'][val_]
return nuScenes_label_name
| 35.633152 | 116 | 0.603371 |
e4609305a33f4cbe54e4d101234c42087475214b | 1,091 | py | Python | setup.py | eyeseast/wumb-to-sqlite | c8ea909b4bf90cadc6ef60b3cd548246a97933c7 | [
"Apache-2.0"
] | 2 | 2020-10-15T20:58:00.000Z | 2020-10-16T13:35:13.000Z | setup.py | eyeseast/wumb-to-sqlite | c8ea909b4bf90cadc6ef60b3cd548246a97933c7 | [
"Apache-2.0"
] | 4 | 2020-10-12T15:45:28.000Z | 2020-10-16T14:00:37.000Z | setup.py | eyeseast/wumb-to-sqlite | c8ea909b4bf90cadc6ef60b3cd548246a97933c7 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
import os
VERSION = "0.1"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
) as fp:
return fp.read()
setup(
name="wumb-to-sqlite",
description="Scrape WUMB playlists to SQLite",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Chris Amico",
url="https://github.com/eyeseast/wumb-to-sqlite",
project_urls={
"Issues": "https://github.com/eyeseast/wumb-to-sqlite/issues",
"CI": "https://github.com/eyeseast/wumb-to-sqlite/actions",
"Changelog": "https://github.com/eyeseast/wumb-to-sqlite/releases",
},
license="Apache License, Version 2.0",
version=VERSION,
packages=["wumb_to_sqlite"],
entry_points="""
[console_scripts]
wumb-to-sqlite=wumb_to_sqlite.cli:cli
""",
install_requires=["click", "beautifulsoup4", "httpx", "pytz", "sqlite-utils"],
extras_require={"test": ["pytest"]},
tests_require=["wumb-to-sqlite[test]"],
)
| 29.486486 | 82 | 0.654445 |
0f88066befb88ec5f7060c44b0d2dfbf2b36b17c | 188 | py | Python | Examples/FactorialExample.py | SymmetricChaos/FiniteFields | 65258e06b7f04ce15223c1bc0c2384ef5e9cec1a | [
"MIT"
] | 1 | 2021-08-22T15:03:59.000Z | 2021-08-22T15:03:59.000Z | Examples/FactorialExample.py | SymmetricChaos/NumberTheory | 65258e06b7f04ce15223c1bc0c2384ef5e9cec1a | [
"MIT"
] | null | null | null | Examples/FactorialExample.py | SymmetricChaos/NumberTheory | 65258e06b7f04ce15223c1bc0c2384ef5e9cec1a | [
"MIT"
] | null | null | null | from Combinatorics import falling_factorial, rising_factorial
x = 5
n = 5
print(f"falling_fac({x},{n}) = {falling_factorial(x,n)}")
print(f"rising_fac({x},{n}) = {rising_factorial(x,n)}") | 31.333333 | 61 | 0.712766 |
1d656fad86ca3d019c99b4bea726e6df3734ec85 | 8,527 | py | Python | src/confluent/azext_confluent/vendored_sdks/confluent/aio/operations/_marketplace_agreement_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 2 | 2021-06-05T17:51:26.000Z | 2021-11-17T11:17:56.000Z | src/confluent/azext_confluent/vendored_sdks/confluent/aio/operations/_marketplace_agreement_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 3 | 2020-05-27T20:16:26.000Z | 2020-07-23T19:46:49.000Z | src/confluent/azext_confluent/vendored_sdks/confluent/aio/operations/_marketplace_agreement_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 5 | 2020-09-08T22:46:48.000Z | 2020-11-08T14:54:35.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MarketplaceAgreementOperations:
"""MarketplaceAgreementOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~confluent_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["models.ConfluentAgreementResourceListResponse"]:
"""List Confluent marketplace agreements in the subscription.
List Confluent marketplace agreements in the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ConfluentAgreementResourceListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~confluent_management_client.models.ConfluentAgreementResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ConfluentAgreementResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ConfluentAgreementResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ResourceProviderDefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Confluent/agreements'} # type: ignore
async def create(
self,
properties: Optional["models.ConfluentAgreementProperties"] = None,
**kwargs
) -> "models.ConfluentAgreementResource":
"""Create Confluent Marketplace agreement in the subscription.
Create Confluent Marketplace agreement in the subscription.
:param properties: Represents the properties of the resource.
:type properties: ~confluent_management_client.models.ConfluentAgreementProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConfluentAgreementResource, or the result of cls(response)
:rtype: ~confluent_management_client.models.ConfluentAgreementResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ConfluentAgreementResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
body = models.ConfluentAgreementResource(properties=properties)
api_version = "2020-03-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'ConfluentAgreementResource')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConfluentAgreementResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Confluent/agreements/default'} # type: ignore
| 47.904494 | 133 | 0.677495 |
383197e41e6220f2efe2beb00311ff0dae2083a5 | 5,059 | bzl | Python | tools/ts-api-guardian/index.bzl | ddeka2910/angular | 39573769c698409b6a4279acec1a86e0686740a4 | [
"MIT"
] | 2 | 2018-02-26T04:06:30.000Z | 2020-02-12T21:59:09.000Z | tools/ts-api-guardian/index.bzl | ddeka2910/angular | 39573769c698409b6a4279acec1a86e0686740a4 | [
"MIT"
] | 369 | 2021-01-20T05:54:20.000Z | 2022-03-25T21:49:16.000Z | tools/ts-api-guardian/index.bzl | ddeka2910/angular | 39573769c698409b6a4279acec1a86e0686740a4 | [
"MIT"
] | 1 | 2017-02-21T00:57:05.000Z | 2017-02-21T00:57:05.000Z | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs ts_api_guardian
"""
load("@build_bazel_rules_nodejs//:index.bzl", "nodejs_binary", "nodejs_test")
COMMON_MODULE_IDENTIFIERS = ["angular", "jasmine", "protractor", "Symbol"]
def ts_api_guardian_test(
name,
golden,
actual,
data = [],
strip_export_pattern = [],
allow_module_identifiers = COMMON_MODULE_IDENTIFIERS,
use_angular_tag_rules = True,
**kwargs):
"""Runs ts_api_guardian
"""
data += [
# Locally we need to add the TS build target
# But it will replaced to @npm//ts-api-guardian when publishing
"@angular//tools/ts-api-guardian:lib",
# BEGIN-INTERNAL
"@angular//tools/ts-api-guardian:bin",
# END-INTERNAL
# The below are required during runtime
"@npm//chalk",
"@npm//diff",
"@npm//minimist",
"@npm//typescript",
]
args = [
# Needed so that node doesn't walk back to the source directory.
# From there, the relative imports would point to .ts files.
"--node_options=--preserve-symlinks",
# Since version 3, monkey-patch the implementation of require() in NodeJS is opt-in
# https://github.com/bazelbuild/rules_nodejs/wiki#--bazel_patch_module_resolver-now-defaults-to-false-2324
"--bazel_patch_module_resolver",
]
for i in strip_export_pattern:
# Quote the regexp before passing it via the command line.
quoted_pattern = "\"%s\"" % i
args += ["--stripExportPattern", quoted_pattern]
for i in allow_module_identifiers:
args += ["--allowModuleIdentifiers", i]
if use_angular_tag_rules:
args += ["--useAngularTagRules"]
nodejs_test(
name = name,
data = data,
entry_point = Label("@angular//tools/ts-api-guardian:bin/ts-api-guardian"),
tags = kwargs.pop("tags", []) + ["api_guard"],
templated_args = args + ["--verify", golden, actual],
**kwargs
)
nodejs_binary(
name = name + ".accept",
testonly = True,
data = data,
entry_point = Label("@angular//tools/ts-api-guardian:bin/ts-api-guardian"),
tags = kwargs.pop("tags", []) + ["api_guard"],
templated_args = args + ["--out", golden, actual],
**kwargs
)
def ts_api_guardian_test_npm_package(
name,
goldenDir,
actualDir,
data = [],
strip_export_pattern = ["^ɵ(?!ɵdefineInjectable|ɵinject|ɵInjectableDef)"],
allow_module_identifiers = COMMON_MODULE_IDENTIFIERS,
use_angular_tag_rules = True,
**kwargs):
"""Runs ts_api_guardian
"""
data += [
# Locally we need to add the TS build target
# But it will replaced to @npm//ts-api-guardian when publishing
"@angular//tools/ts-api-guardian:lib",
"@angular//tools/ts-api-guardian:bin",
# The below are required during runtime
"@npm//chalk",
"@npm//diff",
"@npm//minimist",
"@npm//typescript",
]
args = [
# Needed so that node doesn't walk back to the source directory.
# From there, the relative imports would point to .ts files.
"--node_options=--preserve-symlinks",
# We automatically discover the entrypoints for our NPM package.
"--autoDiscoverEntrypoints",
]
for i in strip_export_pattern:
# Quote the regexp before passing it via the command line.
quoted_pattern = "\"%s\"" % i
args += ["--stripExportPattern", quoted_pattern]
for i in allow_module_identifiers:
args += ["--allowModuleIdentifiers", i]
if use_angular_tag_rules:
args += ["--useAngularTagRules"]
nodejs_test(
name = name,
data = data,
entry_point = "@angular//tools/ts-api-guardian:bin/ts-api-guardian",
tags = kwargs.pop("tags", []) + ["api_guard"],
templated_args = args + ["--autoDiscoverEntrypoints", "--verifyDir", goldenDir, "--rootDir", "$(rlocation %s)" % actualDir],
**kwargs
)
nodejs_binary(
name = name + ".accept",
testonly = True,
data = data,
entry_point = "@angular//tools/ts-api-guardian:bin/ts-api-guardian",
tags = kwargs.pop("tags", []) + ["api_guard"],
templated_args = args + ["--autoDiscoverEntrypoints", "--outDir", goldenDir, "--rootDir", "$(rlocation %s)" % actualDir],
**kwargs
)
| 34.650685 | 132 | 0.616723 |
12a65c3450b774bf2f2dd40aa1ef080a980e4d82 | 6,822 | py | Python | py/test/selenium/webdriver/common/w3c_interaction_tests.py | stevepiercy/selenium | 4464ac4f8230150824f6bf2e4075cd1f53a648c7 | [
"Apache-2.0"
] | 4 | 2021-07-04T00:18:58.000Z | 2021-11-17T11:10:02.000Z | py/test/selenium/webdriver/common/w3c_interaction_tests.py | stevepiercy/selenium | 4464ac4f8230150824f6bf2e4075cd1f53a648c7 | [
"Apache-2.0"
] | 5 | 2021-08-21T19:28:26.000Z | 2022-02-27T16:03:09.000Z | py/test/selenium/webdriver/common/w3c_interaction_tests.py | stevepiercy/selenium | 4464ac4f8230150824f6bf2e4075cd1f53a648c7 | [
"Apache-2.0"
] | 3 | 2017-10-19T04:57:07.000Z | 2020-12-08T19:57:45.000Z | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.actions.action_builder import ActionBuilder
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
def test_should_be_able_to_get_pointer_and_keyboard_inputs(driver, pages):
actions = ActionBuilder(driver)
pointers = actions.pointer_inputs
keyboards = actions.key_inputs
assert pointers is not None
assert keyboards is not None
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote
def testSendingKeysToActiveElementWithModifier(driver, pages):
pages.load("formPage.html")
e = driver.find_element(By.ID, "working")
e.click()
actions = ActionBuilder(driver)
key_action = actions.key_action
key_action.key_down(Keys.SHIFT) \
.send_keys("abc") \
.key_up(Keys.SHIFT)
actions.perform()
assert "ABC" == e.get_attribute('value')
@pytest.mark.xfail_firefox
@pytest.mark.xfail_remote
def test_can_create_pause_action_on_keyboard(driver, pages):
# If we don't get an error and takes less than 3 seconds to run, we are good
import datetime
start = datetime.datetime.now()
actions1 = ActionBuilder(driver)
key_actions = actions1.key_action
key_actions.pause(1)
actions1.perform()
finish = datetime.datetime.now()
assert (finish - start).seconds <= 3
# Add a filler step
actions2 = ActionBuilder(driver)
key_action = actions2.key_action
key_action.pause()
actions2.perform()
@pytest.mark.xfail_firefox
def test_can_create_pause_action_on_pointer(driver, pages):
# If we don't get an error and takes less than 3 seconds to run, we are good
import datetime
start = datetime.datetime.now()
actions1 = ActionBuilder(driver)
key_actions = actions1.pointer_action
key_actions.pause(1)
actions1.perform()
finish = datetime.datetime.now()
assert (finish - start).seconds <= 3
# Add a filler step
actions2 = ActionBuilder(driver)
key_action = actions2.pointer_action
key_action.pause()
actions2.perform()
@pytest.mark.xfail_firefox
def test_can_clear_actions(driver, pages):
actions = ActionBuilder(driver)
actions.clear_actions()
@pytest.mark.xfail_firefox
def test_move_and_click(driver, pages):
pages.load("javascriptPage.html")
toClick = driver.find_element(By.ID, "clickField")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.move_to(toClick) \
.click()
actions.perform()
assert "Clicked" == toClick.get_attribute('value')
@pytest.mark.xfail_firefox
def testDragAndDrop(driver, pages):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
element_available_timeout = 15
wait = WebDriverWait(driver, element_available_timeout)
pages.load("droppableItems.html")
wait.until(lambda dr: _isElementAvailable(driver, "draggable"))
if not _isElementAvailable(driver, "draggable"):
raise AssertionError("Could not find draggable element after 15 seconds.")
toDrag = driver.find_element(By.ID, "draggable")
dropInto = driver.find_element(By.ID, "droppable")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.click_and_hold(toDrag) \
.move_to(dropInto)\
.release()
actions.perform()
dropInto = driver.find_element(By.ID, "droppable")
text = dropInto.find_element(By.TAG_NAME, "p").text
assert "Dropped!" == text
@pytest.mark.xfail_firefox
def test_context_click(driver, pages):
pages.load("javascriptPage.html")
toContextClick = driver.find_element(By.ID, "doubleClickField")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.context_click(toContextClick)
actions.perform()
assert "ContextClicked" == toContextClick.get_attribute('value')
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_remote(reason="Fails on Travis")
@pytest.mark.xfail_chrome(reason="Fails on Travis")
def test_double_click(driver, pages):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
pages.load("javascriptPage.html")
toDoubleClick = driver.find_element(By.ID, "doubleClickField")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.double_click(toDoubleClick)
actions.perform()
assert "DoubleClicked" == toDoubleClick.get_attribute('value')
@pytest.mark.xfail_firefox
def test_dragging_element_with_mouse_moves_it_to_another_list(driver, pages):
_performDragAndDropWithMouse(driver, pages)
dragInto = driver.find_element(By.ID, "sortable1")
assert 6 == len(dragInto.find_elements(By.TAG_NAME, "li"))
@pytest.mark.xfail_firefox
def test_dragging_element_with_mouse_fires_events(driver, pages):
_performDragAndDropWithMouse(driver, pages)
dragReporter = driver.find_element(By.ID, "dragging_reports")
assert "Nothing happened. DragOut DropIn RightItem 3" == dragReporter.text
def _performDragAndDropWithMouse(driver, pages):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
pages.load("draggableLists.html")
dragReporter = driver.find_element(By.ID, "dragging_reports")
toDrag = driver.find_element(By.ID, "rightitem-3")
dragInto = driver.find_element(By.ID, "sortable1")
actions = ActionBuilder(driver)
pointer = actions.pointer_action
pointer.click_and_hold(toDrag) \
.move_to(driver.find_element(By.ID, "leftitem-4")) \
.move_to(dragInto) \
.release()
assert "Nothing happened." == dragReporter.text
actions.perform()
assert "Nothing happened. DragOut" in dragReporter.text
def _isElementAvailable(driver, id):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
try:
driver.find_element(By.ID, id)
return True
except Exception:
return False
| 32.028169 | 82 | 0.737614 |
122036a741ff1da37293d7a35167c0a2862422de | 3,638 | py | Python | Python/matplotlib/plots.py | lcbendall/numerical_computing | 565cde92525ea44c55abe933c6419c1543f9800b | [
"CC-BY-3.0"
] | null | null | null | Python/matplotlib/plots.py | lcbendall/numerical_computing | 565cde92525ea44c55abe933c6419c1543f9800b | [
"CC-BY-3.0"
] | null | null | null | Python/matplotlib/plots.py | lcbendall/numerical_computing | 565cde92525ea44c55abe933c6419c1543f9800b | [
"CC-BY-3.0"
] | 1 | 2020-12-08T01:19:23.000Z | 2020-12-08T01:19:23.000Z | import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import matplotlib.pyplot as plt
import matplotlib.widgets as wg
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from mayavi import mlab
import solutions
png_size = (1024, 768)
def exp_plot():
x = np.linspace(-2, 3, 501)
y = np.exp(x)
plt.plot(x, y)
plt.savefig("exp_plot.pdf")
plt.clf()
def statemachine():
x = np.linspace(1, 10, 10)
y = np.random.rand(10, 10)
plt.cla()
for n in y:
plt.plot(x, n)
plt.savefig("statemachine.pdf")
plt.clf()
def subplots():
x = np.linspace(-np.pi, np.pi, 400)
y1 = np.sin(x)
y2 = np.cos(x)
plt.subplot(211)
plt.plot(x, y1)
plt.subplot(212)
plt.plot(x, y2)
plt.savefig("subplots.pdf")
plt.clf()
def sinxsiny():
n = 401
x = np.linspace(-6, 6, n)
y = np.linspace(-6, 6, n)
X, Y = np.meshgrid(x, y) # returns a coordinate matrix given coordinate vectors.
C = np.sin(X) * np.sin(Y)
plt.pcolormesh(X, Y, C, edgecolors='face', shading='flat')
plt.savefig("sinxsiny.png", size=png_size)
plt.clf()
def pcolor2():
R = np.linspace(0, 2, 401)
I = R.copy()
R, I = np.meshgrid(R, I)
X = R + complex(0,1)*I
f = np.poly1d([1, 2, -1, 3])
Y = np.absolute(f(X))
plt.pcolormesh(R, I, Y, edgecolors='face', shading='flat')
plt.savefig('pcolor2.png', size=png_size)
plt.clf()
def three_d_plot():
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.linspace(-6, 6, 301)
y = x.copy()
X, Y = np.meshgrid(x, y)
Z = np.sin(X)*np.sin(Y)
ax.plot_surface(X, Y, Z)
plt.savefig("3dplot.pdf")
plt.clf()
def interact():
ax = plt.subplot(111)
plt.subplots_adjust(bottom=.25)
t = np.arange(0, 1, .001)
a0, f0 = 5, 3
s = a0*np.sin(2*np.pi*f0*t)
l = plt.plot(t, s)[0]
plt.axis([0, 1, -10, 10])
axfreq = plt.axes([.25, .05, .65, .03])
axamp = plt.axes([.25, .1, .65, .03])
sfreq = wg.Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = wg.Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
def update(val):
amp = samp.val
freq = sfreq.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
plt.draw()
sfreq.on_changed(update)
samp.on_changed(update)
plt.savefig("interact.pdf")
plt.clf()
def plot3d():
num = np.pi/1000
pts = np.arange(0, 2*np.pi + num, num)
x = np.cos(pts) * (1 + np.cos(pts*6))
y = np.sin(pts) * (1 + np.cos(pts*6))
z = np.sin(pts*6/11)
mlab.plot3d(x, y, z)
mlab.savefig("plot3d.png", size=png_size)
mlab.clf()
def points3d():
pts = np.linspace(0, 4 * np.pi, 30)
x = np.sin(2 * pts)
y = np.cos(pts)
z = np.cos(2 * pts)
s = 2+np.sin(pts)
mlab.points3d(x, y, z, s, colormap="cool", scale_factor=.15)
mlab.savefig("points3d.png", size=png_size)
mlab.clf()
def GrandCanyon():
f = solutions.problem8()
mlab.savefig("GrandCanyon.png", size=png_size)
mlab.clf()
def fancymesh():
mlab.savefig('fancymesh.png', size=png_size, figure=mlab.test_fancy_mesh())
mlab.clf()
def prob3_solution():
f = solutions.problem3()
plt.savefig('soln3.pdf')
plt.clf()
def prob2_solution():
f = solutions.problem2()
plt.savefig('soln2.pdf')
plt.clf()
if __name__ == "__main__":
exp_plot()
statemachine()
subplots()
interact()
three_d_plot()
sinxsiny()
pcolor2()
plot3d()
points3d()
fancymesh()
GrandCanyon()
prob3_solution()
prob2_solution()
| 23.320513 | 85 | 0.575591 |
26f0914af0d99d6abe19ffa0702c74d57292d5e9 | 15,804 | py | Python | urwid/treetools.py | DarkPurpleShadow/ConnectFour | af766cb0f6324c735c8c7f5f9161574859edbcdb | [
"BSD-3-Clause"
] | 39 | 2015-04-09T12:55:25.000Z | 2022-01-09T17:56:39.000Z | urwid/treetools.py | DarkPurpleShadow/ConnectFour | af766cb0f6324c735c8c7f5f9161574859edbcdb | [
"BSD-3-Clause"
] | null | null | null | urwid/treetools.py | DarkPurpleShadow/ConnectFour | af766cb0f6324c735c8c7f5f9161574859edbcdb | [
"BSD-3-Clause"
] | 13 | 2015-12-17T21:56:26.000Z | 2019-06-01T18:22:02.000Z | #!/usr/bin/python
#
# Generic TreeWidget/TreeWalker class
# Copyright (c) 2010 Rob Lanphier
# Copyright (C) 2004-2010 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
"""
Urwid tree view
Features:
- custom selectable widgets for trees
- custom list walker for displaying widgets in a tree fashion
"""
import urwid
from urwid.wimp import SelectableIcon
class TreeWidgetError(RuntimeError):
pass
class TreeWidget(urwid.WidgetWrap):
"""A widget representing something in a nested tree display."""
indent_cols = 3
unexpanded_icon = SelectableIcon('+', 0)
expanded_icon = SelectableIcon('-', 0)
def __init__(self, node):
self._node = node
self._innerwidget = None
self.is_leaf = not hasattr(node, 'get_first_child')
self.expanded = True
widget = self.get_indented_widget()
self.__super.__init__(widget)
def selectable(self):
"""
Allow selection of non-leaf nodes so children may be (un)expanded
"""
return not self.is_leaf
def get_indented_widget(self):
widget = self.get_inner_widget()
if not self.is_leaf:
widget = urwid.Columns([('fixed', 1,
[self.unexpanded_icon, self.expanded_icon][self.expanded]),
widget], dividechars=1)
indent_cols = self.get_indent_cols()
return urwid.Padding(widget,
width=('relative', 100), left=indent_cols)
def update_expanded_icon(self):
"""Update display widget text for parent widgets"""
# icon is first element in columns indented widget
self._w.base_widget.widget_list[0] = [
self.unexpanded_icon, self.expanded_icon][self.expanded]
def get_indent_cols(self):
return self.indent_cols * self.get_node().get_depth()
def get_inner_widget(self):
if self._innerwidget is None:
self._innerwidget = self.load_inner_widget()
return self._innerwidget
def load_inner_widget(self):
return urwid.Text(self.get_display_text())
def get_node(self):
return self._node
def get_display_text(self):
return (self.get_node().get_key() + ": " +
str(self.get_node().get_value()))
def next_inorder(self):
"""Return the next TreeWidget depth first from this one."""
# first check if there's a child widget
firstchild = self.first_child()
if firstchild is not None:
return firstchild
# now we need to hunt for the next sibling
thisnode = self.get_node()
nextnode = thisnode.next_sibling()
depth = thisnode.get_depth()
while nextnode is None and depth > 0:
# keep going up the tree until we find an ancestor next sibling
thisnode = thisnode.get_parent()
nextnode = thisnode.next_sibling()
depth -= 1
assert depth == thisnode.get_depth()
if nextnode is None:
# we're at the end of the tree
return None
else:
return nextnode.get_widget()
def prev_inorder(self):
"""Return the previous TreeWidget depth first from this one."""
thisnode = self._node
prevnode = thisnode.prev_sibling()
if prevnode is not None:
# we need to find the last child of the previous widget if its
# expanded
prevwidget = prevnode.get_widget()
lastchild = prevwidget.last_child()
if lastchild is None:
return prevwidget
else:
return lastchild
else:
# need to hunt for the parent
depth = thisnode.get_depth()
if prevnode is None and depth == 0:
return None
elif prevnode is None:
prevnode = thisnode.get_parent()
return prevnode.get_widget()
def keypress(self, size, key):
"""Handle expand & collapse requests (non-leaf nodes)"""
if self.is_leaf:
return key
if key in ("+", "right"):
self.expanded = True
self.update_expanded_icon()
elif key == "-":
self.expanded = False
self.update_expanded_icon()
elif self._w.selectable():
return self.__super.keypress(size, key)
else:
return key
def mouse_event(self, size, event, button, col, row, focus):
if self.is_leaf or event != 'mouse press' or button!=1:
return False
if row == 0 and col == self.get_indent_cols():
self.expanded = not self.expanded
self.update_expanded_icon()
return True
return False
def first_child(self):
"""Return first child if expanded."""
if self.is_leaf or not self.expanded:
return None
else:
if self._node.has_children():
firstnode = self._node.get_first_child()
return firstnode.get_widget()
else:
return None
def last_child(self):
"""Return last child if expanded."""
if self.is_leaf or not self.expanded:
return None
else:
if self._node.has_children():
lastchild = self._node.get_last_child().get_widget()
else:
return None
# recursively search down for the last descendant
lastdescendant = lastchild.last_child()
if lastdescendant is None:
return lastchild
else:
return lastdescendant
class TreeNode(object):
"""
Store tree contents and cache TreeWidget objects.
A TreeNode consists of the following elements:
* key: accessor token for parent nodes
* value: subclass-specific data
* parent: a TreeNode which contains a pointer back to this object
* widget: The widget used to render the object
"""
def __init__(self, value, parent=None, key=None, depth=None):
self._key = key
self._parent = parent
self._value = value
self._depth = depth
self._widget = None
def get_widget(self, reload=False):
""" Return the widget for this node."""
if self._widget is None or reload == True:
self._widget = self.load_widget()
return self._widget
def load_widget(self):
return TreeWidget(self)
def get_depth(self):
if self._depth is None and self._parent is None:
self._depth = 0
elif self._depth is None:
self._depth = self._parent.get_depth() + 1
return self._depth
def get_index(self):
if self.get_depth() == 0:
return None
else:
key = self.get_key()
parent = self.get_parent()
return parent.get_child_index(key)
def get_key(self):
return self._key
def set_key(self, key):
self._key = key
def change_key(self, key):
self.get_parent().change_child_key(self._key, key)
def get_parent(self):
if self._parent is None and self.get_depth() > 0:
self._parent = self.load_parent()
return self._parent
def load_parent(self):
"""Provide TreeNode with a parent for the current node. This function
is only required if the tree was instantiated from a child node
(virtual function)"""
raise TreeWidgetError("virtual function. Implement in subclass")
def get_value(self):
return self._value
def is_root(self):
return self.get_depth() == 0
def next_sibling(self):
if self.get_depth() > 0:
return self.get_parent().next_child(self.get_key())
else:
return None
def prev_sibling(self):
if self.get_depth() > 0:
return self.get_parent().prev_child(self.get_key())
else:
return None
def get_root(self):
root = self
while root.get_parent() is not None:
root = root.get_parent()
return root
class ParentNode(TreeNode):
"""Maintain sort order for TreeNodes."""
def __init__(self, value, parent=None, key=None, depth=None):
TreeNode.__init__(self, value, parent=parent, key=key, depth=depth)
self._child_keys = None
self._children = {}
def get_child_keys(self, reload=False):
"""Return a possibly ordered list of child keys"""
if self._child_keys is None or reload == True:
self._child_keys = self.load_child_keys()
return self._child_keys
def load_child_keys(self):
"""Provide ParentNode with an ordered list of child keys (virtual
function)"""
raise TreeWidgetError("virtual function. Implement in subclass")
def get_child_widget(self, key):
"""Return the widget for a given key. Create if necessary."""
child = self.get_child_node(key)
return child.get_widget()
def get_child_node(self, key, reload=False):
"""Return the child node for a given key. Create if necessary."""
if key not in self._children or reload == True:
self._children[key] = self.load_child_node(key)
return self._children[key]
def load_child_node(self, key):
"""Load the child node for a given key (virtual function)"""
raise TreeWidgetError("virtual function. Implement in subclass")
def set_child_node(self, key, node):
"""Set the child node for a given key. Useful for bottom-up, lazy
population of a tree."""
self._children[key]=node
def change_child_key(self, oldkey, newkey):
if newkey in self._children:
raise TreeWidgetError("%s is already in use" % newkey)
self._children[newkey] = self._children.pop(oldkey)
self._children[newkey].set_key(newkey)
def get_child_index(self, key):
try:
return self.get_child_keys().index(key)
except ValueError:
errorstring = ("Can't find key %s in ParentNode %s\n" +
"ParentNode items: %s")
raise TreeWidgetError(errorstring % (key, self.get_key(),
str(self.get_child_keys())))
def next_child(self, key):
"""Return the next child node in index order from the given key."""
index = self.get_child_index(key)
# the given node may have just been deleted
if index is None:
return None
index += 1
child_keys = self.get_child_keys()
if index < len(child_keys):
# get the next item at same level
return self.get_child_node(child_keys[index])
else:
return None
def prev_child(self, key):
"""Return the previous child node in index order from the given key."""
index = self.get_child_index(key)
if index is None:
return None
child_keys = self.get_child_keys()
index -= 1
if index >= 0:
# get the previous item at same level
return self.get_child_node(child_keys[index])
else:
return None
def get_first_child(self):
"""Return the first TreeNode in the directory."""
child_keys = self.get_child_keys()
return self.get_child_node(child_keys[0])
def get_last_child(self):
"""Return the last TreeNode in the directory."""
child_keys = self.get_child_keys()
return self.get_child_node(child_keys[-1])
def has_children(self):
"""Does this node have any children?"""
return len(self.get_child_keys())>0
class TreeWalker(urwid.ListWalker):
"""ListWalker-compatible class for displaying TreeWidgets
positions are TreeNodes."""
def __init__(self, start_from):
"""start_from: TreeNode with the initial focus."""
self.focus = start_from
def get_focus(self):
widget = self.focus.get_widget()
return widget, self.focus
def set_focus(self, focus):
self.focus = focus
self._modified()
def get_next(self, start_from):
widget = start_from.get_widget()
target = widget.next_inorder()
if target is None:
return None, None
else:
return target, target.get_node()
def get_prev(self, start_from):
widget = start_from.get_widget()
target = widget.prev_inorder()
if target is None:
return None, None
else:
return target, target.get_node()
class TreeListBox(urwid.ListBox):
"""A ListBox with special handling for navigation and
collapsing of TreeWidgets"""
def keypress(self, size, key):
key = self.__super.keypress(size, key)
return self.unhandled_input(size, key)
def unhandled_input(self, size, input):
"""Handle macro-navigation keys"""
if input == 'left':
self.move_focus_to_parent(size)
elif input == '-':
self.collapse_focus_parent(size)
elif input == 'home':
self.focus_home(size)
elif input == 'end':
self.focus_end(size)
else:
return input
def collapse_focus_parent(self, size):
"""Collapse parent directory."""
widget, pos = self.body.get_focus()
self.move_focus_to_parent(size)
pwidget, ppos = self.body.get_focus()
if pos != ppos:
self.keypress(size, "-")
def move_focus_to_parent(self, size):
"""Move focus to parent of widget in focus."""
widget, pos = self.body.get_focus()
parentpos = pos.get_parent()
if parentpos is None:
return
middle, top, bottom = self.calculate_visible( size )
row_offset, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
for widget, pos, rows in fill_above:
row_offset -= rows
if pos == parentpos:
self.change_focus(size, pos, row_offset)
return
self.change_focus(size, pos.get_parent())
def focus_home(self, size):
"""Move focus to very top."""
widget, pos = self.body.get_focus()
rootnode = pos.get_root()
self.change_focus(size, rootnode)
def focus_end( self, size ):
"""Move focus to far bottom."""
maxrow, maxcol = size
widget, pos = self.body.get_focus()
rootnode = pos.get_root()
rootwidget = rootnode.get_widget()
lastwidget = rootwidget.last_child()
lastnode = lastwidget.get_node()
self.change_focus(size, lastnode, maxrow-1)
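# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the library): wires the
# classes above together for a small dict-backed tree. The DictParentNode
# name and the sample data are mine; running it requires a real terminal.
if __name__ == "__main__":
    sample = {"leaf 1": 1, "branch": {"leaf 2": 2, "leaf 3": 3}}

    class DictParentNode(ParentNode):
        """ParentNode over a plain dict: child keys are the dict keys."""

        def load_child_keys(self):
            return list(self.get_value().keys())

        def load_child_node(self, key):
            value = self.get_value()[key]
            childclass = DictParentNode if isinstance(value, dict) else TreeNode
            return childclass(value, parent=self, key=key,
                              depth=self.get_depth() + 1)

    topnode = DictParentNode(sample, key="root")
    urwid.MainLoop(TreeListBox(TreeWalker(topnode))).run()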
| 32.451745 | 79 | 0.596495 |
64f76a02eaf50bf1783dac86a25d6f8ec431e9eb | 2,316 | py | Python | setup.py | atlascope/atlascope-api | 8738d20836d3a4f449896215b5fee7c3cf2c7356 | [
"Apache-2.0"
] | 1 | 2022-01-11T22:47:36.000Z | 2022-01-11T22:47:36.000Z | setup.py | atlascope/atlascope | 8738d20836d3a4f449896215b5fee7c3cf2c7356 | [
"Apache-2.0"
] | 80 | 2021-12-08T22:18:44.000Z | 2022-03-31T22:56:48.000Z | setup.py | atlascope/atlascope-api | 8738d20836d3a4f449896215b5fee7c3cf2c7356 | [
"Apache-2.0"
] | 1 | 2021-12-13T19:30:57.000Z | 2021-12-13T19:30:57.000Z | from pathlib import Path
from setuptools import find_packages, setup
readme_file = Path(__file__).parent / 'README.md'
if readme_file.exists():
with readme_file.open() as f:
long_description = f.read()
else:
# When this is first installed in development Docker, README.md is not available
long_description = ''
setup(
name='atlascope',
version='0.1.0',
description='',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache 2.0',
author='Kitware, Inc.',
author_email='kitware@kitware.com',
keywords='',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django :: 3.0',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python',
],
python_requires='>=3.8',
packages=find_packages(),
include_package_data=True,
install_requires=[
'aiohttp',
'celery',
'django>=3.2,<4.0',
'django-allauth',
'django-configurations[database,email]',
'django-extensions',
'django-filter',
'django-oauth-toolkit',
'djangorestframework',
'drf-yasg',
'django-click',
'importlib_metadata>=3.6',
'jsonschema',
'large-image[gdal,ometiff,pil]>=1.14',
'django-large-image>=0.5.2',
'scikit-learn',
'imagecodecs',
'matplotlib',
# Production-only
'django-composed-configuration[prod]>=0.18',
'gunicorn',
'numpy',
'pillow',
'requests',
# manual override until https://github.com/girder/large_image/pull/799
'pylibtiff',
# pylibtiff depends on this but it is not listed in its dependencies
'bitarray',
],
extras_require={
'dev': [
'pooch',
'tqdm', # for progress bar in pooch
'django-composed-configuration[dev]>=0.18',
'django-debug-toolbar',
'ipython',
'tox',
]
},
)
| 29.316456 | 84 | 0.576425 |
ad34a8993a59e9bc5fe3f8eaab295ceeee4fe906 | 4,505 | py | Python | util/git-pre-commit.py | Jihaoyun/gem5 | c52195e8304b5571008eab050fc9cc38ba91107c | [
"BSD-3-Clause"
] | 3 | 2017-04-09T07:35:57.000Z | 2018-12-12T07:30:13.000Z | util/git-pre-commit.py | Jihaoyun/gem5 | c52195e8304b5571008eab050fc9cc38ba91107c | [
"BSD-3-Clause"
] | null | null | null | util/git-pre-commit.py | Jihaoyun/gem5 | c52195e8304b5571008eab050fc9cc38ba91107c | [
"BSD-3-Clause"
] | 3 | 2018-03-04T18:01:33.000Z | 2019-08-23T22:30:37.000Z | #!/usr/bin/env python
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from tempfile import TemporaryFile
import os
import subprocess
import sys
from style.repo import GitRepo
from style.verifiers import all_verifiers, all_regions
from style.style import StdioUI, check_ignores
import argparse
parser = argparse.ArgumentParser(
description="gem5 git style checker hook")
parser.add_argument("--verbose", "-v", action="store_true",
help="Produce verbose output")
args = parser.parse_args()
git = GitRepo()
opts = {}
repo_base = git.repo_base()
ui = StdioUI()
os.chdir(repo_base)
failing_files = set()
staged_mismatch = set()
for status, fname in git.status(filter="MA", cached=True):
if args.verbose:
print "Checking %s..." % fname
if check_ignores(fname):
continue
if status == "M":
regions = git.staged_regions(fname)
else:
regions = all_regions
# Fetch the staged version of the file from the git index and dump it to a file
status = git.file_from_index(fname)
f = TemporaryFile()
f.write(status)
verifiers = [ v(ui, opts, base=repo_base) for v in all_verifiers ]
for v in verifiers:
f.seek(0)
# It is preferred that the first check is silent as it is in the
# staged file. If the check fails, then we will do it non-silently
# on the current file, reporting meaningful shortcomings
if not v.skip(fname) and v.check(fname, regions, fobj=f, silent=True):
failing_files.add(fname)
if not v.check(fname, regions):
staged_mismatch.add(fname)
f.close()
if failing_files:
if len(failing_files) > len(staged_mismatch):
print >> sys.stderr
print >> sys.stderr, "Style checker failed for the following files:"
for f in failing_files:
if f not in staged_mismatch:
print >> sys.stderr, "\t%s" % f
print >> sys.stderr
print >> sys.stderr, \
"Please run the style checker manually to fix the offending files.\n" \
"To check your modifications, run: util/style.py -m"
print >> sys.stderr
if staged_mismatch:
print >> sys.stderr, \
"It looks like you have forgotten to stage your fixes for commit in\n"\
"the following files: "
for f in staged_mismatch:
print >> sys.stderr, "\t%s" % f
print >> sys.stderr, "Please `git --add' them"
sys.exit(1)
| 38.504274 | 79 | 0.71343 |
0ae7d6f69f737627718c8adefd7864a1311752cb | 359 | py | Python | mlbase/utils/images.py | n-kats/mlbase | 7d69f259dcaf9608a921523083458fa6d0d6914b | [
"MIT"
] | null | null | null | mlbase/utils/images.py | n-kats/mlbase | 7d69f259dcaf9608a921523083458fa6d0d6914b | [
"MIT"
] | 2 | 2018-09-23T18:39:01.000Z | 2018-09-24T18:02:21.000Z | mlbase/utils/images.py | n-kats/mlbase | 7d69f259dcaf9608a921523083458fa6d0d6914b | [
"MIT"
] | null | null | null | from mlbase.lazy import numpy as np
def tiled_image(images: "np.ndarray"):
n, h, w, ch = images.shape
sq = int(np.ceil(np.sqrt(n)))
output = np.zeros([sq * h, sq * w, ch], dtype=np.uint8)
for i, image in enumerate(images):
x = i % sq
y = i // sq
output[h * y:h * (y + 1), w * x:w * (x + 1)] = image
return output
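# Illustrative usage sketch (not part of the library): tile ten random 8x8
# RGB thumbnails into one square mosaic; ceil(sqrt(10)) = 4, so the result is
# 32x32x3 with six empty (black) cells.
if __name__ == "__main__":
    batch = np.random.randint(0, 256, size=(10, 8, 8, 3), dtype=np.uint8)
    print(tiled_image(batch).shape)  # (32, 32, 3)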
| 27.615385 | 60 | 0.534819 |
7024cf9d9bbe851dfd74e9528b9e4a9d7391870e | 3,337 | py | Python | xskillscore/tests/test_skipna_functionality.py | blackary/xskillscore | 2754d60a87dd0d5d3b2e1c727873520754391389 | [
"Apache-2.0"
] | null | null | null | xskillscore/tests/test_skipna_functionality.py | blackary/xskillscore | 2754d60a87dd0d5d3b2e1c727873520754391389 | [
"Apache-2.0"
] | null | null | null | xskillscore/tests/test_skipna_functionality.py | blackary/xskillscore | 2754d60a87dd0d5d3b2e1c727873520754391389 | [
"Apache-2.0"
] | null | null | null | import dask
import numpy as np
import pytest
from xarray.tests import CountingScheduler, assert_allclose, raise_if_dask_computes
from xskillscore.core.deterministic import (
linslope,
mae,
mape,
me,
median_absolute_error,
mse,
pearson_r,
pearson_r_p_value,
r2,
rmse,
smape,
spearman_r,
spearman_r_p_value,
)
WEIGHTED_METRICS = [
linslope,
pearson_r,
pearson_r_p_value,
spearman_r,
spearman_r_p_value,
mae,
mse,
mape,
smape,
me,
rmse,
r2,
]
NON_WEIGHTED_METRICS = [median_absolute_error]
def drop_nans(a, b, weights=None, dim="time"):
"""
Masks a and b where they have pairwise nans.
"""
a = a.where(b.notnull())
b = b.where(a.notnull())
if weights is not None:
weights = weights.where(a.notnull())
weights = weights.dropna(dim)
return a.dropna(dim), b.dropna(dim), weights
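# Example of the pairwise masking above (values are illustrative): with
# a = [1, nan, 3] and b = [10, 20, nan] along "time", only index 0 is valid in
# both arrays, so drop_nans returns a = [1], b = [10] (and weights, if given,
# reduced to the same single timestep).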
@pytest.mark.parametrize("metric", WEIGHTED_METRICS + NON_WEIGHTED_METRICS)
def test_skipna_returns_same_value_as_dropped_pairwise_nans(
a_1d_fixed_nan, b_1d_fixed_nan, metric
):
"""Tests that DataArrays with pairwise nans return the same result
as the same two with those nans dropped."""
a_dropped, b_dropped, _ = drop_nans(a_1d_fixed_nan, b_1d_fixed_nan)
with raise_if_dask_computes():
res_with_nans = metric(a_1d_fixed_nan, b_1d_fixed_nan, "time", skipna=True)
res_dropped_nans = metric(a_dropped, b_dropped, "time")
assert_allclose(res_with_nans, res_dropped_nans)
@pytest.mark.parametrize("metric", WEIGHTED_METRICS)
def test_skipna_returns_same_value_as_dropped_pairwise_nans_with_weights(
a_1d_fixed_nan, b_1d_fixed_nan, weights_time, metric
):
"""Tests that DataArrays with pairwise nans return the same result
as the same two with those nans dropped."""
a_dropped, b_dropped, weights_time_dropped = drop_nans(
a_1d_fixed_nan, b_1d_fixed_nan, weights_time
)
with raise_if_dask_computes():
res_with_nans = metric(
a_1d_fixed_nan, b_1d_fixed_nan, "time", skipna=True, weights=weights_time
)
res_dropped_nans = metric(
a_dropped, b_dropped, "time", weights=weights_time_dropped
)
assert_allclose(res_with_nans, res_dropped_nans)
@pytest.mark.parametrize("metric", WEIGHTED_METRICS + NON_WEIGHTED_METRICS)
def test_skipna_returns_nan_when_false(a_1d_fixed_nan, b_1d_fixed_nan, metric):
"""Tests that nan is returned if there's any nans in the time series
and skipna is False."""
with raise_if_dask_computes():
res = metric(a_1d_fixed_nan, b_1d_fixed_nan, "time", skipna=False)
assert np.isnan(res).all()
@pytest.mark.parametrize("metric", WEIGHTED_METRICS)
def test_skipna_broadcast_weights_assignment_destination(
a_rand_nan, b_rand_nan, weights_lonlat, metric
):
"""Tests that 'assignment destination is read-only' is not raised
https://github.com/xarray-contrib/xskillscore/issues/79"""
with raise_if_dask_computes():
metric(
a_rand_nan, b_rand_nan, ["lat", "lon"], weights=weights_lonlat, skipna=True
)
def test_nan_skipna(a, b):
# Randomly add some nans to a
a = a.where(np.random.random(a.shape) < 0.5)
with raise_if_dask_computes():
pearson_r(a, b, dim="lat", skipna=True)
| 30.336364 | 87 | 0.709919 |
485a00eabbbc1256405a8684f6b790ba53930abc | 3,265 | py | Python | dcodex_bible/plot.py | rbturnbull/dcodex_bible | 7745726867bdc556b3de5505601bbb881d420477 | [
"Apache-2.0"
] | null | null | null | dcodex_bible/plot.py | rbturnbull/dcodex_bible | 7745726867bdc556b3de5505601bbb881d420477 | [
"Apache-2.0"
] | 9 | 2021-04-08T20:32:39.000Z | 2022-03-12T01:06:09.000Z | dcodex_bible/plot.py | rbturnbull/dcodex_bible | 7745726867bdc556b3de5505601bbb881d420477 | [
"Apache-2.0"
] | null | null | null | from dcodex.models import *
from .models import *
def plot_affiliation_matrix(
family,
manuscripts,
verses,
force_compute=False,
matrix_filename=None,
figsize=(12, 7),
major_chapter_markers=10,
minor_chapter_markers=1,
labels=None,
output_filename=None,
colors=['#007AFF'],
):
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FixedLocator
from os.path import isfile
from os import access, R_OK
if not force_compute and matrix_filename and isfile( matrix_filename ) and access(matrix_filename, R_OK):
matrix = np.load(matrix_filename)
else:
matrix = family.affiliation_matrix( manuscripts, verses )
if matrix_filename:
np.save(matrix_filename, matrix)
print(matrix)
fig, ax = plt.subplots(figsize=figsize)
###### Major Grid Lines ######
verse_min = verses[0]
verse_max = verses[-1]
chapter_beginnings = BibleVerse.objects.filter( id__gt=verse_min.id, id__lt=verse_max.id, verse=1 )
major_tick_locations = [0]
major_tick_annotations = [verse_min.reference_abbreviation().replace(" ","\n")]
for chapter_beginning in chapter_beginnings:
if chapter_beginning.chapter % major_chapter_markers == 0 or chapter_beginning.chapter == 1:
major_tick_locations.append( chapter_beginning.id - verse_min.id )
ref = "%d:%d" % (chapter_beginning.chapter, chapter_beginning.verse) if chapter_beginning.chapter > 1 else chapter_beginning.reference_abbreviation().replace(" ","\n")
major_tick_annotations.append( ref )
major_tick_locations.append( verse_max.id - verse_min.id )
major_tick_annotations.append( verse_max.reference_abbreviation().replace(" ","\n") )
plt.xticks(major_tick_locations, major_tick_annotations )
linewidth = 2 if major_chapter_markers > minor_chapter_markers else 1
ax.xaxis.grid(True, which='major', color='#666666', linestyle='-', alpha=0.4, linewidth=linewidth)
###### Minor Grid Lines ######
minor_ticks = [x.id - verse_min.id for x in chapter_beginnings if (x.id - verse_min.id) not in major_tick_locations and x.chapter % minor_chapter_markers == 0]
ax.xaxis.set_minor_locator(FixedLocator(minor_ticks))
ax.xaxis.grid(True, which='minor', color='#666666', linestyle='-', alpha=0.2, linewidth=1,)
x_values = np.arange( len(verses) )
alpha = 0.4
for manuscript_index, manuscript in enumerate(manuscripts):
color = colors[manuscript_index % len(colors)]
ax.fill_between(x_values, manuscript_index + alpha * matrix[manuscript_index], manuscript_index - alpha * matrix[manuscript_index], color=color)
if labels is None:
labels = [ms.short_name() for ms in manuscripts]
plt.yticks(np.arange(len(manuscripts)), labels)
plt.show()
distinct_verses = matrix.any(axis=0).sum()
proportion = distinct_verses/len(verses) * 100.0
print(f"Distinct verses {distinct_verses} ({proportion}\%)" )
if output_filename:
fig.tight_layout()
fig.savefig(output_filename)
print(f"Saved to {output_filename}") | 41.329114 | 179 | 0.672588 |
28b1475ab82af1e110f0939c358c91678fd6ddce | 915 | py | Python | relay-demo.py | jkaplon/rpi-utils | ad8ef8a4803c4086560345d23a07c70ee40cc7fe | [
"MIT"
] | null | null | null | relay-demo.py | jkaplon/rpi-utils | ad8ef8a4803c4086560345d23a07c70ee40cc7fe | [
"MIT"
] | null | null | null | relay-demo.py | jkaplon/rpi-utils | ad8ef8a4803c4086560345d23a07c70ee40cc7fe | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import time
in1 = 16
in2 = 18
GPIO.setmode(GPIO.BOARD)
GPIO.setup(in1, GPIO.OUT)
GPIO.setup(in2, GPIO.OUT)
GPIO.output(in1, False)
GPIO.output(in2, False)
try:
while True:
for x in range(5):
GPIO.output(in1, True)
time.sleep(0.1)
GPIO.output(in1, False)
GPIO.output(in2, True)
time.sleep(0.1)
GPIO.output(in2, False)
GPIO.output(in1, True)
GPIO.output(in2, True)
for x in range(4):
GPIO.output(in1, True)
time.sleep(0.05)
GPIO.output(in1, False)
time.sleep(0.05)
GPIO.output(in1, True)
for x in range(4):
GPIO.output(in2, True)
time.sleep(0.05)
GPIO.output(in2, False)
time.sleep(0.05)
GPIO.output(in2, True)
except KeyboardInterrupt:
GPIO.cleanup()
| 20.795455 | 35 | 0.538798 |
1674aaa376a14ec89ada3760babc194611763702 | 174 | py | Python | test_files/encoding_test.py | haiyanghe/rednose | 8a280ce9e7d02a563e252098661f79cfb3011971 | [
"MIT"
] | 30 | 2016-04-15T21:19:30.000Z | 2021-03-25T11:30:36.000Z | test_files/encoding_test.py | haiyanghe/rednose | 8a280ce9e7d02a563e252098661f79cfb3011971 | [
"MIT"
] | 20 | 2016-04-18T14:56:02.000Z | 2021-09-21T14:58:04.000Z | test_files/encoding_test.py | haiyanghe/rednose | 8a280ce9e7d02a563e252098661f79cfb3011971 | [
"MIT"
] | 11 | 2016-07-22T08:58:20.000Z | 2020-06-01T14:47:44.000Z | # vim: fileencoding=utf-8:
# NOTE: this file does *not* import unicode_literals,
# so the assertion message is actually just utf-8 bytes
def test():
assert False, "ä"
| 19.333333 | 55 | 0.706897 |
b3516f2ea9950aed50cce2ba19891a0f46cf3a68 | 71 | py | Python | manage/const.py | augustand/kervice | ccbda8196805a542a9324b404e02d83cc130926e | [
"MIT"
] | 1 | 2017-12-24T07:59:25.000Z | 2017-12-24T07:59:25.000Z | manage/const.py | augustand/kervice | ccbda8196805a542a9324b404e02d83cc130926e | [
"MIT"
] | null | null | null | manage/const.py | augustand/kervice | ccbda8196805a542a9324b404e02d83cc130926e | [
"MIT"
] | null | null | null | class ENV(object):
dev = "dev"
stage = "stage"
pro = "pro"
| 14.2 | 19 | 0.507042 |
e56efa20703c30dfadc089701d275413297aa25b | 2,718 | py | Python | tests/instrumentation/django_tests/template_tests.py | ellisvalentiner/apm-agent-python | 89e2579a7b05433733909632127fd34770bc93ef | [
"BSD-3-Clause"
] | null | null | null | tests/instrumentation/django_tests/template_tests.py | ellisvalentiner/apm-agent-python | 89e2579a7b05433733909632127fd34770bc93ef | [
"BSD-3-Clause"
] | null | null | null | tests/instrumentation/django_tests/template_tests.py | ellisvalentiner/apm-agent-python | 89e2579a7b05433733909632127fd34770bc93ef | [
"BSD-3-Clause"
] | null | null | null | import pytest # isort:skip
pytest.importorskip("django") # isort:skip
from os.path import join
import django
from django.test.utils import override_settings
import mock
import pytest
from conftest import BASE_TEMPLATE_DIR
from elasticapm.conf.constants import TRANSACTION
from tests.utils.compat import middleware_setting
try:
# Django 1.10+
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
# Testing Django 1.8+ backends
TEMPLATES = (
{"BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [BASE_TEMPLATE_DIR]},
{"BACKEND": "django.template.backends.jinja2.Jinja2", "DIRS": [join(BASE_TEMPLATE_DIR, "jinja2")]},
)
def test_template_rendering(instrument, django_elasticapm_client, client):
with override_settings(
**middleware_setting(django.VERSION, ["elasticapm.contrib.django.middleware.TracingMiddleware"])
):
client.get(reverse("render-heavy-template"))
client.get(reverse("render-heavy-template"))
client.get(reverse("render-heavy-template"))
transactions = django_elasticapm_client.events[TRANSACTION]
assert len(transactions) == 3
spans = django_elasticapm_client.spans_for_transaction(transactions[0])
assert len(spans) == 2, [t["name"] for t in spans]
kinds = ["code", "template.django"]
assert set([t["type"] for t in spans]) == set(kinds)
assert spans[0]["type"] == "code"
assert spans[0]["name"] == "something_expensive"
assert spans[0]["parent_id"] == spans[1]["id"]
assert spans[1]["type"] == "template.django"
assert spans[1]["name"] == "list_users.html"
assert spans[1]["parent_id"] == transactions[0]["id"]
@pytest.mark.skipif(django.VERSION < (1, 8), reason="Jinja2 support introduced with Django 1.8")
def test_template_rendering_django18_jinja2(instrument, django_elasticapm_client, client):
with override_settings(
TEMPLATES=TEMPLATES,
**middleware_setting(django.VERSION, ["elasticapm.contrib.django.middleware.TracingMiddleware"])
):
client.get(reverse("render-jinja2-template"))
client.get(reverse("render-jinja2-template"))
client.get(reverse("render-jinja2-template"))
transactions = django_elasticapm_client.events[TRANSACTION]
assert len(transactions) == 3
spans = django_elasticapm_client.spans_for_transaction(transactions[0])
assert len(spans) == 1, [t["name"] for t in spans]
kinds = ["template.jinja2"]
assert set([t["type"] for t in spans]) == set(kinds)
assert spans[0]["type"] == "template.jinja2"
assert spans[0]["name"] == "jinja2_template.html"
assert spans[0]["parent_id"] == transactions[0]["id"]
| 34.405063 | 104 | 0.708241 |
ae04f17c8315e6f05c54082c47b22ad1d78c7732 | 252 | py | Python | 0349_IntersectionOfTwoArrays.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | null | null | null | 0349_IntersectionOfTwoArrays.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | null | null | null | 0349_IntersectionOfTwoArrays.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | 1 | 2020-03-18T05:23:40.000Z | 2020-03-18T05:23:40.000Z | class Solution:
def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
n1 = set(nums1)
n2 = set(nums2)
out = []
for n in n1:
if n in n2:
out.append(n)
return out
| 25.2 | 76 | 0.47619 |
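For reference, an equivalent but shorter approach (a sketch, not part of the original submission) leans on Python's built-in set intersection:
def intersection_via_sets(nums1, nums2):
    # set & set already deduplicates, matching the unique-elements contract above.
    return list(set(nums1) & set(nums2))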
b2ff4b1ef09cbffbdaa86db828a6cd4a33a7a70a | 819 | py | Python | utils/img_tools.py | snu-mllab/Deep-Hash-Table-CVPR19 | 62c811c37001302e6759a18d6143b8ad657e4910 | [
"MIT"
] | 12 | 2019-05-20T10:26:01.000Z | 2020-05-07T02:19:05.000Z | utils/img_tools.py | maestrojeong/Deep-Hash-Table-CVPR19 | 62c811c37001302e6759a18d6143b8ad657e4910 | [
"MIT"
] | 1 | 2019-06-21T11:50:15.000Z | 2019-06-24T05:38:27.000Z | utils/img_tools.py | snu-mllab/Deep-Hash-Table-CVPR19 | 62c811c37001302e6759a18d6143b8ad657e4910 | [
"MIT"
] | 2 | 2019-03-21T01:54:11.000Z | 2019-05-08T10:38:46.000Z | import numpy as np
import os
def jpg2png(str_):
    '''
    Args:
        str_ - file name ending in .jpg
    Return:
        the same file name with the extension replaced by .png
    '''
    return os.path.splitext(str_)[0]+'.png'
def rgb2gray(rgb_img):
    '''
    Args:
        rgb_img - Numpy 3D array
                    [nrow, ncol, 3]
    Return:
        gray_img - Numpy 3D array
                    [nrow, ncol, 1]
    '''
    gray_img = np.mean(rgb_img, axis=-1, keepdims=True)
    assert len(gray_img.shape) == 3, 'rgb2gray expects a 3D [nrow, ncol, 3] input'
    return gray_img
def gray2rgb(gray_img):
'''
Args:
gray_img - Numpy 2D array
[nrow, ncol]
Return:
rgb_img - Numpy 3D array
                    [nrow, ncol, 3]
'''
w, h = gray_img.shape
rgb_img = np.empty((w, h, 3), dtype=np.uint8)
rgb_img[:, :, 0] = rgb_img[:, :, 1] = rgb_img[:, :, 2] = gray_img
return rgb_img
| 21.552632 | 69 | 0.518926 |
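A minimal usage sketch for the three helpers above (file names and array shapes are illustrative assumptions, not taken from the original repo):
import numpy as np
grey = np.random.randint(0, 256, size=(32, 48)).astype(np.uint8)   # [nrow, ncol]
rgb = gray2rgb(grey)                                               # [nrow, ncol, 3]
grey_again = rgb2gray(rgb)                                         # [nrow, ncol, 1]
assert rgb.shape == (32, 48, 3) and grey_again.shape == (32, 48, 1)
print(jpg2png('samples/cat.jpg'))   # -> 'samples/cat.png' (hypothetical path)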
04c9afb6fd96ce6354af8cf372d0372ecf7b46f5 | 67 | py | Python | tests/__init__.py | sturmianseq/yamicache | be6400a5742f3f01c5ada01b3ca74c72ce0108e2 | [
"MIT"
] | null | null | null | tests/__init__.py | sturmianseq/yamicache | be6400a5742f3f01c5ada01b3ca74c72ce0108e2 | [
"MIT"
] | 10 | 2017-09-01T17:12:20.000Z | 2021-08-28T17:34:46.000Z | tests/__init__.py | sturmianseq/yamicache | be6400a5742f3f01c5ada01b3ca74c72ce0108e2 | [
"MIT"
] | 3 | 2018-08-08T15:49:35.000Z | 2021-11-04T17:21:10.000Z | # -*- coding: utf-8 -*-
"""Unit test package for yamicache."""
| 16.75 | 39 | 0.552239 |
e0b51dfaae70adf829ce3b45c27f52f7fdc9822d | 1,313 | py | Python | entities/client.py | wolfryan97/Project_0 | 27c1608833b04f067b5bceaeed99e7ed2b164a58 | [
"MIT"
] | null | null | null | entities/client.py | wolfryan97/Project_0 | 27c1608833b04f067b5bceaeed99e7ed2b164a58 | [
"MIT"
] | null | null | null | entities/client.py | wolfryan97/Project_0 | 27c1608833b04f067b5bceaeed99e7ed2b164a58 | [
"MIT"
] | null | null | null | class Client:
def __init__(self, client_id: int, client_firstname: str,
client_lastname: str, address: str, city: str,
state: str, zip_code: int):
self.client_id = client_id
self.client_firstname = client_firstname
self.client_lastname = client_lastname
self.address = address
self.city = city
self.state = state
self.zip_code = zip_code
def __str__(self):
return f"Client ID: {self.client_id}\nName: {self.client_firstname} {self.client_lastname}"
def json(self):
return {'clientID': self.client_id,
'firstname': self.client_firstname,
'lastname': self.client_lastname,
'address': self.address,
'city': self.city,
'state': self.state,
'zip': self.zip_code
}
@staticmethod
def json_deserialize(json):
client = Client(0, '', '', '', '', '', '')
client.client_id = json['clientID']
client.client_firstname = json['firstname']
client.client_lastname = json['lastname']
client.address = json['address']
client.city = json['city']
client.state = json['state']
client.zip_code = json['zip']
return client
| 34.552632 | 99 | 0.566641 |
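A small round-trip through the entity above; the field values are invented for illustration:
row = Client(1, 'Ada', 'Lovelace', '12 Main St', 'Springfield', 'IL', 62701)
payload = row.json()                         # plain dict, ready to serialize as JSON
restored = Client.json_deserialize(payload)  # rebuilds an equivalent Client
print(restored)                              # Client ID: 1 / Name: Ada Lovelace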
cc0cab8a718af7a8b0437ff869c4ab6adc7f1fb8 | 170 | py | Python | tulius/forum/threads/__init__.py | kozzztik/tulius | 81b8f6484eefdc453047f62173a08f5e6f640cd6 | [
"MIT"
] | 1 | 2020-04-21T15:09:18.000Z | 2020-04-21T15:09:18.000Z | tulius/forum/threads/__init__.py | kozzztik/tulius | 81b8f6484eefdc453047f62173a08f5e6f640cd6 | [
"MIT"
] | 70 | 2019-04-10T22:32:32.000Z | 2022-03-11T23:12:54.000Z | tulius/forum/threads/__init__.py | kozzztik/tulius | 81b8f6484eefdc453047f62173a08f5e6f640cd6 | [
"MIT"
] | 1 | 2019-04-12T14:55:39.000Z | 2019-04-12T14:55:39.000Z | from django.apps import AppConfig
class ForumThreadsConfig(AppConfig):
name = 'tulius.forum.threads'
label = 'forum_threads'
verbose_name = 'Forum threads'
| 21.25 | 36 | 0.735294 |
cd1fa0a5072c0bf3b1247a4eabd1258fc86aa576 | 3,279 | py | Python | tests/test_i2c.py | jakiee3y/luma.core | 713de8e4e397493dd196e8e7653268877135ffe9 | [
"MIT"
] | null | null | null | tests/test_i2c.py | jakiee3y/luma.core | 713de8e4e397493dd196e8e7653268877135ffe9 | [
"MIT"
] | null | null | null | tests/test_i2c.py | jakiee3y/luma.core | 713de8e4e397493dd196e8e7653268877135ffe9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Tests for the :py:class:`luma.core.interface.serial.i2c` class.
"""
import errno
import pytest
import smbus2
from unittest.mock import Mock, patch, call
from luma.core.interface.serial import i2c
import luma.core.error
from helpers import i2c_error, fib
smbus = Mock(unsafe=True)
def setup_function(function):
smbus.reset_mock()
def test_init_device_not_found():
port = 200
address = 0x710
path_name = '/dev/i2c-{}'.format(port)
fake_open = i2c_error(path_name, errno.ENOENT)
with patch('os.open', fake_open):
with pytest.raises(luma.core.error.DeviceNotFoundError) as ex:
i2c(port=port, address=address)
assert str(ex.value) == 'I2C device not found: {}'.format(path_name)
def test_init_device_permission_error():
port = 1
path_name = '/dev/i2c-{}'.format(port)
fake_open = i2c_error(path_name, errno.EACCES)
with patch('os.open', fake_open):
try:
i2c(port=port)
except luma.core.error.DevicePermissionError as ex:
# permission error: device exists but no permission
assert str(ex) == 'I2C device permission denied: {}'.format(
path_name)
def test_init_device_address_error():
address = 'foo'
with pytest.raises(luma.core.error.DeviceAddressError) as ex:
i2c(address=address)
assert str(ex.value) == 'I2C device address invalid: {}'.format(address)
def test_init_no_bus():
with patch.object(smbus2.SMBus, 'open') as mock:
i2c(port=2, address=0x71)
mock.assert_called_once_with(2)
def test_init_bus_provided():
i2c(bus=smbus, address=0x71)
smbus.open.assert_not_called()
def test_command():
cmds = [3, 1, 4, 2]
serial = i2c(bus=smbus, address=0x83)
serial.command(*cmds)
smbus.write_i2c_block_data.assert_called_once_with(0x83, 0x00, cmds)
def test_i2c_command_device_not_found_error():
errorbus = Mock(unsafe=True)
address = 0x71
cmds = [3, 1, 4, 2]
expected_error = OSError()
try:
for error_code in [errno.EREMOTEIO, errno.EIO]:
expected_error.errno = error_code
errorbus.write_i2c_block_data.side_effect = expected_error
serial = i2c(bus=errorbus, address=address)
with pytest.raises(luma.core.error.DeviceNotFoundError) as ex:
serial.command(*cmds)
assert str(ex.value) == 'I2C device not found on address: 0x{0:02X}'.format(
address)
except AttributeError as e:
# osx
pytest.skip(str(e))
def test_i2c_data():
data = list(fib(10))
serial = i2c(bus=smbus, address=0x21)
serial.data(data)
smbus.write_i2c_block_data.assert_called_once_with(0x21, 0x40, data)
def test_i2c_data_chunked():
data = list(fib(100))
serial = i2c(bus=smbus, address=0x66)
serial.data(data)
calls = [call(0x66, 0x40, data[i:i + 32]) for i in range(0, 100, 32)]
smbus.write_i2c_block_data.assert_has_calls(calls)
def test_cleanup():
serial = i2c(bus=smbus, address=0x9F)
serial._managed = True
serial.cleanup()
smbus.close.assert_called_once_with()
| 27.325 | 88 | 0.668801 |
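For context, the interface exercised by these tests is normally constructed in application code roughly like this (port and device address are illustrative assumptions):
from luma.core.interface.serial import i2c
serial = i2c(port=1, address=0x3C)   # opens /dev/i2c-1 and talks to a device at 0x3C
serial.command(0xAE)                 # command bytes are written with mode byte 0x00, as asserted above
serial.data([0x00, 0xFF])            # data bytes are written with mode byte 0x40, chunked in blocks of 32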
3ff105eebd7b312c178dda11b75eab545ce3aae8 | 3,002 | py | Python | official/mnist/mnist_eager_test.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | 1 | 2019-09-11T09:41:11.000Z | 2019-09-11T09:41:11.000Z | official/mnist/mnist_eager_test.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | null | null | null | official/mnist/mnist_eager_test.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.python import eager as tfe # pylint: disable=g-bad-import-order
from official.mnist import mnist
from official.mnist import mnist_eager
from official.utils.misc import keras_utils
def device():
return '/device:GPU:0' if tfe.context.num_gpus() else '/device:CPU:0'
def data_format():
return 'channels_first' if tfe.context.num_gpus() else 'channels_last'
def random_dataset():
batch_size = 64
images = tf.random_normal([batch_size, 784])
labels = tf.random_uniform([batch_size], minval=0, maxval=10, dtype=tf.int32)
return tf.data.Dataset.from_tensors((images, labels))
def train(defun=False):
model = mnist.create_model(data_format())
if defun:
model.call = tf.function(model.call)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
dataset = random_dataset()
with tf.device(device()):
mnist_eager.train(model, optimizer, dataset,
step_counter=tf.train.get_or_create_global_step())
def evaluate(defun=False):
model = mnist.create_model(data_format())
dataset = random_dataset()
if defun:
model.call = tf.function(model.call)
with tf.device(device()):
mnist_eager.test(model, dataset)
class MNISTTest(tf.test.TestCase):
"""Run tests for MNIST eager loop.
MNIST eager uses contrib and will not work with TF 2.0. All tests are
disabled if using TF 2.0.
"""
def setUp(self):
if not keras_utils.is_v2_0():
tf.compat.v1.enable_v2_behavior()
super(MNISTTest, self).setUp()
@unittest.skipIf(keras_utils.is_v2_0(), 'TF 1.0 only test.')
def test_train(self):
train(defun=False)
@unittest.skipIf(keras_utils.is_v2_0(), 'TF 1.0 only test.')
def test_evaluate(self):
evaluate(defun=False)
@unittest.skipIf(keras_utils.is_v2_0(), 'TF 1.0 only test.')
def test_train_with_defun(self):
train(defun=True)
@unittest.skipIf(keras_utils.is_v2_0(), 'TF 1.0 only test.')
def test_evaluate_with_defun(self):
evaluate(defun=True)
if __name__ == '__main__':
tf.test.main()
| 31.270833 | 81 | 0.691872 |
e86925d31e9ecdfbab2851c4e21d398b9d74e0e0 | 1,199 | py | Python | src/app/halftone.py | telegnom/labello | bb9349909758bebfd03fd96f7a911d61ce29478a | [
"MIT"
] | 2 | 2019-04-07T04:17:28.000Z | 2021-02-25T01:11:33.000Z | src/app/halftone.py | telegnom/labello | bb9349909758bebfd03fd96f7a911d61ce29478a | [
"MIT"
] | 10 | 2019-01-10T06:32:37.000Z | 2020-06-18T00:06:28.000Z | src/app/halftone.py | telegnom/labello | bb9349909758bebfd03fd96f7a911d61ce29478a | [
"MIT"
] | 4 | 2018-04-13T17:15:43.000Z | 2019-10-31T20:09:13.000Z | from PIL import Image, ImageDraw, ImageStat
def halftone(img, sample, scale, angle=45):
    """Return a 1-bit halftone rendition of ``img``: the grey channel is
    rotated by ``angle`` degrees, sampled in ``sample``-pixel cells, and
    every cell becomes a dot whose area follows the cell's mean brightness;
    ``scale`` enlarges the output so the dots stay round.
    """
    # Work on a single grey channel, rotated so the dot grid sits at `angle`.
    img_grey = img.convert('L')
    channel = img_grey.split()[0]
    channel = channel.rotate(angle, expand=1)
    # The output is a 1-bit image, upscaled by `scale`.
    size = channel.size[0]*scale, channel.size[1]*scale
    new_img = Image.new('1', size)
    draw = ImageDraw.Draw(new_img)
    for x in range(0, channel.size[0], sample):
        for y in range(0, channel.size[1], sample):
            # The mean brightness of this cell sets the dot diameter; the
            # square root keeps perceived density roughly linear.
            box = channel.crop((x, y, x+sample, y+sample))
            mean = ImageStat.Stat(box).mean[0]
            diameter = (mean/255) ** 0.5
            edge = 0.5 * (1-diameter)
            x_pos, y_pos = (x+edge) * scale, (y+edge) * scale
            box_edge = sample * diameter * scale
            draw.ellipse((x_pos, y_pos, x_pos+box_edge, y_pos+box_edge),
                         fill=255)
    # Rotate back, then crop the canvas (grown by the two expand=1 rotations)
    # to the scaled size of the original image, centred.
    new_img = new_img.rotate(-angle, expand=1)
    width_half, height_half = new_img.size
    half_x = (width_half - img.size[0]*scale) / 2
    half_y = (height_half - img.size[1]*scale) / 2
    new_img = new_img.crop((half_x, half_y, half_x + img.size[0]*scale,
                            half_y + img.size[1]*scale))
return Image.merge('1', [new_img]) | 39.966667 | 72 | 0.581318 |
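A usage sketch for the function above; the input path and parameter choices are illustrative:
from PIL import Image
src = Image.open('label.png').convert('RGB')          # hypothetical input file
dotted = halftone(src, sample=10, scale=1, angle=45)  # 10 px cells, 45-degree screen
dotted.save('label_halftone.png')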
a35db153cca5c2b10bfdaf05a26756c0125d93da | 2,324 | py | Python | python3/koans/about_comprehension.py | polarization/python_koans | bd0f78a66d7fa60272d21bb24202ebff87e73e41 | [
"MIT"
] | null | null | null | python3/koans/about_comprehension.py | polarization/python_koans | bd0f78a66d7fa60272d21bb24202ebff87e73e41 | [
"MIT"
] | null | null | null | python3/koans/about_comprehension.py | polarization/python_koans | bd0f78a66d7fa60272d21bb24202ebff87e73e41 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutComprehension(Koan):
def test_creating_lists_with_list_comprehensions(self):
feast = ['lambs', 'sloths', 'orangutans', 'breakfast cereals',
'fruit bats']
comprehension = [delicacy.capitalize() for delicacy in feast]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, comprehension[2])
def test_filtering_lists_with_list_comprehensions(self):
feast = ['spam', 'sloths', 'orangutans', 'breakfast cereals',
'fruit bats']
comprehension = [delicacy for delicacy in feast if len(delicacy) > 6]
self.assertEqual(__, len(feast))
self.assertEqual(__, len(comprehension))
def test_unpacking_tuples_in_list_comprehensions(self):
list_of_tuples = [(1, 'lumberjack'), (2, 'inquisition'), (4, 'spam')]
comprehension = [skit * number for number, skit in list_of_tuples]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, comprehension[2])
def test_double_list_comprehension(self):
list_of_eggs = ['poached egg', 'fried egg']
list_of_meats = ['lite spam', 'ham spam', 'fried spam']
comprehension = [
'{0} and {1}'.format(
egg,
meat) for egg in list_of_eggs for meat in list_of_meats]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, len(comprehension))
def test_creating_a_set_with_set_comprehension(self):
comprehension = {x for x in 'aabbbcccc'}
# remember that set members are unique
self.assertEqual(__, comprehension)
def test_creating_a_dictionary_with_dictionary_comprehension(self):
dict_of_weapons = {
'first': 'fear',
'second': 'surprise',
'third': 'ruthless efficiency',
'fourth': 'fanatical devotion',
'fifth': None}
dict_comprehension = {
k.upper(): weapon for k,
weapon in dict_of_weapons.items() if weapon}
self.assertEqual(__, 'first' in dict_comprehension)
self.assertEqual(__, 'FIRST' in dict_comprehension)
self.assertEqual(__, len(dict_of_weapons))
self.assertEqual(__, len(dict_comprehension))
| 34.176471 | 77 | 0.63167 |
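The double underscores are intentional blanks: each koan passes once they are replaced with the value the expression produces. A hedged sketch of the first two expectations:
feast = ['lambs', 'sloths', 'orangutans', 'breakfast cereals', 'fruit bats']
comprehension = [delicacy.capitalize() for delicacy in feast]
assert comprehension[0] == 'Lambs'        # value the first blank should take
assert comprehension[2] == 'Orangutans'   # value the second blank should take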
81c6f04fabba0621d129cf3f1ba15a9725317db7 | 916 | py | Python | connect/serializer.py | lizKimita/Mshauri-Connect | 24f6f67017eebf5ee1d2e08c9bf249108dee28a2 | [
"MIT"
] | 1 | 2019-06-20T08:23:22.000Z | 2019-06-20T08:23:22.000Z | connect/serializer.py | lizKimita/Mshauri-Connect | 24f6f67017eebf5ee1d2e08c9bf249108dee28a2 | [
"MIT"
] | 16 | 2019-06-11T14:55:14.000Z | 2021-09-08T01:02:58.000Z | connect/serializer.py | lizKimita/Mshauri-Connect | 24f6f67017eebf5ee1d2e08c9bf249108dee28a2 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Foundation, Awareness, Forums, Profile, Comment
class FoundationSerializer(serializers.ModelSerializer):
class Meta:
model=Foundation
fields=('image','name','location', 'contact', 'website_link', 'description')
class AwarenessSerializer(serializers.ModelSerializer):
class Meta:
model=Awareness
fields=('article_title','article','date', 'foundation')
class ForumsSerializer(serializers.ModelSerializer):
class Meta:
model=Forums
fields=('forum_title','forum_post','post_date','post_user')
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model=Profile
fields=('username','tel_no','email','user_location')
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model=Comment
fields=('user_comment','comment','comment_id')
| 29.548387 | 84 | 0.713974 |
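A minimal Django REST Framework usage sketch for one of the serializers above (it assumes at least one Foundation row exists; nothing here comes from the original project):
foundation = Foundation.objects.first()
if foundation is not None:
    payload = FoundationSerializer(foundation).data   # dict limited to the declared fields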
18ffcb40a0b1bd2a9b11e0f8365d81e2864d0fbb | 934 | py | Python | venv/lib/python3.8/site-packages/matplotlib/tests/test_backend_tk.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 15 | 2020-06-29T08:33:39.000Z | 2022-02-12T00:28:51.000Z | venv/lib/python3.8/site-packages/matplotlib/tests/test_backend_tk.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 30 | 2020-04-15T19:37:40.000Z | 2020-04-22T21:19:35.000Z | venv/lib/python3.8/site-packages/matplotlib/tests/test_backend_tk.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 11 | 2020-06-29T08:40:24.000Z | 2022-02-24T17:39:16.000Z | import pytest
import numpy as np
from matplotlib import pyplot as plt
@pytest.mark.backend('TkAgg', skip_on_importerror=True)
def test_blit():
from matplotlib.backends import _tkagg
def evil_blit(photoimage, aggimage, offsets, bboxptr):
data = np.asarray(aggimage)
height, width = data.shape[:2]
dataptr = (height, width, data.ctypes.data)
_tkagg.blit(
photoimage.tk.interpaddr(), str(photoimage), dataptr, offsets,
bboxptr)
fig, ax = plt.subplots()
for bad_boxes in ((-1, 2, 0, 2),
(2, 0, 0, 2),
(1, 6, 0, 2),
(0, 2, -1, 2),
(0, 2, 2, 0),
(0, 2, 1, 6)):
with pytest.raises(ValueError):
evil_blit(fig.canvas._tkphoto,
np.ones((4, 4, 4)),
(0, 1, 2, 3),
bad_boxes)
| 32.206897 | 74 | 0.493576 |
137ca232e9ce8d6913600a4978a0a45707c4320a | 2,909 | py | Python | src/olympia/reviews/templatetags/jinja_helpers.py | Osmose/olympia | 774c3b927ec05ef971e4206e2669b4291b8b4f17 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/reviews/templatetags/jinja_helpers.py | Osmose/olympia | 774c3b927ec05ef971e4206e2669b4291b8b4f17 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/reviews/templatetags/jinja_helpers.py | Osmose/olympia | 774c3b927ec05ef971e4206e2669b4291b8b4f17 | [
"BSD-3-Clause"
] | null | null | null | import jinja2
from django_jinja import library
from django.template.loader import get_template
from django.utils.translation import ugettext
from olympia import amo
from olympia.access import acl
from olympia.reviews.models import ReviewFlag
from .. import forms
@library.filter
def stars(num, large=False):
# check for 0.0 incase None was cast to a float. Should
# be safe since lowest rating you can give is 1.0
if num is None or num == 0.0:
return ugettext('Not yet rated')
else:
num = min(5, int(round(num)))
t = get_template('reviews/impala/reviews_rating.html')
# These are getting renamed for contextual sense in the template.
return jinja2.Markup(t.render({'rating': num, 'detailpage': large}))
@library.global_function
def reviews_link(addon, collection_uuid=None, link_to_list=False):
t = get_template('reviews/reviews_link.html')
return jinja2.Markup(t.render({'addon': addon,
'link_to_list': link_to_list,
'collection_uuid': collection_uuid}))
@library.global_function
def impala_reviews_link(addon, collection_uuid=None, link_to_list=False):
t = get_template('reviews/impala/reviews_link.html')
return jinja2.Markup(t.render({'addon': addon,
'link_to_list': link_to_list,
'collection_uuid': collection_uuid}))
@library.global_function
@library.render_with('reviews/report_review.html')
def report_review_popup():
return {'ReviewFlag': ReviewFlag, 'flag_form': forms.ReviewFlagForm()}
@library.global_function
@library.render_with('reviews/edit_review.html')
def edit_review_form():
return {'form': forms.ReviewForm()}
@library.global_function
@library.render_with('reviews/edit_review.html')
def edit_review_reply_form():
return {'form': forms.ReviewReplyForm()}
def user_can_delete_review(request, review):
"""Return whether or not the request.user can delete reviews.
People who can delete reviews:
* The original review author.
* Editors, but only if they aren't listed as an author of the add-on
and the add-on is flagged for moderation
* Users in a group with "Users:Edit" privileges.
* Users in a group with "Addons:Edit" privileges.
    Persona editors can't delete add-on reviews.
"""
is_author = review.addon.has_author(request.user)
return (
review.user_id == request.user.id or
not is_author and (
(acl.is_editor(request, review.addon) and review.editorreview) or
acl.action_allowed(request, amo.permissions.USERS_EDIT) or
acl.action_allowed(request, amo.permissions.ADDONS_EDIT)))
@library.global_function
@jinja2.contextfunction
def check_review_delete(context, review):
return user_can_delete_review(context['request'], review)
| 33.436782 | 77 | 0.696116 |
ab3e250f158b4ed0173fe7715ee2559fe186d522 | 1,879 | py | Python | qurator/sbb_ned/embeddings/bert.py | qurator-spk/sbb_ned | d4cfe249f72e48913f254a58fbe0dbe6e47bd168 | [
"Apache-2.0"
] | 6 | 2020-09-05T16:08:59.000Z | 2022-03-05T00:54:47.000Z | qurator/sbb_ned/embeddings/bert.py | qurator-spk/sbb_ned | d4cfe249f72e48913f254a58fbe0dbe6e47bd168 | [
"Apache-2.0"
] | 6 | 2020-09-23T17:58:37.000Z | 2022-03-10T14:02:09.000Z | qurator/sbb_ned/embeddings/bert.py | qurator-spk/sbb_ned | d4cfe249f72e48913f254a58fbe0dbe6e47bd168 | [
"Apache-2.0"
] | 2 | 2021-03-22T00:12:51.000Z | 2022-01-31T10:04:08.000Z | from ..embeddings.base import Embeddings
from flair.data import Sentence
class BertEmbeddings(Embeddings):
def __init__(self, model_path,
layers="-1, -2, -3, -4", pooling_operation='first', use_scalar_mix=True, no_cuda=False, *args, **kwargs):
super(BertEmbeddings, self).__init__(*args, **kwargs)
self._path = model_path
self._embeddings = None
self._layers = layers
self._pooling_operation = pooling_operation
self._use_scalar_mix = use_scalar_mix
self._no_cuda = no_cuda
def get(self, keys):
if self._embeddings is None:
if self._no_cuda:
import flair
import torch
flair.device = torch.device('cpu')
from .flair_bert import BertEmbeddings
self._embeddings = BertEmbeddings(bert_model_or_path=self._path,
layers=self._layers,
pooling_operation=self._pooling_operation,
use_scalar_mix=self._use_scalar_mix)
sentences = [Sentence(key) for key in keys]
# noinspection PyUnresolvedReferences
self._embeddings.embed(sentences)
for s_idx, sentence in enumerate(sentences):
for t_idx, token in enumerate(sentence):
emb = token.embedding.cpu().numpy()
yield token.text, emb
del token
del sentence
def config(self):
return {'description': self.description()}
def description(self):
layer_str = self._layers
layer_str = layer_str.replace(' ', '')
layer_str = layer_str.replace(',', '_')
return "bert-layers_{}-pooling_{}-scalarmix_{}".format(layer_str, self._pooling_operation, self._use_scalar_mix)
| 29.359375 | 122 | 0.585418 |
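A hypothetical way to drive the wrapper above; the model name is an assumption (any BERT checkpoint flair accepts would do) and flair/torch must be installed:
emb = BertEmbeddings(model_path='bert-base-multilingual-cased', no_cuda=True)
for token, vector in emb.get(['Berlin ist eine Stadt.']):
    print(token, vector.shape)   # one (text, embedding) pair per token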
ebb8400c4a5e99a1aed5f9a3da92162ef02f6bef | 59,835 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/operations/_blob_containers_operations.py | hurtn/azure-sdk-for-python | 64cc053e589691da22fed7a47611199818c99b2b | [
"MIT"
] | 1 | 2020-12-10T03:17:51.000Z | 2020-12-10T03:17:51.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/operations/_blob_containers_operations.py | hurtn/azure-sdk-for-python | 64cc053e589691da22fed7a47611199818c99b2b | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/operations/_blob_containers_operations.py | hurtn/azure-sdk-for-python | 64cc053e589691da22fed7a47611199818c99b2b | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BlobContainersOperations(object):
"""BlobContainersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
account_name, # type: str
maxpagesize=None, # type: Optional[str]
filter=None, # type: Optional[str]
include="deleted", # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ListContainerItems"]
"""Lists all containers and does not support a prefix like data plane. Also SRP today does not
return continuation token.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param maxpagesize: Optional. Specified maximum number of containers that can be included in
the list.
:type maxpagesize: str
:param filter: Optional. When specified, only container names starting with the filter will be
listed.
:type filter: str
:param include: Optional, used to include the properties for soft deleted blob containers.
:type include: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListContainerItems or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2019_06_01.models.ListContainerItems]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ListContainerItems"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if maxpagesize is not None:
query_parameters['$maxpagesize'] = self._serialize.query("maxpagesize", maxpagesize, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if include is not None:
query_parameters['$include'] = self._serialize.query("include", include, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListContainerItems', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers'} # type: ignore
def create(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
blob_container, # type: "models.BlobContainer"
**kwargs # type: Any
):
# type: (...) -> "models.BlobContainer"
"""Creates a new container under the specified account as described by request body. The container
resource includes metadata and properties for that container. It does not include a list of the
blobs contained by the container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param blob_container: Properties of the blob container to create.
:type blob_container: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BlobContainer"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(blob_container, 'BlobContainer')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BlobContainer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
blob_container, # type: "models.BlobContainer"
**kwargs # type: Any
):
# type: (...) -> "models.BlobContainer"
"""Updates container properties as specified in request body. Properties not mentioned in the
request will be unchanged. Update fails if the specified container doesn't already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param blob_container: Properties to update for the blob container.
:type blob_container: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BlobContainer"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(blob_container, 'BlobContainer')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.BlobContainer"
"""Gets properties of a specified container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BlobContainer"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes specified container under its account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
def set_legal_hold(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
legal_hold, # type: "models.LegalHold"
**kwargs # type: Any
):
# type: (...) -> "models.LegalHold"
"""Sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold
follows an append pattern and does not clear out the existing tags that are not specified in
the request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param legal_hold: The LegalHold property that will be set to a blob container.
:type legal_hold: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LegalHold, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LegalHold"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.set_legal_hold.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(legal_hold, 'LegalHold')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LegalHold', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold'} # type: ignore
def clear_legal_hold(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
legal_hold, # type: "models.LegalHold"
**kwargs # type: Any
):
# type: (...) -> "models.LegalHold"
"""Clears legal hold tags. Clearing the same or non-existent tag results in an idempotent
operation. ClearLegalHold clears out only the specified tags in the request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param legal_hold: The LegalHold property that will be clear from a blob container.
:type legal_hold: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LegalHold, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LegalHold
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LegalHold"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.clear_legal_hold.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(legal_hold, 'LegalHold')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LegalHold', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
clear_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold'} # type: ignore
def create_or_update_immutability_policy(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
if_match=None, # type: Optional[str]
parameters=None, # type: Optional["models.ImmutabilityPolicy"]
**kwargs # type: Any
):
# type: (...) -> "models.ImmutabilityPolicy"
"""Creates or updates an unlocked immutability policy. ETag in If-Match is honored if given but
not required for this operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:param parameters: The ImmutabilityPolicy Properties that will be created or updated to a blob
container.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ImmutabilityPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
immutability_policy_name = "default"
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_or_update_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
def get_immutability_policy(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.ImmutabilityPolicy"
"""Gets the existing immutability policy along with the corresponding ETag in response headers and
body.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ImmutabilityPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
immutability_policy_name = "default"
api_version = "2019-06-01"
# Construct URL
url = self.get_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
def delete_immutability_policy(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
if_match, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ImmutabilityPolicy"
"""Aborts an unlocked immutability policy. The response of delete has
immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this
operation. Deleting a locked immutability policy is not allowed; the only way is to delete the
container after deleting all blobs inside the container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ImmutabilityPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
immutability_policy_name = "default"
api_version = "2019-06-01"
# Construct URL
url = self.delete_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
delete_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
def lock_immutability_policy(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
if_match, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ImmutabilityPolicy"
"""Sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is
ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ImmutabilityPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self.lock_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
lock_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock'} # type: ignore
def extend_immutability_policy(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
if_match, # type: str
parameters=None, # type: Optional["models.ImmutabilityPolicy"]
**kwargs # type: Any
):
# type: (...) -> "models.ImmutabilityPolicy"
"""Extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only
action allowed on a Locked policy will be this action. ETag in If-Match is required for this
operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:param parameters: The ImmutabilityPolicy Properties that will be extended for a blob
container.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ImmutabilityPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.extend_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
extend_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend'} # type: ignore
def lease(
self,
resource_group_name, # type: str
account_name, # type: str
container_name, # type: str
parameters=None, # type: Optional["models.LeaseContainerRequest"]
**kwargs # type: Any
):
# type: (...) -> "models.LeaseContainerResponse"
"""The Lease Container operation establishes and manages a lock on a container for delete
operations. The lock duration can be 15 to 60 seconds, or can be infinite.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and lower-
case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param parameters: Lease Container request body.
:type parameters: ~azure.mgmt.storage.v2019_06_01.models.LeaseContainerRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LeaseContainerResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_06_01.models.LeaseContainerResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LeaseContainerResponse"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.lease.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'LeaseContainerRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LeaseContainerResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease'} # type: ignore
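# Usage sketch (not part of the generated client): these operations are normally reached
# through the storage management client's `blob_containers` operations group. The resource
# group, account and container names below are placeholders.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.storage import StorageManagementClient
#
#     client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     policy = client.blob_containers.create_or_update_immutability_policy(
#         "my-rg", "mystorageaccount", "mycontainer",
#         parameters={"immutability_period_since_creation_in_days": 7},
#     )
#     client.blob_containers.lock_immutability_policy(
#         "my-rg", "mystorageaccount", "mycontainer", if_match=policy.etag,
#     )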
| 55.30037 | 297 | 0.68134 |
4efa8710121979d46fd0cdfe7a7d5cd6ec54bb3a | 2,016 | py | Python | venv/lib/python3.8/site-packages/vsts/build/v4_0/models/build_definition_revision.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/build/v4_0/models/build_definition_revision.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/build/v4_0/models/build_definition_revision.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class BuildDefinitionRevision(Model):
"""BuildDefinitionRevision.
:param changed_by:
:type changed_by: :class:`IdentityRef <build.v4_0.models.IdentityRef>`
:param changed_date:
:type changed_date: datetime
:param change_type:
:type change_type: object
:param comment:
:type comment: str
:param definition_url:
:type definition_url: str
:param name:
:type name: str
:param revision:
:type revision: int
"""
_attribute_map = {
'changed_by': {'key': 'changedBy', 'type': 'IdentityRef'},
'changed_date': {'key': 'changedDate', 'type': 'iso-8601'},
'change_type': {'key': 'changeType', 'type': 'object'},
'comment': {'key': 'comment', 'type': 'str'},
'definition_url': {'key': 'definitionUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'int'}
}
def __init__(self, changed_by=None, changed_date=None, change_type=None, comment=None, definition_url=None, name=None, revision=None):
super(BuildDefinitionRevision, self).__init__()
self.changed_by = changed_by
self.changed_date = changed_date
self.change_type = change_type
self.comment = comment
self.definition_url = definition_url
self.name = name
self.revision = revision
| 40.32 | 139 | 0.546131 |
3c41903c33289a8376f7df339b1faec5eddc1b2b | 2,587 | py | Python | projects/models.py | rkprajapat/construction-crm | ae70238862f3048e4b1b849f9c16bf1bdf867a28 | [
"MIT"
] | null | null | null | projects/models.py | rkprajapat/construction-crm | ae70238862f3048e4b1b849f9c16bf1bdf867a28 | [
"MIT"
] | 11 | 2020-02-12T00:51:32.000Z | 2022-03-11T23:57:22.000Z | projects/models.py | rkprajapat/construction-crm | ae70238862f3048e4b1b849f9c16bf1bdf867a28 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
from django_countries.fields import CountryField
from django.utils.text import slugify
from mdm.models import ProjectStatus, UnitStatus
from users.models import CustomUser
class Project(models.Model):
name = models.CharField(max_length=100, blank=False)
slug = models.SlugField(unique=True)
status = models.ForeignKey(ProjectStatus, blank=False, null=True, on_delete=models.SET_NULL)
address1 = models.CharField('Address Line 1', max_length=254, blank=False)
address2 = models.CharField('Address Line 2', max_length=254, blank=True)
city = models.CharField(max_length=50, blank=False)
zip_code = models.PositiveSmallIntegerField('ZIP/Postal Code')
country = CountryField()
image = models.ImageField(blank=True, )
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Project, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('project_details', args=[str(self.slug)])
@property
def address(self):
return ', '.join(str(x) for x in [self.address1, self.address2, self.city, self.zip_code, self.country.name] if x)
def __str__(self):
return self.name
class Tower(models.Model):
name = models.CharField(max_length=20, blank=True)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
def __str__(self):
if self.name:
return ' '.join([self.project.name, '-', self.name])
else:
return self.project.name
class Unit(models.Model):
unit = models.PositiveSmallIntegerField(blank=False)
project_tower = models.ForeignKey(Tower, on_delete=models.CASCADE)
status = models.ForeignKey(UnitStatus, blank=False, null=True, on_delete=models.SET_NULL)
slug = models.SlugField(unique=True)
owner = models.ManyToManyField(CustomUser, blank=True, related_name="owned_unit")
class Meta:
ordering = ['project_tower', 'unit']
def save(self, *args, **kwargs):
self.slug = slugify('-'.join([self.project_tower.project.name, self.project_tower.name, str(self.unit)]))
super(Unit, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('unit_details', args=[str(self.slug)])
@property
def address(self):
return self.project_tower.project.address
@property
def project(self):
return self.project_tower.project
def __str__(self):
return ' '.join([self.project_tower.project.name, self.project_tower.name, str(self.unit)])
| 35.438356 | 122 | 0.696946 |
15828be88e0e2efba05f5228f676397dc953ef50 | 5,291 | py | Python | ITHACA_SEM_code/Nektar_data.py | Mopolino8/ITHACA-SEM | 5ffdc13c9ecf14a51a6d044d567d3ec5f220efa0 | [
"MIT"
] | null | null | null | ITHACA_SEM_code/Nektar_data.py | Mopolino8/ITHACA-SEM | 5ffdc13c9ecf14a51a6d044d567d3ec5f220efa0 | [
"MIT"
] | null | null | null | ITHACA_SEM_code/Nektar_data.py | Mopolino8/ITHACA-SEM | 5ffdc13c9ecf14a51a6d044d567d3ec5f220efa0 | [
"MIT"
] | 1 | 2019-11-24T17:24:26.000Z | 2019-11-24T17:24:26.000Z |
import numpy as np
"""
runs using
1) python/3.3.2 2) numpy/1.11.1/python/3.3 3) scipy/0.17.1/python/3.3
module load python3/3.3.2 /scratch/mhess/py3p3/3.3
"""
def convert():
print("Generating Parameter-independent terms.")
f_IP = open('Nektar_data/IP.txt', 'r')
f_IPp = open('Nektar_data/IPp.txt', 'r')
f_cartmap0 = open('Nektar_data/cartmap0.txt', 'r')
f_cartmap1 = open('Nektar_data/cartmap1.txt', 'r')
IP = np.loadtxt(f_IP)
IPp = np.loadtxt(f_IPp)
cartmap0 = np.loadtxt(f_cartmap0)
cartmap1 = np.loadtxt(f_cartmap1)
np.save('ITHACA_SEM_data/IP', IP)
np.save('ITHACA_SEM_data/IPp', IPp)
np.save('ITHACA_SEM_data/cartmap0', cartmap0)
np.save('ITHACA_SEM_data/cartmap1', cartmap1)
nphys = cartmap0.shape[1]
npc = IP.shape[0]
num_elem = npc // nphys
for i in range(0,num_elem):
HM_str = 'Nektar_data/HelmMat_' + str(i) + '.txt'
f_HM = open(HM_str)
HM = np.loadtxt(f_HM)
HM_str = 'ITHACA_SEM_data/HM_' + str(i)
np.save(HM_str, HM)
f_bmap = open('Nektar_data/bmap.txt', 'r')
f_imap = open('Nektar_data/imap.txt', 'r')
bmap = np.loadtxt(f_bmap)
np.save('ITHACA_SEM_data/bmap', bmap)
imap = np.loadtxt(f_imap)
np.save('ITHACA_SEM_data/imap', imap)
f_bwdtrans = open('Nektar_data/bwdtrans.txt', 'r')
f_LocGloBndMap = open('Nektar_data/LocGloBndMap.txt', 'r')
f_LocGloBndSign = open('Nektar_data/LocGloBndSign.txt', 'r')
f_BndCondCoeffsToGlobalCoeffsMap = open('Nektar_data/BndCondCoeffsToGlobalCoeffsMap.txt', 'r')
f_LocGloMap = open('Nektar_data/LocGloMap.txt', 'r')
f_LocGloSign = open('Nektar_data/LocGloSign.txt', 'r')
f_glodofphys = open('Nektar_data/glo_dof_phys.txt', 'r')
f_numDirBnd = open('Nektar_data/NumGlobalDirBndCoeffs.txt', 'r')
f_LocGloMapMatA = open('Nektar_data/LocGloMapMatA.txt', 'r')
f_forcing0 = open('Nektar_data/forcing0.txt', 'r')
f_forcing1 = open('Nektar_data/forcing1.txt', 'r')
f_bndcond_k0_i_0 = open('Nektar_data/bndcond_k0_i_0.txt', 'r')
f_bndcond_k0_i_1 = open('Nektar_data/bndcond_k0_i_1.txt', 'r')
f_bndcond_k0_i_2 = open('Nektar_data/bndcond_k0_i_2.txt', 'r')
f_bndcond_k1_i_0 = open('Nektar_data/bndcond_k1_i_0.txt', 'r')
f_bndcond_k1_i_1 = open('Nektar_data/bndcond_k1_i_1.txt', 'r')
f_bndcond_k1_i_2 = open('Nektar_data/bndcond_k1_i_2.txt', 'r')
# f_LocGloMapA = open('LocGloMapA.txt', 'r')
# f_LocGloSignA = open('LocGloSignA.txt', 'r')
bwdtrans = np.loadtxt(f_bwdtrans)
LocGloBndMap = np.loadtxt(f_LocGloBndMap)
LocGloBndSign = np.loadtxt(f_LocGloBndSign)
BndCondCoeffsToGlobalCoeffsMap = np.loadtxt(f_BndCondCoeffsToGlobalCoeffsMap)
LocGloMap = np.loadtxt(f_LocGloMap)
LocGloSign = np.loadtxt(f_LocGloSign)
glodofphys = np.loadtxt(f_glodofphys)
numDirBnd = np.loadtxt(f_numDirBnd)
LocGloMapMatA = np.loadtxt(f_LocGloMapMatA)
forcing0 = np.loadtxt(f_forcing0)
forcing1 = np.loadtxt(f_forcing1)
bndcond_k0_i_0 = np.loadtxt(f_bndcond_k0_i_0)
bndcond_k0_i_1 = np.loadtxt(f_bndcond_k0_i_1)
bndcond_k0_i_2 = np.loadtxt(f_bndcond_k0_i_2)
bndcond_k1_i_0 = np.loadtxt(f_bndcond_k1_i_0)
bndcond_k1_i_1 = np.loadtxt(f_bndcond_k1_i_1)
bndcond_k1_i_2 = np.loadtxt(f_bndcond_k1_i_2)
np.save('ITHACA_SEM_data/bwdtrans', bwdtrans)
np.save('ITHACA_SEM_data/LocGloBndMap', LocGloBndMap)
np.save('ITHACA_SEM_data/LocGloBndSign', LocGloBndSign)
np.save('ITHACA_SEM_data/BndCondCoeffsToGlobalCoeffsMap', BndCondCoeffsToGlobalCoeffsMap)
np.save('ITHACA_SEM_data/LocGloMap', LocGloMap)
np.save('ITHACA_SEM_data/LocGloSign', LocGloSign)
np.save('ITHACA_SEM_data/glodofphys', glodofphys)
np.save('ITHACA_SEM_data/numDirBnd', numDirBnd)
np.save('ITHACA_SEM_data/LocGloMapMatA', LocGloMapMatA)
np.save('ITHACA_SEM_data/forcing0', forcing0)
np.save('ITHACA_SEM_data/forcing1', forcing1)
np.save('ITHACA_SEM_data/bndcond_k0_i_0', bndcond_k0_i_0)
np.save('ITHACA_SEM_data/bndcond_k0_i_1', bndcond_k0_i_1)
np.save('ITHACA_SEM_data/bndcond_k0_i_2', bndcond_k0_i_2)
np.save('ITHACA_SEM_data/bndcond_k1_i_0', bndcond_k1_i_0)
np.save('ITHACA_SEM_data/bndcond_k1_i_1', bndcond_k1_i_1)
np.save('ITHACA_SEM_data/bndcond_k1_i_2', bndcond_k1_i_2)
# f_filetxt = open('Nektar_data/cavity_poi_Oseen_ROM.txt', 'r')
# f_filetxt = open('Nektar_data/testsimu.txt', 'r')
# np.save('ITHACA_SEM_data/testsimu', filetxt)
# f_filetxt = open('Nektar_data/cavity_poi_Oseen_nD.txt', 'r')
# filetxt = np.loadtxt(f_filetxt)
# np.save('ITHACA_SEM_data/testsimu_nD', filetxt)
# f_filetxt = open('Nektar_data/cavity_poi_Oseen_D.txt', 'r')
# filetxt = np.loadtxt(f_filetxt)
# np.save('ITHACA_SEM_data/testsimu_D', filetxt)
# f_filetxt = open('Nektar_data/cav_tt_corr_D.txt', 'r')
# filetxt = np.loadtxt(f_filetxt)
# np.save('ITHACA_SEM_data/testsimu_time_D', filetxt)
# f_filetxt = open('Nektar_data/cav_tt_corr_nD.txt', 'r')
# filetxt = np.loadtxt(f_filetxt)
# np.save('ITHACA_SEM_data/testsimu_time_nD', filetxt)
# f_physglodof = open('phys_glo_dof.txt', 'r')
# physglodof = np.loadtxt(f_physglodof)
# np.save('physglodof', physglodof)
# f_f0 = open('forcing0.txt', 'r')
# force0 = np.loadtxt(f_f0)
# np.save('force0', force0)
# f_f1 = open('forcing1.txt', 'r')
# force1 = np.loadtxt(f_f1)
# np.save('force1', force1)
# LocGloMapA = np.loadtxt(f_LocGloMapA)
# LocGloSignA = np.loadtxt(f_LocGloSignA)
# np.save('LocGloSignA', LocGloSignA)
# np.save('LocGloMapA', LocGloMapA)
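# Usage sketch (assumption): convert() expects the 'Nektar_data/' text files listed above to
# exist and writes .npy files into an 'ITHACA_SEM_data/' directory that must already exist,
# so a minimal driver is simply:
#
#     if __name__ == '__main__':
#         convert()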
| 36.239726 | 95 | 0.749008 |
1afba788cf6dd5a30ffab348eeb5a47653d384f9 | 274 | py | Python | tests/artificial/transf_Integration/trend_MovingAverage/cycle_5/ar_12/test_artificial_32_Integration_MovingAverage_5_12_0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/artificial/transf_Integration/trend_MovingAverage/cycle_5/ar_12/test_artificial_32_Integration_MovingAverage_5_12_0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/artificial/transf_Integration/trend_MovingAverage/cycle_5/ar_12/test_artificial_32_Integration_MovingAverage_5_12_0.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 5, transform = "Integration", sigma = 0.0, exog_count = 0, ar_order = 12);
| 39.142857 | 169 | 0.737226 |
72a12b17c30ad937907bbfe48a27937878657ea1 | 705 | py | Python | operators/sampling/random_sampling.py | f4nku4n/MOENAS-TF-PSI | 5e25df9143a09ffdcfbb4d03851b919aed60003a | [
"MIT"
] | null | null | null | operators/sampling/random_sampling.py | f4nku4n/MOENAS-TF-PSI | 5e25df9143a09ffdcfbb4d03851b919aed60003a | [
"MIT"
] | null | null | null | operators/sampling/random_sampling.py | f4nku4n/MOENAS-TF-PSI | 5e25df9143a09ffdcfbb4d03851b919aed60003a | [
"MIT"
] | null | null | null | from model.population import Population
from utils import get_hashKey
class RandomSampling:
def __init__(self, nSamples=0):
self.nSamples = nSamples
def do(self, problem, **kwargs):
problem_name = problem.name
P = Population(self.nSamples)
n = 0
P_hash_key = []
while n < self.nSamples:
X = problem.sample_a_compact_architecture()
if problem.isValid(X):
hashKey = get_hashKey(X, problem_name)
if hashKey not in P_hash_key:
P[n].set('X', X)
P[n].set('hashKey', hashKey)
n += 1
return P
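# Usage sketch (assumption: `problem` exposes the interface used above, i.e. `name`,
# `sample_a_compact_architecture()` and `isValid()`):
#
#     sampler = RandomSampling(nSamples=20)
#     population = sampler.do(problem)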
if __name__ == '__main__':
pass
| 27.115385 | 55 | 0.550355 |
580b8e37127e85e0caffc1105fc0229b3044289f | 4,415 | py | Python | moderngl_window/meta/program.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 142 | 2019-11-11T23:14:28.000Z | 2022-03-29T08:37:03.000Z | moderngl_window/meta/program.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 107 | 2019-10-31T20:31:45.000Z | 2022-03-23T15:01:41.000Z | moderngl_window/meta/program.py | DavideRuzza/moderngl-window | e9debc6ed4a1899aa83c0da2320e03b0c2922b80 | [
"MIT"
] | 36 | 2019-12-12T16:14:10.000Z | 2022-01-18T22:58:21.000Z | from typing import List
from moderngl_window.meta.base import ResourceDescription
class ProgramDescription(ResourceDescription):
"""Describes a program to load
By default a program can be loaded in the following ways:
- By supplying a `path` to a single glsl file containing all shaders
- By supplying several paths to separate files containing each shader type.
For example ``vertex_shader``, ``fragment_shader`` .. etc.
.. code:: python
# Single glsl file containing all shaders
ProgramDescription(path='programs/myprogram.glsl')
# Multiple shader files
ProgramDescription(
vertex_shader='programs/myprogram_vs.glsl',
fragment_shader='programs/myprogram_fs.glsl',
geometry_shader='programs/myprogram_gs.glsl',
)
"""
default_kind = None
resource_type = "programs"
def __init__(
self,
path: str = None,
kind: str = None,
reloadable=False,
vertex_shader: str = None,
geometry_shader: str = None,
fragment_shader: str = None,
tess_control_shader: str = None,
tess_evaluation_shader: str = None,
compute_shader: str = None,
defines: dict = None,
varyings: List = None,
**kwargs
):
"""Create a program description
Keyword Args:
path (str): path to the resource relative to search directories
kind (str): The kind of loader to use
reloadable (bool): Should this program be reloadable
vertex_shader (str): Path to vertex shader file
geometry_shader (str): Path to geometry shader
fragment_shader (str): Path to fragment shader
tess_control_shader (str): Path to tess control shader
tess_evaluation_shader (str): Path to tess eval shader
compute_shader (str): Path to compute shader
defines (dict): Dictionary with define values to replace in the source
varyings (List): List of varying names for transform shader
**kwargs: Optional custom attributes
"""
kwargs.update(
{
"path": path,
"kind": kind,
"reloadable": reloadable,
"vertex_shader": vertex_shader,
"geometry_shader": geometry_shader,
"fragment_shader": fragment_shader,
"tess_control_shader": tess_control_shader,
"tess_evaluation_shader": tess_evaluation_shader,
"compute_shader": compute_shader,
"defines": defines,
"varyings": varyings,
}
)
super().__init__(**kwargs)
@property
def reloadable(self) -> bool:
"""bool: if this program is reloadable"""
return self._kwargs.get("reloadable")
@reloadable.setter
def reloadable(self, value):
self._kwargs["reloadable"] = value
@property
def vertex_shader(self) -> str:
"""str: Relative path to vertex shader"""
return self._kwargs.get("vertex_shader")
@property
def geometry_shader(self) -> str:
"""str: Relative path to geometry shader"""
return self._kwargs.get("geometry_shader")
@property
def fragment_shader(self) -> str:
"""str: Relative path to fragment shader"""
return self._kwargs.get("fragment_shader")
@property
def tess_control_shader(self) -> str:
"""str: Relative path to tess control shader"""
return self._kwargs.get("tess_control_shader")
@property
def tess_evaluation_shader(self) -> str:
"""str: Relative path to tessellation evaluation shader"""
return self._kwargs.get("tess_evaluation_shader")
@property
def compute_shader(self) -> str:
"""str: Relative path to compute shader"""
return self._kwargs.get("compute_shader")
@property
def defines(self) -> dict:
"""dict: Dictionary with define values to replace in the source"""
return self._kwargs.get("defines", {})
@property
def varyings(self) -> List:
"""List: List of varying names for transform shaders"""
return self._kwargs.get("varyings", [])
| 35.039683 | 83 | 0.598641 |
6b6275971c2c2c93ef056ea14daf1275cb7b38e2 | 11,053 | py | Python | zoo/analytics/tasks/utils.py | aexvir/the-zoo | 7816afb9a0a26c6058b030b4a987c73e952d92bd | [
"MIT"
] | 90 | 2018-11-20T10:58:24.000Z | 2022-02-19T16:12:46.000Z | zoo/analytics/tasks/utils.py | kiwicom/the-zoo | fee0108ea7b65112e5b572a146cff4b1c54033fd | [
"MIT"
] | 348 | 2018-11-21T09:22:31.000Z | 2021-11-03T13:45:08.000Z | zoo/analytics/tasks/utils.py | aexvir/the-zoo | 7816afb9a0a26c6058b030b4a987c73e952d92bd | [
"MIT"
] | 11 | 2018-12-08T18:42:07.000Z | 2021-02-21T06:27:58.000Z | import re
from typing import Optional
import structlog
log = structlog.get_logger()
def parse_version(raw_version: str) -> str:
"""Resolve semantic versioning used in javascript packages."""
try:
validate_version(raw_version)
except ValueError:
log.exception("analytics.utils.parse_version.error", raw_version=raw_version)
return None
if "=" in raw_version:
return raw_version.split("=")[1]
if raw_version.startswith("^"):
return resolve_caret_version(raw_version)
if raw_version.startswith("~"):
return resolve_tilde_version(raw_version)
return resolve_unknown_format(raw_version)
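# Worked examples for parse_version (derived from the helpers defined below):
#
#     parse_version("^1.2.3")  -> "1"     # caret: keep components up to the first non-zero one
#     parse_version("~1.2.3")  -> "1.2"   # tilde: drop the last component
#     parse_version(">=2.0.1") -> "2.0.1" # contains "=": take the part after it
#     parse_version("1.2.x")   -> "1.2"   # unknown format: keep the leading numeric parts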
def validate_version(version: str) -> bool: # Ignore RadonBear
if not version:
raise ValueError("Version given is an empty string?!?!¿!?")
if version.count(".") > 2:
raise ValueError(f"Version {version} has too many dots")
if not any(True for char in version if char.isdigit()):
raise ValueError(f"Version {version} has no digits in it")
if " " in version or "||" in version or "#" in version:
raise ValueError(f"Version {version} has unsupported version specifiers")
if ("<" in version or ">" in version) and "=" not in version:
raise ValueError(f"Version {version} has unsupported version specifiers")
def resolve_caret_version(version: str) -> str:
temp_version = ""
for version_digit in version[1:].split("."):
temp_version += version_digit
if version_digit.isdigit() and int(version_digit) != 0:
break
temp_version += "."
return temp_version
def resolve_tilde_version(version: str) -> str:
version = version[1:].split(".")
if any(True for version_digit in version[:~0] if int(version_digit) != 0):
del version[~0]
return ".".join(version)
def resolve_unknown_format(version: str) -> str:
temp_version = []
for version_digit in version.split("."):
if version_digit.isdigit():
temp_version.append(version_digit)
else:
break
return ".".join(temp_version)
class DockerImageId:
"""Represents parsed docker image id."""
# pylint: disable=too-many-instance-attributes # Ignore PyCommentedCodeBear
def __init__(self, image: str): # Ignore RadonBear
self.__full_image_id = " ".join(image.split())
self.__registry = None
self.__registry_host_name = None
self.__registry_port = None
self.__full_image_name = None
self.__full_image_name_no_tag = None
self.__username = None
self.__full_repository_name = None
self.__repository_name = None
self.__namespaces = None
self.__tag = None
self.__version = None
self.__full_os = None
self.__os = None
self.__os_version = None
self.__alias = None
@property
def full_image_id(self) -> str:
"""Full provided image identifier."""
return self.__full_image_id
@property
def registry(self) -> str:
"""Docker registry.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
Docker registry is recognised as:
a) "localhost" present before first "/" of the full docker id.
b) "." present before first "/" of the full docker id.
"""
if "/" in self.full_image_id:
registry_part, _, _ = self.full_image_id.partition("/")
if "localhost" in registry_part or "." in registry_part:
# This really is a registry string.
self.__registry, _, _ = self.full_image_id.partition("/")
return self.__registry
@property
def registry_host_name(self) -> str:
"""Docker registry host name.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
"""
self.__registry_host_name = self.registry
if self.registry and ":" in self.registry:
self.__registry_host_name, _, _ = self.registry.partition(":")
return self.__registry_host_name
@property
def registry_port(self) -> str:
"""Docker registry port.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾
"""
if self.registry and ":" in self.registry:
_, _, self.__registry_port = self.registry.partition(":")
return self.__registry_port
@property
def full_image_name(self) -> str:
"""Whole name of the image without the registry.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
"""
self.__full_image_name = self.full_image_id
if self.registry:
_, _, self.__full_image_name = self.full_image_id.partition("/")
return self.__full_image_name
@property
def tag(self) -> str:
"""Tag.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
"""
if ":" in self.full_image_name:
_, _, self.__tag = self.full_image_name.partition(":")
if " as " in self.__tag:
self.__tag, _, _ = self.__tag.partition(" as ")
return self.__tag
@property
def full_image_name_no_tag(self) -> str:
"""Full image name without the tag and alias.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
"""
self.__full_image_name_no_tag, _, _ = self.full_image_name.partition(":")
self.__full_image_name_no_tag, _, _ = self.__full_image_name_no_tag.partition(
" as "
)
return self.__full_image_name_no_tag
@property
def username(self) -> str:
"""Username (first part of the image path by convention).
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾‾‾
"""
if "/" in self.full_image_name_no_tag:
self.__username, _, _ = self.full_image_name_no_tag.partition("/")
return self.__username
@property
def full_repository_name(self) -> str:
"""Full repository name without "username".
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
"""
self.__full_repository_name = self.full_image_name_no_tag
if "/" in self.__full_repository_name:
_, _, self.__full_repository_name = self.__full_repository_name.partition(
"/"
)
return self.__full_repository_name
@property
def repository_name(self) -> str:
"""First part of the full repository name path (repository name by convention).
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾
"""
self.__repository_name = self.full_repository_name
if "/" in self.__repository_name:
# Take the first part as repo name if there still are more slashes present.
self.__repository_name, _, _ = self.__repository_name.partition("/")
return self.__repository_name
@property
def namespaces(self) -> []:
"""Namespaces" - all path parts after username and image name as a list.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
"""
if "/" in self.full_repository_name:
_, _, namespaces = self.full_repository_name.partition("/")
self.__namespaces = namespaces.split("/")
return self.__namespaces
@property
def alias(self) -> str:
"""Image identifier alias.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾‾
"""
if " as " in self.full_image_id:
_, _, self.__alias = self.full_image_id.partition(" as ")
return self.__alias
@property
def version(self) -> str:
"""Version of the image.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾
"""
self.__version = self.tag
if self.__version and "-" in self.__version:
self.__version, _, _ = self.tag.partition("-")
return self.__version
@property
def full_os(self) -> str:
"""OS part - parsed from tag when "-" is present.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾‾‾‾
"""
if self.tag and "-" in self.tag:
_, _, self.__full_os = self.tag.partition("-")
return self.__full_os
@property
def os(self) -> str:
"""OS without version.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾‾‾‾
"""
self.__os = self.__full_os
if self.full_os:
os_version = self.__parse_semantic_version(self.__full_os)
if os_version:
self.__os = self.__full_os.replace(os_version, "")
return self.__os
@property
def os_version(self) -> str:
"""OS version.
my.registry.com:5005/special-name/my-image/namespace/another-namespace:0.1.1-alpine3.9 as base-image
‾‾‾
"""
self.__os_version = self.__parse_semantic_version(self.__full_os)
return self.__os_version
@staticmethod
def __parse_semantic_version(string: Optional[str]) -> Optional[str]:
"""Parse semantic version from provided input."""
if string:
match = re.search(r"\d+[\d|\.]*\d", string)
if match:
return match.group()
return None
def __str__(self):
return str(self.__dict__)
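# Usage sketch (the identifier below is made up):
#
#     image = DockerImageId("my.registry.com:5005/team/app:1.2.3-alpine3.9 as base")
#     image.registry_host_name    # "my.registry.com"
#     image.registry_port         # "5005"
#     image.repository_name       # "app"
#     image.version               # "1.2.3"
#     image.os, image.os_version  # ("alpine", "3.9")
#     image.alias                 # "base"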
| 35.088889 | 108 | 0.570252 |
fe9d3e970caa858bc4e75aa0dab31c4140a120ca | 3,626 | py | Python | tests/unit/test_sink/test_rdf_sink.py | kevinschaper/kgx | 4c129428131047af4506a93fe8dc54fb92a7c702 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_sink/test_rdf_sink.py | kevinschaper/kgx | 4c129428131047af4506a93fe8dc54fb92a7c702 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_sink/test_rdf_sink.py | kevinschaper/kgx | 4c129428131047af4506a93fe8dc54fb92a7c702 | [
"BSD-3-Clause"
] | null | null | null | import os
import pytest
import rdflib
from kgx.sink import RdfSink
from tests import TARGET_DIR
from tests.unit.test_sink import get_graph
def test_write_rdf1():
"""
Write a graph as RDF N-Triples using RdfSink.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, 'test_graph1.nt')
s = RdfSink(filename=filename)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(filename)
lines = open(filename, 'r').readlines()
assert len(lines) == 18
def test_write_rdf2():
"""
Write a graph as a compressed RDF N-Triples using RdfSink.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, 'test_graph2.nt.gz')
s = RdfSink(filename=filename, compression=True)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(filename)
lines = open(filename, 'r').readlines()
assert len(lines) == 18
def test_write_rdf3():
"""
Write a graph as RDF N-Triples using RdfSink, where all edges are reified.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, 'test_graph3.nt')
s = RdfSink(filename=filename, reify_all_edges=True)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(filename)
lines = open(filename, 'r').readlines()
assert len(lines) == 42
@pytest.mark.parametrize(
"query",
[
('id', 'uriorcurie', 'MONDO:000001', 'URIRef', None),
(
'name',
'xsd:string',
'Test concept name',
'Literal',
rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#string'),
),
('predicate', 'uriorcurie', 'biolink:related_to', 'URIRef', None),
('relation', 'uriorcurie', 'RO:000000', 'URIRef', None),
('custom_property1', 'uriorcurie', 'X:123', 'URIRef', None),
(
'custom_property2',
'xsd:float',
'480.213',
'Literal',
rdflib.term.URIRef('http://www.w3.org/2001/XMLSchema#float'),
),
],
)
def test_prepare_object(query):
"""
Test internal _prepare_object method.
"""
sink = RdfSink(os.path.join(TARGET_DIR, 'test_graph3.nt'))
o = sink._prepare_object(query[0], query[1], query[2])
assert type(o).__name__ == query[3]
if query[4]:
assert o.datatype == query[4]
@pytest.mark.parametrize(
"query", [('name', 'xsd:string'), ('predicate', 'uriorcurie'), ('xyz', 'xsd:string')]
)
def test_get_property_type(query):
"""
Test to ensure that get_property_type returns the appropriate type
for a given property.
"""
sink = RdfSink(os.path.join(TARGET_DIR, 'test_graph3.nt'))
assert sink._get_property_type(query[0]) == query[1]
@pytest.mark.parametrize(
"query",
[
('MONDO:000001', 'URIRef', 'http://purl.obolibrary.org/obo/MONDO_000001'),
('urn:uuid:12345', 'URIRef', 'urn:uuid:12345'),
(':new_prop', 'URIRef', 'https://www.example.org/UNKNOWN/new_prop'),
],
)
def test_uriref(query):
"""
Test for uriref method.
"""
sink = RdfSink(os.path.join(TARGET_DIR, 'test_graph3.nt'))
x = sink.uriref(query[0])
assert type(x).__name__ == query[1]
assert str(x) == query[2]
| 27.263158 | 89 | 0.608384 |
200f5625d034013153bc777bbedae61457a783c6 | 4,626 | py | Python | render_swap_matrix.py | EdwardJTL/Edit3D | bbb6364aeb5ea17c12c0c23578268641c066ebca | [
"MIT"
] | null | null | null | render_swap_matrix.py | EdwardJTL/Edit3D | bbb6364aeb5ea17c12c0c23578268641c066ebca | [
"MIT"
] | null | null | null | render_swap_matrix.py | EdwardJTL/Edit3D | bbb6364aeb5ea17c12c0c23578268641c066ebca | [
"MIT"
] | null | null | null | import math
import numpy as np
import os
import torch
from torch_ema import ExponentialMovingAverage
from torchvision.utils import save_image
from PIL import Image, ImageDraw, ImageFont
import curriculums
import network_config
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def generate_image(generator, z, return_aux_img=True, **kwargs):
def transform(img):
img_min = img.min()
img_max = img.max()
img = (img - img_min) / (img_max - img_min) * 256
img = img.permute(0, 2, 3, 1).squeeze().cpu().numpy()
return img
with torch.no_grad():
imgs = generator(z, forward_points=256**2, return_aux_img=return_aux_img, **kwargs)[0]
img = imgs[0, :, :, :].unsqueeze(dim=0)
aux_img = imgs[1, :, :, :].unsqueeze(dim=0)
img = transform(img)
aux_img = transform(aux_img)
return img, aux_img
def make_curriculum(curriculum):
curriculum = getattr(curriculums, curriculum, None)
if curriculum is None:
raise ValueError(f"{curriculum} is not a valid curriculum")
curriculum["num_steps"] = curriculum[0]["num_steps"]
curriculum["psi"] = 1
curriculum["v_stddev"] = 0
curriculum["h_stddev"] = 0
curriculum["nerf_noise"] = 0
curriculum = {key: value for key, value in curriculum.items() if type(key) is str}
return curriculum
def make_gen_args(curriculum):
gen_args = {
"img_size": curriculum["img_size"],
"fov": curriculum["fov"],
"ray_start": curriculum["ray_start"],
"ray_end": curriculum["ray_end"],
"num_steps": curriculum["num_steps"],
"h_mean": curriculum["h_mean"],
"v_mean": curriculum["v_mean"],
"h_stddev": 0,
"v_stddev": 0,
"hierarchical_sample": curriculum["hierarchical_sample"],
"psi": curriculum["psi"],
"sample_dist": curriculum["sample_dist"],
"nerf_noise": curriculum["nerf_noise"]
}
return gen_args
def load_generator(model_path):
generator = torch.load(
os.path.join(model_path, "generator.pth"), map_location=torch.device(device)
)
ema_dict = torch.load(
os.path.join(model_path, "ema.pth"), map_location=torch.device(device)
)
ema = ExponentialMovingAverage(generator.parameters(), decay=0.999)
ema.load_state_dict(ema_dict)
ema.copy_to(generator.parameters())
generator.set_device(device)
generator.eval()
return generator
def main():
img_size = 128
# model_path = "/checkpoint/edwardl/6774980/DELAYEDPURGE/"
# curriculum = "CelebA"
model_path = "/h/edwardl/scratch/edit3d/output/6754083/DELAYEDPURGE/"
curriculum = "LSUN"
curriculum = make_curriculum(curriculum)
curriculum["img_size"] = img_size
curriculum["h_mean"] = math.pi * 0.5 + 0.7
curriculum["v_mean"] = math.pi / 2 - 0.5
gen_args = make_gen_args(curriculum)
# make z's
generator = load_generator(model_path)
# seeds = [0, 30, 37, 44, 58]
seeds = [51, 68, 285, 4363, 1996, 314233, 314418, 314344, 314381]
z_s = []
for seed in seeds:
torch.manual_seed(seed)
z_s.append(generator.get_zs(b=1, dist=curriculum['z_dist']))
canvas = Image.new(
# channels
"RGBA",
(
# width
img_size * len(seeds),
# height
img_size * len(seeds)
),
# fill color
(255, 255, 255, 255),
)
canvas_aux = Image.new(
# channels
"RGBA",
(
# width
img_size * len(seeds),
# height
img_size * len(seeds)
),
# fill color
(255, 255, 255, 255),
)
for i, z_a in enumerate(z_s):
for j, z_b in enumerate(z_s):
print("i {} {}; j {} {}".format(i, np.linalg.norm(z_a["z_nerf"].cpu()), j, np.linalg.norm(z_b["z_inr"].cpu())))
z = {
"z_nerf": z_a["z_nerf"],
# "z_inr": torch.zeros(z_b["z_inr"].shape, device=device),
"z_inr": z_b["z_inr"]
}
img, aux_img = generate_image(generator, z, **gen_args)
PIL_image = Image.fromarray(np.uint8(img)).convert("RGB")
canvas.paste(
PIL_image, (img_size * i, img_size * j)
)
PIL_image_aux = Image.fromarray(np.uint8(aux_img)).convert("RGB")
canvas_aux.paste(
PIL_image_aux, (img_size * i, img_size * j)
)
canvas.save("./test.png")
canvas_aux.save("./test_aux.png")
return
if __name__ == "__main__":
main()
| 30.235294 | 123 | 0.591224 |
54b12a6122c9dff8d73d6a2e803697e4ecfab3d5 | 19,926 | py | Python | panel/interact.py | ahuang11/panel | a0e38316701cac5293da6de3441010c0ff5719da | [
"BSD-3-Clause"
] | null | null | null | panel/interact.py | ahuang11/panel | a0e38316701cac5293da6de3441010c0ff5719da | [
"BSD-3-Clause"
] | null | null | null | panel/interact.py | ahuang11/panel | a0e38316701cac5293da6de3441010c0ff5719da | [
"BSD-3-Clause"
] | null | null | null | """
Interact with functions using widgets.
The interact Pane implemented in this module mirrors
ipywidgets.interact in its API and implementation. Large parts of the
code were copied directly from ipywidgets:
Copyright (c) Jupyter Development Team and PyViz Development Team.
Distributed under the terms of the Modified BSD License.
"""
from __future__ import absolute_import, division, unicode_literals
import types
from collections import OrderedDict
from inspect import getcallargs
from numbers import Real, Integral
from six import string_types
try: # Python >= 3.3
from inspect import signature, Parameter
from collections.abc import Iterable, Mapping
empty = Parameter.empty
except ImportError:
from collections import Iterable, Mapping
try:
from IPython.utils.signatures import signature, Parameter
empty = Parameter.empty
except Exception:
signature, Parameter, empty = None, None, None
try:
from inspect import getfullargspec as check_argspec
except ImportError:
from inspect import getargspec as check_argspec # py2
import param
from .layout import Panel, Column, Row
from .pane import PaneBase, HTML, panel
from .pane.base import ReplacementPane
from .util import as_unicode
from .viewable import Viewable
from .widgets import (Checkbox, TextInput, Widget, IntSlider, FloatSlider,
Select, DiscreteSlider, Button)
def _get_min_max_value(min, max, value=None, step=None):
"""Return min, max, value given input values with possible None."""
# Either min and max need to be given, or value needs to be given
if value is None:
if min is None or max is None:
raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))
diff = max - min
value = min + (diff / 2)
# Ensure that value has the same type as diff
if not isinstance(value, type(diff)):
value = min + (diff // 2)
else: # value is not None
if not isinstance(value, Real):
raise TypeError('expected a real number, got: %r' % value)
# Infer min/max from value
if value == 0:
# This gives (0, 1) of the correct type
vrange = (value, value + 1)
elif value > 0:
vrange = (-value, 3*value)
else:
vrange = (3*value, -value)
if min is None:
min = vrange[0]
if max is None:
max = vrange[1]
if step is not None:
# ensure value is on a step
tick = int((value - min) / step)
value = min + tick * step
if not min <= value <= max:
raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))
return min, max, value
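# Illustrative behaviour (a sketch, not part of the original module): with only a
# positive value given the range becomes (-value, 3*value), so
# _get_min_max_value(None, None, 4) returns (-4, 12, 4), while
# _get_min_max_value(0, 10) infers the midpoint and returns (0, 10, 5).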
def _yield_abbreviations_for_parameter(parameter, kwargs):
"""Get an abbreviation for a function parameter."""
name = parameter.name
kind = parameter.kind
ann = parameter.annotation
default = parameter.default
not_found = (name, empty, empty)
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY):
if name in kwargs:
value = kwargs.pop(name)
elif ann is not empty:
param.main.warning("Using function annotations to implicitly specify interactive controls is deprecated. "
"Use an explicit keyword argument for the parameter instead.", DeprecationWarning)
value = ann
elif default is not empty:
value = default
if isinstance(value, (Iterable, Mapping)):
value = fixed(value)
else:
yield not_found
yield (name, value, default)
elif kind == Parameter.VAR_KEYWORD:
# In this case name=kwargs and we yield the items in kwargs with their keys.
for k, v in kwargs.copy().items():
kwargs.pop(k)
yield k, v, empty
def _matches(o, pattern):
"""Match a pattern of types in a sequence."""
if not len(o) == len(pattern):
return False
comps = zip(o,pattern)
return all(isinstance(obj,kind) for obj,kind in comps)
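# For example (not in the original source), _matches((1, 5.0), (Real, Real)) is True,
# while _matches((1,), (Real, Real)) is False because the lengths differ.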
class interactive(PaneBase):
default_layout = param.ClassSelector(default=Column, class_=(Panel),
is_instance=False)
manual_update = param.Boolean(default=False, doc="""
Whether to update manually by clicking on button.""")
manual_name = param.String(default='Run Interact')
def __init__(self, object, params={}, **kwargs):
if signature is None:
            raise ImportError('interact requires either a recent Python version '
                              '(>= 3.3) or IPython to inspect function signatures.')
super(interactive, self).__init__(object, **params)
new_kwargs = self.find_abbreviations(kwargs)
# Before we proceed, let's make sure that the user has passed a set of args+kwargs
# that will lead to a valid call of the function. This protects against unspecified
# and doubly-specified arguments.
try:
check_argspec(object)
except TypeError:
# if we can't inspect, we can't validate
pass
else:
getcallargs(object, **{n:v for n,v,_ in new_kwargs})
widgets = self.widgets_from_abbreviations(new_kwargs)
if self.manual_update:
widgets.append(('manual', Button(name=self.manual_name)))
self._widgets = OrderedDict(widgets)
pane = self.object(**self.kwargs)
if isinstance(pane, Viewable):
self._pane = pane
self._internal = False
else:
self._pane = panel(pane, name=self.name)
self._internal = True
self._inner_layout = Row(self._pane)
widgets = [widget for _, widget in widgets if isinstance(widget, Widget)]
if 'name' in params:
widgets.insert(0, HTML('<h2>%s</h2>' % self.name))
self.widget_box = Column(*widgets)
self.layout.objects = [self.widget_box, self._inner_layout]
self._link_widgets()
#----------------------------------------------------------------
# Model API
#----------------------------------------------------------------
def _get_model(self, doc, root=None, parent=None, comm=None):
return self._inner_layout._get_model(doc, root, parent, comm)
#----------------------------------------------------------------
# Callback API
#----------------------------------------------------------------
def _synced_params(self):
return []
def _link_widgets(self):
if self.manual_update:
widgets = [('manual', self._widgets['manual'])]
else:
widgets = self._widgets.items()
for name, widget in widgets:
def update_pane(change):
# Try updating existing pane
new_object = self.object(**self.kwargs)
new_pane, internal = ReplacementPane._update_from_object(
new_object, self._pane, self._internal
)
if new_pane is None:
return
# Replace pane entirely
self._pane = new_pane
self._inner_layout[0] = new_pane
self._internal = internal
pname = 'clicks' if name == 'manual' else 'value'
watcher = widget.param.watch(update_pane, pname)
self._callbacks.append(watcher)
def _cleanup(self, root):
self._inner_layout._cleanup(root)
super(interactive, self)._cleanup(root)
#----------------------------------------------------------------
# Public API
#----------------------------------------------------------------
@property
def kwargs(self):
return {k: widget.value for k, widget in self._widgets.items()
if k != 'manual'}
def signature(self):
return signature(self.object)
def find_abbreviations(self, kwargs):
"""Find the abbreviations for the given function and kwargs.
Return (name, abbrev, default) tuples.
"""
new_kwargs = []
try:
sig = self.signature()
except (ValueError, TypeError):
# can't inspect, no info from function; only use kwargs
return [ (key, value, value) for key, value in kwargs.items() ]
for parameter in sig.parameters.values():
for name, value, default in _yield_abbreviations_for_parameter(parameter, kwargs):
if value is empty:
raise ValueError('cannot find widget or abbreviation for argument: {!r}'.format(name))
new_kwargs.append((name, value, default))
return new_kwargs
def widgets_from_abbreviations(self, seq):
"""Given a sequence of (name, abbrev, default) tuples, return a sequence of Widgets."""
result = []
for name, abbrev, default in seq:
if isinstance(abbrev, fixed):
widget = abbrev
else:
widget = self.widget_from_abbrev(abbrev, name, default)
if not (isinstance(widget, Widget) or isinstance(widget, fixed)):
if widget is None:
continue
else:
raise TypeError("{!r} is not a ValueWidget".format(widget))
result.append((name, widget))
return result
@classmethod
def applies(cls, object):
return isinstance(object, types.FunctionType)
@classmethod
def widget_from_abbrev(cls, abbrev, name, default=empty):
"""Build a ValueWidget instance given an abbreviation or Widget."""
if isinstance(abbrev, Widget):
return abbrev
if isinstance(abbrev, tuple):
widget = cls.widget_from_tuple(abbrev, name, default)
if default is not empty:
try:
widget.value = default
except Exception:
# ignore failure to set default
pass
return widget
# Try single value
widget = cls.widget_from_single_value(abbrev, name)
if widget is not None:
return widget
# Something iterable (list, dict, generator, ...). Note that str and
# tuple should be handled before, that is why we check this case last.
if isinstance(abbrev, Iterable):
widget = cls.widget_from_iterable(abbrev, name)
if default is not empty:
try:
widget.value = default
except Exception:
# ignore failure to set default
pass
return widget
# No idea...
return fixed(abbrev)
@staticmethod
def widget_from_single_value(o, name):
"""Make widgets from single values, which can be used as parameter defaults."""
if isinstance(o, string_types):
return TextInput(value=as_unicode(o), name=name)
elif isinstance(o, bool):
return Checkbox(value=o, name=name)
elif isinstance(o, Integral):
min, max, value = _get_min_max_value(None, None, o)
return IntSlider(value=o, start=min, end=max, name=name)
elif isinstance(o, Real):
min, max, value = _get_min_max_value(None, None, o)
return FloatSlider(value=o, start=min, end=max, name=name)
else:
return None
@staticmethod
def widget_from_tuple(o, name, default=empty):
"""Make widgets from a tuple abbreviation."""
int_default = (default is empty or isinstance(default, int))
if _matches(o, (Real, Real)):
min, max, value = _get_min_max_value(o[0], o[1])
if all(isinstance(_, Integral) for _ in o) and int_default:
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, start=min, end=max, name=name)
elif _matches(o, (Real, Real, Real)):
step = o[2]
if step <= 0:
raise ValueError("step must be >= 0, not %r" % step)
min, max, value = _get_min_max_value(o[0], o[1], step=step)
if all(isinstance(_, Integral) for _ in o) and int_default:
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, start=min, end=max, step=step, name=name)
elif _matches(o, (Real, Real, Real, Real)):
step = o[2]
if step <= 0:
raise ValueError("step must be >= 0, not %r" % step)
min, max, value = _get_min_max_value(o[0], o[1], value=o[3], step=step)
if all(isinstance(_, Integral) for _ in o):
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, start=min, end=max, step=step, name=name)
elif len(o) == 4:
min, max, value = _get_min_max_value(o[0], o[1], value=o[3])
if all(isinstance(_, Integral) for _ in [o[0], o[1], o[3]]):
cls = IntSlider
else:
cls = FloatSlider
return cls(value=value, start=min, end=max, name=name)
@staticmethod
def widget_from_iterable(o, name):
"""Make widgets from an iterable. This should not be done for
a string or tuple."""
# Select expects a dict or list, so we convert an arbitrary
# iterable to either of those.
values = list(o.values()) if isinstance(o, Mapping) else list(o)
widget_type = DiscreteSlider if all(param._is_number(v) for v in values) else Select
if isinstance(o, (list, dict)):
return widget_type(options=o, name=name)
elif isinstance(o, Mapping):
return widget_type(options=list(o.items()), name=name)
else:
return widget_type(options=list(o), name=name)
# Return a factory for interactive functions
@classmethod
def factory(cls):
options = dict(manual_update=False, manual_name="Run Interact")
return _InteractFactory(cls, options)
class _InteractFactory(object):
"""
Factory for instances of :class:`interactive`.
Arguments
---------
cls: class
The subclass of :class:`interactive` to construct.
options: dict
A dict of options used to construct the interactive
function. By default, this is returned by
``cls.default_options()``.
kwargs: dict
A dict of **kwargs to use for widgets.
"""
def __init__(self, cls, options, kwargs=None):
self.cls = cls
self.opts = options
self.kwargs = kwargs or {}
def widget(self, f):
"""
Return an interactive function widget for the given function.
The widget is only constructed, not displayed nor attached to
the function.
Returns
-------
An instance of ``self.cls`` (typically :class:`interactive`).
Parameters
----------
f : function
The function to which the interactive widgets are tied.
"""
return self.cls(f, self.opts, **self.kwargs)
def __call__(self, __interact_f=None, **kwargs):
"""
Make the given function interactive by adding and displaying
the corresponding :class:`interactive` widget.
Expects the first argument to be a function. Parameters to this
function are widget abbreviations passed in as keyword arguments
(``**kwargs``). Can be used as a decorator (see examples).
Returns
-------
f : __interact_f with interactive widget attached to it.
Parameters
----------
__interact_f : function
The function to which the interactive widgets are tied. The `**kwargs`
should match the function signature. Passed to :func:`interactive()`
**kwargs : various, optional
An interactive widget is created for each keyword argument that is a
valid widget abbreviation. Passed to :func:`interactive()`
Examples
--------
Render an interactive text field that shows the greeting with the passed in
text::
# 1. Using interact as a function
def greeting(text="World"):
print("Hello {}".format(text))
interact(greeting, text="IPython Widgets")
# 2. Using interact as a decorator
@interact
def greeting(text="World"):
print("Hello {}".format(text))
# 3. Using interact as a decorator with named parameters
@interact(text="IPython Widgets")
def greeting(text="World"):
print("Hello {}".format(text))
        Render an interactive slider widget that prints the square of a number::
# 1. Using interact as a function
def square(num=1):
print("{} squared is {}".format(num, num*num))
interact(square, num=5)
# 2. Using interact as a decorator
@interact
def square(num=2):
print("{} squared is {}".format(num, num*num))
# 3. Using interact as a decorator with named parameters
@interact(num=5)
def square(num=2):
print("{} squared is {}".format(num, num*num))
"""
# If kwargs are given, replace self by a new
# _InteractFactory with the updated kwargs
if kwargs:
params = list(interactive.param)
kw = dict(self.kwargs)
kw.update({k: v for k, v in kwargs.items() if k not in params})
opts = dict(self.opts, **{k: v for k, v in kwargs.items() if k in params})
self = type(self)(self.cls, opts, kw)
f = __interact_f
if f is None:
# This branch handles the case 3
# @interact(a=30, b=40)
# def f(*args, **kwargs):
# ...
#
# Simply return the new factory
return self
# positional arg support in: https://gist.github.com/8851331
# Handle the cases 1 and 2
# 1. interact(f, **kwargs)
# 2. @interact
# def f(*args, **kwargs):
# ...
w = self.widget(f)
try:
f.widget = w
except AttributeError:
# some things (instancemethods) can't have attributes attached,
# so wrap in a lambda
f = lambda *args, **kwargs: __interact_f(*args, **kwargs)
f.widget = w
return w.layout
def options(self, **kwds):
"""
Change options for interactive functions.
Returns
-------
A new :class:`_InteractFactory` which will apply the
options when called.
"""
opts = dict(self.opts)
for k in kwds:
if k not in opts:
raise ValueError("invalid option {!r}".format(k))
opts[k] = kwds[k]
return type(self)(self.cls, opts, self.kwargs)
interact = interactive.factory()
interact_manual = interact.options(manual_update=True, manual_name="Run Interact")
class fixed(param.Parameterized):
"""A pseudo-widget whose value is fixed and never synced to the client."""
value = param.Parameter(doc="Any Python object")
description = param.String(default='')
def __init__(self, value, **kwargs):
super(fixed, self).__init__(value=value, **kwargs)
def get_interact_value(self):
"""Return the value for this widget which should be passed to
interactive functions. Custom widgets can change this method
to process the raw value ``self.value``.
"""
return self.value
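# Usage sketch (not part of the original file): passing fixed(obj) as a keyword,
# e.g. interact(f, data=fixed(df)), keeps that argument constant instead of
# building a widget for it.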
| 37.954286 | 118 | 0.576383 |
700b6392a8bc4a75f74dacf5b43148ec6f4de8b3 | 521 | py | Python | DDQN/FRED/a5.py | liu1355/dl_fin | 9a4be858127be0daa96cc8bb5cfed9d14f7912b8 | [
"MIT"
] | null | null | null | DDQN/FRED/a5.py | liu1355/dl_fin | 9a4be858127be0daa96cc8bb5cfed9d14f7912b8 | [
"MIT"
] | null | null | null | DDQN/FRED/a5.py | liu1355/dl_fin | 9a4be858127be0daa96cc8bb5cfed9d14f7912b8 | [
"MIT"
] | 1 | 2018-04-11T06:15:00.000Z | 2018-04-11T06:15:00.000Z | import csv
import requests
import pandas as pd
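# Fetch the FRED CPILFESL series (core CPI) as CSV from the Quandl API,
# print each row and save the result to a5.csv.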
FRED_CPILFESL = 'https://www.quandl.com/api/v3/datasets/FRED/CPILFESL/data.csv?api_key=6CbgFEPrywyyFy1yNywC'
with requests.Session() as s:
download = s.get(FRED_CPILFESL)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter = ',')
CPILFESL_list = list(cr)
for row in CPILFESL_list:
print(row)
CPILFESL_list = pd.DataFrame(CPILFESL_list)
CPILFESL_list.to_csv('a5.csv', encoding = 'utf-8') | 28.944444 | 108 | 0.731286 |
98761324df43b471ebdc74daef2d700e940612e6 | 9,689 | py | Python | homeassistant/components/deconz/light.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 1 | 2022-02-21T05:50:41.000Z | 2022-02-21T05:50:41.000Z | homeassistant/components/deconz/light.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 12 | 2021-12-16T06:18:49.000Z | 2022-03-31T06:25:54.000Z | homeassistant/components/deconz/light.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 1 | 2022-02-24T16:07:32.000Z | 2022-02-24T16:07:32.000Z | """Support for deCONZ lights."""
from __future__ import annotations
from typing import Any
from pydeconz.group import Group
from pydeconz.light import (
ALERT_LONG,
ALERT_SHORT,
EFFECT_COLOR_LOOP,
EFFECT_NONE,
Light,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_XY_COLOR,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
COLOR_MODE_XY,
DOMAIN,
EFFECT_COLORLOOP,
FLASH_LONG,
FLASH_SHORT,
LightEntity,
LightEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.color import color_hs_to_xy
from .const import DOMAIN as DECONZ_DOMAIN, POWER_PLUGS
from .deconz_device import DeconzDevice
from .gateway import DeconzGateway, get_gateway_from_config_entry
DECONZ_GROUP = "is_deconz_group"
EFFECT_TO_DECONZ = {EFFECT_COLORLOOP: EFFECT_COLOR_LOOP, "None": EFFECT_NONE}
FLASH_TO_DECONZ = {FLASH_SHORT: ALERT_SHORT, FLASH_LONG: ALERT_LONG}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the deCONZ lights and groups from a config entry."""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_light(lights: list[Light] | None = None) -> None:
"""Add light from deCONZ."""
entities = []
if lights is None:
lights = gateway.api.lights.values()
for light in lights:
if (
isinstance(light, Light)
and light.type not in POWER_PLUGS
and light.unique_id not in gateway.entities[DOMAIN]
):
entities.append(DeconzLight(light, gateway))
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
gateway.signal_new_light,
async_add_light,
)
)
@callback
def async_add_group(groups: list[Group] | None = None) -> None:
"""Add group from deCONZ."""
if not gateway.option_allow_deconz_groups:
return
entities = []
if groups is None:
groups = list(gateway.api.groups.values())
for group in groups:
if not group.lights:
continue
known_groups = set(gateway.entities[DOMAIN])
new_group = DeconzGroup(group, gateway)
if new_group.unique_id not in known_groups:
entities.append(new_group)
if entities:
async_add_entities(entities)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
gateway.signal_new_group,
async_add_group,
)
)
async_add_light()
async_add_group()
class DeconzBaseLight(DeconzDevice, LightEntity):
"""Representation of a deCONZ light."""
TYPE = DOMAIN
def __init__(self, device: Group | Light, gateway: DeconzGateway) -> None:
"""Set up light."""
super().__init__(device, gateway)
self._attr_supported_color_modes: set[str] = set()
if device.color_temp is not None:
self._attr_supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
if device.hue is not None and device.saturation is not None:
self._attr_supported_color_modes.add(COLOR_MODE_HS)
if device.xy is not None:
self._attr_supported_color_modes.add(COLOR_MODE_XY)
if not self._attr_supported_color_modes and device.brightness is not None:
self._attr_supported_color_modes.add(COLOR_MODE_BRIGHTNESS)
if not self._attr_supported_color_modes:
self._attr_supported_color_modes.add(COLOR_MODE_ONOFF)
if device.brightness is not None:
self._attr_supported_features |= LightEntityFeature.FLASH
self._attr_supported_features |= LightEntityFeature.TRANSITION
if device.effect is not None:
self._attr_supported_features |= LightEntityFeature.EFFECT
self._attr_effect_list = [EFFECT_COLORLOOP]
@property
def color_mode(self) -> str:
"""Return the color mode of the light."""
if self._device.color_mode == "ct":
color_mode = COLOR_MODE_COLOR_TEMP
elif self._device.color_mode == "hs":
color_mode = COLOR_MODE_HS
elif self._device.color_mode == "xy":
color_mode = COLOR_MODE_XY
elif self._device.brightness is not None:
color_mode = COLOR_MODE_BRIGHTNESS
else:
color_mode = COLOR_MODE_ONOFF
return color_mode
@property
def brightness(self) -> int | None:
"""Return the brightness of this light between 0..255."""
return self._device.brightness # type: ignore[no-any-return]
@property
def color_temp(self) -> int:
"""Return the CT color value."""
return self._device.color_temp # type: ignore[no-any-return]
@property
def hs_color(self) -> tuple[float, float]:
"""Return the hs color value."""
return (self._device.hue / 65535 * 360, self._device.saturation / 255 * 100)
@property
def xy_color(self) -> tuple[float, float] | None:
"""Return the XY color value."""
return self._device.xy # type: ignore[no-any-return]
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.state # type: ignore[no-any-return]
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on light."""
data: dict[str, bool | float | int | str | tuple[float, float]] = {"on": True}
if (attr_brightness := kwargs.get(ATTR_BRIGHTNESS)) is not None:
data["brightness"] = attr_brightness
if attr_color_temp := kwargs.get(ATTR_COLOR_TEMP):
data["color_temperature"] = attr_color_temp
if attr_hs_color := kwargs.get(ATTR_HS_COLOR):
if COLOR_MODE_XY in self._attr_supported_color_modes:
data["xy"] = color_hs_to_xy(*attr_hs_color)
else:
data["hue"] = int(attr_hs_color[0] / 360 * 65535)
data["saturation"] = int(attr_hs_color[1] / 100 * 255)
if ATTR_XY_COLOR in kwargs:
data["xy"] = kwargs[ATTR_XY_COLOR]
if (attr_transition := kwargs.get(ATTR_TRANSITION)) is not None:
data["transition_time"] = int(attr_transition * 10)
elif "IKEA" in self._device.manufacturer:
data["transition_time"] = 0
if (alert := FLASH_TO_DECONZ.get(kwargs.get(ATTR_FLASH, ""))) is not None:
data["alert"] = alert
del data["on"]
if (effect := EFFECT_TO_DECONZ.get(kwargs.get(ATTR_EFFECT, ""))) is not None:
data["effect"] = effect
await self._device.set_state(**data)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off light."""
if not self._device.state:
return
data: dict[str, bool | int | str] = {"on": False}
if (attr_transition := kwargs.get(ATTR_TRANSITION)) is not None:
data["brightness"] = 0
data["transition_time"] = int(attr_transition * 10)
if (alert := FLASH_TO_DECONZ.get(kwargs.get(ATTR_FLASH, ""))) is not None:
data["alert"] = alert
del data["on"]
await self._device.set_state(**data)
@property
def extra_state_attributes(self) -> dict[str, bool]:
"""Return the device state attributes."""
return {DECONZ_GROUP: isinstance(self._device, Group)}
class DeconzLight(DeconzBaseLight):
"""Representation of a deCONZ light."""
_device: Light
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this light supports."""
return self._device.max_color_temp or super().max_mireds
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this light supports."""
return self._device.min_color_temp or super().min_mireds
class DeconzGroup(DeconzBaseLight):
"""Representation of a deCONZ group."""
_device: Group
def __init__(self, device: Group, gateway: DeconzGateway) -> None:
"""Set up group and create an unique id."""
self._unique_id = f"{gateway.bridgeid}-{device.deconz_id}"
super().__init__(device, gateway)
@property
def unique_id(self) -> str:
"""Return a unique identifier for this device."""
return self._unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return a device description for device registry."""
return DeviceInfo(
identifiers={(DECONZ_DOMAIN, self.unique_id)},
manufacturer="Dresden Elektronik",
model="deCONZ group",
name=self._device.name,
via_device=(DECONZ_DOMAIN, self.gateway.api.config.bridge_id),
)
@property
def extra_state_attributes(self) -> dict[str, bool]:
"""Return the device state attributes."""
attributes = dict(super().extra_state_attributes)
attributes["all_on"] = self._device.all_on
return attributes
| 32.082781 | 86 | 0.642068 |
e3822edef58c9c1b83da3c56eeef242e031e0b17 | 1,917 | py | Python | results/lmg_different_spinnumbers_20190228/script_lmg_doublebang_powell_150spins.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | 1 | 2020-07-21T02:31:41.000Z | 2020-07-21T02:31:41.000Z | results/lmg_different_spinnumbers_20190228/script_lmg_doublebang_powell_150spins.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | null | null | null | results/lmg_different_spinnumbers_20190228/script_lmg_doublebang_powell_150spins.py | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
model = 'lmg'
model_parameters = dict(num_spins=150)
protocol = 'doublebang'
optimization_method = 'Powell'
# ------ build and check name for output file
additional_file_name_qualifiers = '150spins'
output_file_name = (model + '_' + protocol + '_' +
optimization_method.replace('-', '').lower())
if additional_file_name_qualifiers is not None:
output_file_name += '_' + additional_file_name_qualifiers
filenum = 1
_output_file_name = output_file_name
while os.path.isfile(_output_file_name + '.csv'):
_output_file_name = output_file_name + '({:02})'.format(filenum)
filenum += 1
output_file_name = _output_file_name + '.csv'
# ------ set up logger
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s]"
"[%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(logFormatter)
# rootLogger.addHandler(consoleHandler)
fileHandler = logging.FileHandler(output_file_name[:-4] + '.log')
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging.DEBUG)
rootLogger.addHandler(fileHandler)
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
results = optimization.find_best_protocol(
problem_specification=dict(
model=model,
model_parameters=model_parameters,
task='critical point state generation'
),
optimization_specs=dict(
protocol=protocol,
optimization_method=optimization_method
),
other_options=dict(
scan_times=np.linspace(0.1, 2, 100)
)
)
# ------ save results to file
results.to_csv(output_file_name)
| 29.045455 | 70 | 0.708399 |
a19ecaa9abeb8d066fa8d0ddcc3af644e28e747b | 13,958 | py | Python | PyCommon/modules/Motion/ysBipedAnalysis.py | snumrl/DataDrivenBipedController | 68ecaa17790ebf3039ae8c0b91d21fab4829bb8c | [
"Apache-2.0",
"MIT"
] | 7 | 2018-08-17T10:25:56.000Z | 2021-09-01T11:28:56.000Z | PyCommon/modules/Motion/ysBipedAnalysis.py | snumrl/DataDrivenBipedController | 68ecaa17790ebf3039ae8c0b91d21fab4829bb8c | [
"Apache-2.0",
"MIT"
] | null | null | null | PyCommon/modules/Motion/ysBipedAnalysis.py | snumrl/DataDrivenBipedController | 68ecaa17790ebf3039ae8c0b91d21fab4829bb8c | [
"Apache-2.0",
"MIT"
] | 5 | 2017-01-05T09:22:58.000Z | 2021-07-26T15:13:19.000Z | # +-------------------------------------------------------------------------
# | ysBipedAnalysis.py
# |
# | Author: Yoonsang Lee
# +-------------------------------------------------------------------------
# | COPYRIGHT:
# | Copyright Yoonsang Lee 2013
# | See the included COPYRIGHT.txt file for further details.
# |
# | This file is part of the DataDrivenBipedController.
# | DataDrivenBipedController is free software: you can redistribute it and/or modify
# | it under the terms of the MIT License.
# |
# | You should have received a copy of the MIT License
# | along with DataDrivenBipedController. If not, see <mit-license.org>.
# +-------------------------------------------------------------------------
import math
import sys
if '..' not in sys.path:
sys.path.append('..')
import Util.ysPythonEx as ype
import Motion.ysMotionAnalysis as yma
class GaitState:
STOP = 0
LSWING = 1
RSWING = 2
JUMP = 3
text = ype.getReverseDict(locals())
# return interval
def getWalkingCycle(jointMotion, one_contactStates, endZoneSize = 10):
intervals, types = yma.states2intervals(one_contactStates)
half1stIndex = (len(intervals)-1)/2 - 1
half2ndIndex = half1stIndex + 1
# print half1stIndex, half2ndIndex, intervals[half1stIndex], intervals[half2ndIndex]
# startFrame = intervals[half1stIndex][0]
# endFrame = intervals[half2ndIndex][-1]
startFrame = int(math.ceil(intervals[half1stIndex][0]))
endFrame = int(math.floor(intervals[half2ndIndex][-1]))
minDistance = sys.maxint
minFrame = 0
for i in range(endFrame-endZoneSize, endFrame+endZoneSize):
d = yma.distanceByRelPos(jointMotion[startFrame], jointMotion[i])
# print i, d
if d < minDistance:
# print 'min', i, d
minDistance = d
minFrame = i
endFrame = minFrame
return [startFrame, endFrame]
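# The cycle spans the two middle single-support intervals; the end frame is then
# refined by searching +-endZoneSize frames for the pose closest (by relative joint
# positions) to the start frame.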
def getWalkingCycle2(jointMotion, one_contactStates, endZoneSize = 10):
intervals, types = yma.states2intervals(one_contactStates)
half1stIndex = (len(intervals)-1)/2 - 1
half2ndIndex = half1stIndex + 1
# print half1stIndex, half2ndIndex, intervals[half1stIndex], intervals[half2ndIndex]
# startFrame = intervals[half1stIndex][0]
# endFrame = intervals[half2ndIndex][-1]
startFrame = int(math.ceil(intervals[half1stIndex][0]))
endFrame = int(math.floor(intervals[half2ndIndex][-1]))
minDistance = sys.maxint
minFrame = 0
for i in range(endFrame-endZoneSize, endFrame+endZoneSize):
d = yma.distanceByRelPos2(jointMotion[startFrame], jointMotion[i])
# print i, d
if d < minDistance:
# print 'min', i, d
minDistance = d
minFrame = i
endFrame = minFrame
return [startFrame, endFrame]
# deprecated
def getWalkingSteps(left_contactStates, right_contactStates, includeFirstLastSteps=False, overlap=True):
l_taking, l_landing = yma.getTakingLandingFrames(left_contactStates)
r_taking, r_landing = yma.getTakingLandingFrames(right_contactStates)
landing = l_landing + r_landing
landing.sort()
if includeFirstLastSteps:
# landing.insert(0, min(l_taking[0], r_taking[0]))
landing.insert(0, 0)
else:
del landing[-1]
return yma.borders2intervals(landing, overlap)
def getBipedGaitStates(lFootContactStates, rFootContactStates, jumpThreshold = 0, jumpBias = .5, stopThreshold = 0, stopBias = .5):
gaitStates = [None]*len(lFootContactStates)
for i in range(len(lFootContactStates)):
if lFootContactStates[i] and not rFootContactStates[i]:
gaitStates[i] = GaitState.RSWING
elif not lFootContactStates[i] and rFootContactStates[i]:
gaitStates[i] = GaitState.LSWING
elif lFootContactStates[i] and rFootContactStates[i]:
gaitStates[i] = GaitState.STOP
elif not lFootContactStates[i] and not rFootContactStates[i]:
gaitStates[i] = GaitState.JUMP
# check thresholds
# intervals, types = getBipedGaitIntervalsTypes(gaitStates)
# stopIntervals = getSpecifiedTypeIntervals(GaitState.STOP, intervals, types)
# jumpIntervals = getSpecifiedTypeIntervals(GaitState.JUMP, intervals, types)
intervals, types = yma.states2intervals(gaitStates)
intervals = [yma.intIntervalInner(interval) for interval in intervals]
stopIntervals = yma.getIntervalsWithState(GaitState.STOP, intervals, types)
jumpIntervals = yma.getIntervalsWithState(GaitState.JUMP, intervals, types)
total = [stopIntervals, jumpIntervals]
thresholds = [stopThreshold, jumpThreshold]
biases = [stopBias, jumpBias]
for b in range(len(total)):
intervals = total[b]
threshold = thresholds[b]
bias = biases[b]
for interval in intervals:
if interval[0] == 0 or interval[1] == len(gaitStates)-1:
continue
prevState = gaitStates[interval[0]-1]
nextState = gaitStates[interval[1]+1]
if interval[1] - interval[0] < threshold:
mid = (interval[1] + interval[0])*bias
for i in range(interval[0], interval[1]+1):
if i < mid:
gaitStates[i] = prevState
else:
gaitStates[i] = nextState
return gaitStates
def getBipedGaitIntervals(lFootContactStates, rFootContactStates, jumpThreshold = 0, jumpBias = .5, stopThreshold = 0, stopBias = .5):
states = getBipedGaitStates(lFootContactStates, rFootContactStates, jumpThreshold, jumpBias, stopThreshold, stopBias)
intervals, states = yma.states2intervals(states)
return [yma.intIntervalUp(interval) for interval in intervals], states
def getBipedGaitStates2(lFootContactStates, rFootContactStates, jumpThreshold = 0, jumpBias = .5, stopThreshold = 0, stopBias = .5):
gaitStates = [None]*len(lFootContactStates)
for i in range(len(lFootContactStates)):
if lFootContactStates[i] and not rFootContactStates[i]:
gaitStates[i] = GaitState.RSWING
elif not lFootContactStates[i] and rFootContactStates[i]:
gaitStates[i] = GaitState.LSWING
elif lFootContactStates[i] and rFootContactStates[i]:
gaitStates[i] = GaitState.STOP
elif not lFootContactStates[i] and not rFootContactStates[i]:
gaitStates[i] = GaitState.JUMP
# check thresholds
# intervals, types = getBipedGaitIntervalsTypes(gaitStates)
# stopIntervals = getSpecifiedTypeIntervals(GaitState.STOP, intervals, types)
# jumpIntervals = getSpecifiedTypeIntervals(GaitState.JUMP, intervals, types)
intervals, types = yma.states2intervals(gaitStates)
intervals = [yma.intIntervalInner(interval) for interval in intervals]
stopIntervals = yma.getIntervalsWithState(GaitState.STOP, intervals, types)
jumpIntervals = yma.getIntervalsWithState(GaitState.JUMP, intervals, types)
total = [stopIntervals, jumpIntervals]
thresholds = [stopThreshold, jumpThreshold]
biases = [stopBias, jumpBias]
for b in range(len(total)):
intervals = total[b]
threshold = thresholds[b]
bias = biases[b]
for interval in intervals:
# if interval[0] == 0 or interval[1] == len(gaitStates)-1:
# continue
# prevState = gaitStates[interval[0]-1]
# nextState = gaitStates[interval[1]+1]
# temp - to be fixed
if interval[0] == 0:
prevState = gaitStates[interval[1]+1]
nextState = gaitStates[interval[1]+1]
elif interval[1] == len(gaitStates)-1:
prevState = gaitStates[interval[0]-1]
nextState = gaitStates[interval[0]-1]
else:
prevState = gaitStates[interval[0]-1]
nextState = gaitStates[interval[1]+1]
if interval[1] - interval[0] < threshold:
mid = (interval[1] + interval[0])*bias
for i in range(interval[0], interval[1]+1):
if i < mid:
gaitStates[i] = prevState
else:
gaitStates[i] = nextState
return gaitStates
def getBipedGaitIntervals2(lFootContactStates, rFootContactStates, jumpThreshold = 0, jumpBias = .5, stopThreshold = 0, stopBias = .5):
states = getBipedGaitStates2(lFootContactStates, rFootContactStates, jumpThreshold, jumpBias, stopThreshold, stopBias)
intervals, states = yma.states2intervals(states)
return [yma.intIntervalUp(interval) for interval in intervals], states
if __name__ == '__main__':
import psyco; psyco.full()
import copy, numpy
from fltk import *
import Resource.ysMotionLoader as yf
import Motion.ysMotionConverter as ymc
import Util.ysMatplotEx as ymp
import GUI.ysSimpleViewer as ysv
import Renderer.ysRenderer as yr
import Motion.ysMotion as ym
def test_getWalkingCycle():
bvhFilePath = '../samples/wd2_WalkSameSame00.bvh'
jointMotion = yf.readBvhFile(bvhFilePath, .01)
lFoot = jointMotion[0].skeleton.getElementIndex('LeftFoot')
rFoot = jointMotion[0].skeleton.getElementIndex('RightFoot')
hRef = .1; vRef = .3
lc = yma.getElementContactStates(jointMotion, lFoot, hRef, vRef)
# rc = getElementContactStates(jointMotion, rFoot, hRef, vRef)
interval = getWalkingCycle(jointMotion, lc)
cycleMotion = jointMotion[interval[0]:interval[-1]+1]
startMotion = cycleMotion[:1]
endMotion = cycleMotion[-1:]
viewer = ysv.SimpleViewer()
viewer.record(False)
viewer.doc.addRenderer('jointMotion', yr.JointMotionRenderer(jointMotion, (0,0,255), yr.LINK_LINE))
viewer.doc.addObject('jointMotion', jointMotion)
viewer.doc.addRenderer('startMotion', yr.JointMotionRenderer(startMotion, (0,255,0), yr.LINK_LINE))
viewer.doc.addObject('startMotion', startMotion)
viewer.doc.addRenderer('endMotion', yr.JointMotionRenderer(endMotion, (0,255,0), yr.LINK_LINE))
viewer.doc.addObject('endMotion', endMotion)
viewer.startTimer(1/30.)
viewer.show()
Fl.run()
def test_getWalkingSteps():
bvhFilePath = '../samples/wd2_WalkSameSame00.bvh'
motion = yf.readBvhFile(bvhFilePath, .01)
motion = motion[:-10]
lFoot = motion[0].skeleton.getElementIndex('LeftFoot')
rFoot = motion[0].skeleton.getElementIndex('RightFoot')
hRef = .1; vRef = .3
lc = yma.getElementContactStates(motion, lFoot, hRef, vRef)
rc = yma.getElementContactStates(motion, rFoot, hRef, vRef)
t, l_landing = yma.getTakingLandingFrames(lc)
t, r_landing = yma.getTakingLandingFrames(rc)
landing = l_landing + r_landing
landing.sort()
print 'landingFrames', landing
# steps = getWalkingSteps(lc, rc, True)
steps = getWalkingSteps(lc, rc, True, False)
# steps = getWalkingSteps(lc, rc, False)
print 'steps', steps
print
stepMotions = yma.splitMotionIntoSegments(motion, steps)
for i in range(len(steps)):
print 'stepMotions[%d]: motion[%d]~motion[%d], len %d'%(i, steps[i][0], steps[i][1], len(stepMotions[i]))
viewer = ysv.SimpleViewer()
viewer.record(False)
viewer.doc.addRenderer('motion', yr.JointMotionRenderer(motion, (0,100,255), yr.LINK_LINE, 3.))
viewer.doc.addObject('motion', motion)
for i in range(len(steps)):
viewer.doc.addRenderer('stepMotions[%d]'%i, yr.JointMotionRenderer(stepMotions[i], (0,255,0), yr.LINK_LINE, 3.))
viewer.doc.addObject('stepMotions[%d]', stepMotions[i])
viewer.startTimer(1/30.)
viewer.show()
Fl.run()
def test_getBipedGaitStates():
bvhFilePath = '../samples/wd2_WalkSameSame00.bvh'
motion = yf.readBvhFile(bvhFilePath, .01)
hRef = .1; vRef = .3
lc = yma.getElementContactStates(motion, motion[0].skeleton.getElementIndex('LeftFoot'), hRef, vRef)
rc = yma.getElementContactStates(motion, motion[0].skeleton.getElementIndex('RightFoot'), hRef, vRef)
rawStateList = getBipedGaitStates(lc, rc)
cookedStateList = getBipedGaitStates(lc, rc, 10, 1.)
intervals, types = yma.states2intervals(cookedStateList)
for i in range(len(intervals)):
print intervals[i], GaitState.text[types[i]]
print
print [yma.intIntervalUp(int) for int in intervals]
print getWalkingSteps(lc, rc, True)
print getBipedGaitIntervals(lc, rc, 10, 1.)
plot = ymp.SmartPlot()
plot.setXdata('frame', range(len(motion)))
plot.addYdata('rawState', rawStateList)
plot.addYdata('cookedState', cookedStateList)
plot.showModeless()
viewer = ysv.SimpleViewer()
viewer.record(False)
viewer.doc.addRenderer('motion', yr.JointMotionRenderer(motion, (0,0,255), yr.LINK_LINE))
viewer.doc.addObject('motion', motion)
viewer.startTimer(1/30.)
viewer.show()
Fl.run()
pass
# test_getWalkingCycle()
# test_getWalkingSteps()
test_getBipedGaitStates() | 42.042169 | 136 | 0.616707 |
1c22fb8e6b7882a97f0d08c0dc1552022758f3a3 | 3,644 | py | Python | vsgia_model/models/gazenet.py | nkuhzx/VSG-IA | 075b58c2bf89562cc197e721f050396589861c6a | [
"Apache-2.0"
] | null | null | null | vsgia_model/models/gazenet.py | nkuhzx/VSG-IA | 075b58c2bf89562cc197e721f050396589861c6a | [
"Apache-2.0"
] | null | null | null | vsgia_model/models/gazenet.py | nkuhzx/VSG-IA | 075b58c2bf89562cc197e721f050396589861c6a | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from vsgia_model.models.scenepathway import SceneNet
from vsgia_model.models.headpathway import HeadNet
from vsgia_model.models.vsgat import GraphDealModule
from vsgia_model.models.decoder import HeatmapDecoder,InoutDecoder
from vsgia_model.models.utils.transformer import build_transformer
from vsgia_model.models.utils.position_encoding import PositionEmbeddingLearned
from vsgia_model.models.utils.misc import NestedTensor
class VSGIANet(nn.Module):
def __init__(self, transformer,vsgraph, hidden_dim=256, num_queries=16,output_size=64, pretrained=False,inout_branch=False):
super(VSGIANet, self).__init__()
self.hidden_dim = hidden_dim
self.output_size = output_size
self.inout_branch=inout_branch
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.vsgraph=vsgraph
self.scene_backbone=SceneNet(pretrained)
self.head_backbone=HeadNet()
self.input_proj = nn.Conv2d(3072, hidden_dim, kernel_size=1)
self.encoder = transformer
self.posembedding = PositionEmbeddingLearned(hidden_dim // 2)
self.heatmap_decoder=HeatmapDecoder()
if self.inout_branch:
self.inout_decoder=InoutDecoder()
def forward(self, simg: NestedTensor, face: NestedTensor, head_loc:NestedTensor, mask,nodenum,v_feat,s_feat):
# batch_size
bs = simg.tensors.size(0)
wei, hei = simg.tensors.size(2), simg.tensors.size(3)
        # infer the pairwise interaction probability with the visual-semantic graph module
inter_prob=self.vsgraph(nodenum,v_feat,s_feat)
inter_prob=inter_prob.unsqueeze(1)
        # infer the interaction attention map from the interaction probabilities
mask=mask.unsqueeze(3)
mask = mask.reshape(-1, 224 * 224, 1)
max_value=torch.max(mask.detach(), dim=1)[0].reshape(-1,1,1)
max_value[max_value==0]=1
mask=mask/max_value
interatt = torch.matmul(mask, inter_prob)
interatt = interatt.reshape(-1, 224, 224)
split_index = (np.array(nodenum) - 1).tolist()
interatt_tuple = interatt.split(split_index, dim=0)
batch_interatt = torch.stack([torch.mean(i, dim=0) for i in interatt_tuple], dim=0)
batch_interatt=batch_interatt.unsqueeze(1)
        # extract scene features conditioned on the head location map and the interaction attention map
scene_feats = self.scene_backbone(torch.cat((simg.tensors, head_loc.tensors,batch_interatt), dim=1))
scene_mask = F.interpolate(simg.mask[None].float(), size=scene_feats.shape[-2:]).to(torch.bool)[0]
head_feats = self.head_backbone(face.tensors)
head_mask=F.interpolate(face.mask[None].float(),size=head_feats.shape[-2:]).to(torch.bool)[0]
        # feed into the encoder built from the transformer
input_src=torch.cat((scene_feats,head_feats),dim=1)
input_mask=torch.bitwise_or(scene_mask,head_mask)
input_features = NestedTensor(input_src, input_mask)
input_pos = self.posembedding(input_features)
query_embed= self.query_embed.weight.unsqueeze(0).repeat(bs, 1, 1)
global_memory = self.encoder(self.input_proj(input_src), input_mask, query_embed, input_pos)[0]
        # predict the heatmap and the in/out-of-frame state
heatmap = self.heatmap_decoder(global_memory)
if self.inout_branch:
inout=self.inout_decoder(global_memory)
else:
inout=None
outs = {
'heatmap': heatmap,
'inout':inout,
'inter_prob':inter_prob,
"inter_att":batch_interatt
}
return outs
| 33.127273 | 128 | 0.692371 |
e406cab11849df8fe2ebeee1593d7edcf9771c35 | 10,127 | py | Python | teafacto/scripts/atis/atisseqtrans.py | lukovnikov/teafacto | 5e863df8d061106ad705c0837f2d2ca4e08db0e4 | [
"MIT"
] | 2 | 2016-06-28T23:41:42.000Z | 2017-01-14T12:41:36.000Z | teafacto/scripts/atis/atisseqtrans.py | lukovnikov/teafacto | 5e863df8d061106ad705c0837f2d2ca4e08db0e4 | [
"MIT"
] | 1 | 2016-04-20T20:09:20.000Z | 2016-08-17T19:02:47.000Z | teafacto/scripts/atis/atisseqtrans.py | lukovnikov/teafacto | 5e863df8d061106ad705c0837f2d2ca4e08db0e4 | [
"MIT"
] | 5 | 2016-07-18T17:05:18.000Z | 2018-10-13T05:40:05.000Z | import pickle
import numpy as np
import os
import theano
from theano import tensor as TT
from teafacto.blocks.seq.trans import SimpleSeqTrans
#from teafacto.blocks.seq.oldseqproc import SimpleSeqTransducer as SimpleSeqTrans
from teafacto.blocks.seq.rnu import GRU
from teafacto.blocks.basic import Softmax
from teafacto.core.base import Block, param, tensorops as T
from teafacto.util import argprun
from teafacto.procutil import *
def getdatamatrix(lot, maxlen, k, offset=1):
data = np.zeros((len(lot), maxlen))
i = 0
while i < len(lot):
x = lot[i][k]
j = 0
while j < x.shape[0]:
data[i, j] = x[j] + offset
j += 1
i += 1
return data
def tup2text(tup, word2idx, table2idx, label2idx):
word2idxrev = {v: k for k, v in word2idx.items()}
table2idxrev = {v: k for k, v in table2idx.items()}
label2idxrev = {v: k for k, v in label2idx.items()}
i = 0
words = " ".join(map(lambda x: word2idxrev[tup[0][x]], range(len(tup[0]))))
labels = " ".join(map(lambda x: label2idxrev[tup[2][x]], range(len(tup[0]))))
print words
print labels
def atiseval(preds, golds, revdic):
""" computes accuracy, precision, recall and f-score on recognized slots"""
assert(preds.shape[0] == golds.shape[0])
i = 0
tp = 0
fp = 0
fn = 0
while i < preds.shape[0]:
predslots = getslots(preds[i], revdic=revdic)
goldslots = getslots(golds[i], revdic=revdic)
for predslot in predslots:
if predslot in goldslots:
tp += 1
else:
fp += 1
for goldslot in goldslots:
if goldslot not in predslots: # FN
fn += 1
i += 1
    precision = 1.0 * tp / (tp + fp) if (tp + fp) > 0 else 0.
    recall = 1.0 * tp / (tp + fn) if (tp + fn) > 0 else 0.
fscore = 2.0 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.
return fscore, {"p": precision, "r": recall}, {"tp": tp, "fp": fp, "fn": fn}
def getslots(x, revdic):
y = np.vectorize(lambda a: revdic[a] if a in revdic else "_" )(x)
slots = []
currentslot = None
i = 0
sumtingwong = False
while i < len(y):
ye = y[i]
if ye == "O": # no slot/finalize slot
if currentslot is not None: #finalize slot
slots.append(currentslot)
currentslot = None
else: # do nothing
pass
elif ye[0] == "B": # slot starting
if currentslot is not None: #finalize slot
slots.append(currentslot)
else: # do nothing
pass
currentslot = (ye[2:], [i]) # start new slot
elif ye[0] == "I": # slot continuing
if currentslot is not None:
if currentslot[0] == ye[2:]:
currentslot[1].append(i)
else: # wrong continuation --> finalize slot?
slots.append(currentslot)
currentslot = None
else: # something wrong
print "sum ting wong"
sumtingwong = True
i += 1
if sumtingwong:
print y
return slots
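# Example (hypothetical IOB tags, assuming revdic maps ids to labels):
# ["B-fromloc", "O", "B-toloc", "I-toloc", "O"] yields
# [("fromloc", [0]), ("toloc", [2, 3])]; note a slot still open at the last
# position is not appended.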
class StupidAtis(Block):
def __init__(self, indim=100, inpembdim=50, outdim=100, **kw):
self.E = param((indim, inpembdim), name="emb").uniform()
self.W = param((inpembdim, outdim), name="W").uniform()
super(StupidAtis, self).__init__(**kw)
def apply(self, x): # x: (batsize, seqlen)
emb = self.E[x] # (batsize, seqlen, inpembdim)
outs = T.tensordot(emb, self.W, 1) # (batsize, seqlen, outdim)
outsf = outs.reshape((outs.shape[0] * outs.shape[1], outs.shape[2])) # (batsize*seqlen, outdim)
outsfsm = Softmax()(outsf)
ret = outsfsm.reshape(outs.shape) # (batsize, seqlen, outdim)
return ret
class StupidAtisNative(Block):
def __init__(self, indim=100, inpembdim=50, outdim=100, **kw):
super(StupidAtisNative, self).__init__(**kw)
self.E = self.add_param(param((indim, inpembdim), name="emb").uniform())
self.W = self.add_param(param((inpembdim, outdim), name="W").uniform())
def _apply(self, x):
emb = self.E.d[x]
outs = TT.tensordot(emb, self.W.d, 1)
outsf = outs.reshape((outs.shape[0] * outs.shape[1], outs.shape[2]))
outsfsm = TT.nnet.softmax(outsf)
ret = outsfsm.reshape(outs.shape)
return ret
class StupidAtisScanMod(StupidAtis):
scanemb = False
scanout = True
scansm = True
def apply(self, x):
emb = self.recembed(x) if self.scanemb else self.E[x] # (batsize, seqlen, inpembdim)
outs = self.recout(emb) if self.scanout else T.tensordot(emb, self.W, 1)
ret = self.recret(outs) if self.scansm else Softmax()(outs.reshape((outs.shape[0] * outs.shape[1], outs.shape[2]))).reshape(outs.shape) # (batsize*seqlen, outdim)
return ret
def recembed(self, x):
E = self.E
def rec(x_t):
return E[x_t]
return T.scan(fn=rec, sequences=x.dimshuffle(1, 0), outputs_info=None).dimshuffle(1, 0, 2)
def recout(self, x):
W = self.W
def rec(x_t):
return T.dot(x_t, W)
return T.scan(fn=rec, sequences=x.dimshuffle(1, 0, 2), outputs_info=None).dimshuffle(1, 0, 2)
def recret(self, x):
sm = T.nnet.softmax
def rec(x_t):
return sm(x_t)
return T.scan(fn=rec, sequences=x.dimshuffle(1, 0, 2), outputs_info=None).dimshuffle(1, 0, 2)
class StupidAtisScanModNative(StupidAtisNative):
scanemb = True
scanout = False
scansm = False
def _apply(self, x):
emb = self.recembed(x) if self.scanemb else self.E.d[x] # (batsize, seqlen, inpembdim)
outs = self.recout(emb) if self.scanout else TT.tensordot(emb, self.W.d, 1)
ret = self.recret(outs) if self.scansm else TT.nnet.softmax(outs.reshape((outs.shape[0] * outs.shape[1], outs.shape[2]))).reshape(outs.shape) # (batsize*seqlen, outdim)
return ret
def recembed(self, x):
E = self.E.d
def rec(x_t):
return E[x_t]
return theano.scan(fn=rec, sequences=x.dimshuffle(1, 0), outputs_info=None)[0].dimshuffle(1, 0, 2)
def recout(self, x):
W = self.W.d
def rec(x_t):
return TT.dot(x_t, W)
return theano.scan(fn=rec, sequences=x.dimshuffle(1, 0, 2), outputs_info=None)[0].dimshuffle(1, 0, 2)
def recret(self, x):
sm = TT.nnet.softmax
def rec(x_t):
return sm(x_t)
return theano.scan(fn=rec, sequences=x.dimshuffle(1, 0, 2), outputs_info=None)[0].dimshuffle(1, 0, 2)
class StupidAtisScan(StupidAtis):
def apply(self, x):
E = self.E
W = self.W
sm = Softmax()
def rec(x_t):
emb = E[x_t]
outs = T.dot(emb, W)
return sm(outs)
o = T.scan(fn=rec, sequences=x.dimshuffle(1, 0), outputs_info=None)
return o.dimshuffle(1, 0, 2)
class StupidAtisScanNative(StupidAtisNative):
def _apply(self, x):
E = self.E.d
W = self.W.d
sm = TT.nnet.softmax
def rec(x_t):
emb = E[x_t]
outs = TT.dot(emb, W)
return sm(outs)
o = theano.scan(fn=rec, sequences=x.dimshuffle(1, 0, 2), outputs_info=None)
return o.dimshuffle(1, 0, 2)
def run(p="../../../data/atis/atis.pkl", wordembdim=100,
innerdim=200, lr=1., numbats=100, epochs=20,
validinter=1, wreg=0.00003, depth=1):
p = os.path.join(os.path.dirname(__file__), p)
train, test, dics = pickle.load(open(p))
word2idx = dics["words2idx"]
table2idx = dics["tables2idx"]
label2idx = dics["labels2idx"]
label2idxrev = {v: k for k, v in label2idx.items()}
train = zip(*train)
test = zip(*test)
print "%d training examples, %d test examples" % (len(train), len(test))
#tup2text(train[0], word2idx, table2idx, label2idx)
maxlen = 0
for tup in train + test:
maxlen = max(len(tup[0]), maxlen)
numwords = max(word2idx.values()) + 2
numlabels = max(label2idx.values()) + 2
# get training data
traindata = getdatamatrix(train, maxlen, 0).astype("int32")
traingold = getdatamatrix(train, maxlen, 2).astype("int32")
trainmask = (traindata > 0).astype("float32")
# test data
testdata = getdatamatrix(test, maxlen, 0).astype("int32")
testgold = getdatamatrix(test, maxlen, 2).astype("int32")
testmask = (testdata > 0).astype("float32")
res = atiseval(testgold-1, testgold-1, label2idxrev); print res#; exit()
# define model
innerdim = [innerdim] * depth
m = SimpleSeqTrans(indim=numwords, embdim=wordembdim, rnu=GRU,
innerdim=innerdim, outdim=numlabels)
'''m = StupidAtis(inpembdim = wordembdim, indim = numwords, outdim = numlabels)
m = StupidAtisNative(inpembdim=wordembdim, indim=numwords, outdim=numlabels)'''
#m = StupidAtisScanMod(inpembdim=wordembdim, indim=numwords, outdim=numlabels)
#m = StupidAtisScanModNative(inpembdim=wordembdim, indim=numwords, outdim=numlabels)
# training
'''m.train([traindata, trainmask], traingold).adagrad(lr=lr).grad_total_norm(5.0).seq_cross_entropy().l2(wreg)\
.split_validate(splits=5, random=True).seq_cross_entropy().seq_accuracy().validinter(validinter)\
.train(numbats, epochs)'''
m.train([traindata], traingold).adagrad(lr=lr).seq_cross_entropy().l2(wreg).grad_total_norm(1.)\
.split_validate(splits=5, random=True).seq_cross_entropy().seq_accuracy().validinter(validinter)\
.train(numbats, epochs)
# predict after training
testpredprobs = m.predict(testdata, testmask)
testpred = np.argmax(testpredprobs, axis=2)-1
#testpred = testpred * testmask
#print np.vectorize(lambda x: label2idxrev[x] if x > -1 else " ")(testpred)
evalres = atiseval(testpred, testgold-1, label2idxrev); print evalres
if __name__ == "__main__":
argprun(run)
| 35.533333 | 179 | 0.59761 |
f9c40a9173e451d4d42765eb25e7ade7ff3c26fc | 294 | py | Python | output/models/nist_data/atomic/unsigned_int/schema_instance/nistschema_sv_iv_atomic_unsigned_int_max_inclusive_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/atomic/unsigned_int/schema_instance/nistschema_sv_iv_atomic_unsigned_int_max_inclusive_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/atomic/unsigned_int/schema_instance/nistschema_sv_iv_atomic_unsigned_int_max_inclusive_5_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.atomic.unsigned_int.schema_instance.nistschema_sv_iv_atomic_unsigned_int_max_inclusive_5_xsd.nistschema_sv_iv_atomic_unsigned_int_max_inclusive_5 import NistschemaSvIvAtomicUnsignedIntMaxInclusive5
__all__ = [
"NistschemaSvIvAtomicUnsignedIntMaxInclusive5",
]
| 49 | 226 | 0.908163 |
112bfdbc296142da194e58ddc0600b4aeb452536 | 25,093 | py | Python | selfdrive/car/honda/interface.py | TMORI135/openpilot | bc986477eb34f554933caafeac71538c57fb6838 | [
"MIT"
] | 65 | 2019-07-27T11:27:02.000Z | 2022-02-03T09:10:38.000Z | selfdrive/car/honda/interface.py | TMORI135/openpilot | bc986477eb34f554933caafeac71538c57fb6838 | [
"MIT"
] | 41 | 2018-08-01T17:36:08.000Z | 2020-12-16T02:42:57.000Z | selfdrive/car/honda/interface.py | TMORI135/openpilot | bc986477eb34f554933caafeac71538c57fb6838 | [
"MIT"
] | 229 | 2019-07-27T20:31:02.000Z | 2021-09-21T11:02:49.000Z | #!/usr/bin/env python3
import numpy as np
from cereal import car
from common.numpy_fast import clip, interp
from common.realtime import DT_CTRL
from selfdrive.swaglog import cloudlog
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.events import ET
from selfdrive.car.honda.values import CruiseButtons, CAR, HONDA_BOSCH
from selfdrive.car import STD_CARGO_KG, CivicParams, scale_rot_inertia, scale_tire_stiffness, gen_empty_fingerprint
from selfdrive.controls.lib.longitudinal_planner import _A_CRUISE_MAX_V_FOLLOWING
from selfdrive.car.interfaces import CarInterfaceBase
A_ACC_MAX = max(_A_CRUISE_MAX_V_FOLLOWING)
ButtonType = car.CarState.ButtonEvent.Type
EventName = car.CarEvent.EventName
def compute_gb_honda(accel, speed):
creep_brake = 0.0
creep_speed = 2.3
creep_brake_value = 0.15
if speed < creep_speed:
creep_brake = (creep_speed - speed) / creep_speed * creep_brake_value
return float(accel) / 4.8 - creep_brake
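# Illustrative values (not from the original file): compute_gb_honda(4.8, 10.) == 1.0
# (full gas scale), while below creep_speed a creep brake of up to 0.15 is subtracted,
# e.g. compute_gb_honda(0., 0.) == -0.15.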
def get_compute_gb_acura():
# generate a function that takes in [desired_accel, current_speed] -> [-1.0, 1.0]
# where -1.0 is max brake and 1.0 is max gas
# see debug/dump_accel_from_fiber.py to see how those parameters were generated
w0 = np.array([[ 1.22056961, -0.39625418, 0.67952657],
[ 1.03691769, 0.78210306, -0.41343188]])
b0 = np.array([ 0.01536703, -0.14335321, -0.26932889])
w2 = np.array([[-0.59124422, 0.42899439, 0.38660881],
[ 0.79973811, 0.13178682, 0.08550351],
[-0.15651935, -0.44360259, 0.76910877]])
b2 = np.array([ 0.15624429, 0.02294923, -0.0341086 ])
w4 = np.array([[-0.31521443],
[-0.38626176],
[ 0.52667892]])
b4 = np.array([-0.02922216])
def compute_output(dat, w0, b0, w2, b2, w4, b4):
m0 = np.dot(dat, w0) + b0
m0 = leakyrelu(m0, 0.1)
m2 = np.dot(m0, w2) + b2
m2 = leakyrelu(m2, 0.1)
m4 = np.dot(m2, w4) + b4
return m4
def leakyrelu(x, alpha):
return np.maximum(x, alpha * x)
def _compute_gb_acura(accel, speed):
# linearly extrap below v1 using v1 and v2 data
v1 = 5.
v2 = 10.
dat = np.array([accel, speed])
if speed > 5.:
m4 = compute_output(dat, w0, b0, w2, b2, w4, b4)
else:
dat[1] = v1
m4v1 = compute_output(dat, w0, b0, w2, b2, w4, b4)
dat[1] = v2
m4v2 = compute_output(dat, w0, b0, w2, b2, w4, b4)
m4 = (speed - v1) * (m4v2 - m4v1) / (v2 - v1) + m4v1
return float(m4)
return _compute_gb_acura
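# The returned function evaluates the small MLP above 5 m/s and linearly
# extrapolates between its outputs at 5 and 10 m/s for lower speeds.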
class CarInterface(CarInterfaceBase):
def __init__(self, CP, CarController, CarState):
super().__init__(CP, CarController, CarState)
self.last_enable_pressed = 0
self.last_enable_sent = 0
if self.CS.CP.carFingerprint == CAR.ACURA_ILX:
self.compute_gb = get_compute_gb_acura()
else:
self.compute_gb = compute_gb_honda
@staticmethod
def compute_gb(accel, speed): # pylint: disable=method-hidden
raise NotImplementedError
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
# normalized max accel. Allowing max accel at low speed causes speed overshoots
max_accel_bp = [10, 20] # m/s
max_accel_v = [0.714, 1.0] # unit of max accel
max_accel = interp(v_ego, max_accel_bp, max_accel_v)
# limit the pcm accel cmd if:
# - v_ego exceeds v_target, or
# - a_ego exceeds a_target and v_ego is close to v_target
eA = a_ego - a_target
valuesA = [1.0, 0.1]
bpA = [0.3, 1.1]
eV = v_ego - v_target
valuesV = [1.0, 0.1]
bpV = [0.0, 0.5]
valuesRangeV = [1., 0.]
bpRangeV = [-1., 0.]
# only limit if v_ego is close to v_target
speedLimiter = interp(eV, bpV, valuesV)
accelLimiter = max(interp(eA, bpA, valuesA), interp(eV, bpRangeV, valuesRangeV))
# accelOverride is more or less the max throttle allowed to pcm: usually set to a constant
# unless aTargetMax is very high and then we scale with it; this help in quicker restart
return float(max(max_accel, a_target / A_ACC_MAX)) * min(speedLimiter, accelLimiter)
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=[]): # pylint: disable=dangerous-default-value
ret = CarInterfaceBase.get_std_params(candidate, fingerprint)
ret.carName = "honda"
if candidate in HONDA_BOSCH:
ret.safetyModel = car.CarParams.SafetyModel.hondaBoschHarness
ret.enableCamera = True
ret.radarOffCan = True
ret.openpilotLongitudinalControl = False
else:
ret.safetyModel = car.CarParams.SafetyModel.hondaNidec
ret.enableCamera = True
ret.enableGasInterceptor = 0x201 in fingerprint[0]
ret.openpilotLongitudinalControl = ret.enableCamera
cloudlog.warning("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warning("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
ret.enableCruise = not ret.enableGasInterceptor
ret.communityFeature = ret.enableGasInterceptor
# Certain Hondas have an extra steering sensor at the bottom of the steering rack,
# which improves controls quality as it removes the steering column torsion from feedback.
# Tire stiffness factor fictitiously lower if it includes the steering column torsion effect.
# For modeling details, see p.198-200 in "The Science of Vehicle Dynamics (2014), M. Guiggiani"
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0], [0]]
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
ret.lateralTuning.pid.kf = 0.00006 # conservative feed-forward
eps_modified = False
for fw in car_fw:
if fw.ecu == "eps" and b"," in fw.fwVersion:
eps_modified = True
if candidate == CAR.CIVIC:
stop_and_go = True
ret.mass = CivicParams.MASS
ret.wheelbase = CivicParams.WHEELBASE
ret.centerToFront = CivicParams.CENTER_TO_FRONT
ret.steerRatio = 15.38 # 10.93 is end-to-end spec
if eps_modified:
# stock request input values: 0x0000, 0x00DE, 0x014D, 0x01EF, 0x0290, 0x0377, 0x0454, 0x0610, 0x06EE
# stock request output values: 0x0000, 0x0917, 0x0DC5, 0x1017, 0x119F, 0x140B, 0x1680, 0x1680, 0x1680
# modified request output values: 0x0000, 0x0917, 0x0DC5, 0x1017, 0x119F, 0x140B, 0x1680, 0x2880, 0x3180
# stock filter output values: 0x009F, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108
# modified filter output values: 0x009F, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0108, 0x0400, 0x0480
# note: max request allowed is 4096, but request is capped at 3840 in firmware, so modifications result in 2x max
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560, 8000], [0, 2560, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.1]]
else:
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560], [0, 2560]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[1.1], [0.33]]
tire_stiffness_factor = 1.
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [3.6, 2.4, 1.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.54, 0.36]
elif candidate in (CAR.CIVIC_BOSCH, CAR.CIVIC_BOSCH_DIESEL):
stop_and_go = True
ret.mass = CivicParams.MASS
ret.wheelbase = CivicParams.WHEELBASE
ret.centerToFront = CivicParams.CENTER_TO_FRONT
ret.steerRatio = 15.38 # 10.93 is end-to-end spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 1.
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate in (CAR.ACCORD, CAR.ACCORD_15, CAR.ACCORDH):
stop_and_go = True
if not candidate == CAR.ACCORDH: # Hybrid uses same brake msg as hatch
ret.safetyParam = 1 # Accord(ICE), CRV 5G, and RDX 3G use an alternate user brake msg
ret.mass = 3279. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.83
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 16.33 # 11.82 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.8467
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
if eps_modified:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.09]]
else:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
elif candidate == CAR.ACURA_ILX:
stop_and_go = False
ret.mass = 3095. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.67
ret.centerToFront = ret.wheelbase * 0.37
ret.steerRatio = 18.61 # 15.3 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.72
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate in (CAR.CRV, CAR.CRV_EU):
stop_and_go = False
ret.mass = 3572. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.62
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.89 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 1000], [0, 1000]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.CRV_5G:
stop_and_go = True
ret.safetyParam = 1 # Accord(ICE), CRV 5G, and RDX 3G use an alternate user brake msg
ret.mass = 3410. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.66
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0 # 12.3 is spec end-to-end
if eps_modified:
# stock request input values: 0x0000, 0x00DB, 0x01BB, 0x0296, 0x0377, 0x0454, 0x0532, 0x0610, 0x067F
# stock request output values: 0x0000, 0x0500, 0x0A15, 0x0E6D, 0x1100, 0x1200, 0x129A, 0x134D, 0x1400
# modified request output values: 0x0000, 0x0500, 0x0A15, 0x0E6D, 0x1100, 0x1200, 0x1ACD, 0x239A, 0x2800
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 2560, 10000], [0, 2560, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.21], [0.07]]
else:
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.64], [0.192]]
tire_stiffness_factor = 0.677
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.CRV_HYBRID:
stop_and_go = True
ret.safetyParam = 1 # Accord(ICE), CRV 5G, and RDX 3G use an alternate user brake msg
ret.mass = 1667. + STD_CARGO_KG # mean of 4 models in kg
ret.wheelbase = 2.66
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 16.0 # 12.3 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.677
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.FIT:
stop_and_go = False
ret.mass = 2644. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.53
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 13.06
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.75
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2], [0.05]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.HRV:
stop_and_go = False
ret.mass = 3125 * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.61
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.2
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]]
tire_stiffness_factor = 0.5
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.16], [0.025]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.ACURA_RDX:
stop_and_go = False
ret.mass = 3935. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.68
ret.centerToFront = ret.wheelbase * 0.38
ret.steerRatio = 15.0 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 1000], [0, 1000]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.8], [0.24]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.ACURA_RDX_3G:
stop_and_go = True
ret.safetyParam = 1 # Accord(ICE), CRV 5G, and RDX 3G use an alternate user brake msg
ret.mass = 4068. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.75
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 11.95 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 3840], [0, 3840]]
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
tire_stiffness_factor = 0.677
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.ODYSSEY:
stop_and_go = False
ret.mass = 4471. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 3.00
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 14.35 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.28], [0.08]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.ODYSSEY_CHN:
stop_and_go = False
ret.mass = 1849.2 + STD_CARGO_KG # mean of 4 models in kg
ret.wheelbase = 2.90
ret.centerToFront = ret.wheelbase * 0.41 # from CAR.ODYSSEY
ret.steerRatio = 14.35
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 32767], [0, 32767]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.28], [0.08]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate in (CAR.PILOT, CAR.PILOT_2019):
stop_and_go = False
ret.mass = 4204. * CV.LB_TO_KG + STD_CARGO_KG # average weight
ret.wheelbase = 2.82
ret.centerToFront = ret.wheelbase * 0.428
ret.steerRatio = 17.25 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.11]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.RIDGELINE:
stop_and_go = False
ret.mass = 4515. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 3.18
ret.centerToFront = ret.wheelbase * 0.41
ret.steerRatio = 15.59 # as spec
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.444
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.11]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
elif candidate == CAR.INSIGHT:
stop_and_go = True
ret.mass = 2987. * CV.LB_TO_KG + STD_CARGO_KG
ret.wheelbase = 2.7
ret.centerToFront = ret.wheelbase * 0.39
ret.steerRatio = 15.0 # 12.58 is spec end-to-end
ret.lateralParams.torqueBP, ret.lateralParams.torqueV = [[0, 4096], [0, 4096]] # TODO: determine if there is a dead zone at the top end
tire_stiffness_factor = 0.82
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.18]]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.longitudinalTuning.kiV = [0.18, 0.12]
else:
raise ValueError("unsupported car %s" % candidate)
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter. Otherwise, add 0.5 mph margin to not
# conflict with PCM acc
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 25.5 * CV.MPH_TO_MS
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront,
tire_stiffness_factor=tire_stiffness_factor)
ret.gasMaxBP = [0.] # m/s
ret.gasMaxV = [0.6] if ret.enableGasInterceptor else [0.] # max gas allowed
ret.brakeMaxBP = [5., 20.] # m/s
ret.brakeMaxV = [1., 0.8] # max brake allowed
ret.stoppingControl = True
ret.startAccel = 0.5
ret.steerActuatorDelay = 0.1
ret.steerRateCost = 0.5
ret.steerLimitTimer = 0.8
return ret
# returns a car.CarState
def update(self, c, can_strings):
# ******************* do can recv *******************
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
if self.cp_body:
self.cp_body.update_strings(can_strings)
ret = self.CS.update(self.cp, self.cp_cam, self.cp_body)
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid and (self.cp_body is None or self.cp_body.can_valid)
ret.yawRate = self.VM.yaw_rate(ret.steeringAngleDeg * CV.DEG_TO_RAD, ret.vEgo)
# FIXME: read sendcan for brakelights
brakelights_threshold = 0.02 if self.CS.CP.carFingerprint == CAR.CIVIC else 0.1
ret.brakeLights = bool(self.CS.brake_switch or
c.actuators.brake > brakelights_threshold)
buttonEvents = []
if self.CS.cruise_buttons != self.CS.prev_cruise_buttons:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.unknown
if self.CS.cruise_buttons != 0:
be.pressed = True
but = self.CS.cruise_buttons
else:
be.pressed = False
but = self.CS.prev_cruise_buttons
if but == CruiseButtons.RES_ACCEL:
be.type = ButtonType.accelCruise
elif but == CruiseButtons.DECEL_SET:
be.type = ButtonType.decelCruise
elif but == CruiseButtons.CANCEL:
be.type = ButtonType.cancel
elif but == CruiseButtons.MAIN:
be.type = ButtonType.altButton3
buttonEvents.append(be)
if self.CS.cruise_setting != self.CS.prev_cruise_setting:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.unknown
if self.CS.cruise_setting != 0:
be.pressed = True
but = self.CS.cruise_setting
else:
be.pressed = False
but = self.CS.prev_cruise_setting
if but == 1:
be.type = ButtonType.altButton1
# TODO: more buttons?
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
# events
events = self.create_common_events(ret, pcm_enable=False)
if self.CS.brake_error:
events.add(EventName.brakeUnavailable)
if self.CS.brake_hold and self.CS.CP.openpilotLongitudinalControl:
events.add(EventName.brakeHold)
if self.CS.park_brake:
events.add(EventName.parkBrake)
if self.CP.enableCruise and ret.vEgo < self.CP.minEnableSpeed:
events.add(EventName.belowEngageSpeed)
# it can happen that car cruise disables while comma system is enabled: need to
# keep braking if needed or if the speed is very low
if self.CP.enableCruise and not ret.cruiseState.enabled \
and (c.actuators.brake <= 0. or not self.CP.openpilotLongitudinalControl):
# non loud alert if cruise disables below 25mph as expected (+ a little margin)
if ret.vEgo < self.CP.minEnableSpeed + 2.:
events.add(EventName.speedTooLow)
else:
events.add(EventName.cruiseDisabled)
if self.CS.CP.minEnableSpeed > 0 and ret.vEgo < 0.001:
events.add(EventName.manualRestart)
cur_time = self.frame * DT_CTRL
enable_pressed = False
# handle button presses
for b in ret.buttonEvents:
# do enable on both accel and decel buttons
if b.type in [ButtonType.accelCruise, ButtonType.decelCruise] and not b.pressed:
self.last_enable_pressed = cur_time
enable_pressed = True
# do disable on button down
if b.type == "cancel" and b.pressed:
events.add(EventName.buttonCancel)
if self.CP.enableCruise:
# KEEP THIS EVENT LAST! send enable event if button is pressed and there are
# NO_ENTRY events, so controlsd will display alerts. Also not send enable events
# too close in time, so a no_entry will not be followed by another one.
# TODO: button press should be the only thing that triggers enable
if ((cur_time - self.last_enable_pressed) < 0.2 and
(cur_time - self.last_enable_sent) > 0.2 and
ret.cruiseState.enabled) or \
(enable_pressed and events.any(ET.NO_ENTRY)):
events.add(EventName.buttonEnable)
self.last_enable_sent = cur_time
elif enable_pressed:
events.add(EventName.buttonEnable)
ret.events = events.to_msg()
self.CS.out = ret.as_reader()
return self.CS.out
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
if c.hudControl.speedVisible:
hud_v_cruise = c.hudControl.setSpeed * CV.MS_TO_KPH
else:
hud_v_cruise = 255
pcm_accel = int(clip(c.cruiseControl.accelOverride, 0, 1) * 0xc6)
can_sends = self.CC.update(c.enabled, self.CS, self.frame,
c.actuators,
c.cruiseControl.speedOverride,
c.cruiseControl.override,
c.cruiseControl.cancel,
pcm_accel,
hud_v_cruise,
c.hudControl.lanesVisible,
hud_show_car=c.hudControl.leadVisible,
hud_alert=c.hudControl.visualAlert)
self.frame += 1
return can_sends
| 43.868881 | 144 | 0.654724 |
f4eb7eab9ac7d24a5769c0e2c707eee23cf45891 | 16,588 | py | Python | access_course/views.py | monishnarwani/access_course | 0f262f381768fbe8f1a77ca861d1c2efd57c76f7 | [
"CNRI-Python",
"RSA-MD",
"OML"
] | null | null | null | access_course/views.py | monishnarwani/access_course | 0f262f381768fbe8f1a77ca861d1c2efd57c76f7 | [
"CNRI-Python",
"RSA-MD",
"OML"
] | null | null | null | access_course/views.py | monishnarwani/access_course | 0f262f381768fbe8f1a77ca861d1c2efd57c76f7 | [
"CNRI-Python",
"RSA-MD",
"OML"
] | null | null | null | import json
import logging
from django.conf import settings
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import login as django_login, backends
from django.shortcuts import redirect
from django.views import View
from .validations import get_course_key,get_course_enrollment,get_usage_key
from courseware.access import has_access
from courseware.courses import get_course
from courseware.access_response import StartDateError
from enrollment.data import create_course_enrollment
from .utils import verify_usertoken,create_edx_user,check_subscription
from rest_framework.authentication import SessionAuthentication
from rest_framework_oauth.authentication import OAuth2Authentication
from rest_framework.generics import ListAPIView
from edx_rest_framework_extensions.paginators import NamespacedPageNumberPagination
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from django.core.exceptions import ValidationError
from openedx.features.course_experience.utils import get_course_outline_block_tree
from .api import list_units
from .forms import UnitListGetForm
from .serializers import CourseSerializer
from rest_framework.views import APIView
# from rest_framework.response import Response
from django.http import JsonResponse
from django.db import transaction
from openedx.core.djangoapps.user_api.models import UserRetirementStatus
from social_django.models import UserSocialAuth
from student.models import get_retired_email_by_email, Registration
from openedx.core.djangolib.oauth2_retirement_utils import retire_dot_oauth2_models, retire_dop_oauth2_models
from rest_framework import status, permissions
from six import text_type
from .models import userdetails
import requests
log=logging.getLogger("test")
def accesscompetition(request,course_id,usage_id):
return _redirect_to_url(request,course_id,usage_id,1)
def accesstraining(request,course_id,usage_id):
return _redirect_to_url(request,course_id,usage_id,0)
def accesscourse(request,course_id):
return _redirect_to_url(request,course_id,None,0)
def _get_session_token(request):
token=None
session_cookie_name = settings.BOP_CONFIGURATION.get('WRDPRS_COOKIE_NAME', '')
if session_cookie_name != '':
if request.COOKIES.has_key(session_cookie_name):
token = request.COOKIES[session_cookie_name]
return token
def _redirect_to_url(request, course_id,block_id,competition=0 ):
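    # Summary of the flow below: read the session token from the configured cookie
    # (WRDPRS_COOKIE_NAME), verify it with the login service, check the subscription
    # for non-competition requests, create or update the matching edX user, log the
    # user in, enroll them in the course, and redirect to the course or unit page.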
# try:
# print(request.session)
# print(request.session.accessed)
# print(request.session.modified)
# print(request.session.is_empty())
# print(request.session.session_key)
# except:
# print("inside except")
# pass
uuid=""
if course_id is None or course_id == "":
return HttpResponse("Course is mandatory input")
token=_get_session_token(request)
if token is None:
        return HttpResponse("Token does not exist")
try:
course_key = get_course_key(course_id)
except Exception as e:
        return HttpResponse("Course '{}' is not a valid course. ".format(course_id) + str(e))
if block_id:
try:
usage_key = get_usage_key(block_id)
except Exception as e:
            return HttpResponse("Block '{}' is not a valid block. ".format(block_id) + str(e))
result= verify_usertoken(token)
print(result)
if result.has_key('error'):
return HttpResponse(result['error'])
#save photourl and display name
if 'profile_image' in result['user'].keys():
profileurl= result['user']['profile_image']
else:
profileurl=""
if result['user'].has_key('uuid'):
external_ref_id=result['user']['uuid']
# validate if user has subscribed/purchased the course/training for non competition courses
if not competition and result['user'].has_key('uuid'):
valid_subscription = check_subscription(result['user']['uuid'],course_id,block_id);
if valid_subscription.has_key('error'):
return HttpResponse(valid_subscription['error'])
if result['user'].has_key('email') and result['user'].has_key('first_name') and result['user'].has_key('last_name'):
email=result['user']['email']
firstname=(result['user']['first_name'])[:29]
lastname=result['user']['last_name'][:29]
displayname = result['user']['first_name'] +" "+ result['user']['last_name']
else:
        return HttpResponse("The information received from the login service does not contain first name, last name, or email.\n" + str(result))
#if request.session.session_key:
log.info(profileurl)
log.info(displayname)
try:
user = User.objects.get(email=email)
status=True
except User.DoesNotExist:
status=create_edx_user(email,firstname,lastname)
if status:
try:
user = User.objects.get(email=email)
except:
            return HttpResponse("The user could not be created")
else:
return HttpResponse("Error in account creation")
if user:
try:
details=userdetails.objects.get(student=user)
with transaction.atomic():
details.photourl=profileurl
# details.displayname= unicode(displayname)
details.displayname= displayname
details.external_ref_id = external_ref_id
details.save()
except Exception as e:
try:
                log.info("before calling")
with transaction.atomic():
# details = userdetails(student=user,photourl=profileurl,displayname=unicode(displayname))
details = userdetails(student=user,photourl=profileurl,displayname=displayname,external_ref_id=external_ref_id)
details.save()
                log.info("calling after save")
except Exception as e:
                log.info("Error in adding-updating userdetails: %s", e)
pass
try:
#log.info(request.session.session_key)
if request.session.session_key is None:
django_login(request, user, backends.ModelBackend)
except Exception as exc:
return HttpResponse("Error in login"+str(exc))
if user.is_authenticated:
enrol_data=get_course_enrollment(user,course_key)
if not enrol_data:
try:
create_course_enrollment(user.username,course_id,None,True)
except Exception as e:
return HttpResponse('User {} was not enrolled to the course {} '.format(user.username,course_id)+ str(e))
else:
return HttpResponse('User {} is not logged in '.format(request.user.username))
print("before redirect")
if block_id == None:
return redirect( '/courses/'+course_id)
else:
return redirect('/courses/'+course_id+'/jump_to/'+block_id)
# return HttpResponse("hi")
@view_auth_classes(is_authenticated=True)
class UnitListView(DeveloperErrorViewMixin, ListAPIView):
"""
**Use Case**
Returns the units for a course
**Example requests**:
GET /access_course/v1/units/<course_id>/
GET /access_course/v1/units/<course_id>/type/<unit_type>/
**Parameters**:
* course_id: course id for which units to be returned.
* unit_type: 'training'
if training then only units with assessment type as training will be returned.
**Response Values**
{ "pagination":{"count":7,"previous":null,"num_pages":2,"next":"http://localhost:18000/access_course/v1/units/course-v1:org1+cn1+cr1/?page=2"},
"results":[
{"id":<<id>>,
"course_id":<<course_id>>,
"block_id":<<block_id>>,
"block_name":<<block_name>>,
"block_type":<<block_type>>
},
...
]
}
The following fields are returned with a successful response.
* id: (string) The ID of the unit block.
* course_id: (string) The usage ID of the course for which unit list was requested.
* block_id: (string) The usage ID of the unit block.
* block_name: (string) The display name of unit block.
* block_type: (string) The assessment type of unit block if assessment xblock else 'General'.
Possible values are:
'General', 'Training', 'Deep Learning', 'Knowledge Acquisition'
**Example**
For all units within a course:
http://foo.localhost:18000/access_course/v1/units/course-v1:org1+cn1+cr1/
{ "pagination":{"count":7,"previous":null,"num_pages":2,"next":"http://localhost:18000/access_course/v1/units/course-v1:org1+cn1+cr1/?page=2"},
"results":[
{"id":"bffd97d59b604a54af96151814c6d33c",
"course_id":"course-v1:org1+cn1+cr1",
"block_id":"block-v1:org1+cn1+cr1+type@vertical+block@bffd97d59b604a54af96151814c6d33c",
"block_name":"Unit1",
"block_type":"Knowledge Acquisition"
},
{"id":"fc52aca52c2d491f87fa918a86dbf2f0",
"course_id":"course-v1:org1+cn1+cr1",
"block_id":"block-v1:org1+cn1+cr1+type@vertical+block@fc52aca52c2d491f87fa918a86dbf2f0",
"block_name":"Unit2",
"block_type":"Knowledge Acquisition"
},
...
]
}
For training units within a course:
http://foo.localhost:18000/access_course/v1/units/course-v1:org1+cn1+cr1/type/training/
{ "pagination":{"count":3,"previous":null,"num_pages":1,"next":null},
"results":[
{"id":"bd3d26675bd6482c9fd13930c3c6f239",
"course_id":"course-v1:org1+cn1+cr1",
"block_id":"block-v1:org1+cn1+cr1+type@vertical+block@bd3d26675bd6482c9fd13930c3c6f239",
"block_name":"Unit1Training",
"block_type":"Training"
},
{"id":"0d0279f5a87d4e05b33112030a885c3f",
"course_id":"course-v1:org1+cn1+cr1",
"block_id":"block-v1:org1+cn1+cr1+type@vertical+block@0d0279f5a87d4e05b33112030a885c3f",
"block_name":"Unit11",
"block_type":"Training"
},
...
]
}
"""
pagination_class = NamespacedPageNumberPagination
pagination_class.max_page_size = 100
serializer_class = CourseSerializer
# authentication_classes = (OAuth2Authentication,
# SessionAuthentication,)
def get_queryset(self):
"""
Return a list of course verticals/units(training/all) visible to the user.
"""
if not self.request.user.is_superuser:
raise ValidationError("You don't have authority to perform this action")
requested_params = self.request.query_params.copy()
requested_params.update({'course_key': self.kwargs['course_key_string']})
# Passing unit_type as 'all' to retrieve all units if unit_type is not already set through endpoint URL.
requested_params.update({'unit_type': self.kwargs.setdefault('unit_type', 'all')})
form = UnitListGetForm(requested_params)
if not form.is_valid():
raise ValidationError(form.errors)
units_list = list_units(
self.request,
form.cleaned_data['course_key'],
form.cleaned_data['unit_type'],
)
return [unit for unit in units_list]
class DeactivateAccountView(APIView):
"""
POST /access_course/user/v1/account/deactivate/
{
"email": "test@test.com",
}
**POST Parameters**
A POST request must include the following parameter.
* email: Required. The email of the user being deactivated.
**POST Response Values**
If the request does not specify any access token,
then request returns below response:
{ 'detail': 'Authentication credentials were not provided.'
}
If the request does specify invalid access token,
then request returns below response:
{ 'detail': 'Invalid token'
}
If the request does not specify an email then request returns below response:
{ 'status': 403,
'message': 'Mandatory parameter is missing.'
}
If the request submits an email for a non-existent user,
then request returns below response:
{ 'status': 404,
'message': 'Could not verify user email: test@test.com not found.'
}
If the specified user is successfully deactivated, the request
returns below response:
{
'status': 200,
'message': 'User deleted successfully.'
}
If an unanticipated error occurs, the request returns below response:
{ 'status': 500,
'message': error description
}
Allows user with valid access token to take the following actions:
- Change the user's password permanently to Django's unusable password
- User's exact personal data is vitiated so that it is unusable
- Removes the activation keys sent by email to the user for account activation.
- Deletes OAuth tokens associated with the user
- Create a row in the retirement table for that user so as to indicate the user is no longer active
"""
authentication_classes = (SessionAuthentication,
OAuth2Authentication )
permission_classes = (permissions.IsAuthenticated, )
def post(self, request):
"""
POST /access_course/user/v1/account/deactivate/
Marks the user as having no password set for deactivation purposes.
"""
if request.POST.get('email', '') == '':
return JsonResponse(
{ 'status': status.HTTP_403_FORBIDDEN,
'message': 'Mandatory parameter is missing.'}
)
email = request.POST.get('email')
try:
# Get the user from the email
user = User.objects.get(email=email)
with transaction.atomic():
UserRetirementStatus.create_retirement(user)
# Unlink LMS social auth accounts
UserSocialAuth.objects.filter(user_id=user.id).delete()
# Change LMS password & email
user_email = user.email
user.email = get_retired_email_by_email(user_email)
user.save()
user.set_unusable_password()
user.save()
# TODO: Unlink social accounts & change password on each IDA.
# Remove the activation keys sent by email to the user for account activation.
Registration.objects.filter(user=user).delete()
# Add user to retirement queue.
# Delete OAuth tokens associated with the user.
retire_dop_oauth2_models(user)
retire_dot_oauth2_models(user)
# TODO: send notification to user if needed.
return JsonResponse(
{ 'status': status.HTTP_200_OK,
'message': 'User deleted successfully.'}
)
except User.DoesNotExist as err: # pylint: disable=broad-except
return JsonResponse(
{ 'status': status.HTTP_404_NOT_FOUND,
'message': 'Could not verify user email: {} not found.'.format(email)}
)
except KeyError:
return JsonResponse(
{ 'status': status.HTTP_400_BAD_REQUEST,
'message': 'User email not specified.'}
)
except Exception as exc: # pylint: disable=broad-except
return JsonResponse(
{ 'status': status.HTTP_500_INTERNAL_SERVER_ERROR,
'message': text_type(exc)}
)
| 39.779376 | 155 | 0.619122 |
6a0802ab38d61c8e4b6bd25e75e43996ffd2df58 | 809 | py | Python | suorganizer/manage.py | jmcevoy1984/Django-Unleashed | 8c9014ee3bf271dcd86727c131184f7348af75d8 | [
"MIT"
] | null | null | null | suorganizer/manage.py | jmcevoy1984/Django-Unleashed | 8c9014ee3bf271dcd86727c131184f7348af75d8 | [
"MIT"
] | null | null | null | suorganizer/manage.py | jmcevoy1984/Django-Unleashed | 8c9014ee3bf271dcd86727c131184f7348af75d8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "suorganizer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.173913 | 77 | 0.644005 |
82615d7b86bdb7e4883c5908d1a5476f75819c2c | 79,331 | py | Python | Lab6_python/ssd.py | kmodrick/pynq_v0 | f4a1a71f76f5d3b1fe0aefca0ed3a9e37c21879d | [
"BSD-3-Clause"
] | null | null | null | Lab6_python/ssd.py | kmodrick/pynq_v0 | f4a1a71f76f5d3b1fe0aefca0ed3a9e37c21879d | [
"BSD-3-Clause"
] | null | null | null | Lab6_python/ssd.py | kmodrick/pynq_v0 | f4a1a71f76f5d3b1fe0aefca0ed3a9e37c21879d | [
"BSD-3-Clause"
] | null | null | null | """
The code module ssd.py was developed to accompany the book
Signals and Systems for Dummies published by Wiley Publishing.
Copyright 2012, 2013 Mark Wickert, mwickert@uccs.edu
v 1.0
Notes
-----
The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to anyone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time.
The formatted docstrings for the library follow. Click index in the upper right to get an
alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options.
Examples
--------
>>> import ssd
>>> # Commands then need to be prefixed with ssd., i.e.,
>>> ssd.tri(t,tau)
>>> # A full import of the module, to avoid the the need to prefix with ssd, is:
>>> from ssd import *
Function Catalog
----------------
"""
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from matplotlib import pylab
from matplotlib import mlab
import numpy as np
from numpy import fft
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
def ten_band_eq_filt(x,GdB,Q=3.5):
"""
Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB.
The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and
stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate
is assumed to be 44.1 kHz.
Parameters
----------
x : ndarray of the input signal samples
GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB]
Q : Quality factor vector for each of the NB peaking filters
Returns
-------
y : ndarray of output signal samples
Examples
--------
>>> # Test with white noise
    >>> x = randn(100000)
>>> y = ten_band_eq_filt(x,GdB)
>>> psd(y,2**10,44.1)
"""
fs = 44100.0 # Hz
NB = len(GdB)
Fc = 31.25*2**np.arange(10)
B = np.zeros((NB,3))
A = np.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
[b,a] = peaking(GdB[k],Fc[k],Q)
B[k,:] = b
A[k,:] = a
#Pass signal x through the cascade of ten filters
y = np.zeros(len(x))
for k in range(NB):
if k == 0:
y = signal.lfilter(B[k,:],A[k,:],x)
else:
y = signal.lfilter(B[k,:],A[k,:],y)
return y
def ten_band_eq_resp(GdB,Q=3.5):
"""
Create a frequency response magnitude plot in dB of a ten band equalizer
using a semilogplot (semilogx()) type plot
Parameters
----------
GdB : Gain vector for 10 peaking filters [G0,...,G9]
Q : Quality factor for each peaking filter (default 3.5)
Returns
-------
Nothing : two plots are created
Examples
--------
>>> ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])
"""
fs = 44100.0 # Hz
Fc = 31.25*2**np.arange(10)
NB = len(GdB)
B = np.zeros((NB,3));
A = np.zeros((NB,3));
# Create matrix of cascade coefficients
for k in range(NB):
b,a = peaking(GdB[k],Fc[k],Q,fs)
B[k,:] = b
A[k,:] = a
# Create the cascade frequency response
F = np.logspace(1,np.log10(20e3),1000)
H = np.ones(len(F))*np.complex(1.0,0.0)
for k in range(NB):
w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs)
H *= Htemp
plt.figure(figsize=(6,4))
plt.subplot(211)
plt.semilogx(F,20*np.log10(abs(H)))
plt.axis([10, fs/2, -12, 12])
plt.grid()
plt.title('Ten-Band Equalizer Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.subplot(212)
plt.stem(np.arange(NB),GdB,'b','bs')
#plt.bar(np.arange(NB)-.1,GdB,0.2)
plt.axis([0, NB-1, -12, 12])
plt.xlabel('Equalizer Band Number')
plt.ylabel('Gain Set (dB)')
plt.grid()
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
    A second-order peaking filter having GdB gain at fc and approximately
    0 dB otherwise.
    The filter coefficients returned correspond to a biquadratic system function
containing five parameters.
Parameters
----------
GdB : Lowpass gain in dB
fc : Center frequency in Hz
Q : Filter Q which is inversely proportional to bandwidth
    fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> b,a = peaking(-5.0,500,4)
>>> # Assuming pylab imported
>>> f = logspace(1,5,400)
    >>> w,H = signal.freqz(b,a,2*pi*f/44100)
>>> semilogx(f,20*log10(abs(H)))
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*np.array([1, b1, b2])
a = np.array([1, a1, a2])
return b,a
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index array n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndarray covering at least -2 to +5.
Returns
-------
x : ndarray of signal samples in x
Examples
--------
>>> n = arange(-5,8)
>>> x = ex6_2(n)
>>> stem(n,x) # creates a stem plot of x vs n
"""
x = np.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x
def position_CD(Ka,out_type = 'fb_exact'):
"""
CD sled position control case study of Chapter 18.
The function returns the closed-loop and open-loop
system function for a CD/DVD sled position control
system. The loop amplifier gain is the only variable
that may be changed. The returned system function can
however be changed.
Parameters
----------
Ka : loop amplifier gain, start with 50.
out_type : 'open_loop' for open loop system function
out_type : 'fb_approx' for closed-loop approximation
out_type : 'fb_exact' for closed-loop exact
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
With the exception of the loop amplifier gain, all
    other parameters are hard-coded from the Case Study example.
Examples
    --------
>>> b,a = position_CD(Ka,'fb_approx')
>>> b,a = position_CD(Ka,'fb_exact')
"""
rs = 10/(2*np.pi)
# Load b and a ndarrays with the coefficients
if out_type.lower() == 'open_loop':
b = np.array([Ka*4000*rs])
a = np.array([1,1275,31250,0])
elif out_type.lower() == 'fb_approx':
b = np.array([3.2*Ka*rs])
a = np.array([1, 25, 3.2*Ka*rs])
elif out_type.lower() == 'fb_exact':
b = np.array([4000*Ka*rs])
a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs])
else:
print('out_type must be: open_loop, fb_approx, or fc_exact')
return 1
return b, a
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'):
"""
Cruise control with PI controller and hill disturbance.
This function returns various system function configurations
    for the cruise control Case Study example found in
    the supplementary article. The plant model is obtained by
    linearizing the equations of motion and the controller contains a
    proportional and integral gain term set via the closed-loop parameters
    natural frequency wn (rad/s) and damping zeta.
Parameters
----------
wn : closed-loop natural frequency in rad/s, nominally 0.1
zeta : closed-loop damping factor, nominally 1.0
T : vehicle time constant, nominally 10 s
vcruise : cruise velocity set point, nominally 75 mph
vmax : maximum vehicle velocity, nominally 120 mph
tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
'H' : closed-loop system function V(s)/R(s)
'HE' : closed-loop system function E(s)/R(s)
'HVW' : closed-loop system function V(s)/W(s)
'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Examples
--------
>>> # return the closed-loop system function output/input velocity
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H')
>>> # return the closed-loop system function loop error/hill disturbance
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
"""
tau = T/2.*vmax/vcruise
g = 9.8
g *= 3*60**2/5280. # m/s to mph conversion
Kp = T/vmax*(2*zeta*wn-1/tau)
Ki = T/vmax*wn**2
K = Kp*vmax/T
print('wn = ', np.sqrt(K/(Kp/Ki)))
print('zeta = ', (K + 1/tau)/(2*wn))
a = np.array([1, 2*zeta*wn, wn**2])
if tf_mode == 'H':
b = np.array([K, wn**2])
elif tf_mode == 'HE':
b = np.array([1, 2*zeta*wn-K, 0.])
elif tf_mode == 'HVW':
b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
b *= Kp
elif tf_mode == 'HED':
b = np.array([g, 0])
else:
        print('tf_mode must be: H, HE, HVW, or HED')
return 1
return b, a
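# A minimal usage sketch (assuming the nominal parameter values quoted in the docstring
# above) for inspecting the closed-loop step response with scipy.signal:
#   >>> b,a = cruise_control(0.1, 1.0, 10, 75, 120, 'H')
#   >>> t,v = signal.step((b,a), T=np.arange(0, 150, 0.1))
#   >>> plt.plot(t, v)  # unit step response of V(s)/R(s)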
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]):
"""
Create an s-plane pole-zero plot.
As input the function uses the numerator and denominator
s-domain system function coefficient ndarrays b and a respectively.
Assumed to be stored in descending powers of s.
Parameters
----------
b : numerator coefficient ndarray.
a : denominator coefficient ndarray.
auto_scale : True
    size : [xmin,xmax,ymin,ymax] plot scaling when auto_scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> splane(b,a)
>>> # Here the plot is generated using manual scaling
>>> splane(b,a,False,[-10,1,-10,10])
"""
M = len(b) - 1
N = len(a) - 1
plt.figure(figsize=(5,5))
#plt.axis('equal')
N_roots = np.array([0.0])
if M > 0:
N_roots = np.roots(b)
D_roots = np.array([0.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5
size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5
size[1] = max(size[1],0.5)
size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5
size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5
plt.plot([size[0],size[1]],[0,0],'k--')
plt.plot([0,0],[size[2],size[3]],'r--')
# Plot labels if multiplicity greater than 1
x_scale = size[1]-size[0]
y_scale = size[3]-size[2]
x_off = 0.03
y_off = 0.01
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = mlab.find(N_mult>1)
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = mlab.find(D_mult>1)
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis(np.array(size))
return M,N
def OS_filter(x,h,N,mode=0):
"""
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
    >>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
    >>> N = 32
    >>> y = OS_filter(x,h,N)
    >>> # set mode = 1
    >>> y, y_mat = OS_filter(x,h,N,1)
"""
P = len(h)
# zero pad start of x so first frame can recover first true samples of x
x = np.hstack((np.zeros(P-1),x))
L = N - P + 1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad end of x to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and save behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:k*L+N]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk)) # imag part should be zero
y[k*L+P-1:k*L+N] = yk[P-1:]
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[P-1:Nx], y_mat[:,P-1:Nx]
else:
return y[P-1:Nx]
def OA_filter(x,h,N,mode=0):
"""
Overlap and add transform domain FIR filtering.
This function implements the classical overlap and add method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> n = arange(0,100)
    >>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
    >>> N = 32
    >>> y = OA_filter(x,h,N)
    >>> # set mode = 1
    >>> y, y_mat = OA_filter(x,h,N,1)
"""
P = len(h)
L = N - P + 1 # need N >= L + P -1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and add behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:(k+1)*L]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk))
y[k*L:k*L+N] += yk
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[0:Nx], y_mat[:,0:Nx]
else:
return y[0:Nx]
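# Sanity-check sketch for the two overlap methods above: for an FIR filter both should
# agree with direct filtering to machine precision, e.g.
#   >>> n = np.arange(0, 100)
#   >>> x = np.cos(2*np.pi*0.05*n)
#   >>> h = np.ones(10)
#   >>> np.max(np.abs(OA_filter(x, h, 64) - signal.lfilter(h, 1, x)))
#   >>> np.max(np.abs(OS_filter(x, h, 64) - signal.lfilter(h, 1, x)))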
def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)):
"""
Lowpass sampling theorem plotting function.
Display the spectrum of a sampled signal after setting the bandwidth,
sampling frequency, maximum display frequency, and spectral shape.
Parameters
----------
fb : spectrum lowpass bandwidth in Hz
fs : sampling frequency in Hz
fmax : plot over [-fmax,fmax]
    N : number of translates, N positive and N negative
    shape : 'tri' or 'line'
fsize : the size of the figure window, default (6,4)
Returns
-------
Nothing : A plot window opens containing the spectrum plot
Examples
--------
>>> # No aliasing as 10 < 25/2
>>> lp_samp(10,25,50,10)
>>> # Aliasing as 15 > 25/2
>>> lp_samp(15,25,50,10)
"""
plt.figure(figsize=fsize)
# define the plot interval
f = np.arange(-fmax,fmax+fmax/200.,fmax/200.)
A = 1.0;
line_ampl = A/2.*np.array([0, 1])
# plot the lowpass spectrum in black
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f,fb))
elif shape.lower() == 'line':
plt.plot([fb, fb],line_ampl,'b', linewidth=2)
plt.plot([-fb, -fb],line_ampl,'b', linewidth=2)
else:
print('shape must be tri or line')
# overlay positive and negative frequency translates
for n in range(N):
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f-(n+1)*fs,fb),'--r')
plt.plot(f,lp_tri(f+(n+1)*fs,fb),'--g')
elif shape.lower() == 'line':
plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
else:
print('shape must be tri or line')
#plt.title('Lowpass Sampling Theorem for a Real Signal: Blk = orig, dotted = translates')
plt.ylabel('Spectrum Magnitude')
plt.xlabel('Frequency in Hz')
plt.axis([-fmax,fmax,0,1])
plt.grid()
def lp_tri(f, fb):
"""
    Triangle spectral shape function used by lp_samp.
    This is a support function for the lowpass spectrum plotting function
    lp_samp().
Parameters
----------
f : ndarray containing frequency samples
fb : the bandwidth as a float constant
Returns
-------
x : ndarray of spectrum samples for a single triangle shape
Examples
--------
>>> x = lp_tri(f, fb)
"""
x = np.zeros(len(f))
for k in range(len(f)):
if abs(f[k]) <= fb:
x[k] = 1 - abs(f[k])/float(fb)
return x
def sinusoidAWGN(x,SNRdB):
"""
Add white Gaussian noise to a single real sinusoid.
Input a single sinusoid to this function and it returns a noisy
sinusoid at a specific SNR value in dB. Sinusoid power is calculated
using np.var.
Parameters
----------
x : Input signal as ndarray consisting of a single sinusoid
SNRdB : SNR in dB for output sinusoid
Returns
-------
y : Noisy sinusoid return vector
Examples
--------
>>> # set the SNR to 10 dB
>>> n = arange(0,10000)
>>> x = cos(2*pi*0.04*n)
>>> y = sinusoidAWGN(x,10.0)
"""
# Estimate signal power
x_pwr = np.var(x)
# Create noise vector
noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x));
return x + noise
def simpleQuant(x,Btot,Xmax,Limit):
"""
A simple rounding quantizer for bipolar signals having Btot = B + 1 bits.
    This function models a quantizer that employs Btot bits and has one of
three selectable limiting types: saturation, overflow, and none.
The quantizer is bipolar and implements rounding.
Parameters
----------
x : input signal ndarray to be quantized
Btot : total number of bits in the quantizer, e.g. 16
Xmax : quantizer full-scale dynamic range is [-Xmax, Xmax]
    Limit : limiting of the form 'sat', 'over', 'none'
Returns
-------
xq : quantized output ndarray
Notes
-----
    The quantization error can be formed as e = xq - x
Examples
--------
>>> n = arange(0,10000)
>>> x = cos(2*pi*0.211*n)
>>> y = sinusoidAWGN(x,90)
    >>> yq = simpleQuant(y,12,1,'sat')
>>> psd(y,2**10,Fs=1);
>>> psd(yq,2**10,Fs=1)
"""
B = Btot-1
x = x/Xmax
if Limit.lower() == 'over':
xq = (np.mod(np.round(x*2**B)+2**B,2**Btot)-2**B)/2**B
elif Limit.lower() == 'sat':
xq = np.round(x*2**B)+2**B
s1 = mlab.find(xq >= 2**Btot-1)
s2 = mlab.find(xq < 0)
xq[s1] = (2**Btot - 1)*np.ones(len(s1))
xq[s2] = np.zeros(len(s2))
xq = (xq - 2**B)/2**B
elif Limit.lower() == 'none':
xq = np.round(x*2**B)/2**B
else:
print('limit must be the string over, sat, or none')
return xq*Xmax
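# Scale note for simpleQuant above: with B = Btot - 1 fractional bits the step size is
# Xmax/2**B, e.g. Btot = 12 and Xmax = 1 gives a step of 1/2**11 ~= 4.9e-4.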
def prin_alias(f_in,fs):
"""
    Calculate the principal alias frequencies.
    Given an array of input frequencies the function returns an
    array of principal alias frequencies.
Parameters
----------
f_in : ndarray of input frequencies
fs : sampling frequency
Returns
-------
    f_out : ndarray of principal alias frequencies
Examples
--------
>>> # Linear frequency sweep from 0 to 50 Hz
>>> f_in = arange(0,50,0.1)
    >>> # Calculate principal alias with fs = 10 Hz
>>> f_out = prin_alias(f_in,10)
"""
return abs(np.rint(f_in/fs)*fs - f_in)
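    # Concrete check of the formula above: with fs = 10 Hz,
    #   >>> prin_alias(np.array([3., 7., 12.]), 10)
    # returns array([ 3.,  3.,  2.]), i.e. 7 Hz and 12 Hz alias back into [0, fs/2].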
"""
    Principal alias via recursion
f_out = np.copy(f_in)
for k in range(len(f_out)):
while f_out[k] > fs/2.:
f_out[k] = abs(f_out[k] - fs)
return f_out
"""
def cascade_filters(b1,a1,b2,a2):
"""
Cascade two IIR digital filters into a single (b,a) coefficient set.
To cascade two digital filters (system functions) given their numerator
and denominator coefficients you simply convolve the coefficient arrays.
Parameters
----------
b1 : ndarray of numerator coefficients for filter 1
a1 : ndarray of denominator coefficients for filter 1
b2 : ndarray of numerator coefficients for filter 2
a2 : ndarray of denominator coefficients for filter 2
Returns
-------
b : ndarray of numerator coefficients for the cascade
a : ndarray of denominator coefficients for the cascade
Examples
--------
>>> from scipy import signal
>>> b1,a1 = signal.butter(3, 0.1)
>>> b2,a2 = signal.butter(3, 0.15)
>>> b,a = cascade_filters(b1,a1,b2,a2)
"""
return signal.convolve(b1,b2), signal.convolve(a1,a2)
def soi_snoi_gen(s,SIR_dB,N,fi,fs = 8000):
"""
Add an interfering sinusoidal tone to the input signal at a given SIR_dB.
    The input is the signal of interest (SOI) and a number of sinusoid signals
    not of interest (SNOI) are added to the SOI at a prescribed signal-to-
    interference SIR level in dB.
Parameters
----------
s : ndarray of signal of SOI
SIR_dB : interference level in dB
N : Trim input signal s to length N + 1 samples
    fi : ndarray of interference frequencies in Hz
fs : sampling rate in Hz, default is 8000 Hz
Returns
-------
    r : ndarray of combined signal plus interference of length N+1 samples
Examples
--------
>>> # load a speech ndarray and trim to 5*8000 + 1 samples
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
"""
n = np.arange(0,N+1)
K = len(fi)
si = np.zeros(N+1)
for k in range(K):
si += np.cos(2*np.pi*fi[k]/fs*n);
s = s[:N+1]
Ps = np.var(s)
Psi = np.var(si)
r = s + np.sqrt(Ps/Psi*10**(-SIR_dB/10))*si
return r
def lms_ic(r,M,mu,delta=1):
"""
Least mean square (LMS) interference canceller adaptive filter.
A complete LMS adaptive filter simulation function for the case of
interference cancellation. Used in the digital filtering case study.
Parameters
----------
    r : ndarray of the noisy (with interference) input signal
    M : FIR Filter length (order M-1)
    mu : LMS step-size
    delta : decorrelation delay used to generate the reference signal, default 1
Returns
-------
n : ndarray Index vector
r : ndarray noisy (with interference) input signal
r_hat : ndarray filtered output (NB_hat[n])
e : ndarray error sequence (WB_hat[n])
ao : ndarray final value of weight vector
F : ndarray frequency response axis vector
Ao : ndarray frequency response of filter
Examples
    --------
>>> # import a speech signal
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> # add interference at 1kHz and 1.5 kHz and
>>> # truncate to 5 seconds
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
>>> # simulate with a 64 tap FIR and mu = 0.005
>>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005)
"""
N = len(r)-1;
# Form the reference signal y via delay delta
y = signal.lfilter(np.hstack((np.zeros(delta), np.array([1]))),1,r)
# Initialize output vector x_hat to zero
r_hat = np.zeros(N+1)
# Initialize error vector e to zero
e = np.zeros(N+1)
# Initialize weight vector to zero
ao = np.zeros(M+1)
# Initialize filter memory to zero
z = np.zeros(M)
# Initialize a vector for holding ym of length M+1
ym = np.zeros(M+1)
for k in range(N+1):
# Filter one sample at a time
r_hat[k],z = signal.lfilter(ao,np.array([1]),np.array([y[k]]),zi=z)
# Form the error sequence
e[k] = r[k] - r_hat[k]
# Update the weight vector
ao = ao + 2*mu*e[k]*ym
# Update vector used for correlation with e(k)
ym = np.hstack((np.array([y[k]]), ym[:-1]))
# Create filter frequency response
F, Ao = signal.freqz(ao,1,1024)
F/= (2*np.pi)
Ao = 20*np.log10(abs(Ao))
return np.arange(0,N+1), r, r_hat, e, ao, F, Ao
def fir_iir_notch(fi,fs,r=0.95):
"""
Design a second-order FIR or IIR notch filter.
    A second-order FIR notch filter is created by placing conjugate
    zeros on the unit circle at the angle corresponding to the notch center
frequency. The IIR notch variation places a pair of conjugate poles
at the same angle, but with radius r < 1 (typically 0.9 to 0.95).
Parameters
----------
    fi : notch frequency in Hz relative to fs
fs : the sampling frequency in Hz, e.g. 8000
r : pole radius for IIR version, default = 0.95
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
If the pole radius is 0 then an FIR version is created, that is
there are no poles except at z = 0.
Examples
--------
>>> b_FIR, a_FIR = fir_iir_notch(1000,8000,0)
>>> b_IIR, a_IIR = fir_iir_notch(1000,8000)
"""
w0 = 2*np.pi*fi/float(fs)
if r >= 1:
print('Poles on or outside unit circle.')
if r == 0:
a = np.array([1.0])
else:
a = np.array([1, -2*r*np.cos(w0), r**2])
b = np.array([1, -2*np.cos(w0), 1])
return b, a
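# The arrays above correspond to the notch system function
#   H(z) = (1 - 2*cos(w0)*z**-1 + z**-2) / (1 - 2*r*cos(w0)*z**-1 + r**2*z**-2),
# with w0 = 2*pi*fi/fs; r = 0 reduces the denominator to 1, giving the FIR case.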
def simple_SA(x,NS,NFFT,fs,NAVG=1,window='boxcar'):
"""
Spectral estimation using windowing and averaging.
    This function implements averaged periodogram spectral estimation,
    similar to NumPy's psd() function, but more
    specialized for the windowing case study of Chapter 16.
Parameters
----------
x : ndarray containing the input signal
NS : The subrecord length less zero padding, e.g. NS < NFFT
NFFT : FFT length, e.g., 1024 = 2**10
fs : sampling rate in Hz
NAVG : the number of averages, e.g., 1 for deterministic signals
window : hardcoded window 'boxcar' (default) or 'hanning'
Returns
-------
f : ndarray frequency axis in Hz on [0, fs/2]
Sx : ndarray the power spectrum estimate
Notes
-----
The function also prints the maximum number of averages K possible
for the input data record.
Examples
--------
>>> n = arange(0,2048)
>>> x = cos(2*pi*1000/10000*n) + 0.01*cos(2*pi*3000/10000*n)
>>> f, Sx = simple_SA(x,128,512,10000)
>>> f, Sx = simple_SA(x,256,1024,10000,window='hanning')
>>> plot(f, 10*log10(Sx))
"""
Nx = len(x)
    K = Nx//NS  # integer number of available subrecords
print('K = ', K)
if NAVG > K:
print('NAVG exceeds number of available subrecords')
return 0,0
if window.lower() == 'boxcar' or window.lower() == 'rectangle':
w = signal.boxcar(NS)
elif window.lower() == 'hanning':
w = signal.hanning(NS)
xsw = np.zeros((K,NS)) + 1j*np.zeros((K,NS))
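    # window each length-NS subrecord (only the first NAVG subrecords are used)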
for k in range(NAVG):
xsw[k,] = w*x[k*NS:(k+1)*NS]
Sx = np.zeros(NFFT)
for k in range(NAVG):
X = fft.fft(xsw[k,],NFFT)
Sx += abs(X)**2
Sx /= float(NAVG)
Sx /= float(NFFT**2)
    if x.dtype != 'complex128':
        n = np.arange(NFFT//2)
        f = fs*n/float(NFFT)
        Sx = Sx[0:NFFT//2]
    else:
        n = np.arange(NFFT//2)
        f = fs*np.hstack((np.arange(-NFFT//2,0),np.arange(NFFT//2)))/float(NFFT)
        Sx = np.hstack((Sx[NFFT//2:],Sx[0:NFFT//2]))
return f, Sx
def line_spectra(fk,Xk,mode,sides=2,linetype='b',lwidth=2,floor_dB=-100,fsize=(6,4)):
"""
    Plot the Fourier series line spectra given the coefficients.
This function plots two-sided and one-sided line spectra of a periodic
signal given the complex exponential Fourier series coefficients and
the corresponding harmonic frequencies.
Parameters
----------
fk : vector of real sinusoid frequencies
Xk : magnitude and phase at each positive frequency in fk
mode : 'mag' => magnitude plot, 'magdB' => magnitude in dB plot,
mode cont : 'magdBn' => magnitude in dB normalized, 'phase' => a phase plot in radians
sides : 2; 2-sided or 1-sided
linetype : line type per Matplotlib definitions, e.g., 'b';
lwidth : 2; linewidth in points
fsize : optional figure size in inches, default = (6,4) inches
Returns
-------
Nothing : A plot window opens containing the line spectrum plot
Notes
-----
Since real signals are assumed the frequencies of fk are 0 and/or positive
numbers. The supplied Fourier coefficients correspond.
Examples
--------
>>> n = arange(0,25)
>>> # a pulse train with 10 Hz fundamental and 20% duty cycle
>>> fk = n*10
>>> Xk = sinc(n*10*.02)*exp(-1j*2*pi*n*10*.01) # 1j = sqrt(-1)
>>> line_spectra(fk,Xk,'mag')
>>> line_spectra(fk,Xk,'phase')
"""
plt.figure(figsize=fsize)
# Eliminate zero valued coefficients
    idx = np.nonzero(Xk != 0)[0]
Xk = Xk[idx]
fk = fk[idx]
if mode == 'mag':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, 2.*np.abs(Xk[k])],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), 0, 1.05*max(abs(Xk))])
elif sides == 1:
plt.axis([0, 1.2*max(fk), 0, 1.05*2*max(abs(Xk))])
else:
print('Invalid sides type')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdB':
Xk_dB = 20*np.log10(np.abs(Xk))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
print('Invalid sides type')
plt.ylabel('Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdBn':
Xk_dB = 20*np.log10(np.abs(Xk)/max(np.abs(Xk)))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
print('Invalid sides type')
plt.ylabel('Normalized Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'phase':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, -np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
else:
print('Invalid sides type')
plt.grid()
if sides == 2:
plt.plot([-1.2*max(fk), 1.2*max(fk)], [0, 0],'k')
plt.axis([-1.2*max(fk), 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
elif sides == 1:
plt.plot([0, 1.2*max(fk)], [0, 0],'k')
plt.axis([0, 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
else:
print('Invalid sides type')
plt.ylabel('Phase (rad)')
plt.xlabel('Frequency (Hz)')
else:
print('Invalid mode type')
def fs_coeff(xp,N,f0,one_side=True):
"""
Numerically approximate the Fourier series coefficients given periodic x(t).
    The input is assumed to represent one period of the waveform
x(t) that has been uniformly sampled. The number of samples supplied
to represent one period of the waveform sets the sampling rate.
Parameters
----------
xp : ndarray of one period of the waveform x(t)
N : maximum Fourier series coefficient, [0,...,N]
f0 : fundamental frequency used to form fk.
Returns
-------
Xk : ndarray of the coefficients over indices [0,1,...,N]
fk : ndarray of the harmonic frequencies [0, f0,2f0,...,Nf0]
Notes
-----
len(xp) >= 2*N+1 as len(xp) is the fft length.
Examples
--------
>>> t = arange(0,1,1/1024.)
>>> # a 20% duty cycle pulse starting at t = 0
>>> x_rect = rect(t-.1,0.2)
>>> Xk, fk = fs_coeff(x_rect,25,10)
>>> # plot the spectral lines
>>> line_spectra(fk,Xk,'mag')
"""
Nint = len(xp)
if Nint < 2*N+1:
print('Number of samples in xp insufficient for requested N.')
return 0,0
Xp = fft.fft(xp,Nint)/float(Nint)
# To interface with the line_spectra function use one_side mode
if one_side:
Xk = Xp[0:N+1]
fk = f0*np.arange(0,N+1)
else:
Xk = np.hstack((Xp[-N:],Xp[0:N+1]))
fk = f0*np.arange(-N,N+1)
return Xk, fk
def fs_approx(Xk,fk,t):
"""
Synthesize periodic signal x(t) using Fourier series coefficients at harmonic frequencies
Assume the signal is real so coefficients Xk are supplied for nonnegative
    indices. The negative index coefficients are assumed to be complex
conjugates.
Parameters
----------
Xk : ndarray of complex Fourier series coefficients
fk : ndarray of harmonic frequencies in Hz
t : ndarray time axis corresponding to output signal array x_approx
Returns
-------
x_approx : ndarray of periodic waveform approximation over time span t
Examples
--------
>>> t = arange(0,2,.002)
>>> # a 20% duty cycle pulse train
>>> n = arange(0,20,1) # 0 to 19th harmonic
>>> fk = 1*n # period = 1s
>>> Xk = np.sinc(n*10*.02)*np.exp(-1j*2*np.pi*n*10*.01)
>>> x_approx = fs_approx(Xk,fk,t)
>>> plot(t,x_approx)
"""
x_approx = np.zeros(len(t))
for k,Xkk in enumerate(Xk):
if fk[k] == 0:
x_approx += np.real(Xkk)*np.ones(len(t)) # assume signal is real so DC is real
else:
x_approx += 2*np.abs(Xkk)*np.cos(2*np.pi*fk[k]*t+np.angle(Xkk))
return x_approx
def conv_sum(x1,nx1,x2,nx2,extent=('f','f')):
"""
Discrete convolution of x1 and x2 with proper tracking of the output time axis.
    Convolve two discrete-time signals using the SciPy function signal.convolve.
    The time (sequence) axes are managed from input to output. y[n] = x1[n]*x2[n].
Parameters
----------
x1 : ndarray of signal x1 corresponding to nx1
nx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to nx2
nx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ny : ndarray of the corresponding sequence index n
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The default
extents of ('f','f') are used for signals that are active (have support)
on or within n1 and n2 respectively. A right-sided signal such as
a^n*u[n] is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> nx = arange(-5,10)
>>> x = drect(nx,4)
>>> y,ny = conv_sum(x,nx,x,nx)
>>> stem(ny,y)
>>> # Consider a pulse convolved with an exponential ('r' type extent)
>>> h = 0.5**nx*dstep(nx)
>>> y,ny = conv_sum(x,nx,h,nx,('f','r')) # note extents set
>>> stem(ny,y) # expect a pulse charge and discharge sequence
"""
nnx1 = np.arange(0,len(nx1))
nnx2 = np.arange(0,len(nx2))
n1 = nnx1[0]
n2 = nnx1[-1]
n3 = nnx2[0]
n4 = nnx2[-1]
    # Start by finding the valid output support or extent interval to ensure that
    # ambiguous results are not returned for non-finite extent signals.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n4+1-1)
ny = np.arange(0,len(x1)+len(x2)-1) + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
nny = np.arange(n1+n3,n1+1+n4+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n3+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
nny = np.arange(n2+n3,n2+1+n4+1-1)
ny = nny + nx1[-1]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
nny = np.arange(n1+n4,n2+1+n4+1-1)
        ny = nny + nx1[0]+nx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
nny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
nny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ny = nny + max(nx1[0]+nx2[-1],nx1[-1]+nx2[0])
else:
print('Invalid x1 x2 extents specified or valid extent not found!')
return 0,0
# Finally convolve the sequences
y = signal.convolve(x1, x2)
print("Output support: (%+d, %+d)" % (ny[0],ny[-1]))
return y[nny], ny
def conv_integral(x1,tx1,x2,tx2,extent = ('f','f')):
"""
Continuous-time convolution of x1 and x2 with proper tracking of the output time axis.
    Approximate the convolution integral for the convolution of two continuous-time signals using the SciPy function signal.convolve. The time (sequence) axes are managed from input to output. y(t) = x1(t)*x2(t).
Parameters
----------
x1 : ndarray of signal x1 corresponding to tx1
tx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to tx2
tx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ty : ndarray of the corresponding time axis for y
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The time steps used in
x1(t) and x2(t) must match. The default extents of ('f','f') are used for signals
that are active (have support) on or within t1 and t2 respectively. A right-sided
signal such as exp(-a*t)*u(t) is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> tx = arange(-5,10,.01)
>>> x = rect(tx-2,4) # pulse starts at t = 0
>>> y,ty = conv_integral(x,tx,x,tx)
>>> plot(ty,y) # expect a triangle on [0,8]
>>> # Consider a pulse convolved with an exponential ('r' type extent)
>>> h = 4*exp(-4*tx)*step(tx)
>>> y,ty = conv_integral(x,tx,h,tx,extent=('f','r')) # note extents set
>>> plot(ty,y) # expect a pulse charge and discharge waveform
"""
dt = tx1[1] - tx1[0]
nx1 = np.arange(0,len(tx1))
nx2 = np.arange(0,len(tx2))
n1 = nx1[0]
n2 = nx1[-1]
n3 = nx2[0]
n4 = nx2[-1]
    # Start by finding the valid output support or extent interval to ensure that
    # ambiguous results are not returned for non-finite extent signals.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n4+1-1)
ty = np.arange(0,len(x1)+len(x2)-1)*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
ny = np.arange(n1+n3,n1+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n3+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
ny = np.arange(n2+n3,n2+1+n4+1-1)
ty = ny*dt + tx1[-1]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
ny = np.arange(n1+n4,n2+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
ny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
ny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ty = ny*dt + max(tx1[0]+tx2[-1],tx1[-1]+tx2[0])
else:
print('Invalid x1 x2 extents specified or valid extent not found!')
return 0,0
# Finally convolve the sampled sequences and scale by dt
y = signal.convolve(x1, x2)*dt
print("Output support: (%+2.2f, %+2.2f)" % (ty[0],ty[-1]))
return y[ny], ty
def delta_eps(t,eps):
"""
Rectangular pulse approximation to impulse function.
Parameters
----------
t : ndarray of time axis
eps : pulse width
Returns
-------
d : ndarray containing the impulse approximation
Examples
--------
>>> t = arange(-2,2,.001)
>>> d = delta_eps(t,.1)
>>> plot(t,d)
"""
d = np.zeros(len(t))
for k,tt in enumerate(t):
if abs(tt) <= eps/2.:
d[k] = 1/float(eps)
return d
def step(t):
"""
Approximation to step function signal u(t).
In this numerical version of u(t) the step turns on at t = 0.
Parameters
----------
t : ndarray of the time axis
Returns
-------
x : ndarray of the step function signal u(t)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = step(t)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = step(t - 1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tt in enumerate(t):
if tt >= 0:
x[k] = 1.0
return x
def rect(t,tau):
"""
Approximation to the rectangle pulse Pi(t/tau).
In this numerical version of Pi(t/tau) the pulse is active
over -tau/2 <= t <= tau/2.
Parameters
----------
t : ndarray of the time axis
tau : the pulse width
Returns
-------
x : ndarray of the signal Pi(t/tau)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = rect(t,1.0)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = rect(t - 1.0,1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/2.:
x[k] = 0
else:
x[k] = 1
return x
def tri(t,tau):
"""
Approximation to the triangle pulse Lambda(t/tau).
In this numerical version of Lambda(t/tau) the pulse is active
over -tau <= t <= tau.
Parameters
----------
t : ndarray of the time axis
tau : one half the triangle base width
Returns
-------
x : ndarray of the signal Lambda(t/tau)
Examples
--------
>>> t = arange(-1,5,.01)
>>> x = tri(t,1.0)
>>> plot(t,x)
>>> # to turn on at t = 1 shift t
>>> x = tri(t - 1.0,1.0)
>>> plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/1.:
x[k] = 0
else:
x[k] = 1 - np.abs(tk)/tau
return x
def dimpulse(n):
"""
Discrete impulse function delta[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal delta[n]
Examples
--------
>>> n = arange(-5,5)
>>> x = dimpulse(n)
>>> stem(n,x)
>>> # shift the delta left by 2
>>> x = dimpulse(n+2)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn == 0:
x[k] = 1.0
return x
def dstep(n):
"""
Discrete step function u[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal u[n]
Examples
--------
>>> n = arange(-5,5)
>>> x = dstep(n)
>>> stem(n,x)
    >>> # shift the step left by 2
>>> x = dstep(n+2)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0:
x[k] = 1.0
return x
def drect(n,N):
"""
Discrete rectangle function of duration N samples.
The signal is active on the interval 0 <= n <= N-1. Also known
as the rectangular window function, which is available in
scipy.signal.
Parameters
----------
n : ndarray of the time axis
N : the pulse duration
Returns
-------
x : ndarray of the signal
Notes
-----
The discrete rectangle turns on at n = 0, off at n = N-1 and
has duration of exactly N samples.
Examples
--------
>>> n = arange(-5,5)
    >>> x = drect(n,3)
    >>> stem(n,x)
    >>> # shift the pulse left by 2
    >>> x = drect(n+2,3)
>>> stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0 and nn < N:
x[k] = 1.0
return x
def rc_imp(Ns,alpha,M=6):
"""
A truncated raised cosine pulse used in digital communications.
The pulse shaping factor 0< alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> b = rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> stem(n,b)
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n));
a = alpha;
Ns *= 1.0
for i in range(len(n)):
if (1 - 4*(a*n[i]/Ns)**2) == 0:
b[i] = np.pi/4*np.sinc(1/(2.*a))
else:
b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(1 - 4*(a*n[i]/Ns)**2)
return b
def sqrt_rc_imp(Ns,alpha,M=6):
"""
A truncated square root raised cosine pulse used in digital communications.
    The pulse shaping factor 0 < alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
    when forming a pulse shaped digital communications waveform. When the
    square root raised cosine (SRC) pulse is used to generate the Tx signal and
    is also used at the receiver as a matched filter (receiver FIR filter), the
    received signal is raised cosine shaped, thus having zero
    intersymbol interference and optimum removal of additive white
    noise, if present, at the receiver input.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> b = sqrt_rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> stem(n,b)
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
Ns *= 1.0
a = alpha
for i in range(len(n)):
        if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(float).eps/2:
b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a)))
else:
b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2))
b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a))
return b
def PN_gen(N_bits,m=5):
"""
Maximal length sequence signal generator.
    Generates a sequence of 0/1 bits of N_bits duration. The bits themselves
    are obtained from an m-sequence generated using m shift registers. Available
    m-sequences (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
N_bits : the number of bits to generate
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
PN : ndarray of the generator output over N_bits
Notes
-----
The sequence is periodic having period 2**m - 1 (2^m - 1).
Examples
--------
    >>> # A 15 bit period signal over 50 bits
>>> PN = PN_gen(50,4)
"""
c = m_seq(m)
Q = len(c)
max_periods = int(np.ceil(N_bits/float(Q)))
PN = np.zeros(max_periods*Q)
for k in range(max_periods):
PN[k*Q:(k+1)*Q] = c
PN = np.resize(PN, (1,N_bits))
return PN.flatten()
def m_seq(m):
"""
Generate an m-sequence ndarray using an all-ones initialization.
Available m-sequence (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
c : ndarray of one period of the m-sequence
Notes
-----
The sequence period is 2**m - 1 (2^m - 1).
Examples
--------
>>> c = m_seq(5)
"""
# Load shift register with all ones to start
sr = np.ones(m)
    # M-sequence length is:
Q = 2**m - 1
c = np.zeros(Q)
if m == 2:
taps = np.array([1, 1, 1])
elif m == 3:
taps = np.array([1, 0, 1, 1])
elif m == 4:
taps = np.array([1, 0, 0, 1, 1])
elif m == 5:
taps = np.array([1, 0, 0, 1, 0, 1])
elif m == 6:
taps = np.array([1, 0, 0, 0, 0, 1, 1])
elif m == 7:
taps = np.array([1, 0, 0, 0, 1, 0, 0, 1])
elif m == 8:
taps = np.array([1, 0, 0, 0, 1, 1, 1, 0, 1])
elif m == 9:
taps = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 1])
elif m == 10:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1])
elif m == 11:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1])
elif m == 12:
taps = np.array([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1])
elif m == 16:
taps = np.array([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1])
else:
print('Invalid length specified')
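    # Clock the shift register Q times: output the last stage, form the feedback
    # bit from XORs of the output with the tapped stages, then shift it back in.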
for n in range(Q):
tap_xor = 0
c[n] = sr[-1]
for k in range(1,m):
if taps[k] == 1:
tap_xor = np.bitwise_xor(tap_xor,np.bitwise_xor(int(sr[-1]),int(sr[m-1-k])))
sr[1:] = sr[:-1]
sr[0] = tap_xor
return c
def BPSK_tx(N_bits,Ns,ach_fc=2.0,ach_lvl_dB=-100,pulse='rect',alpha = 0.25,M=6):
"""
    Generates a biphase shift keyed (BPSK) transmitter signal with adjacent channel interference.
Generates three BPSK signals with rectangular or square root raised cosine (SRC)
pulse shaping of duration N_bits and Ns samples per bit. The desired signal is
    centered on f = 0, while the adjacent channel signals to the left and right
    are generated at a dB level relative to the desired signal. Used in the
digital communications Case Study supplement.
Parameters
----------
N_bits : the number of bits to simulate
Ns : the number of samples per bit
ach_fc : the frequency offset of the adjacent channel signals (default 2.0)
ach_lvl_dB : the level of the adjacent channel signals in dB (default -100)
    pulse : the pulse shape 'rect' or 'src'
alpha : square root raised cosine pulse shape factor (default = 0.25)
M : square root raised cosine pulse truncation factor (default = 6)
Returns
-------
x : ndarray of the composite signal x0 + ach_lvl*(x1p + x1m)
b : the transmit pulse shape
data0 : the data bits used to form the desired signal; used for error checking
Notes
-----
Examples
--------
>>> x,b,data0 = BPSK_tx(1000,10,'src')
"""
x0,b,data0 = NRZ_bits(N_bits,Ns,pulse,alpha,M)
x1p,b,data1p = NRZ_bits(N_bits,Ns,pulse,alpha,M)
x1m,b,data1m = NRZ_bits(N_bits,Ns,pulse,alpha,M)
n = np.arange(len(x0))
x1p = x1p*np.exp(1j*2*np.pi*ach_fc/float(Ns)*n)
x1m = x1m*np.exp(-1j*2*np.pi*ach_fc/float(Ns)*n)
ach_lvl = 10**(ach_lvl_dB/20.)
return x0 + ach_lvl*(x1p + x1m), b, data0
#def BPSK_rx(r,b,):
def NRZ_bits(N_bits,Ns,pulse='rect',alpha = 0.25,M=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping.
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping.
Parameters
----------
N_bits : number of NRZ +/-1 data bits to produce
Ns : the number of samples per bit,
    pulse : 'rect', 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
data : ndarray of the underlying data bits
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
This function is used by BPSK_tx in the Case Study article.
Examples
--------
>>> x,b,data = NRZ_bits(100,10)
>>> t = arange(len(x))
>>> plot(t,x)
"""
data = np.random.randint(0,2,N_bits)
x = np.hstack((2*data.reshape(N_bits,1)-1,np.zeros((N_bits,Ns-1))))
x =x.flatten()
if pulse.lower() == 'rect':
b = np.ones(Ns)
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
else:
        print('pulse type must be rect, rc, or src')
x = signal.lfilter(b,1,x)
return x,b/float(Ns),data
def NRZ_bits2(data,Ns,pulse='rect',alpha = 0.25,M=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping with user data
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping. The data sequence is user supplied.
Parameters
----------
data : ndarray of the data bits as 0/1 values
Ns : the number of samples per bit,
    pulse : 'rect', 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
Examples
--------
    >>> x,b = NRZ_bits2(m_seq(5),10)
>>> t = arange(len(x))
>>> plot(t,x)
"""
N_bits = len(data)
x = np.hstack((2*data.reshape(N_bits,1)-1,np.zeros((N_bits,Ns-1))))
x = x.flatten()
if pulse.lower() == 'rect':
b = np.ones(Ns)
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
else:
        print('pulse type must be rect, rc, or src')
x = signal.lfilter(b,1,x)
return x,b/float(Ns)
def eye_plot(x,L,S=0):
"""
Eye pattern plot of a baseband digital communications waveform.
The signal must be real, but can be multivalued in terms of the underlying
modulation scheme. Used for BPSK eye plots in the Case Study article.
Parameters
----------
x : ndarray of the real input data vector/array
L : display length in samples (usually two symbols)
S : start index
Returns
-------
Nothing : A plot window opens containing the eye plot
Notes
-----
Increase S to eliminate filter transients.
Examples
--------
>>> # 1000 bits at 10 samples per bit with 'rc' shaping
>>> x,b, data = NRZ_bits(1000,10,'rc')
>>> eye_plot(x,20,60)
"""
plt.figure(figsize=(6,4))
idx = np.arange(0,L+1)
plt.plot(idx,x[S:S+L+1],'b')
k_max = int((len(x) - S)/L)-1
for k in range(1,k_max):
plt.plot(idx,x[S+k*L:S+L+1+k*L],'b')
plt.grid()
plt.xlabel('Time Index - n')
plt.ylabel('Amplitude')
plt.title('Eye Plot')
return 0
def scatter(x,Ns,start):
"""
Sample a baseband digital communications waveform at the symbol spacing.
Parameters
----------
x : ndarray of the input digital comm signal
Ns : number of samples per symbol (bit)
start : the array index to start the sampling
Returns
-------
xI : ndarray of the real part of x following sampling
xQ : ndarray of the imaginary part of x following sampling
Notes
-----
    Normally the signal is complex, so the scatter plot contains
    clusters at points in the complex plane. For a binary signal
    such as BPSK, the point centers are nominally +/-1 on the real
    axis. Start is used to keep transients from the FIR
    pulse shaping filters from appearing in the scatter plot.
Examples
--------
>>> x,b, data = NRZ_bits(1000,10,'rc')
>>> # add some noise so points are now scattered about +/-1
>>> y = cpx_AWGN(x,20,10)
>>> yI,yQ = scatter(y,10,60)
>>> plot(yI,yQ,'.')
>>> axis('equal')
"""
xI = np.real(x[start::Ns])
xQ = np.imag(x[start::Ns])
return xI, xQ
def bit_errors(z,data,start,Ns):
"""
A simple bit error counting function.
    In its present form this function compares hard decision BPSK bits
    in +/-1 form with the 0/1 binary data that was transmitted and counts
    the bit errors. Timing between the Tx
    and Rx data is the responsibility of the user. An enhanced
version of this function, which features automatic synching
will be created in the future.
Parameters
----------
z : ndarray of hard decision BPSK data prior to symbol spaced sampling
data : ndarray of reference bits in 1/0 format
    start : timing reference for the received signal
Ns : the number of samples per symbol
Returns
-------
Pe_hat : the estimated probability of a bit error
Notes
-----
    The Tx and Rx data streams are exclusive-or'd and then the bit errors
are summed, and finally divided by the number of bits observed to form an
estimate of the bit error probability. This function needs to be
enhanced to be more useful.
Examples
--------
>>> from scipy import signal
>>> x,b, data = NRZ_bits(1000,10)
>>> # set Eb/N0 to 8 dB
>>> y = cpx_AWGN(x,8,10)
>>> # matched filter the signal
>>> z = signal.lfilter(b,1,y)
>>> # make bit decisions at 10 and Ns multiples thereafter
>>> Pe_hat = bit_errors(z,data,10,10)
"""
Pe_hat = np.sum(data[0:len(z[start::Ns])]^np.int64((np.sign(np.real(z[start::Ns]))+1)/2))/float(len(z[start::Ns]))
return Pe_hat
def cpx_AWGN(x,EsN0,Ns):
"""
Apply white Gaussian noise to a digital communications signal.
This function represents a complex baseband white Gaussian noise
digital communications channel. The input signal array may be real
or complex.
Parameters
----------
x : ndarray noise free complex baseband input signal.
    EsN0 : set the channel Es/N0 (Eb/N0 for binary) level in dB
Ns : number of samples per symbol (bit)
Returns
-------
y : ndarray x with additive noise added.
Notes
-----
Set the channel energy per symbol-to-noise power spectral
density ratio (Es/N0) in dB.
Examples
--------
>>> x,b, data = NRZ_bits(1000,10)
>>> # set Eb/N0 = 10 dB
>>> y = cpx_AWGN(x,10,10)
"""
w = np.sqrt(Ns*np.var(x)*10**(-EsN0/10.)/2.)*(np.random.randn(len(x)) + 1j*np.random.randn(len(x)))
return x+w
def my_psd(x,NFFT=2**10,Fs=1):
"""
    A local version of matplotlib's psd() function that returns the plot arrays.
A mlab.psd wrapper function that returns two ndarrays;
makes no attempt to auto plot anything.
Parameters
----------
x : ndarray input signal
NFFT : a power of two, e.g., 2**10 = 1024
Fs : the sampling rate in Hz
Returns
-------
Px : ndarray of the power spectrum estimate
f : ndarray of frequency values
Notes
-----
This function makes it easier to overlay spectrum plots because
you have better control over the axis scaling than when using psd()
in the autoscale mode.
Examples
--------
>>> x,b, data = NRZ_bits(10000,10)
>>> Px,f = my_psd(x,2**10,10)
>>> plot(f, 10*log10(Px))
"""
Px,f = pylab.mlab.psd(x,NFFT,Fs)
return Px.flatten(), f
def am_tx(m,a_mod,fc=75e3):
"""
AM transmitter for Case Study of Chapter 17.
Assume input is sampled at 8 Ksps and upsampling
by 24 is performed to arrive at fs_out = 192 Ksps.
Parameters
----------
m : ndarray of the input message signal
a_mod : AM modulation index, between 0 and 1
fc : the carrier frequency in Hz
Returns
-------
x192 : ndarray of the upsampled by 24 and modulated carrier
t192 : ndarray of the upsampled by 24 time axis
m24 : ndarray of the upsampled by 24 message signal
Notes
-----
The sampling rate of the input signal is assumed to be 8 kHz.
Examples
--------
>>> n = arange(0,1000)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
"""
m24 = interp24(m)
t192 = np.arange(len(m24))/192.0e3
#m24 = np.cos(2*np.pi*2.0e3*t192)
m_max = np.max(np.abs(m24))
x192 = (1 + a_mod*m24/m_max)*np.cos(2*np.pi*fc*t192)
return x192, t192, m24
def am_rx(x192):
"""
AM envelope detector receiver for the Chapter 17 Case Study
The receiver bandpass filter is not included in this function.
Parameters
----------
x192 : ndarray of the AM signal at sampling rate 192 ksps
Returns
-------
m_rx8 : ndarray of the demodulated message at 8 ksps
t8 : ndarray of the time axis at 8 ksps
m_rx192 : ndarray of the demodulated output at 192 ksps
x_edet192 : ndarray of the envelope detector output at 192 ksps
Notes
-----
The bandpass filter needed at the receiver front-end can be designed
using b_bpf,a_bpf = am_rx_BPF().
Examples
--------
>>> n = arange(0,1000)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
>>> m_rx8,t8,m_rx192,x_edet192 = am_rx(x192)
"""
x_edet192 = env_det(x192)
m_rx8 = deci24(x_edet192)
# remove DC offset from the env_det + LPF output
m_rx8 -= np.mean(m_rx8)
t8 = np.arange(len(m_rx8))/8.0e3
"""
For performance testing also filter x_env_det
192e3 using a Butterworth cascade.
The filter cutoff is 5kHz, the message BW.
"""
b192,a192 = signal.butter(5,2*5.0e3/192.0e3)
m_rx192 = signal.lfilter(b192,a192,x_edet192)
m_rx192 = signal.lfilter(b192,a192,m_rx192)
m_rx192 -= np.mean(m_rx192)
return m_rx8,t8,m_rx192,x_edet192
def am_rx_BPF(N_order = 7, ripple_dB = 1, B = 10e3, fs = 192e3):
"""
Bandpass filter design for the AM receiver Case Study of Chapter 17.
Design a 7th-order Chebyshev type 1 bandpass filter to remove/reduce
    adjacent channel interference at the envelope detector input.
Parameters
----------
N_order : the filter order (default = 7)
ripple_dB : the passband ripple in dB (default = 1)
B : the RF bandwidth (default = 10e3)
fs : the sampling frequency
Returns
-------
b_bpf : ndarray of the numerator filter coefficients
a_bpf : ndarray of the denominator filter coefficients
Examples
--------
>>> from scipy import signal
>>> # Use the default values
>>> b_bpf,a_bpf = am_rx_BPF()
>>> # plot the filter pole-zero plot
>>> zplane(b_bpf,a_bpf)
>>> # plot the frequency response
>>> f = arange(0,192/2.,.1)
>>> w, Hbpf = signal.freqz(b_bpf,a_bpf,2*pi*f/192)
>>> plot(f,20*log10(abs(Hbpf)))
>>> axis([0,192/2.,-80,10])
"""
b_bpf,a_bpf = signal.cheby1(N_order,ripple_dB,2*np.array([75e3-B/2.,75e3+B/2.])/fs,'bandpass')
return b_bpf,a_bpf
def env_det(x):
"""
Ideal envelope detector.
This function retains the positive half cycles of the input signal.
Parameters
----------
    x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Examples
--------
>>> n = arange(0,100)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
    >>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
>>> y = env_det(x192)
"""
y = np.zeros(len(x))
for k,xx in enumerate(x):
if xx >= 0:
y[k] = xx
return y
def interp24(x):
"""
Interpolate by L = 24 using Butterworth filters.
The interpolation is done using three stages. Upsample by
L = 2 and lowpass filter, upsample by 3 and lowpass filter, then
upsample by L = 4 and lowpass filter. In all cases the lowpass
filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
track the upsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = interp24(x)
"""
# Stage 1: L = 2
b2,a2 = signal.butter(10,1/2.)
y1 = upsample(x,2)
y1 = signal.lfilter(b2,a2,2*y1)
# Stage 2: L = 3
b3,a3 = signal.butter(10,1/3.)
y2 = upsample(y1,3)
y2 = signal.lfilter(b3,a3,3*y2)
# Stage 3: L = 4
b4,a4 = signal.butter(10,1/4.)
y3 = upsample(y2,4)
y3 = signal.lfilter(b4,a4,4*y3)
return y3
def deci24(x):
"""
Decimate by L = 24 using Butterworth filters.
    The decimation is done using three stages. Lowpass filter and downsample by
    M = 2, lowpass filter and downsample by M = 3, then
    lowpass filter and downsample by M = 4. In all cases the lowpass
filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
    track the downsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = deci24(x)
"""
# Stage 1: M = 2
b2,a2 = signal.butter(10,1/2.)
y1 = signal.lfilter(b2,a2,x)
y1 = downsample(y1,2)
# Stage 2: M = 3
b3,a3 = signal.butter(10,1/3.)
y2 = signal.lfilter(b3,a3,y1)
y2 = downsample(y2,3)
# Stage 3: L = 4
b4,a4 = signal.butter(10,1/4.)
y3 = signal.lfilter(b4,a4,y2)
y3 = downsample(y3,4)
return y3
def upsample(x,L):
"""
Upsample by factor L
Insert L - 1 zero samples in between each input sample.
Parameters
----------
x : ndarray of input signal values
L : upsample factor
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = upsample(x,3)
"""
N_input = len(x)
y = np.hstack((x.reshape(N_input,1),np.zeros((N_input,L-1))))
y = y.flatten()
return y
def downsample(x,M,p=0):
"""
Downsample by factor M
Keep every Mth sample of the input. The phase of the input samples
kept can be selected.
Parameters
----------
x : ndarray of input signal values
    M : downsample factor
p : phase of decimated value, 0 (default), 1, ..., M-1
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = downsample(x,3)
>>> y = downsample(x,3,1)
"""
    x = x[0:(len(x)//M)*M]
    x = x.reshape((len(x)//M,M))
y = x[:,p]
return y
def zplane(b,a,auto_scale=True,size=2):
"""
    Create a z-plane pole-zero plot.
    Create a z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
auto_scale : bool (default True)
size : plot radius maximum when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> zplane(b,a)
>>> # Here the plot is generated using manual scaling
>>> zplane(b,a,False,1.5)
"""
M = len(b) - 1
N = len(a) - 1
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
#N_roots = np.array([1.0])
if M > 0:
N_roots = np.roots(b)
#D_roots = np.array([1.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
if M > 0 and N > 0:
size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
elif M > 0:
size = max(np.max(np.abs(N_roots)),1.0)+.1
elif N > 0:
size = max(1.0,np.max(np.abs(D_roots)))+.1
else:
size = 1.1
plt.figure(figsize=(5,5))
plt.axis('equal')
r = np.linspace(0,2*np.pi,200)
plt.plot(np.cos(r),np.sin(r),'r--')
plt.plot([-size,size],[0,0],'k-.')
plt.plot([0,0],[-size,size],'k-.')
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
        idx_N_mult = np.nonzero(N_mult > 1)[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
        idx_D_mult = np.nonzero(D_mult > 1)[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
if M - N < 0:
plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
elif M - N > 0:
plt.plot(0.0,0.0,'kx',ms=8)
if abs(M - N) > 1:
plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis([-size,size,-size,size])
return M,N
def rect_conv(n,N_len):
"""
The theoretical result of convolving two rectangle sequences.
The result is a triangle. The solution is
based on pure analysis. Simply coded as opposed
to efficiently coded.
Parameters
----------
n : ndarray of time axis
N_len : rectangle pulse duration
Returns
-------
    y : ndarray of the output signal
Examples
--------
>>> n = arange(-5,20)
>>> y = rect_conv(n,6)
"""
y = np.zeros(len(n))
for k in range(len(n)):
if n[k] >= 0 and n[k] < N_len-1:
y[k] = n[k] + 1
elif n[k] >= N_len-1 and n[k] <= 2*N_len-2:
y[k] = 2*N_len-1-n[k]
return y
def biquad2(w_num, r_num, w_den, r_den):
"""
A biquadratic filter in terms of conjugate pole and zero pairs.
Parameters
----------
w_num : zero frequency (angle) in rad/sample
r_num : conjugate zeros radius
w_den : pole frequency (angle) in rad/sample
r_den : conjugate poles radius; less than 1 for stability
Returns
-------
b : ndarray of numerator coefficients
a : ndarray of denominator coefficients
Examples
--------
    >>> b,a = biquad2(pi/4., 1, pi/4., 0.95)
"""
b = np.array([1, -2*r_num*np.cos(w_num), r_num**2])
a = np.array([1, -2*r_den*np.cos(w_den), r_den**2])
return b, a
def plot_na(x,y,mode='stem'):
pylab.figure(figsize=(5,2))
frame1 = pylab.gca()
if mode.lower() == 'stem':
pylab.stem(x,y)
else:
pylab.plot(x,y)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
pylab.show()
def from_wav(filename):
"""
Read a wave file.
A wrapper function for scipy.io.wavfile.read
that also includes int16 to float [-1,1] scaling.
Parameters
----------
filename : file name string
Returns
-------
fs : sampling frequency in Hz
x : ndarray of normalized to 1 signal samples
Examples
--------
>>> fs,x = from_wav('test_file.wav')
"""
fs, x = wavfile.read(filename)
return fs, x/32767.
def to_wav(filename,rate,x):
"""
Write a wave file.
A wrapper function for scipy.io.wavfile.write
that also includes int16 scaling and conversion.
Assume input x is [-1,1] values.
Parameters
----------
filename : file name string
rate : sampling frequency in Hz
Returns
-------
Nothing : writes only the *.wav file
Examples
--------
>>> to_wav('test_file.wav', 8000, x)
"""
x16 = np.int16(x*32767)
wavfile.write(filename, rate, x16)
if __name__ == '__main__':
t = np.arange(0,2,.002)
n = np.arange(0,20,1) # 0 to 19th harmonic
fk = 1*n # period = 1s
Xk = np.sinc(n*10*.02)*np.exp(-1j*2*np.pi*n*10*.01)
x_approx = fs_approx(Xk,fk,t)
plt.plot(t,x_approx)
plt.show()
# n = np.arange(0,10000)
# xc = np.exp(1j*7*np.sin(2*np.pi*n*1e3/1.e5))
# f,Sx = simple_SA(xc, 1024, 1024, 1e5)
"""
x = np.random.randn(10)
print x
b = signal.remez(16,[0,.1,.2,.5], [1,0], [1,1], 1)
w,H = signal.freqz(b,[1],512)
plot(w,20*log10(abs(H)))
figure(figsize=(6,4))
#plot(arange(0,len(b)),b)
y = signal.lfilter(b, [1], x,)
print y
zplane([1,1,1,1,1],[1,-.8],1.25)
"""
| 30.796196 | 362 | 0.578803 |
b45ab968fcd225e68781d4a3bdce360cca46e12b | 1,471 | py | Python | tests/python/test_stop_grad.py | rezahojabr/taichi | 122c0352ec480b740a4118819458cbf08d2e5ddb | [
"MIT"
] | 3 | 2020-01-08T02:58:51.000Z | 2020-10-28T07:01:58.000Z | tests/python/test_stop_grad.py | rezahojabr/taichi | 122c0352ec480b740a4118819458cbf08d2e5ddb | [
"MIT"
] | null | null | null | tests/python/test_stop_grad.py | rezahojabr/taichi | 122c0352ec480b740a4118819458cbf08d2e5ddb | [
"MIT"
] | 1 | 2020-03-25T16:37:00.000Z | 2020-03-25T16:37:00.000Z | import taichi as ti
@ti.all_archs
def test_normal_grad():
x = ti.var(ti.f32)
loss = ti.var(ti.f32)
n = 128
@ti.layout
def place():
ti.root.dense(ti.i, n).place(x)
ti.root.place(loss)
ti.root.lazy_grad()
@ti.kernel
def func():
for i in range(n):
ti.atomic_add(loss, x[i]**2)
for i in range(n):
x[i] = i
with ti.Tape(loss):
func()
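  # loss = sum_i x[i]**2, so d(loss)/dx[i] = 2*x[i] = 2*i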
for i in range(n):
assert x.grad[i] == i * 2
@ti.all_archs
def test_stop_grad():
x = ti.var(ti.f32)
loss = ti.var(ti.f32)
n = 128
@ti.layout
def place():
ti.root.dense(ti.i, n).place(x)
ti.root.place(loss)
ti.root.lazy_grad()
@ti.kernel
def func():
for i in range(n):
ti.core.stop_grad(x.snode().ptr)
ti.atomic_add(loss, x[i]**2)
for i in range(n):
x[i] = i
with ti.Tape(loss):
func()
for i in range(n):
assert x.grad[i] == 0
@ti.all_archs
def test_stop_grad2():
x = ti.var(ti.f32)
loss = ti.var(ti.f32)
n = 128
@ti.layout
def place():
ti.root.dense(ti.i, n).place(x)
ti.root.place(loss)
ti.root.lazy_grad()
@ti.kernel
def func():
    # Two loops: one with stop_grad, one without
for i in range(n):
ti.stop_grad(x)
ti.atomic_add(loss, x[i]**2)
for i in range(n):
ti.atomic_add(loss, x[i]**2)
for i in range(n):
x[i] = i
with ti.Tape(loss):
func()
  # Without stop_grad, x.grad[i] would be i * 4
for i in range(n):
assert x.grad[i] == i * 2
| 15.98913 | 46 | 0.563562 |
c8daf392a90a1460c4e64893c8e214df4c2a85d9 | 417 | py | Python | medical_prescription/user/admin.py | thiagonf/Sprint-3-GPP | 89f1dcf4e649f1bbbb8e21ea6b6f8a4565c996e2 | [
"MIT"
] | null | null | null | medical_prescription/user/admin.py | thiagonf/Sprint-3-GPP | 89f1dcf4e649f1bbbb8e21ea6b6f8a4565c996e2 | [
"MIT"
] | 2 | 2020-06-05T18:41:29.000Z | 2021-06-10T20:35:12.000Z | medical_prescription/user/admin.py | thiagonf/Sprint-3-GPP | 89f1dcf4e649f1bbbb8e21ea6b6f8a4565c996e2 | [
"MIT"
] | null | null | null | # Django
from django.contrib import admin
from .models import (
User, HealthProfessional, Patient, UserActivateProfile, ResetPasswordProfile
)
class UserAdmin(admin.ModelAdmin):
list_display = ['email', 'is_active']
admin.site.register(User, UserAdmin)
admin.site.register(HealthProfessional)
admin.site.register(Patient)
admin.site.register(UserActivateProfile)
admin.site.register(ResetPasswordProfile)
| 24.529412 | 80 | 0.803357 |
120868393ba9ea0dd010929d4aa3549887ad341d | 2,359 | py | Python | blog/models.py | tarek1500/Python-Blog | d9e8e90c20ad5906139b1468e4d195038dfbfc11 | [
"MIT"
] | null | null | null | blog/models.py | tarek1500/Python-Blog | d9e8e90c20ad5906139b1468e4d195038dfbfc11 | [
"MIT"
] | 5 | 2020-02-27T18:12:25.000Z | 2020-02-27T22:43:52.000Z | blog/models.py | tarek1500/Python-Blog | d9e8e90c20ad5906139b1468e4d195038dfbfc11 | [
"MIT"
] | 1 | 2020-05-16T00:34:02.000Z | 2020-05-16T00:34:02.000Z | from django.db import models
from django.contrib.auth.models import User
import re
# Create your models here.
def stringify(self):
return self.first_name + ' ' + self.last_name
User.add_to_class('__str__', stringify)
class Category(models.Model):
name = models.CharField(max_length = 100)
subscribe = models.ManyToManyField(User, blank = True)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length = 200, unique = True)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
def __str__(self):
return self.name
class Post(models.Model):
title = models.CharField(max_length = 200, unique = True)
body = models.TextField()
author = models.ForeignKey(User, on_delete = models.CASCADE)
tag = models.ManyToManyField(Tag, blank = True)
category = models.ForeignKey(Category, on_delete = models.CASCADE)
image = models.ImageField(null = True, blank = True)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
def __str__(self):
return self.title
def snippet(self):
return self.body[:200] + "..."
class Comment(models.Model):
content = models.TextField()
post = models.ForeignKey(Post, on_delete = models.CASCADE)
user = models.ForeignKey(User, on_delete = models.CASCADE)
reply = models.ForeignKey('Comment', null = True, related_name = 'replies', on_delete = models.CASCADE)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.content
def removeWords(self):
words = Word.objects.all()
for word in words:
self.content = re.sub(r"(?i)\b{}\b".format(word.name), word.name, self.content)
return self.content
class Word(models.Model):
name = models.CharField(max_length = 100)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
class Like(models.Model):
like = models.BooleanField()
post = models.ForeignKey(Post, on_delete = models.CASCADE)
user = models.ForeignKey(User, on_delete = models.CASCADE)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
| 31.453333 | 104 | 0.751166 |
f2573ca35fc374ec3cff6eca72a4f0b6f5f93549 | 712 | py | Python | coding-exercises-avairds/week4-edge-pyramids- contours/part1-edge-pyramids/sobel-operator.py | KyawThuHtun/OpenCV-with-Python | 2c471060d4f2aa4ec45cda5f544a61923e4ea2ed | [
"MIT"
] | 2 | 2021-06-30T04:56:51.000Z | 2021-06-30T04:57:11.000Z | coding-exercises-avairds/week4-edge-pyramids- contours/part1-edge-pyramids/sobel-operator.py | KyawThuHtun/OpenCV-with-Python | 2c471060d4f2aa4ec45cda5f544a61923e4ea2ed | [
"MIT"
] | null | null | null | coding-exercises-avairds/week4-edge-pyramids- contours/part1-edge-pyramids/sobel-operator.py | KyawThuHtun/OpenCV-with-Python | 2c471060d4f2aa4ec45cda5f544a61923e4ea2ed | [
"MIT"
] | 6 | 2020-08-03T02:29:48.000Z | 2020-08-09T13:21:23.000Z | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread("sudoku1.jpg", cv.IMREAD_GRAYSCALE) # read image as grayscale
sobelX = cv.Sobel(img, cv.CV_64F, 1, 0) # derivative with intensities along x direction
sobelY = cv.Sobel(img, cv.CV_64F, 0, 1) # derivative with intensities along y direction
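# take the absolute value and convert back to uint8 so negative-going gradients are not clipped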
sobelX = np.uint8(np.absolute(sobelX))
sobelY = np.uint8(np.absolute(sobelY))
sobelCombined = cv.bitwise_or(sobelX, sobelY)
titles = ['image', 'sobelX', 'sobelY', 'sobelCombined']
images = [img, sobelX, sobelY, sobelCombined]
for i in range(4):
plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
| 30.956522 | 88 | 0.703652 |
ba6b16a5bc12e5d07e067ae56db8bc395143af3d | 1,008 | py | Python | details_generator.py | Emotional-Support/discordaltgen | 7be376dc372258d824fdabb3edac07ebeba6e8d9 | [
"Apache-2.0"
] | null | null | null | details_generator.py | Emotional-Support/discordaltgen | 7be376dc372258d824fdabb3edac07ebeba6e8d9 | [
"Apache-2.0"
] | null | null | null | details_generator.py | Emotional-Support/discordaltgen | 7be376dc372258d824fdabb3edac07ebeba6e8d9 | [
"Apache-2.0"
] | null | null | null | import random
def rand_gen_email():
email = "".join(random.choice("abcdefghijklmnopqrstuvwxyz1234567890") for _ in range(10))
return email
def rand_gen_user():
username = "".join(random.choice("abcdefghijklmnopqrstuvwxyz1234567890") for _ in range(10))
return username
def rand_gen_pass():
password = "".join(random.choice("abcdefghijklmnopqrstuvwxyz1234567890") for _ in range(10))
return password
def rand_gen_birth():
    # return the date as a "YYYY-MM-DD" string; cap the day at 28 so it is valid for any month
    year = random.randint(1984, 2001)
    month = random.randint(1, 12)
    day = random.randint(1, 28)
    return "{}-{:02d}-{:02d}".format(year, month, day)
email = rand_gen_email()
username = rand_gen_user()
password = rand_gen_pass()
bday = rand_gen_birth()
payload = {
"fingerprint": "956833213890846760.kebhTMLabvfSte5HxhOrNPtcc2I",
"email": email,
"username": username,
"password": password,
"invite": "null",
"consent": "true",
"date_of_birth": bday,
"gift_code_sku_id": "null",
"captcha_key": "null",
}
| 22.909091 | 96 | 0.679563 |
32c77b7664d10c25be334946ad452799fc0f6fce | 85,570 | py | Python | python3-virtualenv/lib/python3.8/site-packages/sqlalchemy/dialects/oracle/base.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | python3-virtualenv/lib/python3.8/site-packages/sqlalchemy/dialects/oracle/base.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | python3-virtualenv/lib/python3.8/site-packages/sqlalchemy/dialects/oracle/base.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | # oracle/base.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: oracle
:name: Oracle
:full_support: 11.2, 18c
:normal_support: 11+
:best_effort: 8+
Auto Increment Behavior
-----------------------
SQLAlchemy Table objects which include integer primary keys are usually
assumed to have "autoincrementing" behavior, meaning they can generate their
own primary key values upon INSERT. For use within Oracle, two options are
available, which are the use of IDENTITY columns (Oracle 12 and above only)
or the association of a SEQUENCE with the column.
Specifying GENERATED AS IDENTITY (Oracle 12 and above)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Starting from version 12 Oracle can make use of identity columns using
the :class:`_sql.Identity` to specify the autoincrementing behavior::
t = Table('mytable', metadata,
Column('id', Integer, Identity(start=3), primary_key=True),
Column(...), ...
)
The CREATE TABLE for the above :class:`_schema.Table` object would be:
.. sourcecode:: sql
CREATE TABLE mytable (
id INTEGER GENERATED BY DEFAULT AS IDENTITY (START WITH 3),
...,
PRIMARY KEY (id)
)
The :class:`_schema.Identity` object supports many options to control the
"autoincrementing" behavior of the column, like the starting value, the
incrementing value, etc.
In addition to the standard options, Oracle supports setting
:paramref:`_schema.Identity.always` to ``None`` to use the default
generated mode, rendering GENERATED AS IDENTITY in the DDL. It also supports
setting :paramref:`_schema.Identity.on_null` to ``True`` to specify ON NULL
in conjunction with a 'BY DEFAULT' identity column.
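For example, a minimal sketch of a 'BY DEFAULT' identity column rendered with
ON NULL (the table and column names here are illustrative only)::
    t = Table('mytable', metadata,
        Column('id', Integer, Identity(on_null=True), primary_key=True),
        Column(...), ...
    )
which would emit ``id INTEGER GENERATED BY DEFAULT ON NULL AS IDENTITY`` in the
CREATE TABLE DDL.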
Using a SEQUENCE (all Oracle versions)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Older versions of Oracle had no "autoincrement"
feature, so SQLAlchemy relies upon sequences to produce these values. With the
older Oracle versions, *a sequence must always be explicitly specified to
enable autoincrement*. This is divergent from the majority of documentation
examples which assume the usage of an autoincrement-capable database. To
specify sequences, use the sqlalchemy.schema.Sequence object which is passed
to a Column construct::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
Column(...), ...
)
This step is also required when using table reflection, i.e. autoload_with=engine::
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
autoload_with=engine
)
.. versionchanged:: 1.4 Added :class:`_schema.Identity` construct
in a :class:`_schema.Column` to specify the option of an autoincrementing
column.
.. _oracle_isolation_level:
Transaction Isolation Level / Autocommit
----------------------------------------
The Oracle database supports "READ COMMITTED" and "SERIALIZABLE" modes of
isolation. The AUTOCOMMIT isolation level is also supported by the cx_Oracle
dialect.
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="AUTOCOMMIT"
)
For ``READ COMMITTED`` and ``SERIALIZABLE``, the Oracle dialect sets the
level at the session level using ``ALTER SESSION``, which is reverted back
to its default setting when the connection is returned to the connection
pool.
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``AUTOCOMMIT``
* ``SERIALIZABLE``
.. note:: The implementation for the
:meth:`_engine.Connection.get_isolation_level` method as implemented by the
Oracle dialect necessarily forces the start of a transaction using the
Oracle LOCAL_TRANSACTION_ID function; otherwise no level is normally
readable.
Additionally, the :meth:`_engine.Connection.get_isolation_level` method will
raise an exception if the ``v$transaction`` view is not available due to
permissions or other reasons, which is a common occurrence in Oracle
installations.
The cx_Oracle dialect attempts to call the
:meth:`_engine.Connection.get_isolation_level` method when the dialect makes
its first connection to the database in order to acquire the
"default"isolation level. This default level is necessary so that the level
can be reset on a connection after it has been temporarily modified using
:meth:`_engine.Connection.execution_options` method. In the common event
that the :meth:`_engine.Connection.get_isolation_level` method raises an
exception due to ``v$transaction`` not being readable as well as any other
database-related failure, the level is assumed to be "READ COMMITTED". No
warning is emitted for this initial first-connect condition as it is
expected to be a common restriction on Oracle databases.
.. versionadded:: 1.3.16 Added support for AUTOCOMMIT to the cx_oracle dialect
as well as the notion of a default isolation level
.. versionadded:: 1.3.21 Added support for SERIALIZABLE as well as live
reading of the isolation level.
.. versionchanged:: 1.3.22 In the event that the default isolation
level cannot be read due to permissions on the v$transaction view as
is common in Oracle installations, the default isolation level is hardcoded
to "READ COMMITTED" which was the behavior prior to 1.3.21.
.. seealso::
:ref:`dbapi_autocommit`
Identifier Casing
-----------------
In Oracle, the data dictionary represents all case insensitive identifier
names using UPPERCASE text. SQLAlchemy on the other hand considers an
all-lower case identifier name to be case insensitive. The Oracle dialect
converts all case insensitive identifiers to and from those two formats during
schema level communication, such as reflection of tables and indexes. Using
an UPPERCASE name on the SQLAlchemy side indicates a case sensitive
identifier, and SQLAlchemy will quote the name - this will cause mismatches
against data dictionary data received from Oracle, so unless identifier names
have been truly created as case sensitive (i.e. using quoted names), all
lowercase names should be used on the SQLAlchemy side.
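As a minimal sketch of this convention (the connection URL and table name are
illustrative), a table created case insensitively on the Oracle side is
reflected using its all-lowercase name::

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn")
    metadata = MetaData()

    # "my_table" matches the case insensitive MY_TABLE entry in the
    # Oracle data dictionary
    my_table = Table("my_table", metadata, autoload_with=engine)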
.. _oracle_max_identifier_lengths:
Max Identifier Lengths
----------------------
Oracle has changed the default max identifier length as of Oracle Server
version 12.2. Prior to this version, the length was 30, and for 12.2 and
greater it is now 128. This change impacts SQLAlchemy in the area of
generated SQL label names as well as the generation of constraint names,
particularly in the case where the constraint naming convention feature
described at :ref:`constraint_naming_conventions` is being used.
To assist with this change and others, Oracle includes the concept of a
"compatibility" version, which is a version number that is independent of the
actual server version in order to assist with migration of Oracle databases,
and may be configured within the Oracle server itself. This compatibility
version is retrieved using the query ``SELECT value FROM v$parameter WHERE
name = 'compatible';``. The SQLAlchemy Oracle dialect, when tasked with
determining the default max identifier length, will attempt to use this query
upon first connect in order to determine the effective compatibility version of
the server, which determines what the maximum allowed identifier length is for
the server. If the view is not available, the server version information is
used instead.
As of SQLAlchemy 1.4, the default max identifier length for the Oracle dialect
is 128 characters. Upon first connect, the compatibility version is detected
and if it is less than Oracle version 12.2, the max identifier length is
changed to be 30 characters. In all cases, setting the
:paramref:`_sa.create_engine.max_identifier_length` parameter will bypass this
change and the value given will be used as is::
engine = create_engine(
"oracle+cx_oracle://scott:tiger@oracle122",
max_identifier_length=30)
The maximum identifier length comes into play when generating anonymized
SQL labels in SELECT statements, but more crucially when generating constraint
names from a naming convention. It is this area that has created the need for
SQLAlchemy to change this default conservatively. For example, the following
naming convention produces two very different constraint names based on the
identifier length::
from sqlalchemy import Column
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy.dialects import oracle
from sqlalchemy.schema import CreateIndex
m = MetaData(naming_convention={"ix": "ix_%(column_0N_name)s"})
t = Table(
"t",
m,
Column("some_column_name_1", Integer),
Column("some_column_name_2", Integer),
Column("some_column_name_3", Integer),
)
ix = Index(
None,
t.c.some_column_name_1,
t.c.some_column_name_2,
t.c.some_column_name_3,
)
oracle_dialect = oracle.dialect(max_identifier_length=30)
print(CreateIndex(ix).compile(dialect=oracle_dialect))
With an identifier length of 30, the above CREATE INDEX looks like::
CREATE INDEX ix_some_column_name_1s_70cd ON t
(some_column_name_1, some_column_name_2, some_column_name_3)
However with length=128, it becomes::
CREATE INDEX ix_some_column_name_1some_column_name_2some_column_name_3 ON t
(some_column_name_1, some_column_name_2, some_column_name_3)
Applications which have run versions of SQLAlchemy prior to 1.4 on an Oracle
server version 12.2 or greater are therefore subject to the scenario of a
database migration that wishes to "DROP CONSTRAINT" on a name that was
previously generated with the shorter length. This migration will fail when
the identifier length is changed without the name of the index or constraint
first being adjusted. Such applications are strongly advised to make use of
:paramref:`_sa.create_engine.max_identifier_length`
in order to maintain control
of the generation of truncated names, and to fully review and test all database
migrations in a staging environment when changing this value to ensure that the
impact of this change has been mitigated.
.. versionchanged:: 1.4 the default max_identifier_length for Oracle is 128
characters, which is adjusted down to 30 upon first connect if an older
version of Oracle server (compatibility version < 12.2) is detected.
LIMIT/OFFSET Support
--------------------
Oracle has no direct support for LIMIT and OFFSET until version 12c.
To achieve this behavior across all widely used versions of Oracle starting
with the 8 series, SQLAlchemy currently makes use of ROWNUM to emulate
LIMIT/OFFSET; the exact methodology is taken from
https://blogs.oracle.com/oraclemagazine/on-rownum-and-limiting-results .
There is currently a single option to affect its behavior:
* the "FIRST_ROWS()" optimization keyword is not used by default. To enable
the usage of this optimization directive, specify ``optimize_limits=True``
to :func:`_sa.create_engine`.
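For example, a minimal sketch of enabling this directive (the DSN is
illustrative)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@dsn",
        optimize_limits=True,
    )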
.. versionchanged:: 1.4
The Oracle dialect renders limit/offset integer values using a "post
compile" scheme which renders the integer directly before passing the
statement to the cursor for execution. The ``use_binds_for_limits`` flag
no longer has an effect.
.. seealso::
:ref:`change_4808`.
Support for changing the row number strategy, which would include one that
makes use of the ``row_number()`` window function as well as one that makes
use of the Oracle 12c "FETCH FIRST N ROW / OFFSET N ROWS" keywords, may be
added in a future release.
.. _oracle_returning:
RETURNING Support
-----------------
The Oracle database supports a limited form of RETURNING, in order to retrieve
result sets of matched rows from INSERT, UPDATE and DELETE statements.
Oracle's RETURNING..INTO syntax only supports one row being returned, as it
relies upon OUT parameters in order to function. In addition, supported
DBAPIs have further limitations (see :ref:`cx_oracle_returning`).
SQLAlchemy's "implicit returning" feature, which employs RETURNING within an
INSERT and sometimes an UPDATE statement in order to fetch newly generated
primary key values and other SQL defaults and expressions, is normally enabled
on the Oracle backend. By default, "implicit returning" typically only
fetches the value of a single ``nextval(some_seq)`` expression embedded into
an INSERT in order to increment a sequence within an INSERT statement and get
the value back at the same time. To disable this feature across the board,
specify ``implicit_returning=False`` to :func:`_sa.create_engine`::
engine = create_engine("oracle://scott:tiger@dsn",
implicit_returning=False)
Implicit returning can also be disabled on a table-by-table basis as a table
option::
# Core Table
my_table = Table("my_table", metadata, ..., implicit_returning=False)
# declarative
class MyClass(Base):
__tablename__ = 'my_table'
__table_args__ = {"implicit_returning": False}
.. seealso::
:ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on
implicit returning.
ON UPDATE CASCADE
-----------------
Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger-based
solution is available at
http://asktom.oracle.com/tkyte/update_cascade/index.html .
When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
cascading updates - specify ForeignKey objects using the
"deferrable=True, initially='deferred'" keyword arguments,
and specify "passive_updates=False" on each relationship().
Oracle 8 Compatibility
----------------------
When Oracle 8 is detected, the dialect internally configures itself to the
following behaviors:
* the use_ansi flag is set to False. This has the effect of converting all
JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
makes use of Oracle's (+) operator.
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are
issued instead. This is because these types don't seem to work correctly on
Oracle 8 even though they are available. The
:class:`~sqlalchemy.types.NVARCHAR` and
:class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate
NVARCHAR2 and NCLOB.
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
encodes all Python unicode objects to "string" before passing in as bind
parameters.
Synonym/DBLINK Reflection
-------------------------
When using reflection with Table objects, the dialect can optionally search
for tables indicated by synonyms, either in local or remote schemas or
accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as
a keyword argument to the :class:`_schema.Table` construct::
some_table = Table('some_table', autoload_with=some_engine,
oracle_resolve_synonyms=True)
When this flag is set, the given name (such as ``some_table`` above) will
be searched not just in the ``ALL_TABLES`` view, but also within the
``ALL_SYNONYMS`` view to see if this name is actually a synonym to another
name. If the synonym is located and refers to a DBLINK, the oracle dialect
knows how to locate the table's information using DBLINK syntax (e.g.
``@dblink``).
``oracle_resolve_synonyms`` is accepted wherever reflection arguments are
accepted, including methods such as :meth:`_schema.MetaData.reflect` and
:meth:`_reflection.Inspector.get_columns`.
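A minimal sketch using :meth:`_schema.MetaData.reflect` (the DSN is
illustrative)::

    from sqlalchemy import MetaData, create_engine

    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn")
    metadata = MetaData()
    metadata.reflect(engine, oracle_resolve_synonyms=True)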
If synonyms are not in use, this flag should be left disabled.
.. _oracle_constraint_reflection:
Constraint Reflection
---------------------
The Oracle dialect can return information about foreign key, unique, and
CHECK constraints, as well as indexes on tables.
Raw information regarding these constraints can be acquired using
:meth:`_reflection.Inspector.get_foreign_keys`,
:meth:`_reflection.Inspector.get_unique_constraints`,
:meth:`_reflection.Inspector.get_check_constraints`, and
:meth:`_reflection.Inspector.get_indexes`.
.. versionchanged:: 1.2 The Oracle dialect can now reflect UNIQUE and
CHECK constraints.
When using reflection at the :class:`_schema.Table` level, the
:class:`_schema.Table`
will also include these constraints.
Note the following caveats:
* When using the :meth:`_reflection.Inspector.get_check_constraints` method,
Oracle
builds a special "IS NOT NULL" constraint for columns that specify
"NOT NULL". This constraint is **not** returned by default; to include
the "IS NOT NULL" constraints, pass the flag ``include_all=True``::
from sqlalchemy import create_engine, inspect
engine = create_engine("oracle+cx_oracle://s:t@dsn")
inspector = inspect(engine)
all_check_constraints = inspector.get_check_constraints(
"some_table", include_all=True)
* in most cases, when reflecting a :class:`_schema.Table`,
a UNIQUE constraint will
**not** be available as a :class:`.UniqueConstraint` object, as Oracle
mirrors unique constraints with a UNIQUE index in most cases (the exception
seems to be when two or more unique constraints represent the same columns);
the :class:`_schema.Table` will instead represent these using
:class:`.Index`
with the ``unique=True`` flag set.
* Oracle creates an implicit index for the primary key of a table; this index
is **excluded** from all index results.
* the list of columns reflected for an index will not include column names
that start with SYS_NC.
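As a minimal sketch of the unique-constraint caveat above (connection details
are illustrative), reflected unique constraints appear as unique
:class:`.Index` objects::

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn")
    some_table = Table("some_table", MetaData(), autoload_with=engine)

    # unique constraints typically come back as unique indexes
    for index in some_table.indexes:
        print(index.name, index.unique, [c.name for c in index.columns])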
Table names with SYSTEM/SYSAUX tablespaces
-------------------------------------------
The :meth:`_reflection.Inspector.get_table_names` and
:meth:`_reflection.Inspector.get_temp_table_names`
methods each return a list of table names for the current engine. These methods
are also part of the reflection which occurs within an operation such as
:meth:`_schema.MetaData.reflect`. By default,
these operations exclude the ``SYSTEM``
and ``SYSAUX`` tablespaces from the operation. In order to change this, the
default list of tablespaces excluded can be changed at the engine level using
the ``exclude_tablespaces`` parameter::
# exclude SYSAUX and SOME_TABLESPACE, but not SYSTEM
e = create_engine(
"oracle://scott:tiger@xe",
exclude_tablespaces=["SYSAUX", "SOME_TABLESPACE"])
.. versionadded:: 1.1
DateTime Compatibility
----------------------
Oracle has no datatype known as ``DATETIME``; it instead has only ``DATE``,
which can actually store a date and time value. For this reason, the Oracle
dialect provides a type :class:`_oracle.DATE` which is a subclass of
:class:`.DateTime`. This type has no special behavior, and is only
present as a "marker" for this type; additionally, when a database column
is reflected and the type is reported as ``DATE``, the time-supporting
:class:`_oracle.DATE` type is used.
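A minimal sketch of this reflection behavior (the connection details and
column name are illustrative)::

    from sqlalchemy import MetaData, Table, create_engine
    from sqlalchemy.dialects import oracle

    engine = create_engine("oracle+cx_oracle://scott:tiger@dsn")
    t = Table("mytable", MetaData(), autoload_with=engine)

    # a column created as DATE reflects as the time-capable oracle.DATE
    assert isinstance(t.c.date_col.type, oracle.DATE)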
.. versionchanged:: 0.9.4 Added :class:`_oracle.DATE` to subclass
:class:`.DateTime`. This is a change as previous versions
would reflect a ``DATE`` column as :class:`_types.DATE`, which subclasses
:class:`.Date`. The only significance here is for schemes that are
examining the type of column for use in special Python translations or
for migrating schemas to other database backends.
.. _oracle_table_options:
Oracle Table Options
-------------------------
The CREATE TABLE phrase supports the following options with Oracle
in conjunction with the :class:`_schema.Table` construct:
* ``ON COMMIT``::
Table(
"some_table", metadata, ...,
prefixes=['GLOBAL TEMPORARY'], oracle_on_commit='PRESERVE ROWS')
.. versionadded:: 1.0.0
* ``COMPRESS``::
Table('mytable', metadata, Column('data', String(32)),
oracle_compress=True)
Table('mytable', metadata, Column('data', String(32)),
oracle_compress=6)
The ``oracle_compress`` parameter accepts either an integer compression
level, or ``True`` to use the default compression level.
.. versionadded:: 1.0.0
.. _oracle_index_options:
Oracle Specific Index Options
-----------------------------
Bitmap Indexes
~~~~~~~~~~~~~~
You can specify the ``oracle_bitmap`` parameter to create a bitmap index
instead of a B-tree index::
Index('my_index', my_table.c.data, oracle_bitmap=True)
Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not
check for such limitations, only the database will.
.. versionadded:: 1.0.0
Index compression
~~~~~~~~~~~~~~~~~
Oracle has a more efficient storage mode for indexes containing lots of
repeated values. Use the ``oracle_compress`` parameter to turn on key
compression::
Index('my_index', my_table.c.data, oracle_compress=True)
Index('my_index', my_table.c.data1, my_table.c.data2, unique=True,
oracle_compress=1)
The ``oracle_compress`` parameter accepts either an integer specifying the
number of prefix columns to compress, or ``True`` to use the default (all
columns for non-unique indexes, all but the last column for unique indexes).
.. versionadded:: 1.0.0
""" # noqa
from itertools import groupby
import re
from ... import Computed
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import util
from ...engine import default
from ...engine import reflection
from ...sql import compiler
from ...sql import expression
from ...sql import sqltypes
from ...sql import util as sql_util
from ...sql import visitors
from ...types import BLOB
from ...types import CHAR
from ...types import CLOB
from ...types import FLOAT
from ...types import INTEGER
from ...types import NCHAR
from ...types import NVARCHAR
from ...types import TIMESTAMP
from ...types import VARCHAR
from ...util import compat
RESERVED_WORDS = set(
"SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN "
"DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED "
"ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE "
"ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE "
"BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES "
"AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS "
"NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER "
"CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR "
"DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL".split()
)
NO_ARG_FNS = set(
"UID CURRENT_DATE SYSDATE USER " "CURRENT_TIME CURRENT_TIMESTAMP".split()
)
class RAW(sqltypes._Binary):
__visit_name__ = "RAW"
OracleRaw = RAW
class NCLOB(sqltypes.Text):
__visit_name__ = "NCLOB"
class VARCHAR2(VARCHAR):
__visit_name__ = "VARCHAR2"
NVARCHAR2 = NVARCHAR
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
__visit_name__ = "NUMBER"
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
super(NUMBER, self).__init__(
precision=precision, scale=scale, asdecimal=asdecimal
)
def adapt(self, impltype):
ret = super(NUMBER, self).adapt(impltype)
# leave a hint for the DBAPI handler
ret._is_oracle_number = True
return ret
@property
def _type_affinity(self):
if bool(self.scale and self.scale > 0):
return sqltypes.Numeric
else:
return sqltypes.Integer
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = "DOUBLE_PRECISION"
class BINARY_DOUBLE(sqltypes.Float):
__visit_name__ = "BINARY_DOUBLE"
class BINARY_FLOAT(sqltypes.Float):
__visit_name__ = "BINARY_FLOAT"
class BFILE(sqltypes.LargeBinary):
__visit_name__ = "BFILE"
class LONG(sqltypes.Text):
__visit_name__ = "LONG"
class DATE(sqltypes.DateTime):
"""Provide the oracle DATE type.
This type has no special Python behavior, except that it subclasses
:class:`_types.DateTime`; this is to suit the fact that the Oracle
``DATE`` type supports a time value.
.. versionadded:: 0.9.4
"""
__visit_name__ = "DATE"
def _compare_type_affinity(self, other):
return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
__visit_name__ = "INTERVAL"
def __init__(self, day_precision=None, second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs.
        :param day_precision: the day precision value. This is the number of
          digits to store for the day field. Defaults to "2".
        :param second_precision: the second precision value. This is the
          number of digits to store for the fractional seconds field.
          Defaults to "6".
"""
self.day_precision = day_precision
self.second_precision = second_precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(
day_precision=interval.day_precision,
second_precision=interval.second_precision,
)
@property
def _type_affinity(self):
return sqltypes.Interval
def as_generic(self, allow_nulltype=False):
return sqltypes.Interval(
native=True,
second_precision=self.second_precision,
day_precision=self.day_precision,
)
def coerce_compared_value(self, op, value):
return self
class ROWID(sqltypes.TypeEngine):
"""Oracle ROWID type.
When used in a cast() or similar, generates ROWID.
"""
__visit_name__ = "ROWID"
class _OracleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
colspecs = {
sqltypes.Boolean: _OracleBoolean,
sqltypes.Interval: INTERVAL,
sqltypes.DateTime: DATE,
}
ischema_names = {
"VARCHAR2": VARCHAR,
"NVARCHAR2": NVARCHAR,
"CHAR": CHAR,
"NCHAR": NCHAR,
"DATE": DATE,
"NUMBER": NUMBER,
"BLOB": BLOB,
"BFILE": BFILE,
"CLOB": CLOB,
"NCLOB": NCLOB,
"TIMESTAMP": TIMESTAMP,
"TIMESTAMP WITH TIME ZONE": TIMESTAMP,
"INTERVAL DAY TO SECOND": INTERVAL,
"RAW": RAW,
"FLOAT": FLOAT,
"DOUBLE PRECISION": DOUBLE_PRECISION,
"LONG": LONG,
"BINARY_DOUBLE": BINARY_DOUBLE,
"BINARY_FLOAT": BINARY_FLOAT,
}
class OracleTypeCompiler(compiler.GenericTypeCompiler):
# Note:
# Oracle DATE == DATETIME
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
def visit_datetime(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_unicode(self, type_, **kw):
if self.dialect._use_nchar_for_unicode:
return self.visit_NVARCHAR2(type_, **kw)
else:
return self.visit_VARCHAR2(type_, **kw)
def visit_INTERVAL(self, type_, **kw):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None and "(%d)" % type_.day_precision or "",
type_.second_precision is not None
and "(%d)" % type_.second_precision
or "",
)
def visit_LONG(self, type_, **kw):
return "LONG"
def visit_TIMESTAMP(self, type_, **kw):
if type_.timezone:
return "TIMESTAMP WITH TIME ZONE"
else:
return "TIMESTAMP"
def visit_DOUBLE_PRECISION(self, type_, **kw):
return self._generate_numeric(type_, "DOUBLE PRECISION", **kw)
def visit_BINARY_DOUBLE(self, type_, **kw):
return self._generate_numeric(type_, "BINARY_DOUBLE", **kw)
def visit_BINARY_FLOAT(self, type_, **kw):
return self._generate_numeric(type_, "BINARY_FLOAT", **kw)
def visit_FLOAT(self, type_, **kw):
# don't support conversion between decimal/binary
# precision yet
kw["no_precision"] = True
return self._generate_numeric(type_, "FLOAT", **kw)
def visit_NUMBER(self, type_, **kw):
return self._generate_numeric(type_, "NUMBER", **kw)
def _generate_numeric(
self, type_, name, precision=None, scale=None, no_precision=False, **kw
):
if precision is None:
precision = type_.precision
if scale is None:
scale = getattr(type_, "scale", None)
if no_precision or precision is None:
return name
elif scale is None:
n = "%(name)s(%(precision)s)"
return n % {"name": name, "precision": precision}
else:
n = "%(name)s(%(precision)s, %(scale)s)"
return n % {"name": name, "precision": precision, "scale": scale}
def visit_string(self, type_, **kw):
return self.visit_VARCHAR2(type_, **kw)
def visit_VARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, "", "2")
def visit_NVARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, "N", "2")
visit_NVARCHAR = visit_NVARCHAR2
def visit_VARCHAR(self, type_, **kw):
return self._visit_varchar(type_, "", "")
def _visit_varchar(self, type_, n, num):
if not type_.length:
return "%(n)sVARCHAR%(two)s" % {"two": num, "n": n}
elif not n and self.dialect._supports_char_length:
varchar = "VARCHAR%(two)s(%(length)s CHAR)"
return varchar % {"length": type_.length, "two": num}
else:
varchar = "%(n)sVARCHAR%(two)s(%(length)s)"
return varchar % {"length": type_.length, "two": num, "n": n}
def visit_text(self, type_, **kw):
return self.visit_CLOB(type_, **kw)
def visit_unicode_text(self, type_, **kw):
if self.dialect._use_nchar_for_unicode:
return self.visit_NCLOB(type_, **kw)
else:
return self.visit_CLOB(type_, **kw)
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_NUMBER(type_, precision=19, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_RAW(self, type_, **kw):
if type_.length:
return "RAW(%(length)s)" % {"length": type_.length}
else:
return "RAW"
def visit_ROWID(self, type_, **kw):
return "ROWID"
class OracleCompiler(compiler.SQLCompiler):
"""Oracle compiler modifies the lexical structure of Select
statements to work under non-ANSI configured Oracle databases, if
the use_ansi flag is False.
"""
compound_keywords = util.update_copy(
compiler.SQLCompiler.compound_keywords,
{expression.CompoundSelect.EXCEPT: "MINUS"},
)
def __init__(self, *args, **kwargs):
self.__wheres = {}
super(OracleCompiler, self).__init__(*args, **kwargs)
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_char_length_func(self, fn, **kw):
return "LENGTH" + self.function_argspec(fn, **kw)
def visit_match_op_binary(self, binary, operator, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def get_cte_preamble(self, recursive):
return "WITH"
def get_select_hint_text(self, byfroms):
return " ".join("/*+ %s */" % text for table, text in byfroms.items())
def function_argspec(self, fn, **kw):
if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def visit_function(self, func, **kw):
text = super(OracleCompiler, self).visit_function(func, **kw)
if kw.get("asfrom", False):
text = "TABLE (%s)" % func
return text
def visit_table_valued_column(self, element, **kw):
text = super(OracleCompiler, self).visit_table_valued_column(element, **kw)
text = "COLUMN_VALUE " + text
return text
def default_from(self):
"""Called when a ``SELECT`` statement has no froms,
and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
"""
return " FROM DUAL"
def visit_join(self, join, from_linter=None, **kwargs):
if self.dialect.use_ansi:
return compiler.SQLCompiler.visit_join(
self, join, from_linter=from_linter, **kwargs
)
else:
if from_linter:
from_linter.edges.add((join.left, join.right))
kwargs["asfrom"] = True
if isinstance(join.right, expression.FromGrouping):
right = join.right.element
else:
right = join.right
return (
self.process(join.left, from_linter=from_linter, **kwargs)
+ ", "
+ self.process(right, from_linter=from_linter, **kwargs)
)
def _get_nonansi_join_whereclause(self, froms):
clauses = []
def visit_join(join):
if join.isouter:
# https://docs.oracle.com/database/121/SQLRF/queries006.htm#SQLRF52354
# "apply the outer join operator (+) to all columns of B in
# the join condition in the WHERE clause" - that is,
# unconditionally regardless of operator or the other side
def visit_binary(binary):
if isinstance(
binary.left, expression.ColumnClause
) and join.right.is_derived_from(binary.left.table):
binary.left = _OuterJoinColumn(binary.left)
elif isinstance(
binary.right, expression.ColumnClause
) and join.right.is_derived_from(binary.right.table):
binary.right = _OuterJoinColumn(binary.right)
clauses.append(
visitors.cloned_traverse(
join.onclause, {}, {"binary": visit_binary}
)
)
else:
clauses.append(join.onclause)
for j in join.left, join.right:
if isinstance(j, expression.Join):
visit_join(j)
elif isinstance(j, expression.FromGrouping):
visit_join(j.element)
for f in froms:
if isinstance(f, expression.Join):
visit_join(f)
if not clauses:
return None
else:
return sql.and_(*clauses)
def visit_outer_join_column(self, vc, **kw):
return self.process(vc.column, **kw) + "(+)"
def visit_sequence(self, seq, **kw):
return self.preparer.format_sequence(seq) + ".nextval"
def get_render_as_alias_suffix(self, alias_name_text):
"""Oracle doesn't like ``FROM table AS alias``"""
return " " + alias_name_text
def returning_clause(self, stmt, returning_cols):
columns = []
binds = []
for i, column in enumerate(expression._select_iterables(returning_cols)):
if (
self.isupdate
and isinstance(column, sa_schema.Column)
and isinstance(column.server_default, Computed)
and not self.dialect._supports_update_returning_computed_cols
):
util.warn(
"Computed columns don't work with Oracle UPDATE "
"statements that use RETURNING; the value of the column "
"*before* the UPDATE takes place is returned. It is "
"advised to not use RETURNING with an Oracle computed "
"column. Consider setting implicit_returning to False on "
"the Table object in order to avoid implicit RETURNING "
"clauses from being generated for this Table."
)
if column.type._has_column_expression:
col_expr = column.type.column_expression(column)
else:
col_expr = column
outparam = sql.outparam("ret_%d" % i, type_=column.type)
self.binds[outparam.key] = outparam
binds.append(self.bindparam_string(self._truncate_bindparam(outparam)))
# ensure the ExecutionContext.get_out_parameters() method is
# *not* called; the cx_Oracle dialect wants to handle these
# parameters separately
self.has_out_parameters = False
columns.append(self.process(col_expr, within_columns_clause=False))
self._add_to_result_map(
getattr(col_expr, "name", col_expr._anon_name_label),
getattr(col_expr, "name", col_expr._anon_name_label),
(
column,
getattr(column, "name", None),
getattr(column, "key", None),
),
column.type,
)
return "RETURNING " + ", ".join(columns) + " INTO " + ", ".join(binds)
def translate_select_structure(self, select_stmt, **kwargs):
select = select_stmt
if not getattr(select, "_oracle_visit", None):
if not self.dialect.use_ansi:
froms = self._display_froms_for_select(
select, kwargs.get("asfrom", False)
)
whereclause = self._get_nonansi_join_whereclause(froms)
if whereclause is not None:
select = select.where(whereclause)
select._oracle_visit = True
# if fetch is used this is not needed
if select._has_row_limiting_clause and select._fetch_clause is None:
limit_clause = select._limit_clause
offset_clause = select._offset_clause
if select._simple_int_clause(limit_clause):
limit_clause = limit_clause.render_literal_execute()
if select._simple_int_clause(offset_clause):
offset_clause = offset_clause.render_literal_execute()
# currently using form at:
# https://blogs.oracle.com/oraclemagazine/\
# on-rownum-and-limiting-results
orig_select = select
select = select._generate()
select._oracle_visit = True
# add expressions to accommodate FOR UPDATE OF
for_update = select._for_update_arg
if for_update is not None and for_update.of:
for_update = for_update._clone()
for_update._copy_internals()
for elem in for_update.of:
if not select.selected_columns.contains_column(elem):
select = select.add_columns(elem)
# Wrap the middle select and add the hint
inner_subquery = select.alias()
limitselect = sql.select(
*[
c
for c in inner_subquery.c
if orig_select.selected_columns.corresponding_column(c)
is not None
]
)
if (
limit_clause is not None
and self.dialect.optimize_limits
and select._simple_int_clause(limit_clause)
):
limitselect = limitselect.prefix_with(
expression.text(
"/*+ FIRST_ROWS(%s) */"
% self.process(limit_clause, **kwargs)
)
)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
# add expressions to accommodate FOR UPDATE OF
if for_update is not None and for_update.of:
adapter = sql_util.ClauseAdapter(inner_subquery)
for_update.of = [adapter.traverse(elem) for elem in for_update.of]
# If needed, add the limiting clause
if limit_clause is not None:
if select._simple_int_clause(limit_clause) and (
offset_clause is None
or select._simple_int_clause(offset_clause)
):
max_row = limit_clause
if offset_clause is not None:
max_row = max_row + offset_clause
else:
max_row = limit_clause
if offset_clause is not None:
max_row = max_row + offset_clause
limitselect = limitselect.where(
sql.literal_column("ROWNUM") <= max_row
)
# If needed, add the ora_rn, and wrap again with offset.
if offset_clause is None:
limitselect._for_update_arg = for_update
select = limitselect
else:
limitselect = limitselect.add_columns(
sql.literal_column("ROWNUM").label("ora_rn")
)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
if for_update is not None and for_update.of:
limitselect_cols = limitselect.selected_columns
for elem in for_update.of:
if limitselect_cols.corresponding_column(elem) is None:
limitselect = limitselect.add_columns(elem)
limit_subquery = limitselect.alias()
origselect_cols = orig_select.selected_columns
offsetselect = sql.select(
*[
c
for c in limit_subquery.c
if origselect_cols.corresponding_column(c) is not None
]
)
offsetselect._oracle_visit = True
offsetselect._is_wrapper = True
if for_update is not None and for_update.of:
adapter = sql_util.ClauseAdapter(limit_subquery)
for_update.of = [
adapter.traverse(elem) for elem in for_update.of
]
offsetselect = offsetselect.where(
sql.literal_column("ora_rn") > offset_clause
)
offsetselect._for_update_arg = for_update
select = offsetselect
return select
def limit_clause(self, select, **kw):
return ""
def visit_empty_set_expr(self, type_):
return "SELECT 1 FROM DUAL WHERE 1!=1"
def for_update_clause(self, select, **kw):
if self.is_subquery():
return ""
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tmp += " OF " + ", ".join(
self.process(elem, **kw) for elem in select._for_update_arg.of
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "DECODE(%s, %s, 0, 1) = 1" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
return "DECODE(%s, %s, 0, 1) = 0" % (
self.process(binary.left),
self.process(binary.right),
)
def _get_regexp_args(self, binary, kw):
string = self.process(binary.left, **kw)
pattern = self.process(binary.right, **kw)
flags = binary.modifiers["flags"]
if flags is not None:
flags = self.process(flags, **kw)
return string, pattern, flags
def visit_regexp_match_op_binary(self, binary, operator, **kw):
string, pattern, flags = self._get_regexp_args(binary, kw)
if flags is None:
return "REGEXP_LIKE(%s, %s)" % (string, pattern)
else:
return "REGEXP_LIKE(%s, %s, %s)" % (string, pattern, flags)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_regexp_match_op_binary(binary, operator, **kw)
def visit_regexp_replace_op_binary(self, binary, operator, **kw):
string, pattern, flags = self._get_regexp_args(binary, kw)
replacement = self.process(binary.modifiers["replacement"], **kw)
if flags is None:
return "REGEXP_REPLACE(%s, %s, %s)" % (
string,
pattern,
replacement,
)
else:
return "REGEXP_REPLACE(%s, %s, %s, %s)" % (
string,
pattern,
replacement,
flags,
)
class OracleDDLCompiler(compiler.DDLCompiler):
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
        # it's only available via triggers
# http://asktom.oracle.com/tkyte/update_cascade/index.html
if constraint.onupdate is not None:
util.warn(
"Oracle does not contain native UPDATE CASCADE "
"functionality - onupdates will not be rendered for foreign "
"keys. Consider using deferrable=True, initially='deferred' "
"or triggers."
)
return text
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS ''" % self.preparer.format_table(drop.element)
def visit_create_index(self, create):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
if index.dialect_options["oracle"]["bitmap"]:
text += "BITMAP "
text += "INDEX %s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=True),
", ".join(
self.sql_compiler.process(expr, include_table=False, literal_binds=True)
for expr in index.expressions
),
)
if index.dialect_options["oracle"]["compress"] is not False:
if index.dialect_options["oracle"]["compress"] is True:
text += " COMPRESS"
else:
text += " COMPRESS %d" % (index.dialect_options["oracle"]["compress"])
return text
def post_create_table(self, table):
table_opts = []
opts = table.dialect_options["oracle"]
if opts["on_commit"]:
on_commit_options = opts["on_commit"].replace("_", " ").upper()
table_opts.append("\n ON COMMIT %s" % on_commit_options)
if opts["compress"]:
if opts["compress"] is True:
table_opts.append("\n COMPRESS")
else:
table_opts.append("\n COMPRESS FOR %s" % (opts["compress"]))
return "".join(table_opts)
def get_identity_options(self, identity_options):
text = super(OracleDDLCompiler, self).get_identity_options(identity_options)
text = text.replace("NO MINVALUE", "NOMINVALUE")
text = text.replace("NO MAXVALUE", "NOMAXVALUE")
text = text.replace("NO CYCLE", "NOCYCLE")
text = text.replace("NO ORDER", "NOORDER")
return text
def visit_computed_column(self, generated):
text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
if generated.persisted is True:
raise exc.CompileError(
"Oracle computed columns do not support 'stored' persistence; "
"set the 'persisted' flag to None or False for Oracle support."
)
elif generated.persisted is False:
text += " VIRTUAL"
return text
def visit_identity_column(self, identity, **kw):
if identity.always is None:
kind = ""
else:
kind = "ALWAYS" if identity.always else "BY DEFAULT"
text = "GENERATED %s" % kind
if identity.on_null:
text += " ON NULL"
text += " AS IDENTITY"
options = self.get_identity_options(identity)
if options:
text += " (%s)" % options
return text
class OracleIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = {x.lower() for x in RESERVED_WORDS}
illegal_initial_characters = {str(dig) for dig in range(0, 10)}.union(["_", "$"])
def _bindparam_requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (
lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
)
def format_savepoint(self, savepoint):
name = savepoint.ident.lstrip("_")
return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
class OracleExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar(
"SELECT "
+ self.identifier_preparer.format_sequence(seq)
+ ".nextval FROM DUAL",
type_,
)
class OracleDialect(default.DefaultDialect):
name = "oracle"
supports_statement_cache = True
supports_alter = True
supports_unicode_statements = False
supports_unicode_binds = False
max_identifier_length = 128
supports_simple_order_by_label = False
cte_follows_insert = True
supports_sequences = True
sequences_optional = False
postfetch_lastrowid = False
default_paramstyle = "named"
colspecs = colspecs
ischema_names = ischema_names
requires_name_normalize = True
supports_comments = True
supports_default_values = False
supports_default_metavalue = True
supports_empty_insert = False
supports_identity_columns = True
statement_compiler = OracleCompiler
ddl_compiler = OracleDDLCompiler
type_compiler = OracleTypeCompiler
preparer = OracleIdentifierPreparer
execution_ctx_cls = OracleExecutionContext
reflection_options = ("oracle_resolve_synonyms",)
_use_nchar_for_unicode = False
construct_arguments = [
(
sa_schema.Table,
{"resolve_synonyms": False, "on_commit": None, "compress": False},
),
(sa_schema.Index, {"bitmap": False, "compress": False}),
]
@util.deprecated_params(
use_binds_for_limits=(
"1.4",
"The ``use_binds_for_limits`` Oracle dialect parameter is "
"deprecated. The dialect now renders LIMIT /OFFSET integers "
"inline in all cases using a post-compilation hook, so that the "
"value is still represented by a 'bound parameter' on the Core "
"Expression side.",
)
)
def __init__(
self,
use_ansi=True,
optimize_limits=False,
use_binds_for_limits=None,
use_nchar_for_unicode=False,
exclude_tablespaces=("SYSTEM", "SYSAUX"),
**kwargs
):
default.DefaultDialect.__init__(self, **kwargs)
self._use_nchar_for_unicode = use_nchar_for_unicode
self.use_ansi = use_ansi
self.optimize_limits = optimize_limits
self.exclude_tablespaces = exclude_tablespaces
def initialize(self, connection):
super(OracleDialect, self).initialize(connection)
self.implicit_returning = self.__dict__.get(
"implicit_returning", self.server_version_info > (10,)
)
if self._is_oracle_8:
self.colspecs = self.colspecs.copy()
self.colspecs.pop(sqltypes.Interval)
self.use_ansi = False
self.supports_identity_columns = self.server_version_info >= (12,)
def _get_effective_compat_server_version_info(self, connection):
# dialect does not need compat levels below 12.2, so don't query
# in those cases
if self.server_version_info < (12, 2):
return self.server_version_info
try:
compat = connection.exec_driver_sql(
"SELECT value FROM v$parameter WHERE name = 'compatible'"
).scalar()
except exc.DBAPIError:
compat = None
if compat:
try:
return tuple(int(x) for x in compat.split("."))
except:
return self.server_version_info
else:
return self.server_version_info
@property
def _is_oracle_8(self):
return self.server_version_info and self.server_version_info < (9,)
@property
def _supports_table_compression(self):
return self.server_version_info and self.server_version_info >= (10, 1)
@property
def _supports_table_compress_for(self):
return self.server_version_info and self.server_version_info >= (11,)
@property
def _supports_char_length(self):
return not self._is_oracle_8
@property
def _supports_update_returning_computed_cols(self):
        # on version 18 this error is no longer present, while it happens on 11;
        # it may also work on versions before 18
return self.server_version_info and self.server_version_info >= (18,)
def do_release_savepoint(self, connection, name):
# Oracle does not support RELEASE SAVEPOINT
pass
def _check_max_identifier_length(self, connection):
if self._get_effective_compat_server_version_info(connection) < (
12,
2,
):
return 30
else:
# use the default
return None
def _check_unicode_returns(self, connection):
additional_tests = [
expression.cast(
expression.literal_column("'test nvarchar2 returns'"),
sqltypes.NVARCHAR(60),
)
]
return super(OracleDialect, self)._check_unicode_returns(
connection, additional_tests
)
_isolation_lookup = ["READ COMMITTED", "SERIALIZABLE"]
def get_isolation_level(self, connection):
raise NotImplementedError("implemented by cx_Oracle dialect")
def get_default_isolation_level(self, dbapi_conn):
try:
return self.get_isolation_level(dbapi_conn)
except NotImplementedError:
raise
except:
return "READ COMMITTED"
def set_isolation_level(self, connection, level):
raise NotImplementedError("implemented by cx_Oracle dialect")
def has_table(self, connection, table_name, schema=None):
self._ensure_has_table_connection(connection)
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text(
"SELECT table_name FROM all_tables "
"WHERE table_name = :name AND owner = :schema_name"
),
dict(
name=self.denormalize_name(table_name),
schema_name=self.denormalize_name(schema),
),
)
return cursor.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text(
"SELECT sequence_name FROM all_sequences "
"WHERE sequence_name = :name AND "
"sequence_owner = :schema_name"
),
dict(
name=self.denormalize_name(sequence_name),
schema_name=self.denormalize_name(schema),
),
)
return cursor.first() is not None
def _get_default_schema_name(self, connection):
return self.normalize_name(
connection.exec_driver_sql(
"select sys_context( 'userenv', 'current_schema' ) from dual"
).scalar()
)
def _resolve_synonym(
self,
connection,
desired_owner=None,
desired_synonym=None,
desired_table=None,
):
"""search for a local synonym matching the given desired owner/name.
if desired_owner is None, attempts to locate a distinct owner.
returns the actual name, owner, dblink name, and synonym name if
found.
"""
q = (
"SELECT owner, table_owner, table_name, db_link, "
"synonym_name FROM all_synonyms WHERE "
)
clauses = []
params = {}
if desired_synonym:
clauses.append("synonym_name = :synonym_name")
params["synonym_name"] = desired_synonym
if desired_owner:
clauses.append("owner = :desired_owner")
params["desired_owner"] = desired_owner
if desired_table:
clauses.append("table_name = :tname")
params["tname"] = desired_table
q += " AND ".join(clauses)
result = connection.execution_options(future_result=True).execute(
sql.text(q), params
)
if desired_owner:
row = result.mappings().first()
if row:
return (
row["table_name"],
row["table_owner"],
row["db_link"],
row["synonym_name"],
)
else:
return None, None, None, None
else:
rows = result.mappings().all()
if len(rows) > 1:
raise AssertionError(
"There are multiple tables visible to the schema, you "
"must specify owner"
)
elif len(rows) == 1:
row = rows[0]
return (
row["table_name"],
row["table_owner"],
row["db_link"],
row["synonym_name"],
)
else:
return None, None, None, None
@reflection.cache
def _prepare_reflection_args(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
if resolve_synonyms:
actual_name, owner, dblink, synonym = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(schema),
desired_synonym=self.denormalize_name(table_name),
)
else:
actual_name, owner, dblink, synonym = None, None, None, None
if not actual_name:
actual_name = self.denormalize_name(table_name)
if dblink:
# using user_db_links here since all_db_links appears
# to have more restricted permissions.
# http://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm
# will need to hear from more users if we are doing
# the right thing here. See [ticket:2619]
owner = connection.scalar(
sql.text("SELECT username FROM user_db_links " "WHERE db_link=:link"),
dict(link=dblink),
)
dblink = "@" + dblink
elif not owner:
owner = self.denormalize_name(schema or self.default_schema_name)
return (actual_name, owner, dblink or "", synonym)
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "SELECT username FROM all_users ORDER BY username"
cursor = connection.exec_driver_sql(s)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
# note that table_names() isn't loading DBLINKed or synonym'ed tables
if schema is None:
schema = self.default_schema_name
sql_str = "SELECT table_name FROM all_tables WHERE "
if self.exclude_tablespaces:
sql_str += "nvl(tablespace_name, 'no tablespace') " "NOT IN (%s) AND " % (
", ".join(["'%s'" % ts for ts in self.exclude_tablespaces])
)
sql_str += "OWNER = :owner " "AND IOT_NAME IS NULL " "AND DURATION IS NULL"
cursor = connection.execute(sql.text(sql_str), dict(owner=schema))
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
schema = self.denormalize_name(self.default_schema_name)
sql_str = "SELECT table_name FROM all_tables WHERE "
if self.exclude_tablespaces:
sql_str += "nvl(tablespace_name, 'no tablespace') " "NOT IN (%s) AND " % (
", ".join(["'%s'" % ts for ts in self.exclude_tablespaces])
)
sql_str += "OWNER = :owner " "AND IOT_NAME IS NULL " "AND DURATION IS NOT NULL"
cursor = connection.execute(sql.text(sql_str), dict(owner=schema))
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
cursor = connection.execute(s, dict(owner=self.denormalize_name(schema)))
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_sequence_names(self, connection, schema=None, **kw):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text(
"SELECT sequence_name FROM all_sequences "
"WHERE sequence_owner = :schema_name"
),
dict(schema_name=self.denormalize_name(schema)),
)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
options = {}
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
params = {"table_name": table_name}
columns = ["table_name"]
if self._supports_table_compression:
columns.append("compression")
if self._supports_table_compress_for:
columns.append("compress_for")
text = (
"SELECT %(columns)s "
"FROM ALL_TABLES%(dblink)s "
"WHERE table_name = :table_name"
)
if schema is not None:
params["owner"] = schema
text += " AND owner = :owner "
text = text % {"dblink": dblink, "columns": ", ".join(columns)}
result = connection.execute(sql.text(text), params)
enabled = dict(DISABLED=False, ENABLED=True)
row = result.first()
if row:
if "compression" in row._fields and enabled.get(row.compression, False):
if "compress_for" in row._fields:
options["oracle_compress"] = row.compress_for
else:
options["oracle_compress"] = True
return options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
columns = []
if self._supports_char_length:
char_length_col = "char_length"
else:
char_length_col = "data_length"
if self.server_version_info >= (12,):
identity_cols = """\
col.default_on_null,
(
SELECT id.generation_type || ',' || id.IDENTITY_OPTIONS
FROM ALL_TAB_IDENTITY_COLS%(dblink)s id
WHERE col.table_name = id.table_name
AND col.column_name = id.column_name
AND col.owner = id.owner
) AS identity_options""" % {
"dblink": dblink
}
else:
identity_cols = "NULL as default_on_null, NULL as identity_options"
params = {"table_name": table_name}
text = """
SELECT
col.column_name,
col.data_type,
col.%(char_length_col)s,
col.data_precision,
col.data_scale,
col.nullable,
col.data_default,
com.comments,
col.virtual_column,
%(identity_cols)s
FROM all_tab_cols%(dblink)s col
LEFT JOIN all_col_comments%(dblink)s com
ON col.table_name = com.table_name
AND col.column_name = com.column_name
AND col.owner = com.owner
WHERE col.table_name = :table_name
AND col.hidden_column = 'NO'
"""
if schema is not None:
params["owner"] = schema
text += " AND col.owner = :owner "
text += " ORDER BY col.column_id"
text = text % {
"dblink": dblink,
"char_length_col": char_length_col,
"identity_cols": identity_cols,
}
c = connection.execute(sql.text(text), params)
for row in c:
colname = self.normalize_name(row[0])
orig_colname = row[0]
coltype = row[1]
length = row[2]
precision = row[3]
scale = row[4]
nullable = row[5] == "Y"
default = row[6]
comment = row[7]
generated = row[8]
default_on_nul = row[9]
identity_options = row[10]
if coltype == "NUMBER":
if precision is None and scale == 0:
coltype = INTEGER()
else:
coltype = NUMBER(precision, scale)
elif coltype == "FLOAT":
# TODO: support "precision" here as "binary_precision"
coltype = FLOAT()
elif coltype in ("VARCHAR2", "NVARCHAR2", "CHAR", "NCHAR"):
coltype = self.ischema_names.get(coltype)(length)
elif "WITH TIME ZONE" in coltype:
coltype = TIMESTAMP(timezone=True)
else:
coltype = re.sub(r"\(\d+\)", "", coltype)
try:
coltype = self.ischema_names[coltype]
except KeyError:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (coltype, colname)
)
coltype = sqltypes.NULLTYPE
if generated == "YES":
computed = dict(sqltext=default)
default = None
else:
computed = None
if identity_options is not None:
identity = self._parse_identity_options(
identity_options, default_on_nul
)
default = None
else:
identity = None
cdict = {
"name": colname,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"comment": comment,
}
if orig_colname.lower() == orig_colname:
cdict["quote"] = True
if computed is not None:
cdict["computed"] = computed
if identity is not None:
cdict["identity"] = identity
columns.append(cdict)
return columns
def _parse_identity_options(self, identity_options, default_on_nul):
# identity_options is a string that starts with 'ALWAYS,' or
# 'BY DEFAULT,' and continues with
# START WITH: 1, INCREMENT BY: 1, MAX_VALUE: 123, MIN_VALUE: 1,
# CYCLE_FLAG: N, CACHE_SIZE: 1, ORDER_FLAG: N, SCALE_FLAG: N,
# EXTEND_FLAG: N, SESSION_FLAG: N, KEEP_VALUE: N
parts = [p.strip() for p in identity_options.split(",")]
identity = {
"always": parts[0] == "ALWAYS",
"on_null": default_on_nul == "YES",
}
for part in parts[1:]:
option, value = part.split(":")
value = value.strip()
if "START WITH" in option:
identity["start"] = compat.long_type(value)
elif "INCREMENT BY" in option:
identity["increment"] = compat.long_type(value)
elif "MAX_VALUE" in option:
identity["maxvalue"] = compat.long_type(value)
elif "MIN_VALUE" in option:
identity["minvalue"] = compat.long_type(value)
elif "CYCLE_FLAG" in option:
identity["cycle"] = value == "Y"
elif "CACHE_SIZE" in option:
identity["cache"] = compat.long_type(value)
elif "ORDER_FLAG" in option:
identity["order"] = value == "Y"
return identity
@reflection.cache
def get_table_comment(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
if not schema:
schema = self.default_schema_name
COMMENT_SQL = """
SELECT comments
FROM all_tab_comments
WHERE table_name = :table_name AND owner = :schema_name
"""
c = connection.execute(
sql.text(COMMENT_SQL),
dict(table_name=table_name, schema_name=schema),
)
return {"text": c.scalar()}
@reflection.cache
def get_indexes(
self,
connection,
table_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
indexes = []
params = {"table_name": table_name}
text = (
"SELECT a.index_name, a.column_name, "
"\nb.index_type, b.uniqueness, b.compression, b.prefix_length "
"\nFROM ALL_IND_COLUMNS%(dblink)s a, "
"\nALL_INDEXES%(dblink)s b "
"\nWHERE "
"\na.index_name = b.index_name "
"\nAND a.table_owner = b.table_owner "
"\nAND a.table_name = b.table_name "
"\nAND a.table_name = :table_name "
)
if schema is not None:
params["schema"] = schema
text += "AND a.table_owner = :schema "
text += "ORDER BY a.index_name, a.column_position"
text = text % {"dblink": dblink}
q = sql.text(text)
rp = connection.execute(q, params)
indexes = []
last_index_name = None
pk_constraint = self.get_pk_constraint(
connection,
table_name,
schema,
resolve_synonyms=resolve_synonyms,
dblink=dblink,
info_cache=kw.get("info_cache"),
)
uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
enabled = dict(DISABLED=False, ENABLED=True)
oracle_sys_col = re.compile(r"SYS_NC\d+\$", re.IGNORECASE)
index = None
for rset in rp:
index_name_normalized = self.normalize_name(rset.index_name)
# skip primary key index. This is refined as of
            # [ticket:5421]. Note that ALL_INDEXES.GENERATED will be "Y"
# if the name of this index was generated by Oracle, however
# if a named primary key constraint was created then this flag
# is false.
if pk_constraint and index_name_normalized == pk_constraint["name"]:
continue
if rset.index_name != last_index_name:
index = dict(
name=index_name_normalized,
column_names=[],
dialect_options={},
)
indexes.append(index)
index["unique"] = uniqueness.get(rset.uniqueness, False)
if rset.index_type in ("BITMAP", "FUNCTION-BASED BITMAP"):
index["dialect_options"]["oracle_bitmap"] = True
if enabled.get(rset.compression, False):
index["dialect_options"]["oracle_compress"] = rset.prefix_length
# filter out Oracle SYS_NC names. could also do an outer join
# to the all_tab_columns table and check for real col names there.
if not oracle_sys_col.match(rset.column_name):
index["column_names"].append(self.normalize_name(rset.column_name))
last_index_name = rset.index_name
return indexes
@reflection.cache
def _get_constraint_data(
self, connection, table_name, schema=None, dblink="", **kw
):
params = {"table_name": table_name}
text = (
"SELECT"
"\nac.constraint_name," # 0
"\nac.constraint_type," # 1
"\nloc.column_name AS local_column," # 2
"\nrem.table_name AS remote_table," # 3
"\nrem.column_name AS remote_column," # 4
"\nrem.owner AS remote_owner," # 5
"\nloc.position as loc_pos," # 6
"\nrem.position as rem_pos," # 7
"\nac.search_condition," # 8
"\nac.delete_rule" # 9
"\nFROM all_constraints%(dblink)s ac,"
"\nall_cons_columns%(dblink)s loc,"
"\nall_cons_columns%(dblink)s rem"
"\nWHERE ac.table_name = :table_name"
"\nAND ac.constraint_type IN ('R','P', 'U', 'C')"
)
if schema is not None:
params["owner"] = schema
text += "\nAND ac.owner = :owner"
text += (
"\nAND ac.owner = loc.owner"
"\nAND ac.constraint_name = loc.constraint_name"
"\nAND ac.r_owner = rem.owner(+)"
"\nAND ac.r_constraint_name = rem.constraint_name(+)"
"\nAND (rem.position IS NULL or loc.position=rem.position)"
"\nORDER BY ac.constraint_name, loc.position"
)
text = text % {"dblink": dblink}
rp = connection.execute(sql.text(text), params)
constraint_data = rp.fetchall()
return constraint_data
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
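        """Return primary key information for ``table_name``.

        Collects the columns of the ``P`` constraint from the shared
        constraint data and returns the normalized constraint name together
        with the constrained column names.
        """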
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
pkeys = []
constraint_name = None
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
for row in constraint_data:
(
cons_name,
cons_type,
local_column,
remote_table,
remote_column,
remote_owner,
) = row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == "P":
if constraint_name is None:
constraint_name = self.normalize_name(cons_name)
pkeys.append(local_column)
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
requested_schema = schema # to check later on
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
def fkey_rec():
return {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
"options": {},
}
fkeys = util.defaultdict(fkey_rec)
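        # Rows sharing a constraint name describe one foreign key; each "R"
        # row contributes a (local column, remote column) pair in position
        # order, keeping the two lists below aligned.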
for row in constraint_data:
(
cons_name,
cons_type,
local_column,
remote_table,
remote_column,
remote_owner,
) = row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
cons_name = self.normalize_name(cons_name)
if cons_type == "R":
if remote_table is None:
# ticket 363
util.warn(
(
"Got 'None' querying 'table_name' from "
"all_cons_columns%(dblink)s - does the user have "
"proper rights to the table?"
)
% {"dblink": dblink}
)
continue
rec = fkeys[cons_name]
rec["name"] = cons_name
local_cols, remote_cols = (
rec["constrained_columns"],
rec["referred_columns"],
)
if not rec["referred_table"]:
if resolve_synonyms:
(
ref_remote_name,
ref_remote_owner,
ref_dblink,
ref_synonym,
) = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(remote_owner),
desired_table=self.denormalize_name(remote_table),
)
if ref_synonym:
remote_table = self.normalize_name(ref_synonym)
remote_owner = self.normalize_name(ref_remote_owner)
rec["referred_table"] = remote_table
if (
requested_schema is not None
or self.denormalize_name(remote_owner) != schema
):
rec["referred_schema"] = remote_owner
if row[9] != "NO ACTION":
rec["options"]["ondelete"] = row[9]
local_cols.append(local_column)
remote_cols.append(remote_column)
return list(fkeys.values())
@reflection.cache
def get_unique_constraints(self, connection, table_name, schema=None, **kw):
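        """Return unique constraint information for ``table_name``.

        Filters the shared constraint data for type ``U``, groups the rows
        by constraint name, and sets ``duplicates_index`` when a reflected
        index of the same name exists.
        """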
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
unique_keys = filter(lambda x: x[1] == "U", constraint_data)
uniques_group = groupby(unique_keys, lambda x: x[0])
index_names = {
ix["name"] for ix in self.get_indexes(connection, table_name, schema=schema)
}
return [
{
"name": name,
"column_names": cols,
"duplicates_index": name if name in index_names else None,
}
for name, cols in [
[
self.normalize_name(i[0]),
[self.normalize_name(x[2]) for x in i[1]],
]
for i in uniques_group
]
]
@reflection.cache
def get_view_definition(
self,
connection,
view_name,
schema=None,
resolve_synonyms=False,
dblink="",
**kw
):
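        """Return the source text of ``view_name`` from ALL_VIEWS.

        Returns ``None`` when the view does not exist in the resolved
        schema.
        """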
info_cache = kw.get("info_cache")
(view_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
view_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
params = {"view_name": view_name}
text = "SELECT text FROM all_views WHERE view_name=:view_name"
if schema is not None:
text += " AND owner = :schema"
params["schema"] = schema
rp = connection.execute(sql.text(text), params).scalar()
if rp:
if util.py2k:
rp = rp.decode(self.encoding)
return rp
else:
return None
@reflection.cache
def get_check_constraints(
self, connection, table_name, schema=None, include_all=False, **kw
):
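        """Return check constraint information for ``table_name``.

        Filters the shared constraint data for type ``C``; unless
        ``include_all`` is set, constraints that merely express
        ``<column> IS NOT NULL`` are omitted.
        """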
resolve_synonyms = kw.get("oracle_resolve_synonyms", False)
dblink = kw.get("dblink", "")
info_cache = kw.get("info_cache")
(table_name, schema, dblink, synonym) = self._prepare_reflection_args(
connection,
table_name,
schema,
resolve_synonyms,
dblink,
info_cache=info_cache,
)
constraint_data = self._get_constraint_data(
connection,
table_name,
schema,
dblink,
info_cache=kw.get("info_cache"),
)
check_constraints = filter(lambda x: x[1] == "C", constraint_data)
return [
{"name": self.normalize_name(cons[0]), "sqltext": cons[8]}
for cons in check_constraints
if include_all or not re.match(r"..+?. IS NOT NULL$", cons[8])
]
class _OuterJoinColumn(sql.ClauseElement):
__visit_name__ = "outer_join_column"
def __init__(self, column):
self.column = column
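# A minimal usage sketch (illustrative only, not part of this module): the
# reflection methods above are normally reached through the public Inspector
# API rather than called directly, e.g.
#
#     from sqlalchemy import create_engine, inspect
#
#     engine = create_engine("oracle://scott:tiger@host:1521/?service_name=XE")
#     insp = inspect(engine)
#     insp.get_indexes("my_table", schema="SCOTT")
#     insp.get_foreign_keys("my_table", schema="SCOTT")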
| 34.983647 | 88 | 0.606685 |
00ca64f892f149d7e713a3d92d09bc63e2ff734d | 1,108 | py | Python | utils/bitsmaker.py | tmichalak/prjuray | 53f3c94b58ffc6d405ac20a3b340ae726717ed47 | ["0BSD"] | 39 | 2020-07-17T19:43:40.000Z | 2022-01-07T02:05:48.000Z | utils/bitsmaker.py | tmichalak/prjuray | 53f3c94b58ffc6d405ac20a3b340ae726717ed47 | ["0BSD"] | 24 | 2020-07-17T20:15:54.000Z | 2022-01-21T08:29:51.000Z | utils/bitsmaker.py | tmichalak/prjuray | 53f3c94b58ffc6d405ac20a3b340ae726717ed47 | ["0BSD"] | 11 | 2020-07-17T19:43:45.000Z | 2022-02-09T08:43:23.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
from utils import bitstream
def write(bits_fn, fnout, tags, bitfilter=None):
    '''
    Write a seg/bit/tag listing for the given bitstream to fnout. The output
    looks like:

        seg 00020000_046
        bit 18_20
        bit 39_63
        tag LIOB33.IOB_Y1.REFBIT 0
    '''
fout = open(fnout, "w")
def line(s):
fout.write(s + "\n")
# Everything relative to start of bitstream
line("seg 00000000_000")
bitdata = bitstream.load_bitdata2(open(bits_fn, "r"))
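    # load_bitdata2 appears to return a nested mapping of frame address ->
    # word index -> set of set-bit offsets; this is inferred from how the
    # result is iterated below.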
for frame, words in bitdata.items():
for word, wbits in words.items():
if bitfilter is not None:
if not bitfilter(frame, word):
continue
for bitidx in sorted(list(wbits)):
                # Are the names arbitrary? Let's just re-create them
line("bit %08X_%03u_%02u" % (frame, word, bitidx))
for k, v in tags.items():
line("tag %s %u" % (k, v))
| 25.181818 | 66 | 0.597473 |