id | content
---|---|
491738 | import os
from pathlib import Path
from time import sleep
from boucanpy.core import logger
from boucanpy.core.utils import storage_dir
from boucanpy.cli.base import BaseCommand
from boucanpy.api_client import ApiClient
from boucanpy.http.manager import HttpServerManager
class HttpServer(BaseCommand):
name = "http-server"
aliases = ["http"]
description = "run http server"
add_log_level = True
add_debug = True
def __init__(self, *args, **kwargs):
self.api_client = None
self.http_server = None
self.https_server = None
super().__init__(*args, **kwargs)
@classmethod
def parser(cls, parser):
parser.add_argument(
"-a",
"--api-url",
default="http://127.0.0.1:8080",
action="store",
help="api url",
)
parser.add_argument("-t", "--api-token", action="store", help="api token")
parser.add_argument(
"-p",
"--port",
action="store",
type=int,
default=80,
help="listen port of the http server",
)
parser.add_argument(
"-l",
"--listen",
action="store",
default="127.0.0.1",
help="bind address of the http server",
)
parser.add_argument(
"-s", "--enable-ssl", action="store_true", help="enable the ssl server"
)
parser.add_argument(
"--ssl-port",
action="store",
type=int,
default=443,
help="listen port of the https server",
)
parser.add_argument(
"--ssl-listen",
action="store",
default="127.0.0.1",
help="bind address of the https server",
)
parser.add_argument(
"--ssl-key-path",
action="store",
default=storage_dir("ssl/devkey.pem"),
help="path to ssl key",
)
parser.add_argument(
"--ssl-cert-path",
action="store",
default=storage_dir("ssl/devcert.pem"),
help="path to ssl cert",
)
# also need cert/pem paths
parser.add_argument(
"--no-sync",
action="store_true",
help="don't sync api token back to database",
)
parser.add_argument(
"--no-api-wait", action="store_true", help="don't wait for the api to be up"
)
parser.add_argument(
"--no-ssl-verify",
action="store_true",
help="skip ssl verify in the http server's api client",
)
parser.add_argument(
"-r",
"--refresh-ttl",
type=int,
default=3600,
action="store",
help="time to wait before polling for new things",
)
return parser
async def run(self):
# TODO: thread issues?
verify_ssl = True
if bool(self.option("no_ssl_verify")):
verify_ssl = False
self.api_client = ApiClient(
self.get_api_url(), self.get_api_token(), verify_ssl=verify_ssl
)
if not self.option("no_api_wait"):
if not self.api_client.wait_for_up():
logger.critical(
"<EMAIL> - Could not connect to api. quitting"
)
self.exit(1)
if self.option("no_sync"):
logger.info("<EMAIL> - Skipping syncing api token")
else:
self.api_client.sync()
self.boot()
self.start_servers()
try:
count = 0
while self.is_alive():
# zones don't need to be refreshed for http server
# may want to do something in the future though
count = count + 1
sleep(1)
except KeyboardInterrupt:
pass
def boot(self):
port = self.option("port")
listen = self.option("listen")
logger.info("<EMAIL> - Building http server on port %d", port)
self.http_server = HttpServerManager(
port=port, listen=listen, ssl=False, api_client=self.api_client
)
if bool(self.option("enable_ssl")):
ssl_port = self.option("ssl_port")
ssl_listen = self.option("ssl_listen")
ssl_key_path = self.option("ssl_key_path")
ssl_cert_path = self.option("ssl_cert_path")
logger.info(
f"<EMAIL> - Building https server manager on port {str(ssl_port)} with {ssl_cert_path} and {ssl_key_path}",
)
self.https_server = HttpServerManager(
port=ssl_port,
listen=ssl_listen,
api_client=self.api_client,
ssl=True,
ssl_cert_path=ssl_cert_path,
ssl_key_path=ssl_key_path,
)
logger.info(
"<EMAIL> - Building https server on port %d", ssl_port
)
def start_servers(self):
self.http_server.start_thread()
if bool(self.option("enable_ssl")):
self.https_server.start_thread()
def stop_servers(self):
self.http_server.stop()
if bool(self.option("enable_ssl")):
self.https_server.stop()
def is_alive(self):
_is_alive = self.http_server.is_alive()
if bool(self.option("enable_ssl")):
_is_alive = _is_alive and self.https_server.is_alive()
return _is_alive
def get_api_url(self):
if os.environ.get("API_URL", None):
return os.environ.get("API_URL")
return self.option("api_url")
def get_api_token(self):
if os.environ.get("API_TOKEN", None):
return os.environ.get("API_TOKEN")
if self.option("api_token", None):
return self.option("api_token")
logger.critical("api token required")
self.exit(1)
|
491792 | import re
import pandas as pd
from cowidev.utils.clean import clean_count
from cowidev.utils.web.scraping import get_soup
from cowidev.vax.utils.incremental import enrich_data, increment
from cowidev.utils.clean.dates import localdate
import datetime
def read(source: str) -> pd.Series:
soup = get_soup(source)
for label in soup.find_all(class_="number-label"):
if label.text == "Total vaccins administrés":
container = label.parent.parent
return pd.Series(
data={
"total_vaccinations": parse_total_vaccinations(container),
"people_vaccinated": parse_people_vaccinated(container),
"people_fully_vaccinated": parse_people_fully_vaccinated(container),
"source_url": source,
}
)
def parse_total_vaccinations(container) -> int:
total_vaccinations = clean_count(container.find(class_="number").text)
return total_vaccinations
def parse_people_vaccinated(container) -> int:
people_vaccinated = container.find(class_="cmp-text").text
people_vaccinated = re.search(r"Dose 1\:\s([\d\. ]{6,})", people_vaccinated).group(
1
)
people_vaccinated = clean_count(people_vaccinated)
return people_vaccinated
def parse_people_fully_vaccinated(container) -> int:
people_fully_vaccinated = container.find(class_="cmp-text").text
people_fully_vaccinated = re.search(
r"Dose 2\:\s([\d\. ]{6,})", people_fully_vaccinated
).group(1)
people_fully_vaccinated = clean_count(people_fully_vaccinated)
return people_fully_vaccinated
def enrich_date(ds: pd.Series) -> pd.Series:
return enrich_data(ds, "date", localdate("Europe/Luxembourg"))
def enrich_location(ds: pd.Series) -> pd.Series:
return enrich_data(ds, "location", "Luxembourg")
def enrich_vaccine(ds: pd.Series) -> pd.Series:
return enrich_data(ds, "vaccine", "Moderna, Oxford/AstraZeneca, Pfizer/BioNTech")
def pipeline(ds: pd.Series) -> pd.Series:
return ds.pipe(enrich_date).pipe(enrich_location).pipe(enrich_vaccine)
def main(paths):
source = "https://covid19.public.lu/fr.html"
data = read(source).pipe(pipeline)
output_file = paths.tmp_vax_out("Luxembourg")
previous_data = pd.read_csv(output_file)
if previous_data["total_vaccinations"].iloc[-1] >= data["total_vaccinations"]:
print("Luxembourg is up to date")
return
increment(
paths=paths,
location=data["location"],
total_vaccinations=data["total_vaccinations"],
people_vaccinated=data["people_vaccinated"],
people_fully_vaccinated=data["people_fully_vaccinated"],
date=data["date"],
source_url=data["source_url"],
vaccine=data["vaccine"],
)
if __name__ == "__main__":
main()
|
491849 | from . import statefulfunction
from .statefulfunction import *
__all__ = list(statefulfunction.__all__)
|
491880 | import os,os.path,shutil
import numpy as np
import pickle
import dmdd
import dmdd_efficiencies as eff
def check_min_mass(element='fluorine', Qmin=1., v_esc=544., v_lag=220., mx_guess=1.):
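"""Build a small test Experiment and check that find_min_mass returns a sensible (positive) mass for the given guess."""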
experiment = dmdd.Experiment('test',element,Qmin, 40.,100., eff.efficiency_unit)
res = experiment.find_min_mass(v_esc=v_esc, v_lag=v_lag, mx_guess=mx_guess)
print(res, 'GeV')
if res<0:
print('Problem: try another mx_guess...')
def make_UVmodels(return_models=False):
SI_Higgs = dmdd.UV_Model('SI_Higgs', ['mass', 'sigma_si'], fixed_params={'fnfp_si': 1})
milicharge = dmdd.UV_Model('Milicharge', ['mass', 'sigma_si_massless'], fixed_params={'fnfp_si_massless': 0})
SD_flavoruniversal = dmdd.UV_Model('SD_fu', ['mass','sigma_sd'], fixed_params={'fnfp_sd': -1.1})
anapole = dmdd.UV_Model('Anapole', ['mass','sigma_anapole'])
magdip_heavy = dmdd.UV_Model('Mag.dip.heavy', ['mass','sigma_magdip'])
magdip_0 = dmdd.UV_Model('Mag.dip.light', ['mass','sigma_magdip_massless'])
elecdip_heavy = dmdd.UV_Model('Elec.dip.heavy', ['mass','sigma_elecdip'])
elecdip_0 = dmdd.UV_Model('Elec.dip.light', ['mass','sigma_elecdip_massless'])
f1 = dmdd.UV_Model('f1', ['mass','sigma_f1'], fixed_params={'fnfp_f1': 1.})
f2_Higgs = dmdd.UV_Model('f2_Higgs', ['mass','sigma_f2'], fixed_params={'fnfp_f2': -0.05})
#f2_flavoruniversal = dmdd.UV_Model('f2_flavor-universal', ['mass','sigma_f2'], fixed_params={'fnfp_f2': 1.})
f3_Higgs = dmdd.UV_Model('f3_Higgs', ['mass','sigma_f3'], fixed_params={'fnfp_f3': -0.05})
#f3_flavoruniversal = dmdd.UV_Model('f3_flavor-universal', ['mass','sigma_f3'], fixed_params={'fnfp_f3': 1.})
LS = dmdd.UV_Model('LS', ['mass','sigma_LS'], fixed_params={'fnfp_LS': 0.})
models = [SI_Higgs, milicharge, SD_flavoruniversal, anapole,
magdip_heavy, magdip_0, elecdip_heavy, elecdip_0,
f1, f2_Higgs, f3_Higgs, LS]
if return_models:
return models
def make_experiments(return_experiments=False):
xe = dmdd.Experiment('Xe','xenon',5., 40.,100., eff.efficiency_unit)
ge = dmdd.Experiment('Ge','germanium',0.4, 100.,100., eff.efficiency_unit)
if return_experiments:
return [xe,ge]
def test_MultinestRun(mass=50,test_fits=False):
SI_Higgs = dmdd.UV_Model('SI_Higgs', ['mass', 'sigma_si'], fixed_params={'fnfp_si': 1})
elecdip_heavy = dmdd.UV_Model('Elec.dip.heavy', ['mass','sigma_elecdip'])
experiment = make_experiments(return_experiments=True)
simmodel = SI_Higgs
fitmodel1 = SI_Higgs
fitmodel2 = elecdip_heavy
pardic = {'sigma_si': 70.,'mass': mass}
simname = 'simtest'
testrun1 = dmdd.MultinestRun(simname, experiment, simmodel, pardic,
fitmodel1, prior_ranges={'mass':(1,1000),
'sigma_si':(0.001,100000),
'sigma_elecdip':(0.001,100000)})
data1 = np.loadtxt(testrun1.simulations[0].datafile)
pardic = {'sigma_si': 70.0007,'mass': mass}
testrun2 = dmdd.MultinestRun(simname, experiment, simmodel, pardic,
fitmodel2, empty_run=False,
prior_ranges={'mass':(1,1000),
'sigma_si':(0.001,100000),
'sigma_elecdip':(0.001,100000)})
data2 = np.loadtxt(testrun1.simulations[0].datafile)
#simulation datafile should be created only for the first instance of MultinestRun:
assert np.allclose(data1, data2)
if test_fits:
testrun1.fit()
testrun1.visualize()
testrun2.fit()
testrun2.visualize()
if (not os.path.exists(testrun1.chains_file)) or (not os.path.exists(testrun1.pickle_file)) or (not os.path.exists(testrun1.stats_file)):
raise AssertionError('Stats or chains or pickle are not created or are erased.')
plotfile1 = testrun1.chainspath + '2d_posterior_mass_vs_sigma_si.pdf'
plotfile2 = testrun1.chainspath + '{}_theoryfitdata_Ge.pdf'.format(simname)
plotfile3 = testrun1.chainspath + '{}_theoryfitdata_Xe.pdf'.format(simname)
if (not os.path.exists(plotfile1)) or (not os.path.exists(plotfile2)) or (not os.path.exists(plotfile3)):
raise AssertionError('Plots are not created or are erased.')
if (not os.path.exists(testrun2.chains_file)) or (not os.path.exists(testrun2.pickle_file)) or (not os.path.exists(testrun2.stats_file)):
raise AssertionError('Stats or chains or pickle are not created.')
plotfile1 = testrun2.chainspath + '2d_posterior_mass_vs_sigma_elecdip.pdf'
plotfile2 = testrun2.chainspath + '{}_theoryfitdata_Ge.pdf'.format(simname)
plotfile3 = testrun2.chainspath + '{}_theoryfitdata_Xe.pdf'.format(simname)
if (not os.path.exists(plotfile1)) or (not os.path.exists(plotfile2)) or (not os.path.exists(plotfile3)):
raise AssertionError('Plots are not created.')
def test_UVrate():
experiment = dmdd.Experiment('Xe','xenon',5., 40.,10000., eff.efficiency_unit)
models = make_UVmodels(return_models=True)
mass = 40.
qs = np.array([15.])
v_lag = 200.
v_rms = 100.
v_esc = 600.
rho_x = 0.4
sigma_names = {}
fnfp_names = {}
fnfp_vals = {}
for m in models:
sigma_names[m.name] = m.param_names[1]
if len(m.fixed_params)>0:
fnfp_names[m.name] = list(m.fixed_params.keys())[0]
fnfp_vals[m.name] = list(m.fixed_params.values())[0]
else:
fnfp_names[m.name] = None
fnfp_vals[m.name] = None
dRdQs = np.zeros(len(models))
Rs = np.zeros(len(models))
for i,m in enumerate(models):
kwargs = {sigma_names[m.name]:1.}
if fnfp_names[m.name] is not None:
kwargs[fnfp_names[m.name]] = fnfp_vals[m.name]
dRdQs[i] = dmdd.rate_UV.dRdQ(qs, mass=mass, element=experiment.element,
v_lag=v_lag, v_rms=v_rms, v_esc=v_esc, rho_x=rho_x,
**kwargs)
Rs[i] = dmdd.rate_UV.R(eff.efficiency_unit, mass=mass, element=experiment.element,
Qmin=experiment.Qmin, Qmax=experiment.Qmax,
v_lag=v_lag, v_rms=v_rms, v_esc=v_esc, rho_x=rho_x,
**kwargs)
#print 'dRdQs = {}\n'.format(dRdQs)
#print 'Rs = {}\n'.format(Rs)
dRdQs_correct = [ 1.27974652e-12, 1.67031585e-13, 6.28936205e-13, 7.76864477e-13,
7.71724584e-13, 5.66164037e-13, 8.40579288e-13, 6.16678247e-13,
4.72480605e-13, 2.59857470e-16, 9.59390104e-16, 1.14295679e-13]
Rs_correct = [ 6.15358778e-11, 3.10857259e-11, 3.14982315e-11, 4.14119198e-11,
1.82181891e-11, 3.84877268e-11, 2.35638282e-11, 5.50063883e-11,
1.34702925e-11, 5.82472177e-15, 1.64213483e-14, 2.26028126e-12]
assert np.allclose(dRdQs_correct, dRdQs)
assert np.allclose(Rs_correct, Rs)
###
qs = np.array([8.3,15.7])
logtest1 = dmdd.rate_UV.loglikelihood(qs, eff.efficiency_unit, mass=mass,
sigma_si=1.,fnfp_si=1.,
element=experiment.element,
Qmin=experiment.Qmin, Qmax=experiment.Qmax,
exposure=experiment.exposure,energy_resolution=True,
v_lag=v_lag, v_rms=v_rms, v_esc=v_esc, rho_x=rho_x)
logtest2 = dmdd.rate_UV.loglikelihood(qs, eff.efficiency_unit, mass=mass,
sigma_si=1.,fnfp_si=1.,
element=experiment.element,
Qmin=experiment.Qmin, Qmax=experiment.Qmax,
exposure=experiment.exposure,energy_resolution=False,
v_lag=v_lag, v_rms=v_rms, v_esc=v_esc, rho_x=rho_x)
#print 'logtest1_correct={}'.format(logtest1)
#print 'logtest2_correct={}'.format(logtest2)
logtest1_correct=-19.6890210901
logtest2_correct=-13.4627188661
print('correct={} got={}\n'.format(logtest1_correct,logtest1))
print('correct={} got={}\n'.format(logtest2_correct,logtest2))
assert np.isclose(logtest1_correct, logtest1)
assert np.isclose(logtest2_correct, logtest2)
|
491885 | import gensim
import random
import logging
# configuration
trains = "../../temp_results/word2vec_hindi.txt"
create = 1
topn = 10
data_folder = '../data/word2vec_evaluation/'
TARGET_SYN = data_folder+'syntactic.questions.txt'
TARGET_SEM_OP = data_folder+'semantic_op.questions.txt'
TARGET_SEM_BM = data_folder+'semantic_bm.questions.txt'
TARGET_SEM_DF = data_folder+'semantic_df.questions.txt'
SRC_NOUNS = data_folder+'nouns.txt'
SRC_BESTMATCH = data_folder+'bestmatch.txt'
SRC_DOESNTFIT = data_folder+'doesntfit.txt'
SRC_OPPOSITE = data_folder+'opposite.txt'
PATTERN_SYN = [('nouns', 'SI/PL', SRC_NOUNS, 0, 1)]
#logger.write(filename=train.strip() + '.result', format='%(asctime)s : %(message)s', level=logging.INFO)
print ("TEST")
# function create_syntactic_testset
# ... creates syntactic test set and writes it into a file
# @return void
def create_syntactic_testset():
print ("TEST")
with open(TARGET_SYN, 'w') as t:
for label, short, src, index1, index2 in PATTERN_SYN:
t.write(': ' + label + ': ' + short + '\n')
for q in create_questions(src, index1, index2):
t.write(q + '\n')
# function create_semantic_testset
# ... creates semantic test set and writes it into a file
# @return void
def create_semantic_testset():
# opposite
print ("TEST")
with open(TARGET_SEM_OP, 'w') as t:
for q in create_questions(SRC_OPPOSITE):
t.write(q + '\n')
logging.info('created opposite questions')
# best match
with open(TARGET_SEM_BM, 'w') as t:
groups = open(SRC_BESTMATCH).read().split(':')
groups.pop(0) # remove first empty group
for group in groups:
questions = group.splitlines()
questions.pop(0)
while questions:
for i in range(1,len(questions)):
question = questions[0].split('-') + questions[i].split('-')
t.write(' '.join(question) + '\n')
questions.pop(0)
# doesn't fit
with open(TARGET_SEM_DF, 'w') as t:
for line in open(SRC_DOESNTFIT):
words = line.split()
for wrongword in words[-1].split('-'):
question = ' '.join(words[:3] + [wrongword])
t.write(question + '\n')
# function create_questions
# ... creates single questions from given source
# @param string src source file to load words from
# @param integer index1 index of first word in a line to focus on
# @param integer index2 index of second word in a line to focus on
# (each word pair is combined with 10 random other pairs)
# @return list of question words
def create_questions(src, index1=0, index2=1):
# get source content
with open(src) as f:
content = f.readlines()
content = [x.strip() for x in content]
questions = []
for line in content:
for i in range(0, 10):
# get current word pair
question = list(line.split('-'))
# get random word pair that is not the current
random_line = random.choice(list(set(content) - {line}))
random_word = list(random_line.split('-'))
# merge both word pairs to one question
question.extend(random_word)
questions.append(' '.join(question))
print (len(questions))
return questions
# function test_mostsimilar
# ... tests given model to most similar word
# @param word2vec model to test
# @param string src source file to load words from
# @param string label to print current test case
# @param integer topn number of top matches
def test_mostsimilar(model, src, label='most similar', topn=5):
num_lines = sum(1 for line in open(src))
num_questions = 0
num_right = 0
num_topn = 0
# get questions
import codecs
with codecs.open(src,encoding='utf-8') as f:
questions = f.readlines()
questions = [x.strip() for x in questions]
# test each question
for question in questions:
words = question.split()
# check if all words exist in vocabulary
if all(x in model.index2word for x in words):
num_questions += 1
bestmatches = model.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=topn)
# best match
if words[3] in bestmatches[0]:
num_right += 1
# topn match
for topmatches in bestmatches[:topn]:
if words[3] in topmatches:
num_topn += 1
break
# calculate result
correct_matches = round(num_right/float(num_questions)*100, 1) if num_questions>0 else 0.0
topn_matches = round(num_topn/float(num_questions)*100, 1) if num_questions>0 else 0.0
coverage = round(num_questions/float(num_lines)*100, 1) if num_lines>0 else 0.0
# log result
print (correct_matches)
print (topn_matches)
print (coverage)
# function test_mostsimilar
# ... tests given model to most similar word
# @param word2vec model to test
# @param string src source file to load words from
# @param integer topn number of top matches
def test_mostsimilar_groups(model, src, topn=10):
num_lines = 0
num_questions = 0
num_right = 0
num_topn = 0
# test each group
groups = open(src, encoding='utf-8').read().split('\n: ')
for group in groups:
questions = group.splitlines()
label = questions.pop(0)
label = label[2:] if label.startswith(': ') else label # handle first group
num_group_lines = len(questions)
num_group_questions = 0
num_group_right = 0
num_group_topn = 0
# test each question of current group
for question in questions:
words = question.split()
# check if all words exist in vocabulary
if all(x in model.index2word for x in words):
num_group_questions += 1
bestmatches = model.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=topn)
# best match
if words[3] in bestmatches[0]:
num_group_right += 1
# topn match
for topmatches in bestmatches[:topn]:
if words[3] in topmatches:
num_group_topn += 1
break
# calculate result
correct_group_matches = round(num_group_right/float(num_group_questions)*100, 1) if num_group_questions>0 else 0.0
topn_group_matches = round(num_group_topn/float(num_group_questions)*100, 1) if num_group_questions>0 else 0.0
group_coverage = round(num_group_questions/float(num_group_lines)*100, 1) if num_group_lines>0 else 0.0
# log result
print(label)
print(correct_group_matches)
print(topn_group_matches)
print(group_coverage)
# total numbers
num_lines += num_group_lines
num_questions += num_group_questions
num_right += num_group_right
num_topn += num_group_topn
# calculate result
correct_matches = round(num_right/float(num_questions)*100, 1) if num_questions>0 else 0.0
topn_matches = round(num_topn/float(num_questions)*100, 1) if num_questions>0 else 0.0
coverage = round(num_questions/float(num_lines)*100, 1) if num_lines>0 else 0.0
# log result
print(correct_matches)
print(topn_matches)
print(coverage)
# function test_doesntfit
# ... tests given model to most not fitting word
# @param word2vec model to test
# @param string src source file to load words from
def test_doesntfit(model, src):
num_lines = sum(1 for line in open(src))
num_questions = 0
num_right = 0
# get questions
with open(src, encoding='utf-8') as f:
questions = f.readlines()
questions = [x.strip() for x in questions]
# test each question
for question in questions:
words = question.split()
# check if all words exist in vocabulary
if all(x in model.index2word for x in words):
num_questions += 1
if model.doesnt_match(words) == words[3]:
num_right += 1
# calculate result
correct_matches = round(num_right/float(num_questions)*100, 1) if num_questions>0 else 0.0
coverage = round(num_questions/float(num_lines)*100, 1) if num_lines>0 else 0.0
# log result
print(correct_matches)
print(coverage)
if create == 1:
create_syntactic_testset()
create_semantic_testset()
# get trained model
model = gensim.models.KeyedVectors.load_word2vec_format(trains.strip())
print ("word 2 vec read successfully.")
# execute evaluation
test_mostsimilar_groups(model, TARGET_SYN, topn)
test_mostsimilar(model, TARGET_SEM_OP, 'opposite', topn)
test_mostsimilar(model, TARGET_SEM_BM, 'best match', topn)
test_doesntfit(model, TARGET_SEM_DF)
|
491908 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from torch import Tensor
from .functions import *
from .common import *
class ActorCritic(nn.Module):
def __init__(self,
in_dim,
out_actions,
hidden_dim=400,
hidden_layers=4,
layer_norm=True,
gamma=0.999,
lambda_gae=0.95,
entropy_weight=1e-3,
target_interval=100,
actor_grad='reinforce',
actor_dist='onehot'
):
super().__init__()
self.in_dim = in_dim
self.out_actions = out_actions
self.gamma = gamma
self.lambda_ = lambda_gae
self.entropy_weight = entropy_weight
self.target_interval = target_interval
self.actor_grad = actor_grad
self.actor_dist = actor_dist
actor_out_dim = out_actions if actor_dist == 'onehot' else 2 * out_actions
self.actor = MLP(in_dim, actor_out_dim, hidden_dim, hidden_layers, layer_norm)
self.critic = MLP(in_dim, 1, hidden_dim, hidden_layers, layer_norm)
self.critic_target = MLP(in_dim, 1, hidden_dim, hidden_layers, layer_norm)
self.critic_target.requires_grad_(False)
self.train_steps = 0
def forward_actor(self, features: Tensor) -> D.Distribution:
y = self.actor.forward(features).float() # .float() to force float32 on AMP
if self.actor_dist == 'onehot':
return D.OneHotCategorical(logits=y)
if self.actor_dist == 'normal_tanh':
return normal_tanh(y)
if self.actor_dist == 'tanh_normal':
return tanh_normal(y)
assert False, self.actor_dist
def forward_value(self, features: Tensor) -> Tensor:
y = self.critic.forward(features)
return y
def training_step(self,
features: TensorJMF,
actions: TensorHMA,
rewards: D.Distribution,
terminals: D.Distribution,
log_only=False
):
if not log_only:
if self.train_steps % self.target_interval == 0:
self.update_critic_target()
self.train_steps += 1
reward1: TensorHM = rewards.mean[1:]
terminal0: TensorHM = terminals.mean[:-1]
terminal1: TensorHM = terminals.mean[1:]
# GAE from https://arxiv.org/abs/1506.02438 eq (16)
# advantage_gae[t] = advantage[t] + (gamma lambda) advantage[t+1] + (gamma lambda)^2 advantage[t+2] + ...
value_t: TensorJM = self.critic_target.forward(features)
value0t: TensorHM = value_t[:-1]
value1t: TensorHM = value_t[1:]
advantage = - value0t + reward1 + self.gamma * (1.0 - terminal1) * value1t
advantage_gae = []
agae = None
for adv, term in zip(reversed(advantage.unbind()), reversed(terminal1.unbind())):
if agae is None:
agae = adv
else:
agae = adv + self.lambda_ * self.gamma * (1.0 - term) * agae
advantage_gae.append(agae)
advantage_gae.reverse()
advantage_gae = torch.stack(advantage_gae)
# Note: if lambda=0, then advantage_gae=advantage, then value_target = advantage + value0t = reward + gamma * value1t
value_target = advantage_gae + value0t
# When calculating losses, should ignore terminal states, or anything after, so:
# reality_weight[i] = (1-terminal[0]) (1-terminal[1]) ... (1-terminal[i])
# Note this takes care of the case when initial state features[0] is terminal - it will get weighted by (1-terminals[0]).
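# Implemented below as exp(cumsum(log(1 - terminal))), i.e. a cumulative product computed in log space.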
reality_weight = (1 - terminal0.detach()).log().cumsum(dim=0).exp()
# Critic loss
value: TensorJM = self.critic.forward(features.detach())
value0: TensorHM = value[:-1]
loss_critic = 0.5 * torch.square(value_target.detach() - value0)
loss_critic = (loss_critic * reality_weight).mean()
# Actor loss
policy_distr = self.forward_actor(features.detach()[:-1]) # TODO: we could reuse this from dream()
if self.actor_grad == 'reinforce':
action_logprob = policy_distr.log_prob(actions.detach())
loss_policy = - action_logprob * advantage_gae.detach()
elif self.actor_grad == 'dynamics':
loss_policy = - value_target
else:
assert False, self.actor_grad
policy_entropy = policy_distr.entropy()
loss_actor = loss_policy - self.entropy_weight * policy_entropy
loss_actor = (loss_actor * reality_weight).mean()
assert (loss_policy.requires_grad and policy_entropy.requires_grad) or not loss_critic.requires_grad
with torch.no_grad():
metrics = dict(loss_critic=loss_critic.detach(),
loss_actor=loss_actor.detach(),
policy_entropy=policy_entropy.mean(),
policy_value=value0[0].mean(), # Value of real states
policy_value_im=value0.mean(), # Value of imagined states
policy_reward=reward1.mean(),
policy_reward_std=reward1.std(),
)
tensors = dict(value=value.detach(),
value_target=value_target.detach(),
value_advantage=advantage.detach(),
value_advantage_gae=advantage_gae.detach(),
value_weight=reality_weight.detach(),
)
return (loss_actor, loss_critic), metrics, tensors
def update_critic_target(self):
self.critic_target.load_state_dict(self.critic.state_dict()) # type: ignore
|
491926 | import doxygen_basic_notranslate
import inspect
import string
import sys
import comment_verifier
comment_verifier.check(inspect.getdoc(doxygen_basic_notranslate.function),
r"""\brief
Brief description.
The comment text
\author Some author
\return Some number
\sa function2"""
)
comment_verifier.check(inspect.getdoc(doxygen_basic_notranslate.function1),
r"""Single line comment """
)
comment_verifier.check(inspect.getdoc(doxygen_basic_notranslate.function2),
r"""A test of a very very very very very very very very very very very very very very very very
very very very very very long comment string."""
)
comment_verifier.check(inspect.getdoc(doxygen_basic_notranslate.function3),
r"""*Overload 1:*
A test for overloaded functions
This is function \b one
|
*Overload 2:*
A test for overloaded functions
This is function \b two"""
)
comment_verifier.check(inspect.getdoc(doxygen_basic_notranslate.function4),
r"""A test of some mixed tag usage
\if CONDITION
This \a code fragment shows us something \.
\par Minuses:
\arg it's senseless
\arg it's stupid
\arg it's null
\warning This may not work as expected
\code
int main() { while(true); }
\endcode
\endif"""
)
comment_verifier.check(inspect.getdoc(doxygen_basic_notranslate.function5),
r"""This is a post comment. """
)
comment_verifier.check(inspect.getdoc(doxygen_basic_notranslate.function6),
r"""Test for default args
@param a Some parameter, default is 42"""
)
comment_verifier.check(inspect.getdoc(doxygen_basic_notranslate.function7),
r"""Test for a parameter with difficult type
(mostly for python)
@param a Very strange param"""
)
|
491931 | import random
try:
random = random.SystemRandom()
except NotImplementedError:
pass
def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'):
return ''.join([random.choice(allowed_chars) for i in range(length)])
def get_user_model():
"""
Get the user model that is being used. If the `get_user_model` method
is not available, default back to the standard User model provided
through `django.contrib.auth`.
"""
try:
from django.contrib.auth import get_user_model
return get_user_model()
except ImportError:
from django.contrib.auth.models import User
return User
try:
from django.utils import timezone
now = timezone.now
except ImportError:
from datetime import datetime
now = datetime.now
|
491947 | from django.test import TestCase
from ..importer import Importer
class ImporterTestCase(TestCase):
def test_load(self):
""" Test that the loaded attribute is changed appropriately when the
importer is loaded.
"""
importer = Importer('module')
self.assertFalse(importer.loaded)
importer.load()
self.assertTrue(importer.loaded)
def test_ensure_loaded(self):
""" Test that the load method is called when the importer is not
loaded for a decorated function.
"""
importer = Importer('module')
self.assertFalse(importer.loaded)
@importer.ensure_loaded
def func_a():
return 42
self.assertEqual(func_a(), 42)
self.assertTrue(importer.loaded)
|
491970 | import os
import subprocess
try:
from munkicon import worker
except ImportError:
from .munkicon import worker
# Keys: 'mac_os_python_path'
# 'mac_os_python_ver'
# 'munki_python_path'
# 'munki_python_symlink'
# 'munki_python_ver'
# 'official_python3_path'
# 'official_python3_ver'
class PythonConditions(object):
"""Generates information about python versions."""
def __init__(self):
self.conditions = self._process()
def _python_versions(self):
"""Gets the version of several Python paths (if they exist)."""
result = {'mac_os_python_path': '',
'mac_os_python_ver': '',
'munki_python_path': '',
'munki_python_symlink': '',
'munki_python_ver': '',
'official_python3_path': '',
'official_python3_ver': ''}
_munki_pythons = ['/usr/local/munki/munki-python']
if any([os.path.exists(_p) for _p in _munki_pythons]):
_munki_python = [_x for _x in _munki_pythons if os.path.exists(_x)][0]
else:
_munki_python = ''
_python_paths = {'mac_os_python_path': '/usr/bin/python',
'munki_python_path': _munki_python,
'official_python3_path': '/usr/local/bin/python3'}
for _k, _v in _python_paths.items():
if os.path.exists(_v):
_real_path = os.path.realpath(_v)
result[_k] = _real_path
# Include the munki python symlink in use
if _k == 'munki_python_path':
result['munki_python_symlink'] = _v
# Include the symlink path of official python
if _k == 'official_python3_path':
result['official_python3_symlink'] = _v
_cmd = [_real_path, '--version']
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _p.returncode == 0:
_ver = None
if _r:
if isinstance(_r, bytes):
_r = _r.decode('utf-8').strip()
_ver = _r.replace('Python ', '')
elif _e:
if isinstance(_e, bytes):
_e = _e.decode('utf-8').strip()
_ver = _e.replace('Python ', '')
result[_k.replace('_path', '_ver')] = _ver
return result
def _process(self):
"""Process all conditions and generate the condition dictionary."""
result = dict()
result.update(self._python_versions())
return result
def runner(dest):
py = PythonConditions()
mc = worker.MunkiConWorker(conditions_file=dest, log_src=__file__)
mc.write(conditions=py.conditions)
|
491980 | import sys
def main(input_filename):
test_cases = open(input_filename, 'r')
for test in test_cases:
test = test.strip()
word = list(test.split(' ')[0])
mask = list(test.split(' ')[1])
result = []
for m, w in zip(mask, word):
if m == '1':
result.append(w.upper())
else:
result.append(w.lower())
print(''.join(result))
test_cases.close()
if __name__ == '__main__':
main(sys.argv[1])
|
491987 | def sum_zero(n):
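"""Return a list of n integers whose sum is zero, e.g. sum_zero(5) -> [0, 1, -1, 2, -2]."""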
zero_sum_array = []
counter = 1
if n % 2 != 0:
zero_sum_array.append(0)
n -= 1
while n:
zero_sum_array.append(counter)
zero_sum_array.append(-counter)
counter += 1
n -= 2
return zero_sum_array
|
492077 | from torchsso.utils.logger import Logger # NOQA
from torchsso.utils.inv_cupy import inv # NOQA
from torchsso.utils.cholesky_cupy import cholesky # NOQA
from torchsso.utils.accumulator import TensorAccumulator # NOQA
|
492112 | import copy
import itertools
from functools import reduce
import numpy as np
from narwhal.cast import AbstractCast, Cast
def count_casts(castlikes):
""" Given an iterable of Casts and CastCollections, count the number of
individual Casts. """
n = 0
for c in castlikes:
if isinstance(c, AbstractCast):
n += 1
else:
n += count_casts(c)
return n
def ensureiterable(item):
""" Turn *item* into an infinite lazy iterable. """
if not hasattr(item, "__iter__") or isinstance(item, str):
return itertools.repeat(item)
else:
return itertools.cycle(item)
def getiterable(kw, name, default):
""" Equivalent to dict.get except that it ensures the result is an iterable. """
return ensureiterable(kw.get(name, default))
def _nanmean(arr, axis=0):
""" Re-implement nanmean in a way that doesn't fire off warning when there
are NaN-filled rows.
Note that here axis is the axis to retain, which is not the behaviour of
np.nanmean. I did it this way for simplicity, but this might be confusing
in the future.
"""
if axis != 0:
arr = np.rollaxis(arr, axis)
means = np.empty(arr.shape[0], dtype=arr.dtype)
for i, row in enumerate(arr):
valid = row[~np.isnan(row)]
if len(valid) > 0:
means[i] = np.mean(valid)
else:
means[i] = np.nan
return means
def ccmeans(cc):
""" Calculate a mean Cast along isopycnals from a CastCollection. """
c0 = max(cc, key=lambda c: c.nvalid())
s0 = c0["sigma"]
sharedkeys = set(c0.data.keys()).intersection(
*[set(c.data.keys()) for c in cc[1:]]).difference(
set(("pres", "botdepth", "time")))
nanmask = reduce(lambda a,b: a*b, [c.nanmask() for c in cc])
data = dict()
for key in sharedkeys:
arr = np.nan * np.empty((len(cc), len(c0["pres"])))
arr[0,:] = c0[key]
for j, c in enumerate(cc[1:]):
s = np.convolve(c["sigma"], np.ones(3)/3.0, mode="same")
arr[j+1,:] = np.interp(s0, s, c[key])
data[key] = _nanmean(arr, axis=1)
data[key][nanmask] = np.nan
return Cast(copy.copy(c0["pres"]), **data)
def ccmeanp(cc):
if not all(np.all(cc[0]["pres"] == c["pres"]) for c in cc):
raise ValueError("casts must share pressure levels")
p = cc[0]["pres"]
# shared keys are those in all casts, minus pressure and botdepth
sharedkeys = set(cc[0].data.keys()).intersection(
*[set(c.data.keys()) for c in cc[1:]]).difference(
set(("pres", "botdepth", "time")))
data = dict()
for key in sharedkeys:
arr = np.vstack([c.data[key] for c in cc])
data[key] = _nanmean(arr, axis=1)
return Cast(p, **data)
|
492134 | import json
import sys
with open('reports/servers/index.json') as file_:
report = json.load(file_)
failures = sum(value['behavior'] == 'FAILED' for value in report['websockets'].values())
if failures > 0:
sys.exit(1)
else:
sys.exit(0)
|
492139 | import argparse
from preprocess import preprocess
import os
from pathlib import Path
import wave
import numpy as np
import unicodedata
import random
from tqdm import tqdm
import re
import yaml
import sys
import librosa
## Convert KsponSpeech to Fairseq-style manifests
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"--root", default='/code/gitRepo/data/aihub/ksponspeech', metavar="DIR",
help="root directory containing flac files to index"
)
parser.add_argument(
"--info", default=None, metavar="DIR",
help="์ ์ฒ๋ฆฌ ์ถ๊ฐ์ ์ผ๋ก ์ํํ ๊ฒ."
)
parser.add_argument(
"--do_info", action="store_true",
help="์ ์ฒ๋ฆฌ ์ถ๊ฐ์ ์ผ๋ก ์ํํ ์ง ์ฌ๋ถ ํ์ธ"
)
parser.add_argument(
"--do_remove", action="store_true",
help="ํ๊ธ ์์๊ฐ ์๋ ์ซ์, ์์ด๊ฐ ํฌํจ๋์ด ์๋ ๋ชจ๋ ๋จ์ด๋ฅผ ์ญ์ ํ ์ง ์ฌ๋ถ ํ์ธ"
)
parser.add_argument(
"--token_limit", default=sys.maxsize, type=int,
help="์ต๋ ๊ธ์์ ์ฒดํฌ"
)
parser.add_argument(
"--dest", default='manifest_temp', type=str, metavar="DIR", help="output directory"
)
parser.add_argument(
"--ext", default="pcm", type=str, metavar="EXT", help="extension to look for"
)
parser.add_argument('--preprocess_mode', type=str,
default='phonetic',
help='how to resolve dual (written)/(spoken) transcriptions in the script: '
'"phonetic" keeps the spoken form, '
'"spelling" keeps the written form such as digits and symbols')
parser.add_argument('--output_unit', type=str,
default='grapheme',
help='character or subword or grapheme')
parser.add_argument('--additional_output_unit', type=str,
default=None,
help='character or subword or grapheme')
parser.add_argument("--seed", default=42, type=int, metavar="N", help="random seed")
parser.add_argument(
"--time",
default=None,
type=str,
metavar="MIN",
help="set if you want make split manifest",
)
parser.add_argument('--script_path', type=str,
default="/code/gitRepo/data/aihub/ksponspeech/KsponSpeech_scripts",
help='folder containing the transcript scripts provided by AIHub')
parser.add_argument(
"--del_silence", action="store_true",
help="remove silent segments from the audio"
)
return parser
def find_index(durations, limit):
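"""Return the first idx for which durations[:idx] sums to more than limit, or len(durations) if it never does."""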
for idx in range(len(durations)):
if sum(durations[:idx]) > limit:
return idx
return len(durations)
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
def load_yaml(yaml_path):
# Read YAML file
with open(yaml_path, 'r') as stream:
data_loaded = yaml.load(stream, Loader=yaml.FullLoader)
return data_loaded
def load_info(info_path):
if not os.path.isdir(info_path):
return {}
info_files = [filename for filename in os.listdir(info_path) if '.yaml' in filename]
info_data = {}
for filename in info_files:
file_path = os.path.join(info_path, filename)
temp_data = load_yaml(file_path)
info_data.update(temp_data)
return info_data
def save_converted_info(args, name, converted_info):
if len(converted_info) == 0:
return
yaml_dict = {k: v for k, v in sorted(converted_info.items(), key=lambda item: (len(item[0]), item[0]))}
with open(os.path.join(args.dest, '{}.yaml'.format(name)), 'w', encoding="utf-8") as write_f:
yaml.dump(yaml_dict, write_f, allow_unicode=True, default_style=None, default_flow_style=False)
def save_wrong_script(args, name, transcripts, fileinfo, raw_sentences, new_sentences):
## Save transcripts that still contain digits or Latin letters
## (checked grapheme by grapheme below)
reg = re.compile(r'[A-Z]')
yaml_dict = {}
for grapheme_transcript, fileitem, raw_sentence, new_sentence in zip(transcripts, fileinfo, raw_sentences,
new_sentences):
graphemes = grapheme_transcript.split()
file_num = Path(fileitem.split()[0]).stem.split("_")[1]
assert len(file_num) == 6
for grapheme in graphemes:
if grapheme.isdigit() or reg.match(grapheme):
yaml_dict[file_num] = str(raw_sentence.replace('\n', ''))
if len(yaml_dict) == 0:
return
## Sorting
yaml_dict = {k: v for k, v in sorted(yaml_dict.items(), key=lambda item: (len(item[0]), item[0]))}
with open(os.path.join(args.dest, '{}.yaml'.format(name)), 'w', encoding="utf-8") as write_f:
yaml.dump(yaml_dict, write_f, allow_unicode=True, default_style=None, default_flow_style=False)
def save_dict(args, transcripts, dict_name='dict.ltr.txt', alphabet_name='alphabet.txt'):
vocab_list = list()
vocab_freq = list()
for grapheme_transcript in transcripts:
graphemes = grapheme_transcript.split()
for grapheme in graphemes:
if grapheme not in vocab_list:
vocab_list.append(grapheme)
vocab_freq.append(1)
else:
vocab_freq[vocab_list.index(grapheme)] += 1
## write ltr
vocab_freq, vocab_list = zip(*sorted(zip(vocab_freq, vocab_list), reverse=True))
with open(os.path.join(args.dest, dict_name), 'w') as write_f:
for idx, (grpm, freq) in enumerate(zip(vocab_list, vocab_freq)):
print("{} {}".format(grpm, freq), file=write_f)
## Write Vocab files
with open(os.path.join(args.dest, alphabet_name), 'w', encoding='UTF8') as write_f:
print("# Each line in this file represents the Unicode codepoint (UTF-8 encoded)", file=write_f)
print("# associated with a numeric label.", file=write_f)
print("# A line that starts with # is a comment. You can escape it with \# if you wish", file=write_f)
print("# to use '#' as a label.", file=write_f)
for token in vocab_list:
print(token, file=write_f)
## final token must be \n
print('', file=write_f)
print("# The last (non-comment) line needs to end with a newline.", file=write_f, end='')
return
def save_lexicon(args, texts, lexicon_name='lexicon.lst'):
vocab_list = {}
for text in texts:
for word in text.split():
new_word = word + "|"
vocab_list[word] = " ".join(new_word)
## Write Vocab files
## Sorting
vocab_list = {k: v for k, v in sorted(vocab_list.items(), key=lambda item: item[0])}
with open(os.path.join(args.dest, lexicon_name), 'w', encoding='UTF8') as write_f:
for k, v in vocab_list.items():
print("{}\t{}".format(k,v), file=write_f)
return
def save_files(args, file_name, dir_path, fileinfo, texts, transcripts):
with open(os.path.join(args.dest, file_name + ".tsv"), 'w') as tsv_out, open(
os.path.join(args.dest, file_name + ".ltr"), "w"
) as ltr_out, open(
os.path.join(args.dest, file_name + ".wrd"), "w"
) as wrd_out:
print(dir_path, file=tsv_out)
for tsv_item, wrd_item, ltr_item in zip(fileinfo, texts, transcripts):
print(tsv_item, file=tsv_out)
print(wrd_item, file=wrd_out)
print(ltr_item + " |", file=ltr_out)
print("save files [{}]".format(file_name))
return
def pcm2wav(pcm_file, channels=1, bit_depth=16, sampling_rate=16000):
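"""Wrap a headerless .pcm file in a WAV container and return the path of the new .wav file."""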
wav_file = str(Path(pcm_file).with_suffix('.wav'))
# Check if the options are valid.
if bit_depth % 8 != 0:
raise ValueError("bit_depth " + str(bit_depth) + " must be a multiple of 8.")
# Read the .pcm file as a binary file and store the data to pcm_data
with open(pcm_file, 'rb') as opened_pcm_file:
pcm_data = opened_pcm_file.read()
with wave.open(wav_file, 'wb') as obj2write:
obj2write.setnchannels(channels)
obj2write.setsampwidth(bit_depth // 8)
obj2write.setframerate(sampling_rate)
obj2write.writeframes(pcm_data)
return wav_file
def load_script(args, script_path, info_data, token_limit=sys.maxsize):
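"""Parse one KsponSpeech .trn script: load each audio file, preprocess its transcript, and return the collected file info, durations, texts and transcripts."""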
assert os.path.isfile(script_path)
fileinfo = list()
durations = list()
texts = list()
audio_nums = list()
transcripts = list()
additional_texts = list()
additional_transcripts = list()
raw_sentences = list()
new_sentences = list()
converted_info = {}
reg = re.compile(r'.*[a-zA-Z0-9]')
limit_count = 0
remove_count = 0
with open(script_path, "r") as f:
for line in tqdm(f):
convert_flag = False
items = line.split(" :: ")
file_path = os.path.join(args.root, items[0])
file_path = os.path.realpath(file_path)
audio_num = str(Path(file_path).stem.split("_")[1])
raw_sentence = items[1]
if len(audio_num) ==6 and audio_num in info_data:
raw_sentence = info_data[audio_num]
convert_flag=True
## Check the audio file extension
if args.ext == 'pcm':
try:
wav = np.memmap(file_path, dtype='h', mode='r').astype('float32') / 32767
sr = 16000
except ValueError:
# print('pcm load failed, falling back to wav [{}]'.format(file_path))
file_path = pcm2wav(file_path)
wav, sr = librosa.load(file_path, sr=16000)
elif args.ext in ['flac', 'wav']:
wav, sr = librosa.load(file_path, sr=16000)
else:
raise ValueError("Unsupported extention method : {0}".format(args.ext))
if args.del_silence:
non_silence_indices = librosa.effects.split(wav, top_db=30)
wav = np.concatenate([wav[start:end] for start, end in non_silence_indices])
frames = len(wav)
if len(audio_num) ==6:
new_sentence = preprocess(raw_sentence=raw_sentence, mode=args.preprocess_mode, audio_num=audio_num)
else:
new_sentence = raw_sentence.replace('\n', '')
##################################
if len(new_sentence) > token_limit:
limit_count+=1
continue
if args.do_remove and reg.match(new_sentence) and args.preprocess_mode != 'spelling':
converted_info[audio_num] = new_sentence
remove_count += 1
continue
#################################
## Add other output units here as needed.
if args.output_unit == 'grapheme':
texts.append(unicodedata.normalize('NFKD', new_sentence).upper())
transcripts.append(" ".join(unicodedata.normalize('NFKD', new_sentence).replace(' ', '|')).upper())
elif args.output_unit == 'character':
texts.append(new_sentence.upper())
transcripts.append(" ".join(list(new_sentence.replace(' ', '|').upper())))
else:
raise ValueError("Unsupported preprocess method : {0}".format(args.output_unit))
## Add other additional output units here as needed.
if args.additional_output_unit is not None:
if args.additional_output_unit == 'grapheme':
additional_texts.append(unicodedata.normalize('NFKD', new_sentence).upper())
additional_transcripts.append(" ".join(unicodedata.normalize('NFKD', new_sentence).replace(' ', '|')).upper())
elif args.additional_output_unit == 'character':
additional_texts.append(new_sentence.upper())
additional_transcripts.append(" ".join(list(new_sentence.replace(' ', '|').upper())))
else:
raise ValueError("Unsupported preprocess method : {0}".format(args.output_unit))
if convert_flag:
converted_info[audio_num] = new_sentence
## Append to the output lists
fileinfo.append("{}\t{}".format(os.path.relpath(file_path, args.root), frames))
durations.append(frames)
audio_nums.append(audio_num)
raw_sentences.append(raw_sentence)
new_sentences.append(new_sentence)
print("total ignored: ", limit_count+remove_count)
print("ignored because of the length limit: ", limit_count)
print("ignored because of digits/Latin letters: ", remove_count)
return fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts
def main(args):
if not os.path.exists(args.dest):
os.makedirs(args.dest)
args.root = os.path.realpath(args.root)
## Folders expected under --dataset_path
#for folder in ['KsponSpeech_01','KsponSpeech_02','KsponSpeech_03','KsponSpeech_04','KsponSpeech_05','KsponSpeech_eval']:
# if folder not in os.listdir(args.root):
# assert os.path.isdir(folder), "this folder must exist under root [{}]".format(folder)
assert os.path.isdir(args.script_path), "please provide the script folder distributed by AIHub. script_path : [{}]".format(args.script_path)
## Load the transcript-correction info files
info_data = {}
if args.do_info:
## load the info files
info_data = load_info(args.info)
## Only .trn files are considered
file_list = [file for file in os.listdir(args.script_path) if Path(file).suffix == '.trn']
assert len(file_list) > 0, "no script files found in [{}]".format(args.script_path)
## Read the script.
script_name = 'train.trn'
if script_name in file_list:
print("generate [{}]".format(script_name))
fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts = load_script(args, os.path.join(args.script_path, script_name), info_data, token_limit=args.token_limit)
fileinfo = np.array(fileinfo)
durations = np.array(durations)
texts = np.array(texts)
transcripts = np.array(transcripts)
## for the additional output unit
additional_texts = np.array(additional_texts)
additional_transcripts = np.array(additional_transcripts)
## Build the lexicon
save_lexicon(args, texts, lexicon_name='lexicon.lst')
## Save the dictionary
save_dict(args, transcripts, dict_name='dict.ltr.txt', alphabet_name='alphabet.txt')
## Build files for the additional output unit
if args.additional_output_unit is not None:
## Build the lexicon
save_lexicon(args, additional_texts, lexicon_name='add_lexicon.lst')
## Save the dictionary
save_dict(args, additional_transcripts, dict_name='add_dict.ltr.txt', alphabet_name='add_alphabet.txt')
#save_wrong_script(args, 'train_wrong',transcripts, fileinfo, raw_sentences, new_sentences)
save_converted_info(args, 'train_converted', converted_info)
## Save the train split (optionally limited to a time budget)
train_ids = [idx for idx, num in enumerate(audio_nums)]
limit_idx = len(train_ids)
if args.time is not None:
random.shuffle(train_ids)
assert args.time in ['10min', '1hour', '10hour', '100hour'], '--time must be one of 10min, 1hour, 10hour, 100hour'
time_limit = 0
if args.time == '10min':
## 16000 Hz * 60 sec * 10 min
time_limit = 16000 * 60 * 10
if args.time == '1hour':
## 16000 Hz * 60 sec * 60 min * 1
time_limit = 16000 * 60 * 60 * 1
if args.time == '10hour':
## 16000 Hz * 60 sec * 60 min * 10
time_limit = 16000 * 60 * 60 * 10
if args.time == '100hour':
## 16000 Hz * 60 sec * 60 min * 100
time_limit = 16000 * 60 * 60 * 100
limit_idx = find_index(durations[train_ids], time_limit)
save_files(args, 'train', args.root, fileinfo[train_ids[:limit_idx]], texts[train_ids[:limit_idx]],
transcripts[train_ids[:limit_idx]])
## Build files for the additional output unit
if args.additional_output_unit is not None:
save_files(args, 'add_train', args.root, fileinfo[train_ids[:limit_idx]], additional_texts[train_ids[:limit_idx]],
additional_transcripts[train_ids[:limit_idx]])
## Read the script.
script_name = 'dev.trn'
if script_name in file_list:
print("generate [{}]".format(script_name))
fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts = load_script(args, os.path.join(args.script_path, script_name), info_data)
save_files(args, 'dev', args.root, fileinfo, texts, transcripts)
## Build files for the additional output unit
if args.additional_output_unit is not None:
save_files(args, 'add_dev', args.root, fileinfo, additional_texts, additional_transcripts)
#save_wrong_script(args, 'dev_wrong', transcripts, fileinfo, raw_sentences, new_sentences)
save_converted_info(args, 'dev_converted', converted_info)
## Read the script.
script_name = 'eval_other.trn'
if script_name in file_list:
print("generate [{}]".format(script_name))
fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts = load_script(args, os.path.join(args.script_path,
script_name), info_data)
save_files(args, 'eval_other', args.root, fileinfo, texts, transcripts)
## Build files for the additional output unit
if args.additional_output_unit is not None:
save_files(args, 'add_eval_other', args.root, fileinfo, additional_texts, additional_transcripts)
#save_wrong_script(args, 'eval_other_wrong', transcripts, fileinfo, raw_sentences, new_sentences)
save_converted_info(args, 'eval_other_converted', converted_info)
## Read the script.
script_name = 'eval_clean.trn'
if script_name in file_list:
print("generate [{}]".format(script_name))
fileinfo, durations, texts, audio_nums, transcripts, raw_sentences, new_sentences, converted_info, additional_texts, additional_transcripts = load_script(args, os.path.join(args.script_path,
script_name), info_data)
save_files(args, 'eval_clean', args.root, fileinfo, texts, transcripts)
## Build files for the additional output unit
if args.additional_output_unit is not None:
save_files(args, 'add_eval_clean', args.root, fileinfo, additional_texts, additional_transcripts)
#save_wrong_script(args, 'eval_clean_wrong', transcripts, fileinfo, raw_sentences, new_sentences)
save_converted_info(args, 'eval_clean_converted', converted_info)
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
def _print_config(config):
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(vars(config))
_print_config(args)
main(args)
|
492157 | from typing import List
from lib import Migrations, MigrationsConfig
from cli import MigrationsCli
__version__: str = '0.0.4'
__all__: List[str] = [
'Migrations',
'MigrationsConfig',
'MigrationsCli'
]
|
492189 | import copy
import random
from dataclasses import dataclass
from pathlib import Path
import pandas as pd
from joblib import dump, load
from tqdm import tqdm
from deepform.data.add_features import LABEL_COLS, pq_index_and_dir
from deepform.document import Document
from deepform.logger import logger
@dataclass(frozen=True)
class DocumentStore:
documents: list
def __len__(self):
return len(self.documents)
def __iter__(self):
for doc in self.documents:
yield doc
def __getitem__(self, n):
"""Return the pre-processed tokens for a specified document."""
return self.documents[n]
def random_document(self):
return random.choice(self.documents)
def sample(self, n=None):
if n is None:
n = len(self)
return DocumentStore(random.sample(self.documents, k=n))
def split(self, val_percent=0.2):
"""Divide into two DocumentStores, e.g. a training and a validation set."""
docs_copy = copy.deepcopy(self.documents)
random.shuffle(docs_copy)
split_index = int(val_percent * len(self))
return DocumentStore(docs_copy[:split_index]), DocumentStore(
docs_copy[split_index:]
)
@staticmethod
def open(index_file, config):
"""Load the documents referenced by `index_file` and apply `config`."""
index_file = Path(index_file)
doc_index = pd.read_parquet(index_file)
logger.info(f"{len(doc_index)} documents in index")
if not config.pad_windows:
# Filter out documents that are too short for the curent config.
doc_index = doc_index[doc_index["length"] >= config.window_len]
# Filter out documents that don't have a sufficiently high match.
# doc_index = doc_index[doc_index["best_match"] >= config.target_thresh]
logger.info(f"After applying config {len(doc_index)} documents are available")
# Sample down to no more than the requested number of documents.
num_docs = min(config.len_train, len(doc_index))
doc_index = doc_index.sample(n=num_docs)
# Load each of the documents, finishing any necessary feature computation.
slug_to_doc = caching_doc_getter(index_file, config)
# docs = concurrent.thread_map(slug_to_doc, doc_index["slug"])
labels = doc_index[LABEL_COLS.keys()]
docs = [
slug_to_doc(slug, labels.loc[slug])
for slug in tqdm(doc_index.index, desc="Creating docs")
]
docs = [d for d in docs if d != None] # noqa: E711
return DocumentStore(docs)
def caching_doc_getter(index_file, config):
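"""Return a slug_to_doc(slug, labels) function that builds Documents, optionally backed by an on-disk joblib cache."""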
_, pq_root = pq_index_and_dir(index_file)
if config.use_data_cache:
cache_root = pq_root.parent / "cache" / cache_master_key(config)
cache_root.mkdir(parents=True, exist_ok=True)
def slug_to_doc(slug, labels):
pq_path = pq_root / f"{slug}.parquet"
graph_path = pq_root / f"{slug}.graph"
if config.use_data_cache:
cache_path = cache_root / f"{slug}.joblib"
try:
with open(cache_path, "rb") as infile:
return load(infile)
except FileNotFoundError:
logger.debug(f"Cache file {cache_path} not found")
try:
doc = Document.from_parquet(slug, labels, pq_path, graph_path, config)
except AssertionError:
logger.warning(f"No correct answers for {slug}, skipping")
return None
if config.use_data_cache:
with open(cache_path, "wb") as outfile:
dump(doc, outfile)
logger.debug(f"Wrote document to cache file {cache_path}")
return doc
return slug_to_doc
def cache_master_key(config):
"""Create a string determined by any cache-invalidating config elements."""
return (
"str{use_string}_"
"vocab{vocab_size}_"
"pg{use_page}_"
"geom{use_geom}_"
"amt{use_amount}_"
"pad{pad_windows}_"
"len{window_len}"
).format(**config)
|
492207 | import gzip
import os
from typing import Optional, Dict
from kgx.cli.cli_utils import prepare_output_args, prepare_input_args # type: ignore
from kgx.transformer import Transformer # type: ignore
from kg_covid_19.transform_utils.transform import Transform
class GocamTransform(Transform):
"""
GocamTransform parses GO-CAMs that have been subjected to
RDF edge project (REP) pattern.
"""
def __init__(self, input_dir: str = None, output_dir: str = None):
source_name = "GOCAMs"
super().__init__(source_name, input_dir, output_dir)
def run(self, data_file: Optional[str] = None, **kwargs) -> None:
"""Method is called and performs needed transformations to process
an ontology.
Args:
data_file: data file to parse
Returns:
None.
"""
if not data_file:
data_file = os.path.join(self.input_base_dir, 'lifted-go-cams-20200619.nt')
if 'input_format' in kwargs:
input_format = kwargs['input_format']
if input_format not in {'nt', 'ttl', 'rdf/xml'}:
raise ValueError(f"Unsupported input_format: {input_format}")
else:
input_format = 'nt'
self.parse(data_file, input_format, compression=None)
def parse(self, data_file: str, input_format: str,
compression: Optional[str] = None) -> None:
"""Processes the data_file.
Args:
data_file: data file to parse
input_format: format of input file
compression: compression
Returns:
None
"""
print(f"Parsing {data_file}")
# define prefix to IRI mappings
cmap = {
'REACT': 'http://purl.obolibrary.org/obo/go/extensions/reacto.owl#REACTO_',
'WB': 'http://identifiers.org/wormbase/',
'FB': 'http://identifiers.org/flybase/',
'LEGO': 'http://geneontology.org/lego/',
'GOCAM': 'http://model.geneontology.org/',
'TAIR.LOCUS': 'http://identifiers.org/tair.locus/',
'POMBASE': 'http://identifiers.org/PomBase',
'DICTYBASE.GENE': 'http://identifiers.org/dictybase.gene/',
'XENBASE': 'http://identifiers.org/xenbase/'
}
# define predicates that are to be treated as node properties
np = {
'http://geneontology.org/lego/evidence',
'https://w3id.org/biolink/vocab/subjectActivity',
'https://w3id.org/biolink/vocab/objectActivity',
}
source: Dict = {
'input': {
'format': input_format,
'compression': compression,
'filename': data_file,
},
'output': {
'format': 'tsv',
'compression': None,
'filename': os.path.join(self.output_dir, self.source_name),
},
}
input_args = prepare_input_args(
key=self.source_name,
source=source,
output_directory=os.path.join(self.output_dir, self.source_name),
prefix_map=cmap,
node_property_predicates=np,
predicate_mappings=None
)
output_args = prepare_output_args(
key=self.source_name,
source=source,
output_directory=os.path.join(self.output_dir, self.source_name),
reverse_prefix_map=None,
reverse_predicate_mappings=None,
property_types=None,
)
transformer = Transformer(stream=False)
input_args['filename'] = [input_args['filename']]
transformer.transform(input_args, output_args)
|
492228 | from compiler.errors import TypedSyntaxError
from compiler.static.types import (
TYPED_INT8,
TYPED_INT16,
PRIM_OP_DIV_INT,
PRIM_OP_ADD_INT,
)
from .common import StaticTestBase
try:
import cinderjit
except ImportError:
cinderjit = None
class BinopTests(StaticTestBase):
def test_pow_of_int64s_returns_double(self):
codestr = """
from __static__ import int64
def foo():
x: int64 = 0
y: int64 = 1
z: int64 = x ** y
"""
with self.assertRaisesRegex(
TypedSyntaxError, "double cannot be assigned to int64"
):
self.compile(codestr, modname="foo")
def test_int_binop(self):
tests = [
("int8", 1, 2, "/", 0),
("int8", 4, 2, "/", 2),
("int8", 4, -2, "/", -2),
("uint8", 0xFF, 0x7F, "/", 2),
("int16", 4, -2, "/", -2),
("uint16", 0xFF, 0x7F, "/", 2),
("uint32", 0xFFFF, 0x7FFF, "/", 2),
("int32", 4, -2, "/", -2),
("uint32", 0xFF, 0x7F, "/", 2),
("uint32", 0xFFFFFFFF, 0x7FFFFFFF, "/", 2),
("int64", 4, -2, "/", -2),
("uint64", 0xFF, 0x7F, "/", 2),
("uint64", 0xFFFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFFFFF, "/", 2),
("int8", 1, -2, "-", 3),
("int8", 1, 2, "-", -1),
("int16", 1, -2, "-", 3),
("int16", 1, 2, "-", -1),
("int32", 1, -2, "-", 3),
("int32", 1, 2, "-", -1),
("int64", 1, -2, "-", 3),
("int64", 1, 2, "-", -1),
("int8", 1, -2, "*", -2),
("int8", 1, 2, "*", 2),
("int16", 1, -2, "*", -2),
("int16", 1, 2, "*", 2),
("int32", 1, -2, "*", -2),
("int32", 1, 2, "*", 2),
("int64", 1, -2, "*", -2),
("int64", 1, 2, "*", 2),
("int8", 1, -2, "&", 0),
("int8", 1, 3, "&", 1),
("int16", 1, 3, "&", 1),
("int16", 1, 3, "&", 1),
("int32", 1, 3, "&", 1),
("int32", 1, 3, "&", 1),
("int64", 1, 3, "&", 1),
("int64", 1, 3, "&", 1),
("int8", 1, 2, "|", 3),
("uint8", 1, 2, "|", 3),
("int16", 1, 2, "|", 3),
("uint16", 1, 2, "|", 3),
("int32", 1, 2, "|", 3),
("uint32", 1, 2, "|", 3),
("int64", 1, 2, "|", 3),
("uint64", 1, 2, "|", 3),
("int8", 1, 3, "^", 2),
("uint8", 1, 3, "^", 2),
("int16", 1, 3, "^", 2),
("uint16", 1, 3, "^", 2),
("int32", 1, 3, "^", 2),
("uint32", 1, 3, "^", 2),
("int64", 1, 3, "^", 2),
("uint64", 1, 3, "^", 2),
("int8", 1, 3, "%", 1),
("uint8", 1, 3, "%", 1),
("int16", 1, 3, "%", 1),
("uint16", 1, 3, "%", 1),
("int32", 1, 3, "%", 1),
("uint32", 1, 3, "%", 1),
("int64", 1, 3, "%", 1),
("uint64", 1, 3, "%", 1),
("int8", 1, -3, "%", 1),
("uint8", 1, 0xFF, "%", 1),
("int16", 1, -3, "%", 1),
("uint16", 1, 0xFFFF, "%", 1),
("int32", 1, -3, "%", 1),
("uint32", 1, 0xFFFFFFFF, "%", 1),
("int64", 1, -3, "%", 1),
("uint64", 1, 0xFFFFFFFFFFFFFFFF, "%", 1),
("int8", 1, 2, "<<", 4),
("uint8", 1, 2, "<<", 4),
("int16", 1, 2, "<<", 4),
("uint16", 1, 2, "<<", 4),
("int32", 1, 2, "<<", 4),
("uint32", 1, 2, "<<", 4),
("int64", 1, 2, "<<", 4),
("uint64", 1, 2, "<<", 4),
("int8", 4, 1, ">>", 2),
("int8", -1, 1, ">>", -1),
("uint8", 0xFF, 1, ">>", 127),
("int16", 4, 1, ">>", 2),
("int16", -1, 1, ">>", -1),
("uint16", 0xFFFF, 1, ">>", 32767),
("int32", 4, 1, ">>", 2),
("int32", -1, 1, ">>", -1),
("uint32", 0xFFFFFFFF, 1, ">>", 2147483647),
("int64", 4, 1, ">>", 2),
("int64", -1, 1, ">>", -1),
("uint64", 0xFFFFFFFFFFFFFFFF, 1, ">>", 9223372036854775807),
("int64", 2, 2, "**", 4.0, "double"),
("int16", -1, 1, "**", -1, "double"),
("int32", -1, 1, "**", -1, "double"),
("int64", -1, 1, "**", -1, "double"),
("int64", -2, -3, "**", -0.125, "double"),
("uint8", 0xFF, 2, "**", float(0xFF * 0xFF), "double"),
("uint16", 0xFFFF, 2, "**", float(0xFFFF * 0xFFFF), "double"),
("uint32", 0xFFFFFFFF, 2, "**", float(0xFFFFFFFF * 0xFFFFFFFF), "double"),
(
"uint64",
0xFFFFFFFFFFFFFFFF,
1,
"**",
float(0xFFFFFFFFFFFFFFFF),
"double",
),
]
for type, x, y, op, res, *output_type_option in tests:
if len(output_type_option) == 0:
output_type = type
else:
output_type = output_type_option[0]
codestr = f"""
from __static__ import {type}, box
from __static__ import {output_type}
def testfunc(tst):
x: {type} = {x}
y: {type} = {y}
if tst:
x = x + 1
y = y + 2
z: {output_type} = x {op} y
return box(z), box(x {op} y)
"""
with self.subTest(type=type, x=x, y=y, op=op, res=res):
with self.in_module(codestr) as mod:
f = mod.testfunc
self.assertEqual(
f(False), (res, res), f"{type} {x} {op} {y} {res} {output_type}"
)
def test_primitive_arithmetic(self):
cases = [
("int8", 127, "*", 1, 127),
("int8", -64, "*", 2, -128),
("int8", 0, "*", 4, 0),
("uint8", 51, "*", 5, 255),
("uint8", 5, "*", 0, 0),
("int16", 3123, "*", -10, -31230),
("int16", -32767, "*", -1, 32767),
("int16", -32768, "*", 1, -32768),
("int16", 3, "*", 0, 0),
("uint16", 65535, "*", 1, 65535),
("uint16", 0, "*", 4, 0),
("int32", (1 << 31) - 1, "*", 1, (1 << 31) - 1),
("int32", -(1 << 30), "*", 2, -(1 << 31)),
("int32", 0, "*", 1, 0),
("uint32", (1 << 32) - 1, "*", 1, (1 << 32) - 1),
("uint32", 0, "*", 4, 0),
("int64", (1 << 63) - 1, "*", 1, (1 << 63) - 1),
("int64", -(1 << 62), "*", 2, -(1 << 63)),
("int64", 0, "*", 1, 0),
("uint64", (1 << 64) - 1, "*", 1, (1 << 64) - 1),
("uint64", 0, "*", 4, 0),
("int8", 127, "//", 4, 31),
("int8", -128, "//", 4, -32),
("int8", 0, "//", 4, 0),
("uint8", 255, "//", 5, 51),
("uint8", 0, "//", 5, 0),
("int16", 32767, "//", -1000, -32),
("int16", -32768, "//", -1000, 32),
("int16", 0, "//", 4, 0),
("uint16", 65535, "//", 5, 13107),
("uint16", 0, "//", 4, 0),
("int32", (1 << 31) - 1, "//", (1 << 31) - 1, 1),
("int32", -(1 << 31), "//", 1, -(1 << 31)),
("int32", 0, "//", 1, 0),
("uint32", (1 << 32) - 1, "//", 500, 8589934),
("uint32", 0, "//", 4, 0),
("int64", (1 << 63) - 1, "//", 2, (1 << 62) - 1),
("int64", -(1 << 63), "//", 2, -(1 << 62)),
("int64", 0, "//", 1, 0),
("uint64", (1 << 64) - 1, "//", (1 << 64) - 1, 1),
("uint64", 0, "//", 4, 0),
("int8", 127, "%", 4, 3),
("int8", -128, "%", 4, 0),
("int8", 0, "%", 4, 0),
("uint8", 255, "%", 6, 3),
("uint8", 0, "%", 5, 0),
("int16", 32767, "%", -1000, 767),
("int16", -32768, "%", -1000, -768),
("int16", 0, "%", 4, 0),
("uint16", 65535, "%", 7, 1),
("uint16", 0, "%", 4, 0),
("int32", (1 << 31) - 1, "%", (1 << 31) - 1, 0),
("int32", -(1 << 31), "%", 1, 0),
("int32", 0, "%", 1, 0),
("uint32", (1 << 32) - 1, "%", 500, 295),
("uint32", 0, "%", 4, 0),
("int64", (1 << 63) - 1, "%", 2, 1),
("int64", -(1 << 63), "%", 2, 0),
("int64", 0, "%", 1, 0),
("uint64", (1 << 64) - 1, "%", (1 << 64) - 1, 0),
("uint64", 0, "%", 4, 0),
]
for typ, a, op, b, res in cases:
for const in ["noconst", "constfirst", "constsecond"]:
if const == "noconst":
codestr = f"""
from __static__ import {typ}
def f(a: {typ}, b: {typ}) -> {typ}:
return a {op} b
"""
elif const == "constfirst":
codestr = f"""
from __static__ import {typ}
def f(b: {typ}) -> {typ}:
return {a} {op} b
"""
elif const == "constsecond":
codestr = f"""
from __static__ import {typ}
def f(a: {typ}) -> {typ}:
return a {op} {b}
"""
with self.subTest(typ=typ, a=a, op=op, b=b, res=res, const=const):
with self.in_module(codestr) as mod:
f = mod.f
act = None
if const == "noconst":
act = f(a, b)
elif const == "constfirst":
act = f(b)
elif const == "constsecond":
act = f(a)
self.assertEqual(act, res)
def test_int_binop_type_context(self):
codestr = f"""
from __static__ import box, int8, int16
def f(x: int8, y: int8) -> int:
z: int16 = x * y
return box(z)
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertInBytecode(
f, "CONVERT_PRIMITIVE", TYPED_INT8 | (TYPED_INT16 << 4)
)
self.assertEqual(f(120, 120), 14400)
def test_mixed_binop(self):
with self.assertRaisesRegex(
TypedSyntaxError, "cannot add int64 and Literal\\[1\\]"
):
self.bind_module(
"""
from __static__ import ssize_t
def f():
x: ssize_t = 1
y = 1
x + y
"""
)
with self.assertRaisesRegex(
TypedSyntaxError, "cannot add Literal\\[1\\] and int64"
):
self.bind_module(
"""
from __static__ import ssize_t
def f():
x: ssize_t = 1
y = 1
y + x
"""
)
def test_mixed_binop_okay(self):
codestr = """
from __static__ import ssize_t, box
def f():
x: ssize_t = 1
y = x + 1
return box(y)
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertEqual(f(), 2)
def test_mixed_binop_okay_1(self):
codestr = """
from __static__ import ssize_t, box
def f():
x: ssize_t = 1
y = 1 + x
return box(y)
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertEqual(f(), 2)
def test_inferred_primitive_type(self):
codestr = """
from __static__ import ssize_t, box
def f():
x: ssize_t = 1
y = x
return box(y)
"""
with self.in_module(codestr) as mod:
f = mod.f
self.assertEqual(f(), 1)
def test_mixed_binop_sign(self):
"""mixed signed/unsigned ops should be promoted to signed"""
codestr = """
from __static__ import int8, uint8, box
def testfunc():
x: uint8 = 42
y: int8 = 2
return box(x / y)
"""
code = self.compile(codestr)
f = self.find_code(code)
self.assertInBytecode(f, "PRIMITIVE_BINARY_OP", PRIM_OP_DIV_INT)
with self.in_module(codestr) as mod:
f = mod.testfunc
self.assertEqual(f(), 21)
codestr = """
from __static__ import int8, uint8, box
def testfunc():
x: int8 = 42
y: uint8 = 2
return box(x / y)
"""
code = self.compile(codestr)
f = self.find_code(code)
self.assertInBytecode(f, "PRIMITIVE_BINARY_OP", PRIM_OP_DIV_INT)
with self.in_module(codestr) as mod:
f = mod.testfunc
self.assertEqual(f(), 21)
codestr = """
from __static__ import uint32, box
def testfunc():
x: uint32 = 2
a = box(x / -2)
return box(x ** -2)
"""
with self.in_module(codestr) as mod:
f = mod.testfunc
self.assertEqual(f(), 0.25)
codestr = """
from __static__ import int32, box
def testfunc():
x: int32 = 2
return box(x ** -2)
"""
with self.in_module(codestr) as mod:
f = mod.testfunc
self.assertEqual(f(), 0.25)
codestr = """
from __static__ import uint32, box
def testfunc():
x: uint32 = 2
return box(x ** -2)
"""
with self.in_module(codestr) as mod:
f = mod.testfunc
self.assertEqual(f(), 0.25)
codestr = """
from __static__ import int8, uint8, box
def testfunc():
x: int8 = 4
y: uint8 = 2
return box(x ** y)
"""
with self.assertRaisesRegex(TypedSyntaxError, "cannot pow int8 and uint8"):
self.compile(codestr)
codestr = """
from __static__ import int8, uint8, box
def testfunc():
x: uint8 = 2
y: int8 = -3
return box(x ** y)
"""
with self.assertRaisesRegex(TypedSyntaxError, "cannot pow uint8 and int8"):
self.compile(codestr)
codestr = """
from __static__ import uint8, box, double
def testfunc():
x: uint8 = 2
y: double = -3.0
return box(x ** y)
"""
with self.assertRaisesRegex(TypedSyntaxError, "cannot pow uint8 and double"):
self.compile(codestr)
def test_double_binop(self):
tests = [
(1.732, 2.0, "+", 3.732),
(1.732, 2.0, "-", -0.268),
(1.732, 2.0, "/", 0.866),
(1.732, 2.0, "*", 3.464),
(1.732, 2, "+", 3.732),
(2.5, 2, "**", 6.25),
(2.5, 2.5, "**", 9.882117688026186),
]
if cinderjit is not None:
# test for division by zero
tests.append((1.732, 0.0, "/", float("inf")))
for x, y, op, res in tests:
codestr = f"""
from __static__ import double, box
def testfunc(tst):
x: double = {x}
y: double = {y}
z: double = x {op} y
return box(z)
"""
            with self.subTest(x=x, y=y, op=op, res=res):
with self.in_module(codestr) as mod:
f = mod.testfunc
                    self.assertEqual(f(False), res, f"{x} {op} {y} {res}")
def test_double_binop_with_literal(self):
codestr = f"""
from __static__ import double, unbox
def f():
y: double = 1.2
y + 1.0
"""
f = self.run_code(codestr)["f"]
f()
def test_subclass_binop(self):
codestr = """
class C: pass
class D(C): pass
def f(x: C, y: D):
return x + y
"""
code = self.compile(codestr, modname="foo")
f = self.find_code(code, "f")
self.assertInBytecode(f, "BINARY_ADD")
def test_mixed_add_reversed(self):
codestr = """
from __static__ import int8, uint8, int64, box, int16
def testfunc(tst=False):
x: int8 = 42
y: int16 = 2
if tst:
x += 1
y += 1
return box(y + x)
"""
code = self.compile(codestr)
f = self.find_code(code)
self.assertInBytecode(f, "PRIMITIVE_BINARY_OP", PRIM_OP_ADD_INT)
with self.in_module(codestr) as mod:
f = mod.testfunc
self.assertEqual(f(), 44)
def test_mixed_tri_add(self):
codestr = """
from __static__ import int8, uint8, int64, box
def testfunc(tst=False):
x: uint8 = 42
y: int8 = 2
z: int64 = 3
if tst:
x += 1
y += 1
return box(x + y + z)
"""
code = self.compile(codestr)
f = self.find_code(code)
self.assertInBytecode(f, "PRIMITIVE_BINARY_OP", PRIM_OP_ADD_INT)
with self.in_module(codestr) as mod:
f = mod.testfunc
self.assertEqual(f(), 47)
def test_mixed_tri_add_unsigned(self):
"""promote int/uint to int, can't add to uint64"""
codestr = """
from __static__ import int8, uint8, uint64, box
def testfunc(tst=False):
x: uint8 = 42
y: int8 = 2
z: uint64 = 3
return box(x + y + z)
"""
with self.assertRaisesRegex(TypedSyntaxError, "cannot add int16 and uint64"):
self.compile(codestr)
def test_literal_int_binop_inferred_type(self):
"""primitive literal doesn't wrongly carry through arithmetic"""
for rev in [False, True]:
with self.subTest(rev=rev):
op = "1 + x" if rev else "x + 1"
codestr = f"""
from __static__ import int64
def f(x: int64):
reveal_type({op})
"""
self.type_error(codestr, "'int64'", f"reveal_type({op})")
def test_error_type_ctx_left_operand_mismatch(self):
codestr = f"""
from __static__ import int64
def f(k: int64):
l = [1, 2, 3]
# slices cannot be primitives, so this is invalid
l[:k + 1] = [0]
return l
"""
self.type_error(codestr, "int64 cannot be assigned to dynamic", f"k + 1")
|
492285 | from mayan.apps.events.classes import EventModelRegistry
from mayan.apps.testing.tests.base import BaseTestCase
from ..models import DownloadFile, SharedUploadedFile
from ..settings import (
setting_download_file_expiration_interval,
setting_shared_uploaded_file_expiration_interval
)
from .mixins import DownloadFileTestMixin, SharedUploadedFileTestMixin
class DownloadFileModelTestCase(DownloadFileTestMixin, BaseTestCase):
def test_download_file_expiration(self):
setting_download_file_expiration_interval.set(value=60)
self._create_test_download_file()
self.assertEqual(DownloadFile.objects.stale().count(), 0)
setting_download_file_expiration_interval.set(value=0)
self.assertEqual(DownloadFile.objects.stale().count(), 1)
def test_method_get_absolute_url_without_content_object(self):
self._create_test_download_file()
self.assertFalse(self.test_download_file.get_absolute_url())
def test_method_get_absolute_url_with_content_object(self):
self._create_test_object()
self.TestModel.add_to_class(
name='get_absolute_url', value=lambda self: 'test_value'
)
EventModelRegistry.register(model=self.TestModel)
self._create_test_download_file(content_object=self.test_object)
self.assertTrue(self.test_download_file.get_absolute_url())
class SharedUploadedFileManagerTestCase(
SharedUploadedFileTestMixin, BaseTestCase
):
def test_shared_uploaded_expiration(self):
setting_shared_uploaded_file_expiration_interval.set(value=60)
self._create_test_shared_uploaded_file()
self.assertEqual(SharedUploadedFile.objects.stale().count(), 0)
setting_shared_uploaded_file_expiration_interval.set(value=0)
self.assertEqual(SharedUploadedFile.objects.stale().count(), 1)
|
492300 | import unittest
from kafka.tools.protocol.requests import ArgumentError
from kafka.tools.protocol.requests.offset_commit_v2 import OffsetCommitV2Request
class OffsetCommitV2RequestTests(unittest.TestCase):
def test_process_arguments(self):
val = OffsetCommitV2Request.process_arguments(['groupname', '16', 'membername', '76', 'topicname', '4,2', 'nexttopic', '9,3'])
assert val == {'group_id': 'groupname',
'group_generation_id': 16,
'member_id': 'membername',
'retention_time': 76,
'topics': [{'topic': 'topicname', 'partitions': [{'partition': 4, 'offset': 2, 'metadata': None}]},
{'topic': 'nexttopic', 'partitions': [{'partition': 9, 'offset': 3, 'metadata': None}]}]}
def test_process_arguments_notenough(self):
self.assertRaises(ArgumentError, OffsetCommitV2Request.process_arguments, ['groupname', '16', 'membername', '76', 'topicname'])
def test_process_arguments_nonnumeric(self):
self.assertRaises(ArgumentError, OffsetCommitV2Request.process_arguments, ['groupname', 'foo', 'membername', '76', 'topicname', '4,2'])
self.assertRaises(ArgumentError, OffsetCommitV2Request.process_arguments, ['groupname', '16', 'membername', 'foo', 'topicname', '4,2'])
|
492306 | import asyncio
from aiokafka import AIOKafkaProducer, AIOKafkaConsumer
from aiokafka.helpers import create_ssl_context
from kafka.structs import TopicPartition
context = create_ssl_context(
cafile="./ca-cert", # CA used to sign certificate.
# `CARoot` of JKS store container
certfile="./cert-signed", # Signed certificate
keyfile="./cert-key", # Private Key file of `certfile` certificate
password="<PASSWORD>"
)
async def produce_and_consume():
# Produce
producer = AIOKafkaProducer(
bootstrap_servers='localhost:9093',
security_protocol="SSL", ssl_context=context)
await producer.start()
try:
msg = await producer.send_and_wait(
'my_topic', b"Super Message", partition=0)
finally:
await producer.stop()
consumer = AIOKafkaConsumer(
"my_topic", bootstrap_servers='localhost:9093',
security_protocol="SSL", ssl_context=context)
await consumer.start()
try:
consumer.seek(TopicPartition('my_topic', 0), msg.offset)
fetch_msg = await consumer.getone()
finally:
await consumer.stop()
print("Success", msg, fetch_msg)
if __name__ == "__main__":
asyncio.run(produce_and_consume())
|
492314 | from caresjpsutil import PythonLogger
# This is the module that writes the output file from a template.
# Approach: write the copied part first, then write the changeable part.
HEADER = '''
&ADMS_HEADER
Comment = "This is an ADMS parameter file"
Model = "ADMS"
Version = 5.2
FileVersion = 8
Complete = 1
/
'''
SUP = '''
&ADMS_PARAMETERS_SUP
SupSiteName = "terrain dispersion site"
SupProjectName = "chlorine leakage tank dispersion"
SupUseAddInput = {2}
SupAddInputPath = "{3}"
SupReleaseType = 0
SupModelBuildings = 1
SupModelComplexTerrain = {0}
SupModelCoastline = 0
SupPufType = 0
SupCalcChm = {1}
SupCalcDryDep = 0
SupCalcWetDep = {4}
SupCalcPlumeVisibility = 1
SupModelFluctuations = 0
SupModelRadioactivity = 0
SupModelOdours = 0
SupOdourUnits = "ou_e"
SupPaletteType = 1
SupUseTimeVaryingEmissions = 0
SupTimeVaryingEmissionsType = 0
SupTimeVaryingVARPath = " "
SupTimeVaryingFACPath = " "
SupTimeVaryingEmissionFactorsWeekday =
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
SupTimeVaryingEmissionFactorsSaturday =
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
SupTimeVaryingEmissionFactorsSunday =
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
1.0e+0 1.0e+0 1.0e+0 1.0e+0
/
'''
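# Note: the SUP placeholders are filled positionally by admsAplWriter.write() below as
# {0}=terrindicator, {1}=chemindicator, {2}=night, {3}=dirnight, {4}=wetindicator.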
MET = '''
&ADMS_PARAMETERS_MET
MetLatitude = {1}
MetDataSource = 0
MetDataFileWellFormedPath = "{0}"
MetWindHeight = 1.0e+1
MetWindInSectors = 0
MetWindSectorSizeDegrees = 1.0e+1
MetDataIsSequential = 0
MetUseSubset = 0
MetSubsetHourStart = 1
MetSubsetDayStart = 1
MetSubsetMonthStart = 1
MetSubsetYearStart = 2016
MetSubsetHourEnd = 0
MetSubsetDayEnd = 1
MetSubsetMonthEnd = 1
MetSubsetYearEnd = 2017
MetUseVerticalProfile = 0
MetVerticalProfilePath = " "
Met_DS_RoughnessMode = 1
Met_DS_Roughness = 1.0e+0
Met_DS_UseAdvancedMet = 0
Met_DS_SurfaceAlbedoMode = 1
Met_DS_SurfaceAlbedo = 2.3e-1
Met_DS_PriestlyTaylorMode = 1
Met_DS_PriestlyTaylor = 1.0e+0
Met_DS_MinLmoMode = 1
Met_DS_MinLmo = 3.45e+1
Met_DS_PrecipFactorMode = 1
Met_DS_PrecipFactor = 4.5e-1
Met_MS_RoughnessMode = 3
Met_MS_Roughness = 1.0e-1
Met_MS_UseAdvancedMet = 0
Met_MS_SurfaceAlbedoMode = 3
Met_MS_SurfaceAlbedo = 2.3e-1
Met_MS_PriestlyTaylorMode = 3
Met_MS_PriestlyTaylor = 1.0e+0
Met_MS_MinLmoMode = 3
Met_MS_MinLmo = 1.0e+0
MetHeatFluxType = 0
MetInclBoundaryLyrHt = 0
MetInclSurfaceTemp = 1
MetInclLateralSpread = 0
MetInclRelHumidity = 0
MetHandNumEntries = 1
MetWindSpeed =
3.06e+0
MetWindDirection =
6.0e+1
MetJulianDayNum =
2.47e+2
MetLocalTime =
5.0e+0
MetCloudAmount =
5.0e+0
MetSurfaceHeatFlux =
0.0e+0
MetBoundaryLayerHeight =
8.00e+2
MetSurfaceTemp =
2.8e+1
MetLateralSpread =
7.5e+0
MetYear =
2017
MetRelHumidity =
7.4e+1
/
'''
HIL = r'''
&ADMS_PARAMETERS_HIL
HilGridSize = 2
HilUseTerFile = 1
HilUseRoughFile = 0
HilTerrainPath = "C:\JPS_DATA\workingdir\JPS\ADMS\hkterrainlatestupdated.ter"
HilRoughPath = " "
HilCreateFlowField = 1
/
'''
FLC = '''
&ADMS_PARAMETERS_CST
CstPoint1X = 0.0e+0
CstPoint1Y = 0.0e+0
CstPoint2X = -1.000e+3
CstPoint2Y = 1.000e+3
CstLandPointX = 5.00e+2
CstLandPointY = 5.00e+2
/
&ADMS_PARAMETERS_FLC
FlcAvgTime = 9.00e+2
FlcUnitsPollutants = "ug/m3"
FlcUnitsIsotopes = "Bq/m3"
FlcCalcToxicResponse = 0
FlcToxicExp = 1.0e+0
FlcCalcPercentiles = 0
FlcNumPercentiles = 0
FlcCalcPDF = 0
FlcPDFMode = 0
FlcNumPDF = 0
/
'''
#todo: should retrieve it from the kb
GRD = '''
&ADMS_PARAMETERS_GRD
GrdType = 0
GrdCoordSysType = 0
GrdSpacingType = 0
GrdRegularMin =
{0} {1} 0.00e+0
1.0e+1 0.0e+0 0.0e+0
GrdRegularMax =
{2} {3} 3.00e+1
1.000e+3 3.30e+2 0.0e+0
GrdRegularNumPoints =
80 80 4
10 12 1
GrdVarSpaceNumPointsX = 0
GrdVarSpaceNumPointsY = 0
GrdVarSpaceNumPointsZ = 0
GrdVarSpaceNumPointsR = 0
GrdVarSpaceNumPointsTh = 0
GrdVarSpaceNumPointsZp = 0
GrdPtsNumPoints = 0 0
GrdPolarCentreX = 0.0e+0
GrdPolarCentreY = 0.0e+0
GrdPtsUsePointsFile = 1
GrdPtsPointsFilePath = " "
/
'''
PUFGAM = '''
&ADMS_PARAMETERS_PUF
PufStart = 1.00e+2
PufStep = 1.00e+2
PufNumSteps = 10
/
&ADMS_PARAMETERS_GAM
GamCalcDose = 0
/
'''
BKG = '''
&ADMS_PARAMETERS_BKG
BkgFilePath = "{0}"
BkgFixedLevels = 0
/
'''
ETC = '''
&ADMS_PARAMETERS_ETC
SrcNumSources = {0}
PolNumPollutants = 19
PolNumIsotopes = 0
/
'''
CHM = '''
&ADMS_PARAMETERS_CHM
ChmScheme = 2
/
'''
MAP = '''
&ADMS_MAPPERPROJECT
ProjectFilePath = " "
/
'''
POLD = '''
&ADMS_POLLUTANT_DETAILS
PolName = "CO2"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 0
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 5.47e-1
PolBkgLevel = 4.14e+5
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "NOx"
PolPollutantType = 0
PolGasDepVelocityKnown = 0
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 0
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 5.2e-1
PolBkgLevel = 6.0e+1
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "NO2"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 1.5e-3
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 0
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 5.2e-1
PolBkgLevel = 4.41e+1
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "NO"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 1.5e-3
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 8.0e-1
PolBkgLevel = 0.0e+0
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "O3"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 5.0e-1
PolBkgLevel = 6.899e+1
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "VOC"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 0
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 3.1e-1
PolBkgLevel = 0.0e+0
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "SO2"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 1.2e-2
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = {0}
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 3.7e-1
PolBkgLevel = 1.513e+1
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "PM10"
PolPollutantType = 1
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 0
PolParTermVelocityKnown = 0
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-5
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = {1}
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 1.0e+0
PolBkgLevel = 5.63e+1
PolBkgUnits = "ug/m3"
/
&ADMS_POLLUTANT_DETAILS
PolName = "PM2.5"
PolPollutantType = 1
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 0
PolParTermVelocityKnown = 0
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
2.5e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 0
PolWetWashout = 0.0e+0
PolWetWashoutA = 3.552e-1
PolWetWashoutB = 5.394e-1
PolConvFactor = 1.0e+0
PolBkgLevel = 8.0e+0
PolBkgUnits = "ug/m3"
/
&ADMS_POLLUTANT_DETAILS
PolName = "CO"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 0
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 8.6e-1
PolBkgLevel = 1.222e+3
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "BENZENE"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 3.1e-1
PolBkgLevel = 0.0e+0
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "BUTADIENE"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 1
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = 0.0e+0
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 4.5e-1
PolBkgLevel = 0.0e+0
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "HCl"
PolPollutantType = 0
PolGasDepVelocityKnown = 1
PolGasDepositionVelocity = 0.0e+0
PolGasType = 0
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 0
PolWetWashout = 0.0e+0
PolWetWashoutA = 3.0e-4
PolWetWashoutB = 6.6e-1
PolConvFactor = 6.589e-1
PolBkgLevel = 0.0e+0
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "Cl2"
PolPollutantType = 0
PolGasDepVelocityKnown = 0
PolGasDepositionVelocity = 5.0e+0
PolGasType = 0
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = 1.0e-4
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 3.5e-1
PolBkgLevel = 0.0e+0
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "CH3Cl"
PolPollutantType = 0
PolGasDepVelocityKnown = 0
PolGasDepositionVelocity = 0.0e+0
PolGasType = 0
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = 1.0e-4
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 4.922e-1
PolBkgLevel = 6.0e-1
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "ISOBUTYLENE"
PolPollutantType = 0
PolGasDepVelocityKnown = 0
PolGasDepositionVelocity = 0.0e+0
PolGasType = 0
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = 1.0e-4
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 4.43e-1
PolBkgLevel = 0.0e+0
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "NH3"
PolPollutantType = 0
PolGasDepVelocityKnown = 0
PolGasDepositionVelocity = 0.0e+0
PolGasType = 0
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 0
PolWetWashout = 1.0e-4
PolWetWashoutA = 5.0e-3
PolWetWashoutB = 6.4e-1
PolConvFactor = 1.462e+0
PolBkgLevel = 6.0e+0
PolBkgUnits = "ppb"
/
&ADMS_POLLUTANT_DETAILS
PolName = "HC"
PolPollutantType = 0
PolGasDepVelocityKnown = 0
PolGasDepositionVelocity = 0.0e+0
PolGasType = 0
PolParDepVelocityKnown = 1
PolParTermVelocityKnown = 1
PolParNumDepositionData = 1
PolParDepositionVelocity =
0.0e+0
PolParTerminalVelocity =
0.0e+0
PolParDiameter =
1.0e-6
PolParDensity =
1.000e+3
PolParMassFraction =
1.0e+0
PolWetWashoutKnown = 1
PolWetWashout = 1.0e-4
PolWetWashoutA = 1.0e-4
PolWetWashoutB = 6.4e-1
PolConvFactor = 0.802e+0
PolBkgLevel = 0.0e+0
PolBkgUnits = "ppb"
/
'''
class admsAplWriter(object):
def __init__(self, data, address):
self.data = data
self.address = address
#main function
def write(self):
pythonLogger = PythonLogger('admsAplWriter.py')
pythonLogger.postInfoToLogServer('writing to ' + self.address)
        #if the data contains a given key, execute the corresponding writing function
with open(self.address, 'w') as file:
self.writeStr(file, HEADER)
#self.writeStr(file,SUP)
self.writeTemp(file,SUP,[self.data['terrindicator'],self.data['chemindicator'],self.data['night'],self.data['dirnight'],self.data['wetindicator']])
self.writeTemp(file,MET,[self.data['Met'],self.data['Lat']])
self.writeBdn(file, self.data['Bdn'])
self.writeStr(file,HIL)
self.writeStr(file,FLC)
self.writeTemp(file,GRD,self.data['Grd'])
self.writeStr(file,PUFGAM)
self.writeOpt(file,self.data['Opt'])
self.writeStr(file,CHM)
self.writeTemp(file,BKG,[self.data['Bkg']])
self.writeStr(file,ETC.format(len(self.data['Src'])))
self.writeCoordSys(file, self.data['CoordiSys'])# a special case, to be unified in future
#self.writeCoordSys(file, self.data['CoordiSys'])# a special case, to be unified in future
self.writeStr(file,MAP)
#self.writeStr(file,POLD)
self.writeTemp(file,POLD,[self.data['so2washout'],self.data['pm10washout']])
#self.writeTemp(self.data['OPT'])
#del self.data['OPT']
self.writePol(file,self.data['Pol'])
self.writeSrc(file, self.data['Src'])
# for category,value in self.data.items():
# self.__getattribute__('write'+category)(file, value)
        #to be replaced in the future: avoid copying all templates verbatim
pythonLogger.postInfoToLogServer('finished writing to ' + self.address)
def writeAttr(self, file, obj):
for attr in [a for a in dir(obj) if not a.startswith('_') and a not in ['index', 'count'] ] :
#todo: if obj is a list
#print (attr)
attrV = getattr(obj, attr)
            #if it is a function (method), don't print it at all
if callable(attrV):
break
#format str with quote first
if isinstance(attrV, str):
attrV = '"{}"'.format(attrV)
if isinstance(attrV, (list, tuple)):
file.write(attr+'='+' '.join(['{}'.format('"'+i+'"' if isinstance(i, str) else i) for i in attrV])+'\n' )
else:
file.write('{0}= {1}\n'.format(attr, attrV))
def writeSrc(self, file, srcs):
print('writing src')
for src in srcs:
file.write('&ADMS_SOURCE_DETAILS\n')
self.writeAttr(file, src)
file.write('/\n')
def writePol(self, file, pol):
print('writing extra pol')
file.write('&ADMS_POLLUTANT_DETAILS\n')
self.writeAttr(file, pol)
file.write('/\n')
def quoteStr(self, item):
if isinstance(item, str):
return '\"{0}\"'.format(item)
return item
def writeBdn(self, file, bdn):
print('writing bdn')
file.write("&ADMS_PARAMETERS_BLD\n")
self.writeAttr(file, bdn)
file.write("/\n")
def writeOpt(self, file, bdn):
print('writing Opt')
file.write("&ADMS_PARAMETERS_OPT\n")
self.writeAttr(file, bdn)
file.write("/\n")
def writeCoordSys(self, file, csys = 28992):
self.writeTemp(file, '&ADMS_COORDINATESYSTEM\nProjectedEPSG = {0}\n', [csys] )
# self.writeTemp(file, '&ADMS_COORDINATESYSTEM\nProjectedEPSG = {0}\n', [28992] )
file.write("/\n")
def writeStr(self, file, ori):
###Todo
file.write(ori)
file.write('\n')
def writeTemp(self, file, temp, data):
print(data)
self.writeStr( file, temp.format(*data))
#a = admsAplWriter(data={'Grd':100, 'Bdn': 10}, address='test.apl')
#a.write()
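# Hypothetical usage sketch (not part of the original module). write() expects `data` to contain
# at least the keys 'terrindicator', 'chemindicator', 'night', 'dirnight', 'wetindicator', 'Met',
# 'Lat', 'Bdn', 'Grd', 'Opt', 'Bkg', 'Src', 'CoordiSys', 'so2washout', 'pm10washout' and 'Pol':
#
#     writer = admsAplWriter(data=adms_data, address='C:/path/to/output.apl')
#     writer.write()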
|
492333 | import inspect
import operator
from functools import reduce
from typing import Any, Optional
from rest_framework.fields import empty
from rest_framework.request import Request
from .param_settings import ParamSettings
from rest_typed.utils import inspect_complex_type
def get_nested_value(dic: dict, path: str, fallback=None) -> Any:
try:
return reduce(operator.getitem, path.split("."), dic)
except (TypeError, KeyError, ValueError):
return fallback
def get_default_value(param: inspect.Parameter) -> Any:
if (
not is_default_used_to_pass_settings(param)
and param.default is not inspect.Parameter.empty
):
return param.default
return empty
def is_default_used_to_pass_settings(param: inspect.Parameter) -> bool:
return get_explicit_param_settings(param) is not None
def get_explicit_param_settings(param: inspect.Parameter) -> Optional[ParamSettings]:
try:
        # The attribute access below only checks that the default is a ParamSettings instance;
        # an AttributeError means an ordinary default value was supplied instead.
        param_type = param.default.param_type
return param.default
except AttributeError:
return None
def is_implicit_body_param(param: inspect.Parameter) -> bool:
t = inspect_complex_type(param.annotation)
return t is not None
def is_explicit_request_param(param: inspect.Parameter) -> bool:
return param.annotation is Request
def is_implicit_request_param(param: inspect.Parameter) -> bool:
return param.name == "request" and param.annotation is inspect.Parameter.empty
def find_request(original_args: list) -> Request:
for arg in original_args:
if isinstance(arg, Request):
return arg
raise Exception("Could not find request in args:" + str(original_args))
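# Illustrative sketch (not part of the original module) of the dotted-path lookup performed by
# get_nested_value():
#
#     payload = {"user": {"address": {"city": "Oslo"}}}
#     get_nested_value(payload, "user.address.city")           # -> "Oslo"
#     get_nested_value(payload, "user.phone", fallback="?")    # -> "?"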
|
492352 | from llvm.core import Type
void = Type.void()
char = Type.int(8)
short = Type.int(16)
int = Type.int(32)
int16 = short
int32 = int
int64 = Type.int(64)
float = Type.float()
double = Type.double()
# platform dependent
def _determine_sizes():
import ctypes
    # Makes the following assumption:
# sizeof(py_ssize_t) == sizeof(ssize_t) == sizeof(size_t)
any_size_t = getattr(ctypes, 'c_ssize_t', ctypes.c_size_t)
return ctypes.sizeof(ctypes.c_void_p) * 8, ctypes.sizeof(any_size_t) * 8
pointer_size, _py_ssize_t_bits = _determine_sizes()
intp = {32: int32, 64: int64}[pointer_size]
npy_intp = Type.int(pointer_size)
py_ssize_t = Type.int(_py_ssize_t_bits)
# pointers
pointer = Type.pointer
void_p = pointer(char)
char_p = pointer(char)
npy_intp_p = pointer(npy_intp)
# vector
def vector(ty, ct):
    return Type.vector(ty, ct)
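# Illustrative sketch (not part of the original module): on a 64-bit platform the
# platform-dependent aliases above resolve to 64-bit types, e.g.
#
#     intp              # == int64
#     npy_intp_p        # pointer to the 64-bit npy_intp type
#     vector(float, 4)  # a <4 x float> LLVM vector type (here `float` is Type.float())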
|
492375 | from setuptools import setup, find_packages
import versioneer
DISTNAME = 'mynn'
DESCRIPTION = 'A pure-Python neural network library'
LICENSE = 'MIT'
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/davidmascharka/MyNN'
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Intended Audience :: Education",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering",
]
INSTALL_REQUIRES = ['numpy >= 1.13', 'mygrad >= 1.6']
TESTS_REQUIRE = ['pytest >= 3.8', 'hypothesis >= 4.6']
LONG_DESCRIPTION = """
MyNN is a simple NumPy-centric neural network library that builds on top of MyGrad. It provides
convenient wrappers for such functionality as
- Convenient neural network layers (e.g. convolutional, dense, batch normalization, dropout)
- Weight initialization functions (e.g. Glorot, He, uniform, normal)
- Neural network activation functions (e.g. elu, glu, tanh, sigmoid)
- Common loss functions (e.g. cross-entropy, KL-divergence, Huber loss)
- Optimization algorithms (e.g. sgd, adadelta, adam, rmsprop)
MyNN comes complete with several examples to ramp you up to being a fluent user of the library.
It was written as an extension to MyGrad for rapid prototyping of neural networks, with minimal dependencies,
a clean and well-documented codebase, and an emphasis on serving as a learning tool.
"""
if __name__ == '__main__':
setup(
name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license=LICENSE,
author=AUTHOR,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
        long_description_content_type="text/markdown",
url=URL,
python_requires=">=3.6",
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRE,
)
|
492376 | import numpy as np
import heapq
from operator import itemgetter
from copy import copy
class SDRRL(object):
def __init__(self, numState, numHidden, numAction, initMinWeight, initMaxWeight):
self._numState = numState
self._numHidden = numHidden
self._numAction = numAction
self._actionDelta = np.zeros((numAction, 1))
self._hiddenStates = np.zeros((numHidden, 1))
self._stimuli = np.zeros((numHidden, 1))
self._sparsities = np.zeros((numHidden, 1))
self._biases = np.zeros((numHidden, 1))
self._weightsFF = np.random.randn(numHidden, numState) * (initMaxWeight - initMinWeight) + initMinWeight
self._weightsQ = np.random.randn(1, numHidden) * (initMaxWeight - initMinWeight) + initMinWeight
self._tracesQ = np.zeros((1, numHidden))
self._weightsAction = np.random.randn(numAction, numHidden) * (initMaxWeight - initMinWeight) + initMinWeight
self._prevV = 0.0
self._alphaFF = 0.0
self._alphaAction = 0.1
self._alphaQ = 0.001
self._alphaBias = 0.0
self._gamma = 0.97
self._lambda = 0.92
self._activeRatio = 0.1
self._noise = 0.05
def simStep(self, reward, state):
numActive = int(self._activeRatio * self._numHidden)
self._stimuli = np.dot(self._weightsFF, state)
activations = self._stimuli - self._biases
# Generate tuples for sorting
heap = [(activations.item(0), 0)]
for i in range(1, self._numHidden):
heapq.heappush(heap, (activations.item(i), i))
# Use sorted information for inhibition
hiddenStatesPrev = copy(self._hiddenStates)
self._sparsities = np.zeros((self._numHidden, 1))
nLargest = heapq.nlargest(numActive, heap, key=itemgetter(0))
# Inhibition
for i in range(0, numActive):
self._sparsities[nLargest[i][1]] = 1.0
self._hiddenStates = np.multiply(self._sparsities, activations)
# Q
q = np.dot(self._weightsQ, self._hiddenStates).item(0)
# Action
action = np.tanh(np.dot(self._weightsAction, self._hiddenStates))
actionExp = copy(action)
for i in range(0, self._numAction):
if np.random.rand() < self._noise:
actionExp[i] = np.random.rand() * 2.0 - 1.0
#actionExp = np.minimum(1.0, np.maximum(-1.0, action + np.random.randn(self._numAction, 1) * self._noise))
# Reconstruction
recon = np.dot(self._weightsFF.T, self._hiddenStates)
delta = state - recon
# Update
self._weightsFF += self._alphaFF * np.dot(self._hiddenStates, delta.T)
tdError = reward + self._gamma * q - self._prevV
self._tracesQ = np.maximum(self._tracesQ * self._lambda, hiddenStatesPrev.T)
self._weightsQ += self._alphaQ * tdError * self._tracesQ
if tdError > 0.0:
self._weightsAction += self._alphaAction * np.dot(self._actionDelta, hiddenStatesPrev.T)
self._biases += self._alphaBias * (self._stimuli - self._biases)
self._prevV = q
self._actionDelta = actionExp - action
return actionExp |
492409 | import torch.nn as nn
import torch.nn.functional as F
from base.base_net import BaseNet
class CIFAR10_LeNet(BaseNet):
def __init__(self, rep_dim=256, bias_terms=False):
super().__init__()
self.rep_dim = rep_dim
self.pool = nn.MaxPool2d(2, 2)
# Encoder network
self.conv1 = nn.Conv2d(3, 32, 5, bias=bias_terms, padding=2)
nn.init.xavier_normal_(self.conv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=bias_terms)
self.conv2 = nn.Conv2d(32, 64, 5, bias=bias_terms, padding=2)
nn.init.xavier_normal_(self.conv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=bias_terms)
self.conv3 = nn.Conv2d(64, 128, 5, bias=bias_terms, padding=2)
nn.init.xavier_normal_(self.conv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=bias_terms)
self.fc1 = nn.Linear(128 * 4 * 4, 512, bias=bias_terms)
nn.init.xavier_normal_(self.fc1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn1d1 = nn.BatchNorm1d(512, eps=1e-04, affine=bias_terms)
self.fc2 = nn.Linear(512, self.rep_dim, bias=bias_terms)
nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x):
x = x.view(-1, 3, 32, 32)
x = self.conv1(x)
x = self.pool(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.leaky_relu(self.bn2d3(x)))
x = x.view(int(x.size(0)), -1)
x = self.fc1(x)
x = F.leaky_relu(self.bn1d1(x))
x = self.fc2(x)
return x
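# Hypothetical usage sketch (not part of the original module): a quick shape check of the encoder
# with a random CIFAR-10-sized batch; the batch size and rep_dim are arbitrary choices.
if __name__ == "__main__":
    import torch
    net = CIFAR10_LeNet(rep_dim=256)
    net.eval()  # eval mode so BatchNorm works for any batch size
    with torch.no_grad():
        out = net(torch.randn(8, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([8, 256])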
|
492417 | from .MultimodalManipulationDataset import MultimodalManipulationDataset
from .MultimodalManipulationDataset_robust import MultimodalManipulationDataset_robust
from .ProcessForce import ProcessForce
from .ToTensor import ToTensor
|
492464 | import gym
import torch
from load_policy import load_policy
from model import Agent
from train import BehavioralCloning, DAgger, Eval
class Config():
seed = 3
envname = 'Humanoid-v2'
env = gym.make(envname)
method = 'DA' # BC: Behavioral Cloning DA: DAgger
device = torch.device('cuda')
expert_path = './experts/'
model_save_path = './models/'
n_expert_rollouts = 30 # number of rollouts from expert
n_dagger_rollouts = 10 # number of new rollouts from learned model for a DAgger iteration
n_dagger_iter = 10 # number of DAgger iterations
n_eval_rollouts = 10 # number of rollouts for evaluating a policy
L2 = 0.00001
lr = 0.0001
epochs = 20
batch_size = 64
eval_steps = 500
def main():
config = Config()
print('*' * 20, config.envname, config.method, '*' * 20)
env = config.env
if config.seed:
env.seed(config.seed)
torch.manual_seed(config.seed)
agent = Agent(env.observation_space.shape[0], env.action_space.shape[0]).to(config.device)
expert = load_policy(config.expert_path + config.envname + '.pkl')
method = config.method
if method == 'BC':
agent = BehavioralCloning(config, agent, expert)
elif method == 'DA':
agent = DAgger(config, agent, expert)
else:
        raise NotImplementedError(method)
avrg_mean, avrg_std = Eval(config, expert)
print('[expert] avrg_mean:{:.2f} avrg_std:{:.2f}'.format(avrg_mean, avrg_std))
avrg_mean, avrg_std = Eval(config, agent)
print('[agent] avrg_mean:{:.2f} avrg_std:{:.2f}'.format(avrg_mean, avrg_std))
if __name__ == '__main__':
main()
|
492473 | from typing import Callable
class ForWithProgress:
def __init__(self, total: int, every_nth: int, run_both_on_every: bool, run_on_start: bool):
self._total = total
self._every_nth = every_nth
self._run_both_on_every = run_both_on_every
self._run_on_start = run_on_start
def every_nth(self, every_nth_action: Callable[[int], None], every_action: Callable[[int, bool], None])->None:
def must_run_nth(current: int) -> bool:
if current == 0 and self._run_on_start:
return True
if current == 0:
return False
return current % self._every_nth == 0
for i in range(self._total+1):
must = must_run_nth(i)
if must:
every_nth_action(i)
if must and not self._run_both_on_every:
continue
every_action(i, must) |
492475 | from django.contrib import admin
from . import models
@admin.register(models.Model)
class ModelAdmin(admin.ModelAdmin):
list_display = ("name",)
|
492500 | import numpy as np
from ba3l.ingredients.ingredient import Ingredient
# credit: https://github.com/iBelieveCJM/Tricks-of-Semi-supervisedDeepLeanring-Pytorch/blob/master/utils/ramps.py
def pseudo_rampup(T1, T2):
def warpper(epoch):
if epoch > T1:
alpha = (epoch - T1) / (T2 - T1)
if epoch > T2:
alpha = 1.0
else:
alpha = 0.0
return alpha
return warpper
def exp_rampup(rampup_length):
"""Exponential rampup from https://arxiv.org/abs/1610.02242"""
def warpper(epoch):
if epoch < rampup_length:
epoch = np.clip(epoch, 0.5, rampup_length)
phase = 1.0 - epoch / rampup_length
return float(np.exp(-5.0 * phase * phase))
else:
return 1.0
return warpper
def linear_rampup(rampup_length):
"""Linear rampup"""
def warpper(epoch):
if epoch < rampup_length:
return epoch / rampup_length
else:
return 1.0
return warpper
def linear_rampdown(rampdown_length, start=0, last_value=0):
    """Linear rampdown: stays at 1 until `start`, then decays linearly to `last_value` over `rampdown_length` epochs."""
def warpper(epoch):
if epoch <= start:
return 1.
elif epoch - start < rampdown_length:
return last_value + (1. - last_value) * (rampdown_length - epoch + start) / rampdown_length
else:
return last_value
return warpper
def exp_rampdown(rampdown_length, num_epochs):
"""Exponential rampdown from https://arxiv.org/abs/1610.02242"""
def warpper(epoch):
if epoch >= (num_epochs - rampdown_length):
ep = .5 * (epoch - (num_epochs - rampdown_length))
return float(np.exp(-(ep * ep) / rampdown_length))
else:
return 1.0
return warpper
def cosine_rampdown(rampdown_length, num_epochs):
"""Cosine rampdown from https://arxiv.org/abs/1608.03983"""
def warpper(epoch):
if epoch >= (num_epochs - rampdown_length):
ep = .5 * (epoch - (num_epochs - rampdown_length))
return float(.5 * (np.cos(np.pi * ep / rampdown_length) + 1))
else:
return 1.0
return warpper
def exp_warmup(rampup_length, rampdown_length, num_epochs):
rampup = exp_rampup(rampup_length)
rampdown = exp_rampdown(rampdown_length, num_epochs)
def warpper(epoch):
return rampup(epoch) * rampdown(epoch)
return warpper
def exp_warmup_linear_down(warmup, rampdown_length, start_rampdown, last_value):
rampup = exp_rampup(warmup)
rampdown = linear_rampdown(rampdown_length, start_rampdown, last_value)
def warpper(epoch):
return rampup(epoch) * rampdown(epoch)
return warpper
def test_warmup():
warmup = exp_warmup(20, 100, 150)
for ep in range(500):
print(warmup(ep))
def test_warmupl():
warmup = exp_warmup_linear_down(20, 100, 50, 0.001)
for ep in range(500):
print(warmup(ep))
def cosine_cycle(cycle_len=20,ramp_down_start=100,last_lr_value=0.01):
    """Cyclic cosine schedule (warm restarts), cf. https://arxiv.org/abs/1608.03983"""
ramp_down_start = cycle_len+ (ramp_down_start-1)//cycle_len*(cycle_len)
print("adjusted ramp_down_start:",ramp_down_start)
def warpper(epoch):
ep = (epoch+cycle_len//2.)/(1.*cycle_len)
if epoch>ramp_down_start:
return last_lr_value
return float(last_lr_value + (1.-last_lr_value)* .5 * (np.cos(2.*np.pi * ep) + 1))
return warpper
if __name__ == '__main__':
test= exp_warmup_linear_down(20, 100, 50, 150)
for i in range(250):
print(test(i)) |
492505 | import torch
from typing_extensions import TypedDict
class ConfigMap(TypedDict):
rank : int
local_rank : int
world_size : int
local_size : int
calc_stream : torch.cuda.Stream
load_stream : torch.cuda.Stream
load_event : torch.cuda.Event
barrier_stream : torch.cuda.Stream
loss_scale_factor : float
loss_scale_steps : int
gradient_inspect : bool
comm : 'NCCLCommunicator'
config = ConfigMap()
def rank():
"""
Returns the global rank of the current process. (0 ~ world_size-1)
"""
return config['rank']
def world_size():
"""
Returns the total number of workers across all nodes.
"""
return config['world_size'] |
492515 | def gather_writable_check_list(hook, check_list, **kwargs):
pass
def before_project_updated(hook, project_uploader, **kwargs):
pass
def after_project_updated(hook, project_uploader, **kwargs):
pass
def before_resource_group_updated(hook, resource_group_uploader, **kwargs):
pass
def after_resource_group_updated(hook, resource_group_uploader, **kwargs):
pass
def add_framework_version_update_writable_files(hook, from_version, to_version, writable_file_paths, **kwargs):
pass
def before_framework_version_updated(hook, from_version, to_version, **kwargs):
pass
def after_framework_version_updated(hook, from_version, to_version, **kwargs):
pass
|
492536 | import os
from numpy.testing import assert_allclose
from glue_geospatial.data_factory import is_geospatial, geospatial_reader
DATA = os.path.join(os.path.dirname(__file__), 'data')
def test_geospatial(tmpdir):
assert not is_geospatial(os.path.join(DATA, 'plain.tif'))
assert is_geospatial(os.path.join(DATA, 'simplegeo.tif'))
data = geospatial_reader(os.path.join(DATA, 'simplegeo.tif'))
assert data.shape == (18, 24)
assert_allclose(data.coords.pixel_to_world_values(9, 12),
(-3.9716666666666676, 2.9816666666666665))
|
492540 | from datetime import date
from gazette.spiders.base.imprensa_oficial import ImprensaOficialSpider
class BaMunizFerreiraSpider(ImprensaOficialSpider):
name = "ba_muniz_ferreira"
allowed_domains = ["pmmunizferreiraba.imprensaoficial.org"]
start_date = date(2014, 12, 1)
end_date = date(2021, 1, 19)
url_base = "http://pmmunizferreiraba.imprensaoficial.org/{}"
TERRITORY_ID = "2922201"
|
492593 | import argparse
import numpy as np
import os
import tensorflow as tf
import time
import pickle
import scipy.io
import skimage.transform
import matplotlib.pyplot as plt
import augmentation as augm
import evaluation
import tensorboard as tb
from datetime import datetime
np.set_printoptions(suppress=True, precision=4)
joint_names = np.array(['lsho', 'lelb', 'lwri', 'rsho', 'relb', 'rwri', 'lhip', 'rhip', 'nose', 'torso'])
# star like model with chain-like inference (just to test)
# joint_dependence = {'lsho': ['nose', 'lelb'], 'lelb': ['lsho', 'lwri'], 'lwri': ['lelb'],
# 'rsho': ['nose', 'relb'], 'relb': ['rsho', 'rwri'], 'rwri': ['relb'],
# 'lhip': ['nose'], 'rhip': ['nose'], 'nose': ['lsho', 'rsho', 'lhip', 'rhip']}
joint_dependence = {} # Fully-connected PGM
for joint in joint_names:
joint_dependence[joint] = [joint_cond for joint_cond in joint_names if joint_cond != joint]
def model(x, n_joints):
"""
    A computational graph for the part detector (CNN). Note that the heat maps are 8 times smaller than the original
    image (due to a stride-2 convolution and 2 max pooling layers). However, even such aggressive downsampling
    preserves satisfactory localization and significantly reduces the computational cost.
:param x: full resolution image hps.batch_size x 480 x 720 x 3
:return: predicted heat map hps.batch_size x 60 x 90 x n_joints.
"""
# First convolutional layer - maps one grayscale image to 32 feature maps.
n_filters = np.array([64, 128, 256, 512, 512])
# n_filters = np.array([128, 128, 128, 512, 512])
if hps.debug:
n_filters = n_filters // 4
x1 = x
x1 = conv_layer(x1, 5, 2, 3, n_filters[0], 'conv1_fullres') # result: 360x240
x1 = max_pool_layer(x1, 2, 2) # result: 180x120
x1 = conv_layer(x1, 5, 1, n_filters[0], n_filters[1], 'conv2_fullres') # result: 180x120
    x1 = max_pool_layer(x1, 2, 2)  # result: 90x60
x1 = conv_layer(x1, 5, 1, n_filters[1], n_filters[2], 'conv3_fullres') # result: 90x60
x1 = conv_layer(x1, 9, 1, n_filters[2], n_filters[3], 'conv4_fullres') # result: 90x60
x2 = tf.image.resize_images(x, [int(x.shape[1]) // 2, int(x.shape[2]) // 2])
x2 = conv_layer(x2, 5, 2, 3, n_filters[0], 'conv1_halfres') # result: 180x120
x2 = max_pool_layer(x2, 2, 2) # result: 90x60
x2 = conv_layer(x2, 5, 1, n_filters[0], n_filters[1], 'conv2_halfres') # result: 90x60
x2 = max_pool_layer(x2, 2, 2) # result: 45x30
x2 = conv_layer(x2, 5, 1, n_filters[1], n_filters[2], 'conv3_halfres') # result: 45x30
x2 = conv_layer(x2, 9, 1, n_filters[2], n_filters[3], 'conv4_halfres') # result: 45x30
x2 = tf.image.resize_images(x2, [int(x1.shape[1]), int(x1.shape[2])])
x3 = tf.image.resize_images(x, [int(x.shape[1]) // 4, int(x.shape[2]) // 4])
x3 = conv_layer(x3, 5, 2, 3, n_filters[0], 'conv1_quarterres') # result: 90x60
x3 = max_pool_layer(x3, 2, 2) # result: 45x30
x3 = conv_layer(x3, 5, 1, n_filters[0], n_filters[1], 'conv2_quarterres') # result: 45x30
x3 = max_pool_layer(x3, 2, 2) # result: 23x15
x3 = conv_layer(x3, 5, 1, n_filters[1], n_filters[2], 'conv3_quarterres') # result: 23x15
x3 = conv_layer(x3, 9, 1, n_filters[2], n_filters[3], 'conv4_quarterres') # result: 23x15
x3 = tf.image.resize_images(x3, [int(x1.shape[1]), int(x1.shape[2])])
x = x1 + x2 + x3
x /= 3 # to compensate for summing up - should improve the convergence
x = conv_layer(x, 9, 1, n_filters[3], n_filters[4], 'conv5') # result: 90x60
x = conv_layer(x, 9, 1, n_filters[4], n_joints, 'conv6', last_layer=True) # result: 90x60
return x
def conv_mrf(A, B):
"""
:param A: conv kernel 1 x 120 x 180 x 1 (prior)
:param B: input heatmaps: hps.batch_size x 60 x 90 x 1 (likelihood)
:return: C is hps.batch_size x 60 x 90 x 1
"""
B = tf.transpose(B, [1, 2, 3, 0])
B = tf.reverse(B, axis=[0, 1]) # [h, w, 1, b], we flip kernel to get convolution, and not cross-correlation
# conv between 1 x 120 x 180 x 1 and 60 x 90 x 1 x ? => 1 x 61 x 91 x ?
C = tf.nn.conv2d(A, B, strides=[1, 1, 1, 1], padding='VALID') # 1 x 61 x 91 x ?
# C = C[:, :hm_height, :hm_width, :] # 1 x 60 x 90 x ?
C = tf.image.resize_images(C, [hm_height, hm_width])
C = tf.transpose(C, [3, 1, 2, 0])
return C
def spatial_model(heat_map):
"""
Implementation of the spatial model in log space (given by Eq. 2 in the original paper).
:param heat_map: is produced by model as the unary distributions: hps.batch_size x 60 x 90 x n_joints
"""
def relu_pos(x, eps=0.00001):
"""
It is described in the paper, but we decided not to use it. Instead we apply softplus everywhere.
"""
return tf.maximum(x, eps)
def softplus(x):
softplus_alpha = 5
return 1 / softplus_alpha * tf.nn.softplus(softplus_alpha * x)
delta = 10 ** -6 # for numerical stability
heat_map_hat = []
with tf.variable_scope('bn_sm'):
heat_map = tf.contrib.layers.batch_norm(heat_map, decay=0.9, center=True, scale=True, is_training=flag_train)
for joint_id, joint_name in enumerate(joint_names[:n_joints]):
with tf.variable_scope(joint_name):
hm = heat_map[:, :, :, joint_id:joint_id + 1]
marginal_energy = tf.log(softplus(hm) + delta) # heat_map: batch_size x 90 x 60 x 1
for cond_joint in joint_dependence[joint_name]:
cond_joint_id = np.where(joint_names == cond_joint)[0][0]
prior = softplus(pairwise_energies[joint_name + '_' + cond_joint])
likelihood = softplus(heat_map[:, :, :, cond_joint_id:cond_joint_id + 1])
bias = softplus(pairwise_biases[joint_name + '_' + cond_joint])
marginal_energy += tf.log(conv_mrf(prior, likelihood) + bias + delta)
heat_map_hat.append(marginal_energy)
return tf.stack(heat_map_hat, axis=3)[:, :, :, :, 0]
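# Reference note: per joint j, spatial_model() computes
#     log(softplus(h_j) + delta) + sum_{k != j} log(conv_mrf(softplus(prior_jk), softplus(h_k)) + softplus(bias_jk) + delta)
# i.e. the log-space form of Eq. 2 from the paper mentioned in the docstring, with softplus used
# everywhere in place of the epsilon-ReLU (relu_pos) described there.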
def batch_norm(x):
x = tf.contrib.layers.batch_norm(x, decay=0.9, center=True, scale=True, is_training=flag_train, trainable=train_pd)
return x
def conv2d(x, W, stride):
    """Returns the 2d convolution of x with W using the given stride."""
return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')
def weight_variable(shape, fc=False):
"""weight_variable generates a weight variable of a given shape. Uses He initialization."""
if not fc:
n_in = shape[0] * shape[1] * shape[2]
n_out = shape[0] * shape[1] * shape[3]
else:
n_in = shape[0]
n_out = shape[1]
initial = tf.truncated_normal(shape, stddev=np.sqrt(2.0 / n_in))
return tf.get_variable('weights', initializer=initial, trainable=train_pd)
def bias_variable(shape, init=0.0, name='biases'):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(init, shape=shape)
return tf.get_variable(name, initializer=initial, trainable=train_pd)
def conv_layer(x, size, stride, n_in, n_out, name, last_layer=False):
with tf.variable_scope(name):
w = weight_variable([size, size, n_in, n_out])
b = bias_variable([n_out])
pre_activ = conv2d(x, w, stride) + b
if not last_layer:
activ = tf.nn.relu(pre_activ)
activ = batch_norm(activ)
else:
activ = pre_activ
# we do it out of the namescope to show it separately in Tensorboard
tb.var_summary(pre_activ, name)
tf.summary.image('f_activ_' + name, activ[:, :, :, 7:8], 3)
return activ
def max_pool_layer(x, size, stride):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, size, size, 1], strides=[1, stride, stride, 1], padding='SAME')
def fully_connected(x, n_in, n_out, name):
with tf.name_scope(name):
w = weight_variable([n_in, n_out], fc=True)
b = bias_variable([n_out])
return tf.matmul(x, w) + b
def get_next_batch(X, Y, batch_size, shuffle=False):
n_batches = len(X) // batch_size
if shuffle:
        x_idx = np.random.permutation(len(X))[:n_batches * batch_size]
    else:
        x_idx = np.arange(len(X))[:n_batches * batch_size]
    for batch_idx in x_idx.reshape([n_batches, batch_size]):
batch_x, batch_y = X[batch_idx], Y[batch_idx]
yield batch_x, batch_y
def weight_decay(var_pattern):
"""
L2 weight decay loss, based on all weights that have var_pattern in their name
var_pattern - a substring of a name of weights variables that we want to use in Weight Decay.
"""
costs = []
for var in tf.global_variables():
if var.op.name.find(var_pattern) != -1:
costs.append(tf.nn.l2_loss(var))
return tf.add_n(costs)
def get_var_by_name(var_name_to_find):
return [v for v in tf.trainable_variables() if v.name == var_name_to_find][0]
def spatial_softmax(hm):
hm_height, hm_width, n_joints = int(hm.shape[1]), int(hm.shape[2]), int(hm.shape[3])
hm = tf.reshape(hm, [-1, hm_height * hm_width, n_joints])
hm = tf.nn.softmax(logits=hm, dim=1)
hm = tf.reshape(hm, [-1, hm_height, hm_width, n_joints])
return hm
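# Illustrative numpy sketch (an assumption, not part of the original code): spatial_softmax
# normalises each joint's heat map so it sums to 1 over the spatial dimensions.
def _spatial_softmax_np(hm):
    """Hypothetical numpy analogue of spatial_softmax for an array of shape [N, H, W, J]."""
    n, h, w, j = hm.shape
    flat = hm.reshape(n, h * w, j)
    flat = np.exp(flat - flat.max(axis=1, keepdims=True))  # subtract max for numerical stability
    flat /= flat.sum(axis=1, keepdims=True)
    return flat.reshape(n, h, w, j)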
def softmax_cross_entropy(hm1, hm2):
"""
Softmax applied over 2 spatial dimensions (for this we do reshape) followed by cross-entropy.
hm1, hm2: tensor of size [n_images, height, width, n_joints]
"""
# MSE: tf.reduce_mean((hm1 - hm2) ** 2) * hm_height * hm_width * n_joints
hm_height, hm_width, n_joints = int(hm1.shape[1]), int(hm1.shape[2]), int(hm1.shape[3])
hm1 = tf.reshape(hm1, [-1, hm_height*hm_width, n_joints])
hm2 = tf.reshape(hm2, [-1, hm_height*hm_width, n_joints])
# Element-wise sigmoid with binary cross-entropy on top of them:
# loss_list = []
# for i in range(n_joints):
# loss_i = tf.nn.sigmoid_cross_entropy_with_logits(logits=hm1[:, :, i], labels=hm2[:, :, i])
# loss_list.append(loss_i)
# loss = tf.stack(loss_list, axis=1)
# Our choice: softmax applied over 2 spatial dimensions followed by cross-entropy
loss = tf.nn.softmax_cross_entropy_with_logits(logits=hm1, labels=hm2, dim=1)
return tf.reduce_mean(loss)
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
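# Illustrative sketch (assumption): with 2 towers and 2 variables, tower_grads looks like
# [[(g0_t0, v0), (g1_t0, v1)], [(g0_t1, v0), (g1_t1, v1)]]; zip(*tower_grads) regroups the
# pairs per variable so each gradient can be averaged over the tower dimension.
def _demo_tower_grad_grouping():
    """Hypothetical pure-Python illustration of the regrouping done in average_gradients."""
    tower_grads = [[('g0_t0', 'v0'), ('g1_t0', 'v1')],
                   [('g0_t1', 'v0'), ('g1_t1', 'v1')]]
    return list(zip(*tower_grads))  # one tuple of (grad, var) pairs per shared variable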
def avg_tensor_list(tensor_list):
tensors = tf.stack(axis=0, values=tensor_list)
return tf.reduce_mean(tensors, axis=0)
def eval_error(X_np, Y_np, sess, batch_size):
"""Get all predictions for a dataset by running it in small batches."""
    n_batches = len(X_np) // batch_size
mse_pd_val, mse_sm_val, det_rate_pd_val, det_rate_sm_val = 0.0, 0.0, 0.0, 0.0
    for batch_x, batch_y in get_next_batch(X_np, Y_np, batch_size):
v1, v2, v3, v4 = sess.run([loss_pd, loss_sm, det_rate_pd, det_rate_sm], feed_dict={x_in: batch_x, y_in: batch_y, flag_train: False})
mse_pd_val, mse_sm_val = mse_pd_val + v1, mse_sm_val + v2
det_rate_pd_val, det_rate_sm_val = det_rate_pd_val + v3, det_rate_sm_val + v4
return mse_pd_val / n_batches, mse_sm_val / n_batches, det_rate_pd_val / n_batches, det_rate_sm_val / n_batches
def get_dataset():
"""
Note, that in order to have these files you need to run `data.py` first.
"""
x_train = np.load('x_train_flic.npy')
x_test = np.load('x_test_flic.npy')
y_train = np.load('y_train_flic.npy')
y_test = np.load('y_test_flic.npy')
return x_train, y_train, x_test, y_test
def get_pairwise_distr():
with open('pairwise_distribution.pickle', 'rb') as handle:
return pickle.load(handle)
def grad_renorm(gs_vs, norm):
"""
It is useful to stabilize the training, especially with high amount of weight decay.
"""
grads, vars = zip(*gs_vs)
grads, _ = tf.clip_by_global_norm(grads, norm)
gs_vs = zip(grads, vars)
return gs_vs
def get_gpu_memory(gpus):
"""
Small heuristic to calculate the amount of memory needed for each GPU in case of multi-gpu training.
"""
if len(gpus) >= 5:
return 0.4
elif len(gpus) >= 3:
return 0.5
elif len(gpus) == 2:
return 0.6
else:
return 0.6
def get_different_scales(x, pad_array, crop_array, orig_h, orig_w):
x_new = []
for pad_c in pad_array:
n_pad_h = round(orig_h * (pad_c - 1) / 2)
n_pad_w = round(orig_w * (pad_c - 1) / 2)
x_pad = np.lib.pad(x, ((n_pad_h, n_pad_h), (n_pad_w, n_pad_w), (0, 0)), 'constant', constant_values=0)
x_orig_size = skimage.transform.resize(x_pad, (orig_h, orig_w))
x_new.append(x_orig_size)
for crop_c in crop_array:
h1 = round((1-crop_c)/2*orig_h)
h2 = h1 + round(crop_c*orig_h)
w1 = round((1-crop_c)/2*orig_w)
w2 = w1 + round(crop_c*orig_w)
x_crop = x[h1:h2, w1:w2]
x_orig_size = skimage.transform.resize(x_crop, (orig_h, orig_w))
x_new.append(x_orig_size)
# for i in range(9):
# plt.figure(i)
# plt.imshow(x_new[i])
# plt.savefig('img/img_'+str(i)+'.png', dpi=300)
# plt.clf()
return np.array(x_new)
def scale_hm_back(hms, pad_array, crop_array, orig_h, orig_w):
hms_new = []
for i, crop_c in enumerate(pad_array):
crop_c = 1 / crop_c
h1 = round((1-crop_c)/2*orig_h)
h2 = h1 + round(crop_c*orig_h)
w1 = round((1-crop_c)/2*orig_w)
w2 = w1 + round(crop_c*orig_w)
hm_crop = hms[i][h1:h2, w1:w2]
hm_orig_size = skimage.transform.resize(hm_crop, (orig_h, orig_w))
hms_new.append(hm_orig_size)
for i, pad_c in enumerate(crop_array):
pad_c = 1 / pad_c
n_pad_h = round(orig_h * (pad_c - 1) / 2)
n_pad_w = round(orig_w * (pad_c - 1) / 2)
hm_pad = np.lib.pad(hms[i+len(pad_array)], ((n_pad_h, n_pad_h), (n_pad_w, n_pad_w), (0, 0)), 'constant', constant_values=0)
hm_orig_size = skimage.transform.resize(hm_pad, (orig_h, orig_w))
hms_new.append(hm_orig_size)
# for i in range(9):
# plt.figure(i)
# plt.imshow(x_new[i][:, :, 8])
# plt.savefig('img/img_'+str(i)+'_processed.png', dpi=300)
# plt.clf()
# plt.imshow(hms[i][:, :, 8])
# plt.savefig('img/img_'+str(i)+'_orig.png', dpi=300)
# plt.clf()
return np.array(hms_new)
def get_predictions(X_np, Y_np, sess):
""" Get all predictions for a dataset by running it in small batches.
We use a multi-scale evaluation procedure proposed in "Learning human pose estimation features with
convolutional networks". We strongly suspect that this procedure was used in the original paper. However,
they do not report it.
Without this procedure it's impossible to reproduce their part detector.
"""
def argmax_hm(hm):
hm = np.squeeze(hm)
hm = np.reshape(hm, [hm_height * hm_width, n_joints])
coords_raw = np.argmax(hm, axis=0) # [n_images, n_joints]
# Now we obtain real spatial coordinates for each image and for each joint
coords_x = coords_raw // hm_width
coords_y = coords_raw - coords_x * hm_width
coords_xy = np.stack([coords_x, coords_y], axis=0)
return coords_xy
n = 1100
X_np, Y_np = X_np[:n], Y_np[:n]
drs_pd, drs_sm, pred_coords_pd, pred_coords_sm = [], [], [], []
pad_array, crop_array = [1.1, 1.2, 1.3, 1.4], [0.7, 0.8, 0.9, 1.0]
for x_np, y_np in zip(X_np, Y_np):
x_np_diff_scales = get_different_scales(x_np, pad_array, crop_array, in_height, in_width)
y_np = np.repeat(np.expand_dims(y_np, 0), x_np_diff_scales.shape[0], axis=0)
hm_pd_np, hm_sm_np = sess.run([hm_pred_pd, hm_pred_sm], feed_dict={x_in: x_np_diff_scales, y_in: y_np, flag_train: False})
hm_pd_np = scale_hm_back(hm_pd_np, pad_array, crop_array, hm_height, hm_width)
hm_sm_np = scale_hm_back(hm_sm_np, pad_array, crop_array, hm_height, hm_width)
# argmax over 1st dimension to get the most confident prediction for each joint
# hm_pd_np = np.max(hm_pd_np, axis=0, keepdims=True)
# hm_sm_np = np.max(hm_sm_np, axis=0, keepdims=True)
hm_pd_np = np.expand_dims(np.average(hm_pd_np, axis=0), 0)
hm_sm_np = np.expand_dims(np.average(hm_sm_np, axis=0), 0)
pred_coords_pd.append(argmax_hm(hm_pd_np))
pred_coords_sm.append(argmax_hm(hm_sm_np))
# input aggregated hm and get det_rate
dr_pd, dr_sm = sess.run([wrist_det_rate10_pd, wrist_det_rate10_sm],
feed_dict={hm_pred_pd: hm_pd_np, hm_pred_sm: hm_sm_np, y_in: y_np, flag_train: False})
drs_pd.append(dr_pd)
drs_sm.append(dr_sm)
print('test_dr: {} {}'.format(np.average(drs_pd), np.average(drs_sm)))
return np.stack(pred_coords_pd, axis=2), np.stack(pred_coords_sm, axis=2)
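# Illustrative sketch (assumption): the multi-scale procedure above predicts heat maps for
# several padded/cropped copies of the input, maps them back to the original resolution and
# averages them; a pure-numpy analogue of the aggregation step is:
def _aggregate_multiscale_hms(hms_per_scale):
    """Hypothetical helper: average a [n_scales, H, W, J] stack into a single [1, H, W, J] map."""
    return np.expand_dims(np.average(hms_per_scale, axis=0), 0)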
parser = argparse.ArgumentParser(description='Define hyperparameters.')
parser.add_argument('--debug', action='store_true', help='True if we want to debug.')
parser.add_argument('--train', action='store_true', help='True if we want to train the model.')
parser.add_argument('--gpus', nargs='+', type=int, default=[6], help='GPU indices.')
parser.add_argument('--restore', action='store_true', help='True if we want to restore the model.')
parser.add_argument('--use_sm', action='store_true', help='True if we want to use the Spatial Model.')
parser.add_argument('--data_augm', action='store_true', help='True if we want to use data augmentation.')
parser.add_argument('--n_epochs', type=int, default=30, help='Number of epochs.')
parser.add_argument('--batch_size', type=int, default=14, help='Batch size.')
parser.add_argument('--optimizer', type=str, default='adam', help='momentum or adam')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate.')
parser.add_argument('--lmbd', type=float, default=0.001, help='Regularization coefficient.')
hps = parser.parse_args() # returns a Namespace object, new fields can be set like hps.abc = 10
gpu_memory = get_gpu_memory(hps.gpus)
train_pd = True # to train the part detector or not
best_model_name = '2018-02-17 11:34:12_lr=0.001_lambda=0.001_bs=14-76'
model_path = 'models_ex'
time_start = time.time()
cur_timestamp = str(datetime.now())[:-7] # get rid of milliseconds
model_name = '{}_lr={}_lambda={}_bs={}'.format(cur_timestamp, hps.lr, hps.lmbd, hps.batch_size)
tb_folder = 'tb'
tb_train_iter = '{}/{}/train_iter'.format(tb_folder, model_name) # is not used, but can be useful for debugging
tb_train = '{}/{}/train'.format(tb_folder, model_name)
tb_test = '{}/{}/test'.format(tb_folder, model_name)
tb_log_iters = False
n_eval_ex = 512 if hps.debug else 1100
joints_to_eval = [2] # for example, 2 - left wrist, 5 - right wrist, 8 - nose, 'all' - all joints
det_radius = 10 # moderate detection radius
n_joints = 9 # excluding "torso-joint", which is 10-th
x_train, y_train, x_test, y_test = get_dataset()
pairwise_distr_np = get_pairwise_distr()
n_train, in_height, in_width, n_colors = x_train.shape[0:4]
n_test, hm_height, hm_width = y_test.shape[0:3]
if hps.debug:
n_train, n_test = 1024, 512 # for debugging purposes we take only a small subset
train_idx, test_idx = np.random.permutation(x_train.shape[0])[:n_train], np.random.permutation(x_test.shape[0])[:n_test]
x_train, y_train, x_test, y_test = x_train[train_idx], y_train[train_idx], x_test[test_idx], y_test[test_idx]
# Main hyperparameters
n_updates_total = hps.n_epochs * n_train // hps.batch_size
lr_decay_n_updates = [round(0.7 * n_updates_total), round(0.8 * n_updates_total), round(0.9 * n_updates_total)]
lr_decay_coefs = [hps.lr, hps.lr / 2, hps.lr / 5, hps.lr / 10]
img_tb_from = 450 # 50 or 450
img_tb_to = img_tb_from + hps.batch_size
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
x_in = tf.placeholder(tf.float32, [None, in_height, in_width, n_colors], name='input_full')
if hps.use_sm:
pairwise_energies, pairwise_biases = {}, {}
for joint in joint_names[:n_joints]:
for cond_joint in joint_dependence[joint]:
joint_key = joint + '_' + cond_joint
tensor = tf.convert_to_tensor(pairwise_distr_np[joint_key], dtype=tf.float32)
pairwise_energy_jj = tf.reshape(tensor, [1, tensor.shape[0].value, tensor.shape[1].value, 1])
pairwise_energies[joint_key] = tf.get_variable('energy_' + joint_key, initializer=pairwise_energy_jj)
init = tf.constant(0.00001, shape=[1, hm_height, hm_width, 1])
pairwise_biases[joint_key] = tf.get_variable('bias_' + joint_key, initializer=init)
y_in = tf.placeholder(tf.float32, [None, hm_height, hm_width, n_joints+1], name='heat_map')
flag_train = tf.placeholder(tf.bool, name='is_training')
n_iters_tf = tf.get_variable('n_iters', initializer=0, trainable=False)
lr_tf = tf.train.piecewise_constant(n_iters_tf, lr_decay_n_updates, lr_decay_coefs)
# Data augmentation: we apply the same random transformations both to images and heat maps
if hps.data_augm:
x_batch, hm_target_batch = tf.cond(flag_train, lambda: augm.augment_train(x_in, y_in),
lambda: augm.augment_test(x_in, y_in))
else:
x_batch, hm_target_batch = x_in, y_in
if hps.optimizer == 'adam':
opt = tf.train.AdamOptimizer(lr_tf)
elif hps.optimizer == 'momentum':
opt = tf.train.MomentumOptimizer(learning_rate=lr_tf, momentum=0.9)
else:
raise Exception('wrong optimizer')
# Calculate the gradients for each model tower.
tower_grads, losses, det_rates_pd, det_rates_sm, hms_pred_pd, hms_pred_sm = [], [], [], [], [], []
mses_pd, mses_sm = [], []
imgs_per_gpu = hps.batch_size // len(hps.gpus)
with tf.variable_scope(tf.get_variable_scope()):
for i in range(len(hps.gpus)):
with tf.device('/gpu:%d' % i), tf.name_scope('tower_%d' % i) as scope:
# Dequeues one batch for the GPU
id_from, id_to = i*imgs_per_gpu, i*imgs_per_gpu + imgs_per_gpu
x, hm_target = x_batch[id_from:id_to], hm_target_batch[id_from:id_to]
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across all towers.
# The whole heat map prediction model is here
hm_pred_pd_logit = model(x, n_joints)
hm_pred_pd = spatial_softmax(hm_pred_pd_logit)
hms_pred_pd.append(hm_pred_pd)
if hps.use_sm:
# To disambiguate multiple people on the same image
hm_pred_pd_with_torso = tf.concat([hm_pred_pd, hm_target[:, :, :, n_joints:]], axis=3)
hm_pred_sm_logit = spatial_model(hm_pred_pd_with_torso)
hm_pred_sm = spatial_softmax(hm_pred_sm_logit)
hms_pred_sm.append(hm_pred_sm)
else:
# for compatibility with Tensorboard, we should have this variable
hm_pred_sm_logit, hm_pred_sm = hm_pred_pd_logit, hm_pred_pd
hms_pred_sm.append(hm_pred_sm)
with tf.name_scope('loss'):
loss_pd = softmax_cross_entropy(hm_pred_pd_logit, hm_target[:, :, :, :n_joints])
loss_sm = softmax_cross_entropy(hm_pred_sm_logit, hm_target[:, :, :, :n_joints])
loss_tower = loss_pd + loss_sm + hps.lmbd * weight_decay(var_pattern='weights')
losses.append(loss_tower)
                        mses_pd.append(loss_pd)
                        mses_sm.append(loss_sm)
with tf.name_scope('evaluation'):
wrist_det_rate10_pd = evaluation.det_rate(hm_pred_pd, hm_target[:, :, :, :n_joints],
normalized_radius=det_radius, joints=joints_to_eval)
wrist_det_rate10_sm = evaluation.det_rate(hm_pred_sm, hm_target[:, :, :, :n_joints],
normalized_radius=det_radius, joints=joints_to_eval)
det_rates_pd.append(wrist_det_rate10_pd)
det_rates_sm.append(wrist_det_rate10_sm)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Calculate the gradients for the batch of data on this tower.
grads_vars_in_tower = opt.compute_gradients(loss_tower)
# Keep track of the gradients across all towers.
tower_grads.append(grads_vars_in_tower)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads_vars = average_gradients(tower_grads)
loss = avg_tensor_list(losses)
det_rate_pd = avg_tensor_list(det_rates_pd)
det_rate_sm = avg_tensor_list(det_rates_sm)
mses_pd = avg_tensor_list(mses_pd)
mses_sm = avg_tensor_list(mses_sm)
hms_pred_pd = tf.concat(hms_pred_pd, axis=0)
hms_pred_sm = tf.concat(hms_pred_sm, axis=0)
grads_vars = grad_renorm(grads_vars, 4.0)
train_step = opt.apply_gradients(grads_vars, global_step=n_iters_tf)
# # Track the moving averages of all trainable variables. We did not use it in the final evaluation.
# variable_averages = tf.train.ExponentialMovingAverage(0.99, n_iters_tf)
# variables_averages_op = variable_averages.apply(tf.trainable_variables())
#
# # Group all updates to into a single train op.
# train_step = tf.group(apply_gradient_op, variables_averages_op)
tf.summary.image('input', x_batch, 30)
if hps.use_sm:
for key in pairwise_energies:
tf.summary.image('pairwise_potential_' + key, pairwise_energies[key], 30)
tf.summary.image('pairwise_biases_' + key, pairwise_biases[key], 30)
tb.var_summary(pairwise_energies[key], 'pairwise_energies_' + key)
tb.var_summary(pairwise_biases[key], 'pairwise_biases_' + key)
tb.main_summaries(grads_vars)
tb.show_img_plus_hm(x_batch, hm_target_batch[:, :, :, :n_joints], joint_names[:n_joints], in_height, in_width, 'target')
tb.show_img_plus_hm(x_batch, hms_pred_pd, joint_names[:n_joints], in_height, in_width, 'pred_part_detector')
tb.show_img_plus_hm(x_batch, hms_pred_sm, joint_names[:n_joints], in_height, in_width, 'pred_spatial_model')
tb_merged = tf.summary.merge_all()
train_iters_writer = tf.summary.FileWriter(tb_train_iter, flush_secs=30)
train_writer = tf.summary.FileWriter(tb_train, flush_secs=30)
test_writer = tf.summary.FileWriter(tb_test, flush_secs=30)
# saver_old = tf.train.Saver(var_list=[v for v in tf.global_variables() if 'bn_sm' not in v.name])
saver = tf.train.Saver(max_to_keep=50)
gpu_options = tf.GPUOptions(visible_device_list=str(hps.gpus)[1:-1], per_process_gpu_memory_fraction=gpu_memory)
config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
with tf.Session(config=config, graph=graph) as sess:
if not hps.restore:
sess.run(tf.global_variables_initializer())
else:
saver.restore(sess, model_path + '/' + best_model_name)
# vars_to_init = [var for var in tf.global_variables() if 'energy' in var.op.name or 'bias_' in var.op.name]
# vars_to_init = [var for var in tf.global_variables() if 'bn_sm' in var.op.name]
# sess.run(tf.variables_initializer(vars_to_init + [n_iters_tf]))
# sess.run(tf.global_variables_initializer())
print('trainable:', tf.trainable_variables(), sep='\n')
if hps.train:
tb.run_summary(sess, train_writer, tb_merged, 0,
feed_dict={x_in: x_train[img_tb_from:img_tb_to], y_in: y_train[img_tb_from:img_tb_to],
flag_train: False})
tb.run_summary(sess, test_writer, tb_merged, 0,
feed_dict={x_in: x_test[img_tb_from:img_tb_to], y_in: y_test[img_tb_from:img_tb_to],
flag_train: False})
train_mse_pd, train_mse_sm, train_dr_pd, train_dr_sm = eval_error(x_train[:n_eval_ex], y_train[:n_eval_ex],
sess, hps.batch_size)
test_mse_pd, test_mse_sm, test_dr_pd, test_dr_sm = eval_error(x_test[:n_eval_ex], y_test[:n_eval_ex], sess,
hps.batch_size)
print(
'Epoch {:d} test_dr {:.3f} {:.3f} train_dr {:.3f} {:.3f} test_mse {:.5f} {:.5f} train_mse {:.5f} {:.5f}'.
format(0, test_dr_pd, test_dr_sm, train_dr_pd, train_dr_sm, test_mse_pd, test_mse_sm, train_mse_pd,
train_mse_sm))
tb.write_summary(test_writer, [test_mse_pd, test_mse_sm, test_dr_pd, test_dr_sm],
['main/mse_pd', 'main/mse_sm', 'main/det_rate_pd', 'main/det_rate_sm'], 0)
tb.write_summary(train_writer, [train_mse_pd, train_mse_sm, train_dr_pd, train_dr_sm],
['main/mse_pd', 'main/mse_sm', 'main/det_rate_pd', 'main/det_rate_sm'], 0)
global_iter = 0
for epoch in range(1, hps.n_epochs + 1):
for x_train_batch, y_train_batch in get_next_batch(x_train, y_train, hps.batch_size, shuffle=True):
global_iter += 1
if tb_log_iters:
_, summary = sess.run([train_step, tb_merged],
feed_dict={x_in: x_train_batch, y_in: y_train_batch, flag_train: True})
train_iters_writer.add_summary(summary, global_iter)
else:
sess.run(train_step, feed_dict={x_in: x_train_batch, y_in: y_train_batch, flag_train: True})
tb.run_summary(sess, train_writer, tb_merged, epoch, feed_dict={
x_in: x_train[img_tb_from:img_tb_to], y_in: y_train[img_tb_from:img_tb_to], flag_train: False})
tb.run_summary(sess, test_writer, tb_merged, epoch, feed_dict={
x_in: x_test[img_tb_from:img_tb_to], y_in: y_test[img_tb_from:img_tb_to], flag_train: False})
train_mse_pd, train_mse_sm, train_dr_pd, train_dr_sm = eval_error(x_train[:n_eval_ex], y_train[:n_eval_ex], sess, hps.batch_size)
test_mse_pd, test_mse_sm, test_dr_pd, test_dr_sm = eval_error(x_test[:n_eval_ex], y_test[:n_eval_ex], sess, hps.batch_size)
print('Epoch {:d} test_dr {:.3f} {:.3f} train_dr {:.3f} {:.3f} test_mse {:.5f} {:.5f} train_mse {:.5f} {:.5f}'.
format(epoch, test_dr_pd, test_dr_sm, train_dr_pd, train_dr_sm, test_mse_pd, test_mse_sm, train_mse_pd, train_mse_sm))
tb.write_summary(test_writer, [test_mse_pd, test_mse_sm, test_dr_pd, test_dr_sm],
['main/mse_pd', 'main/mse_sm', 'main/det_rate_pd', 'main/det_rate_sm'], epoch)
tb.write_summary(train_writer, [train_mse_pd, train_mse_sm, train_dr_pd, train_dr_sm],
['main/mse_pd', 'main/mse_sm', 'main/det_rate_pd', 'main/det_rate_sm'], epoch)
# Save the model on each epoch after half of epochs are done
if epoch > hps.n_epochs // 2:
if not os.path.exists(model_path):
os.makedirs(model_path)
saver.save(sess, '{}/{}'.format(model_path, model_name), global_step=epoch)
else:
tb.run_summary(sess, train_writer, tb_merged, 0,
feed_dict={x_in: x_train[img_tb_from:img_tb_to], y_in: y_train[img_tb_from:img_tb_to],
flag_train: False})
tb.run_summary(sess, test_writer, tb_merged, 0,
feed_dict={x_in: x_test[img_tb_from:img_tb_to], y_in: y_test[img_tb_from:img_tb_to],
flag_train: False})
pred_coords_pd, pred_coords_sm = get_predictions(x_test, y_test, sess)
scipy.io.savemat('matlab/predictions.mat', {'flic_pred_pd': pred_coords_pd,
'flic_pred_sm': pred_coords_sm})
train_writer.close()
test_writer.close()
train_iters_writer.close()
print('Done in {:.2f} min\n\n'.format((time.time() - time_start) / 60))
|
492620 | import math
import numpy as np
import matplotlib.pyplot as plt
from porousmedialab.phcalc import Acid
import seaborn as sns
from matplotlib.colors import ListedColormap
sns.set_style("whitegrid")
def custom_plot(lab, x, y, ttl='', y_lbl='', x_lbl=''):
plt.figure()
ax = plt.subplot(111)
plt.plot(x, y, lw=3)
plt.title(ttl)
plt.xlim(x[0], x[-1])
plt.ylabel(y_lbl)
plt.xlabel(x_lbl)
ax.grid(linestyle='-', linewidth=0.2)
return ax
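# Illustrative usage sketch (note: custom_plot does not use its `lab` argument, so any pair
# of 1-D arrays can be plotted; the synthetic data below is an assumption for demonstration).
def _demo_custom_plot():
    """Hypothetical demo: plot a synthetic decay curve with custom_plot."""
    t = np.linspace(0, 10, 100)
    return custom_plot(None, t, np.exp(-t), ttl='Synthetic decay', y_lbl='C', x_lbl='Time')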
def plot_batch_rates(batch, *args, **kwargs):
for rate in sorted(batch.estimated_rates):
plt.figure()
plot_batch_rate(batch, rate, *args, **kwargs)
def plot_batch_rate(batch, rate, time_factor=1):
plt.plot(batch.time * time_factor,
batch.estimated_rates[rate][0] / time_factor, label=rate, lw=3)
    plt.ylabel(r'Rate, $[\Delta C/\Delta T]$')
plt.xlabel('Time, [T]')
plt.legend(frameon=1)
plt.grid(linestyle='-', linewidth=0.2)
def plot_batch_deltas(batch, *args, **kwargs):
for element in sorted(batch.species):
plt.figure()
plot_batch_delta(batch, element, *args, **kwargs)
def plot_batch_delta(batch, element, time_factor=1):
plt.plot(batch.time[1:] * time_factor, batch.species[element]
['rates'][0] / time_factor, label=element, lw=3)
    plt.ylabel(r'Rate of change, $[\Delta C/ \Delta T]$')
plt.xlabel('Time, [T]')
plt.legend(frameon=1)
plt.grid(linestyle='-', linewidth=0.2)
def saturation_index_countour(lab, elem1, elem2, Ks, labels=False):
plt.figure()
plt.title('Saturation index %s%s' % (elem1, elem2))
    resolution = 100
    n = math.ceil(lab.time.size / resolution)
plt.xlabel('Time')
z = np.log10((lab.species[elem1]['concentration'][:, ::n] + 1e-8) * (
lab.species[elem2]['concentration'][:, ::n] + 1e-8) / lab.constants[Ks])
lim = np.max(abs(z))
lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
X, Y = np.meshgrid(lab.time[::n], -lab.x)
plt.xlabel('Time')
CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(sns.color_palette(
"RdBu_r", 101)), origin='lower', levels=lim, extend='both')
    if labels:
        plt.clabel(CS, inline=1, fontsize=10, colors='w')
    cbar = plt.colorbar(CS)
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
cbar.ax.set_ylabel('Saturation index %s%s' % (elem1, elem2))
return ax
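# Illustrative sketch (assumption): the quantity contoured above is the saturation index
# SI = log10([elem1][elem2] / Ks); SI > 0 indicates supersaturation, SI < 0 undersaturation.
def _saturation_index(c1, c2, ks):
    """Hypothetical scalar helper mirroring the contour computation."""
    return np.log10((c1 + 1e-8) * (c2 + 1e-8) / ks)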
def plot_fractions(lab):
for component in lab.acid_base_components:
if isinstance(component['pH_object'], Acid):
plt.figure()
for idx in range(len(component['species'])):
plt.plot(lab.time, lab.species[component['species'][idx]]
['alpha'][0, :], label=component['species'][idx])
plt.ylabel('Fraction')
plt.xlabel('Time')
plt.legend(frameon=1)
plt.grid(linestyle='-', linewidth=0.2)
def all_plot_depth_index(lab, *args, **kwargs):
for element in sorted(lab.species):
plt.figure()
plot_depth_index(lab, element, *args, **kwargs, ax=None)
def plot_depth_index(lab, element, idx=0, time_to_plot=False, time_factor=1, ax=None):
if ax is None:
ax = plt.subplot(111)
if element == 'Temperature':
ax.set_title('Temperature')
ax.set_ylabel('Temperature, C')
elif element == 'pH':
ax.set_title('pH')
ax.set_ylabel('pH')
else:
ax.set_ylabel('Concentration')
if time_to_plot:
num_of_elem = int(time_to_plot / lab.dt)
else:
num_of_elem = len(lab.time)
t = lab.time[-num_of_elem:] * time_factor
ax.set_xlabel('Time')
if isinstance(element, str):
ax.plot(t, lab.species[element]['concentration']
[idx][-num_of_elem:], lw=3)
ax.set_title(element + ' concentration')
elif isinstance(element, (list, tuple)):
for e in element:
ax.plot(t, lab.species[e]['concentration']
[idx][-num_of_elem:], lw=3, label=e)
ax.legend(frameon=1)
ax.grid(linestyle='-', linewidth=0.2)
return ax
def plot_depths(lab, element, depths=[0, 1, 2, 3, 4], time_to_plot=False):
plt.figure()
ax = plt.subplot(111)
if element == 'Temperature':
plt.title('Temperature at specific depths')
plt.ylabel('Temperature, C')
else:
plt.title(element + ' concentration at specific depths')
plt.ylabel('Concentration')
if time_to_plot:
num_of_elem = int(time_to_plot / lab.dt)
else:
num_of_elem = len(lab.time)
t = lab.time[-num_of_elem:]
plt.xlabel('Time')
for depth in depths:
lbl = str(depth)
plt.plot(t, lab.species[element]['concentration'][int(
depth / lab.dx)][-num_of_elem:], lw=3, label=lbl)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.grid(linestyle='-', linewidth=0.2)
return ax
def plot_times(lab, element, time_slices=[0, 1, 2, 3, 4]):
plt.figure()
ax = plt.subplot(111)
if element == 'Temperature':
plt.title('Temperature profile')
plt.xlabel('Temperature, C')
else:
plt.title(element + ' concentration')
plt.xlabel('Concentration')
plt.ylabel('Depth, cm')
for tms in time_slices:
lbl = 'at time: %.2f ' % (tms)
plt.plot(lab.species[element]['concentration'][
:, int(tms / lab.dt)], -lab.x, lw=3, label=lbl)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=2)
ax.grid(linestyle='-', linewidth=0.2)
return ax
def plot_profiles(lab):
for element in sorted(lab.species):
plot_profile(lab, element)
def plot_profile(lab, element):
plt.figure()
plt.plot(lab.profiles[element], -lab.x,
sns.xkcd_rgb["denim blue"], lw=3, label=element)
if element == 'Temperature':
plt.title('Temperature profile')
plt.xlabel('Temperature, C')
elif element == 'pH':
plt.title('pH profile')
plt.xlabel('pH')
else:
plt.title('%s concentration' % (element, ))
plt.xlabel('Concentration')
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
ax.grid(linestyle='-', linewidth=0.2)
plt.legend()
plt.tight_layout()
return ax
def plot_contourplots(lab, **kwargs):
for element in sorted(lab.species):
contour_plot(lab, element, **kwargs)
def contour_plot(lab, element, labels=False, days=False, last_year=False):
plt.figure()
plt.title(element + ' concentration')
    resolution = 100
    n = math.ceil(lab.time.size / resolution)
if last_year:
k = n - int(1 / lab.dt)
else:
k = 1
if days:
X, Y = np.meshgrid(lab.time[k::n] * 365, -lab.x)
plt.xlabel('Time')
else:
X, Y = np.meshgrid(lab.time[k::n], -lab.x)
plt.xlabel('Time')
z = lab.species[element]['concentration'][:, k - 1:-1:n]
CS = plt.contourf(X, Y, z, 51, cmap=ListedColormap(
sns.color_palette("Blues", 51)), origin='lower')
if labels:
plt.clabel(CS, inline=1, fontsize=10, colors='w')
cbar = plt.colorbar(CS)
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
cbar.ax.set_ylabel('%s [M/V]' % element)
if element == 'Temperature':
plt.title('Temperature contour plot')
cbar.ax.set_ylabel('Temperature, C')
if element == 'pH':
plt.title('pH contour plot')
cbar.ax.set_ylabel('pH')
return ax
def plot_contourplots_of_rates(lab, **kwargs):
rate = sorted(lab.estimated_rates)
for r in rate:
contour_plot_of_rates(lab, r, **kwargs)
def contour_plot_of_rates(lab, r, labels=False, last_year=False):
plt.figure()
plt.title('{}'.format(r))
    resolution = 100
    n = math.ceil(lab.time.size / resolution)
if last_year:
k = n - int(1 / lab.dt)
else:
k = 1
z = lab.estimated_rates[r][:, k - 1:-1:n]
# lim = np.max(np.abs(z))
# lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
X, Y = np.meshgrid(lab.time[k::n], -lab.x)
plt.xlabel('Time')
CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(
sns.color_palette("Blues", 51)))
if labels:
plt.clabel(CS, inline=1, fontsize=10, colors='w')
cbar = plt.colorbar(CS)
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
cbar.ax.set_ylabel('Rate %s [M/V/T]' % r)
return ax
def plot_contourplots_of_deltas(lab, **kwargs):
elements = sorted(lab.species)
if 'Temperature' in elements:
elements.remove('Temperature')
for element in elements:
contour_plot_of_delta(lab, element, **kwargs)
def contour_plot_of_delta(lab, element, labels=False, last_year=False):
plt.figure()
plt.title('Rate of %s consumption/production' % element)
    resolution = 100
    n = math.ceil(lab.time.size / resolution)
if last_year:
k = n - int(1 / lab.dt)
else:
k = 1
z = lab.species[element]['rates'][:, k - 1:-1:n]
lim = np.max(np.abs(z))
lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
X, Y = np.meshgrid(lab.time[k:-1:n], -lab.x)
plt.xlabel('Time')
CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(sns.color_palette(
"RdBu_r", 101)), origin='lower', levels=lim, extend='both')
if labels:
plt.clabel(CS, inline=1, fontsize=10, colors='w')
cbar = plt.colorbar(CS)
plt.ylabel('Depth')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
    cbar.ax.set_ylabel(r'Rate of %s change $[\Delta/T]$' % element)
return ax
|
492628 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
from self_driving.segnet import segnet_vgg
import tensorflow as tf
import numpy as np
NUM_CLASSES = 11
class PoolingTest(test.TestCase):
def testMaxPoolingWithArgmax(self):
# MaxPoolWithArgMax is implemented only on CUDA.
if not test.is_gpu_available(cuda_only=True):
return
'''[[[[ 1. 2.]
[ 3. 4.]
[ 5. 6.]]
[[ 7. 8.]
[ 9. 10.]
[ 11. 12.]]
[[ 13. 14.]
[ 15. 16.]
[ 17. 18.]]]]'''
tensor_input = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
with self.test_session(use_gpu=True) as sess:
t = constant_op.constant(tensor_input, shape=[1, 3, 3, 2])
out_op, argmax_op = segnet_vgg.max_pool_with_argmax(t)
out, argmax = sess.run([out_op, argmax_op])
self.assertShapeEqual(out, out_op)
self.assertShapeEqual(argmax, argmax_op)
'''[[[9, 10]
[11, 12]]
[[15, 16]
[17, 18]]]'''
self.assertAllClose(out.ravel(), [9., 10., 11., 12., 15., 16., 17., 18.])
self.assertAllEqual(argmax.ravel(), [8, 9, 10, 11, 14, 15, 16, 17])
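    # Illustrative note (assumption based on the asserted values): within a batch the argmax
    # indices are flattened as (y * width + x) * channels + c, e.g. the maximum 9.0 sits at
    # (y=1, x=1, c=0) of the 3x3x2 input, giving (1 * 3 + 1) * 2 + 0 = 8.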
def testMaxUnpoolingWithArgmax(self):
'''[[[[ 1. 2.]
[ 3. 4.]
[ 5. 6.]]
[[ 7. 8.]
[ 9. 10.]
[ 11. 12.]]
[[ 13. 14.]
[ 15. 16.]
[ 17. 18.]]]]'''
tensor_input = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0]
with self.test_session(use_gpu=True) as sess:
t = constant_op.constant(tensor_input, shape=[1, 3, 3, 2])
out_op, argmax_op = segnet_vgg.max_pool_with_argmax(t)
out_op = segnet_vgg.max_unpool_with_argmax(out_op,
argmax_op,
output_shape=np.int64([1, 3, 3, 2]))
out = sess.run([out_op])
self.assertAllClose(out, [[[[[ 0., 0.],
[ 0., 0.],
[ 0., 0.]],
[[ 0., 0.],
[ 9., 10.],
[ 11., 12.]],
[[ 0., 0.],
[ 15., 16.],
[ 17., 18.]]]]])
def testGetBias(self):
with self.test_session(use_gpu=True) as sess:
bias = segnet_vgg.get_bias("conv1_1")
sess.run(tf.global_variables_initializer())
self.assertEqual(bias.get_shape(), [64,])
self.assertAllClose(tf.reduce_sum(bias).eval(), 32.08903503417969)
def testGetConvFilter(self):
with self.test_session(use_gpu=True) as sess:
weight = segnet_vgg.get_conv_filter("conv1_1")
sess.run(tf.global_variables_initializer())
self.assertEqual(weight.get_shape(), [3, 3, 3, 64])
self.assertAllClose(tf.reduce_sum(weight).eval(), -4.212705612182617)
def testConvLayerWithBn(self):
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
tensor_input = tf.ones([10, 495, 289, 3], tf.float32)
with self.test_session(use_gpu=True, config = config) as sess:
conv_op = segnet_vgg.conv_layer_with_bn(tensor_input, tf.constant(True), "conv1_1")
sess.run(tf.global_variables_initializer())
conv_out = sess.run([conv_op])
self.assertEqual(np.array(conv_out).shape, (1, 10, 495, 289, 64))
def testDeconvLayerWithBn(self):
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
tensor_input = tf.ones([10, 495, 289, 3], tf.float32)
with self.test_session(use_gpu=True, config = config) as sess:
conv_op = segnet_vgg.deconv_layer_with_bn(tensor_input,
[3, 3, 3, 128],
tf.constant(True), "conv1_1")
sess.run(tf.global_variables_initializer())
conv_out = sess.run([conv_op])
self.assertEqual(np.array(conv_out).shape, (1, 10, 495, 289, 128))
def testInference(self):
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
train_data = tf.ones([10, 495, 289, 3], tf.float32)
with self.test_session(use_gpu=True, config = config) as sess:
model_op = segnet_vgg.inference(train_data)
sess.run(tf.global_variables_initializer())
model_out = sess.run([model_op])
self.assertEqual(np.array(model_out).shape, (1, 10, 495, 289, NUM_CLASSES))
if __name__ == "__main__":
test.main()
|
492629 | from napari.components.cursor import Cursor
def test_cursor():
"""Test creating cursor object"""
cursor = Cursor()
assert cursor is not None
|
492647 | from starGen import *
import numpy as np
import sys, os, traceback
from timeout import timeout, TimeoutError
from common import *
from scipy.optimize import linear_sum_assignment
time = int(os.getenv("TIMEOUT",30))
error = os.getenv("ERROR", 5)
required = int(os.getenv("REQUIRED",10))
def grade(guess, actual):
correct = 0
wrong = 0
guess = set(guess)
for g in guess:
if g in actual:
correct += 1
else :
wrong += 1
return (correct, wrong)
@timeout(time)
def makeTest(stars_xyz):
try:
viewable, view_indices, targetV = genQuestion(stars_xyz)
for v in viewable:
sys.stdout.write("%f,\t%f,\t%f\n" % (v[0], v[1], v[2]))
sys.stdout.write("\n")
#print(f"TargetV (debug): {str(targetV)}")
#print(f"View indices (debug): {str(view_indices)}")
#create_3d_scatter_graph(viewable)
while True:
sys.stdout.write("Index Guesses (Comma Delimited):\n")
sys.stdout.flush()
guess = sys.stdin.readline().strip()
if len(guess) == 0:
return False
guesses = guess.split(",")
if len(guesses) < required:
print("More stars please, try again! (%d required)" % required)
continue
try:
guess_indices = set(map(int, guesses))
except ValueError:
print("Non Integer Detected, try again")
continue
if len(guess_indices) < len(guesses):
print("Tried to use duplicates huh....")
continue
(correct, wrong) = grade(guess_indices, view_indices)
if (wrong > error):
print("Too many were incorrect, try again")
elif (correct < required):
print("Not enough were correct, try again")
else:
break
except TimeoutError as e:
raise e
except Exception:
traceback.print_exc()
return False
return True
def dumpStars(stars_xyz, f):
for row in stars_xyz:
f.write(",\t".join(map(lambda x: str(x), row)))
f.write("\n")
f.write("\n")
if __name__ == "__main__":
seed = 8036002836425871957 # int(os.getenv("SEED", 2020))
flag = os.getenv("FLAG", "FooBarFlag")
TrialCount = os.getenv("TRIALS", 5)
np.random.seed(seed % 0xFFFFFFFF)
stars_xyz = genStars()
filename = "catalog.txt"
with open(filename, "w+") as f:
dumpStars(stars_xyz, f)
#create_3d_scatter_graph(stars_xyz)
for ii in range(0,TrialCount):
try:
win = makeTest(stars_xyz)
except TimeoutError:
sys.stdout.write("Timeout, Bye\n")
sys.exit(1)
if not win:
sys.stdout.write("Failed...\n")
sys.exit(2)
sys.stdout.write("%d Left...\n" % (TrialCount - ii -1))
sys.stdout.flush()
sys.stdout.write(flag)
sys.stdout.write("\n")
sys.stdout.flush()
sys.exit(0)
|
492685 | from docxcompose.utils import xpath
class StructuredDocumentTags(object):
"""Structured Document Tags (aka Content Controls)"""
def __init__(self, doc):
self.doc = doc
def tags_by_alias(self, alias):
"""Get Structured Document Tags by alias."""
return xpath(
self.doc.element.body,
'.//w:sdt/w:sdtPr/w:alias[@w:val="%s"]/ancestor::w:sdt' % alias)
def set_text(self, alias, text):
"""Set the text content of all Structured Document Tags identified by
an alias. Only plain text SDTs are supported.
"""
tags = self.tags_by_alias(alias)
for tag in tags:
# Ignore if it's not a plain text SDT
if not xpath(tag, './w:sdtPr/w:text'):
continue
content = xpath(tag, './w:sdtContent')
if not content:
continue
showing_placeholder = xpath(tag, './w:sdtPr/w:showingPlcHdr')
text_elements = xpath(content[0], './/w:r/w:t')
if text_elements:
text_elements[0].text = text
if showing_placeholder:
# Remove placeholder marker and style
showing_placeholder[0].getparent().remove(
showing_placeholder[0])
run_props = xpath(text_elements[0].getparent(), './w:rPr')
if run_props:
text_elements[0].getparent().remove(run_props[0])
# Remove any other text elements
if len(text_elements) > 1:
for el in text_elements[1:]:
if el.getparent() == text_elements[0].getparent():
el.getparent().remove(el)
else:
el.getparent().getparent().remove(el.getparent())
def get_text(self, alias):
"""Get the text content of the first Structured Document Tag identified
by the given alias.
"""
tags = self.tags_by_alias(alias)
for tag in tags:
# Ignore if it's not a plain text SDT
if not xpath(tag, './w:sdtPr/w:text'):
continue
text_elements = xpath(tag, './w:sdtContent//w:r/w:t')
if text_elements:
return text_elements[0].text
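# Illustrative usage sketch (assumption: docxcompose operates on python-docx Document
# objects, and the alias name below is document-specific, not part of this module).
def _demo_set_and_get(path="template.docx"):
    """Hypothetical demo: fill a plain-text content control and read it back."""
    from docx import Document  # python-docx, assumed to be available alongside docxcompose
    tags = StructuredDocumentTags(Document(path))
    tags.set_text("Customer", "ACME Corp")
    return tags.get_text("Customer")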
|
492691 | import asyncio
import datetime
import os
import pytz
import re
import logging
from logging.handlers import TimedRotatingFileHandler
import discord
from discord.ext import commands
from utils import customchecks, sql, punishmentshelper
workDir = os.getcwd()
logDir = os.path.join(workDir, "logs")
if not os.path.exists(logDir):
os.makedirs(logDir)
fh = TimedRotatingFileHandler("logs/log", "midnight", encoding="utf-8", backupCount=7)
fh.setFormatter(logging.Formatter(fmt="[%(asctime)s] [%(name)-19s] %(levelname)-8s: %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S%z"))
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(fmt="[%(asctime)s] %(levelname)-8s: %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S%z"))
logging.basicConfig(handlers=[fh, ch],
level=logging.INFO)
logger = logging.getLogger('root')
intents = discord.Intents.none()
intents.bans = True
intents.guilds = True
intents.members = True
intents.messages = True
intents.presences = True
if "UBOT" not in os.environ:
logger.critical("Couldn't find a token. Please enter one in the UBOT environment variable. "
"The bot will not run without it")
raise customchecks.NoTokenError()
async def initdb():
"""
Initializes the database (makes sure that all tables are present)
"""
tables = [table[0] for table in await sql.fetch("SELECT name FROM sqlite_master WHERE type='table'")]
    if any(table not in tables for table in ["servers", "faq", "prefixes", "modroles", "mutes", "bans"]):
if "servers" not in tables:
await sql.execute("CREATE TABLE servers (serverid varchar(18) PRIMARY KEY, joinleavechannel varchar(18), comment text, muteroleid varchar(18))")
if "faq" not in tables:
await sql.execute("CREATE TABLE faq (serverid varchar(18), title text, content text, image text, creator varchar(18), timestamp timestamptz, link text)")
if "prefixes" not in tables:
await sql.execute("CREATE TABLE prefixes (serverid varchar(18), prefix text)")
if "modroles" not in tables:
await sql.execute("CREATE TABLE modroles (serverid varchar(18), roleid varchar(18))")
if "mutes" not in tables:
await sql.execute("CREATE TABLE mutes (serverid varchar(18), userid varchar(18), until timestamptz)")
if "bans" not in tables:
await sql.execute("CREATE TABLE bans (serverid varchar(18), userid varchar(18), until timestamptz)")
async def get_prefix(bot: commands.AutoShardedBot, message: discord.Message):
"""
Returns the prefix(es) for the bot
"""
prefixes = await sql.fetch("SELECT prefix FROM prefixes WHERE serverid=?", str(message.guild.id))
    prefixes = [prefix[0] for prefix in prefixes]  # one prefix string per fetched row
return commands.when_mentioned_or(*prefixes)(bot, message) if prefixes else commands.when_mentioned(bot, message)
bot = commands.AutoShardedBot(command_prefix=get_prefix, intents=intents)
@bot.event
async def on_command_error(ctx: commands.Context, error: Exception):
origerror = getattr(error, "original", error)
if isinstance(origerror, commands.errors.CommandNotFound):
pass
elif isinstance(origerror, commands.MissingPermissions):
description = origerror.args[0].replace('run command', f'use the command `{ctx.command}`')
em = discord.Embed(title="Error",
description=description,
colour=discord.Colour.red())
await ctx.send(embed=em)
elif isinstance(origerror, customchecks.NotAModError):
em = discord.Embed(title="Error",
description=f"You are not a moderator on this server.\n" +
f"For modifying moderator roles, see `{ctx.prefix}help modroles`",
colour=discord.Colour.red())
await ctx.send(embed=em)
elif isinstance(origerror, discord.errors.Forbidden):
em = discord.Embed(title="Error",
description="I don't have sufficient permissions to do that.",
colour=discord.Colour.red())
await ctx.send(embed=em)
else:
try:
errorMsg = origerror.message
except AttributeError:
errorMsg = str(origerror)
em = discord.Embed(title="Error",
description=f"I've encountered an error ({type(origerror)}). Please contact my creator. ```{errorMsg}```",
colour=discord.Colour.red())
await ctx.send(embed=em)
raise error
@bot.event
async def on_ready():
# In case the bot was off when leaving/joining a guild
logger.info("Verifying guilds match DB")
guilds = bot.guilds
guildIds = [guild.id for guild in guilds]
missingGuildIds = [guildId for guildId in guildIds if len(await sql.fetch("SELECT 1 FROM servers WHERE serverid=?", str(guildId))) == 0]
for guildId in missingGuildIds:
logger.debug(f"Added guild with id {guildId} to DB")
await sql.initserver(guildId)
undeletedGuildIds = [guildId[0] for guildId in await sql.fetch("SELECT serverid FROM servers") if int(guildId[0]) not in guildIds]
for guildId in undeletedGuildIds:
logger.debug(f"Removed guild with id {guildId} from DB")
await sql.deleteserver(guildId)
unfinishedMutes = await sql.fetch("SELECT * FROM mutes")
utcnow = pytz.utc.localize(datetime.datetime.utcnow())
for serverid, userid, until in unfinishedMutes:
if until is None:
continue
until = datetime.datetime.strptime(until, "%Y-%m-%d %H:%M:%S%z")
roleid = (await sql.fetch("SELECT muteroleid FROM servers WHERE serverid=?", serverid))[0][0]
guild = bot.get_guild(int(serverid))
if roleid is not None:
role = guild.get_role(int(roleid))
else:
role = None
member = guild.get_member(int(userid))
if utcnow >= until:
if member is not None and role is not None:
await member.remove_roles(role, reason="Temporary mute ended.")
await sql.execute("DELETE FROM mutes WHERE serverid=? AND userid=?", serverid, userid)
else:
duration = (until - utcnow).total_seconds()
asyncio.ensure_future(punishmentshelper.ensure_unmute(guild, int(userid), duration, role, partialDuration=True))
unfinishedBans = await sql.fetch("SELECT * FROM bans")
for serverid, userid, until in unfinishedBans:
until = datetime.datetime.strptime(until, "%Y-%m-%d %H:%M:%S%z")
guild = bot.get_guild(int(serverid))
guildBans = await guild.bans()
userid = int(userid)
for _, user in guildBans:
if user.id == userid:
break
else:
await sql.execute("DELETE FROM bans WHERE serverid=? AND userid=?", serverid, userid)
continue
if utcnow >= until:
user = await guild.unban(user, reason="Temporary ban ended.")
await sql.execute("DELETE FROM bans WHERE serverid=? AND userid=?", serverid, userid)
else:
duration = (until - utcnow).total_seconds()
asyncio.ensure_future(punishmentshelper.ensure_unban(guild, user, duration, partialDuration=True))
logger.info(f"Logged in as: {bot.user.name} - {bot.user.id}")
logger.info(f"Serving {len(bot.users)} users in {len(guilds)} server{('s' if len(guilds) > 1 else '')}")
@bot.event
async def on_guild_join(guild: discord.Guild):
logger.info(f"Joined server \'{guild.name}\' - {guild.id}")
await sql.initserver(guild.id)
@bot.event
async def on_guild_remove(guild: discord.Guild):
logger.info(f"Left server \'{guild.name}\' - {guild.id}")
await sql.deleteserver(guild.id)
wikiEx = re.compile(r"\[\[(.*?)\]\]")
negativeWikiEx = re.compile(r"\`[\S\s]*?\[\[(.*?)\]\][\S\s]*?\`")
modEx = re.compile(r"\>\>(.*?)\<\<")
negativeModEx = re.compile(r"\`[\S\s]*?\>\>(.*?)\<\<[\S\s]*?\`")
@bot.event
async def on_message(message: discord.Message):
if not isinstance(message.channel, discord.abc.GuildChannel):
return
msg = message.content
comment = await sql.fetch("SELECT comment FROM servers WHERE serverid=?", str(message.guild.id))
comment = comment[0][0] if len(comment) > 0 and str(comment[0]) != "None" else None
wikiSearch = None if not wikiEx.search(msg) or negativeWikiEx.search(msg) else wikiEx.search(msg).group(1)
modSearch = None if not modEx.search(msg) or negativeModEx.search(msg) else modEx.search(msg).group(1)
if wikiSearch or modSearch:
ctx = await bot.get_context(message)
if wikiSearch:
await ctx.invoke(bot.get_command("wiki"), searchterm=wikiSearch)
elif modSearch:
await ctx.invoke(bot.get_command("linkmod"), modname=modSearch)
else:
if comment is not None:
message.content = message.content.split(comment)[0]
await bot.process_commands(message)
@bot.event
async def on_member_join(member: discord.Member):
joinLeaveRow = await sql.fetch("SELECT joinleavechannel FROM servers WHERE serverid=?", str(member.guild.id))
if len(joinLeaveRow) > 0: # To avoid errors if the bot was the one removed
joinLeaveID = joinLeaveRow[0][0]
if joinLeaveID is not None:
joinLeaveChannel = bot.get_channel(int(joinLeaveID))
await joinLeaveChannel.send(f"**Join** - {member.mention}, account created at {member.created_at.isoformat()}.\n"
f"ID {member.id}. {member.guild.member_count} members.")
muteRow = await sql.fetch("SELECT * FROM mutes WHERE userid=?", str(member.id))
if len(muteRow) > 0:
muteRow = muteRow[0]
roleRow = await sql.fetch("SELECT muteroleid FROM servers WHERE serverid=?",
str(member.guild.id))
if roleRow[0][0] is not None:
role = member.guild.get_role(int(roleRow[0][0]))
else:
role = None
if muteRow[2] is not None and role is not None:
utcnow = pytz.utc.localize(datetime.datetime.utcnow())
until = datetime.datetime.strptime(muteRow[2], "%Y-%m-%d %H:%M:%S%z")
if utcnow < until:
await member.add_roles(role) # ensure_unmute is already running
elif role is not None:
await member.add_roles(role)
@bot.event
async def on_member_remove(member: discord.Member):
joinLeaveRow = await sql.fetch("SELECT joinleavechannel FROM servers WHERE serverid=?", str(member.guild.id))
if len(joinLeaveRow) > 0:
joinLeaveID = joinLeaveRow[0][0]
if joinLeaveID is not None:
joinLeaveChannel = bot.get_channel(int(joinLeaveID))
await joinLeaveChannel.send(f"**Leave** - {member.name}. ID {member.id}.\n"
f"{member.guild.member_count} members.")
@bot.event
async def on_member_ban(guild: discord.Guild, user: discord.User):
joinLeaveRow = await sql.fetch("SELECT joinleavechannel FROM servers WHERE serverid=?", str(guild.id))
if len(joinLeaveRow) > 0:
joinLeaveID = joinLeaveRow[0][0]
if joinLeaveID is not None:
joinLeaveChannel = bot.get_channel(int(joinLeaveID))
await joinLeaveChannel.send(f"**Ban** - {user.name}, ID {user.id}.\n")
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(initdb())
hadError = False
coglist = []
for root, directories, files in os.walk("cogs"):
for filename in files:
filepath = os.path.join(root, filename)
if filepath.endswith(".py"):
coglist.append(filepath.split(".py")[0].replace(os.sep, "."))
logger.debug("Loading cogs")
for cog in coglist:
logger.debug(f"Loading {cog}")
try:
bot.load_extension(cog)
logger.debug(f"Loaded {cog} successfully")
except Exception:
logger.exception(f"Failed to load cog: {cog}")
hadError = True
if hadError:
logger.warning("Error during cog loading")
else:
logger.info("Successfully loaded all cogs")
bot.run(os.environ["UBOT"], bot=True, reconnect=True)
|
492708 | from scipy import interpolate
from cached_property import cached_property
import numpy as np
try:
import matplotlib.pyplot as plt
except:
plt = None
from devito import Dimension
from devito.function import SparseTimeFunction
__all__ = ['PointSource', 'Receiver', 'Shot', 'WaveletSource',
'RickerSource', 'GaborSource', 'TimeAxis']
class TimeAxis(object):
""" Data object to store the time axis. Exactly three of the four key arguments
must be prescribed. Because of remainder values it is not possible to create
    a time axis that exactly adheres to the inputs; therefore start, stop, step
and num values should be taken from the TimeAxis object rather than relying
upon the input values.
The four possible cases are:
start is None: start = step*(1 - num) + stop
step is None: step = (stop - start)/(num - 1)
num is None: num = ceil((stop - start + step)/step);
because of remainder stop = step*(num - 1) + start
stop is None: stop = step*(num - 1) + start
:param start:(Optional) Start of time axis.
:param step: (Optional) Time interval.
    :param num: (Optional) Number of values (Note: this is the number of intervals + 1).
stop value is reset to correct for remainder.
:param stop: (Optional) End time.
"""
def __init__(self, start=None, step=None, num=None, stop=None):
try:
if start is None:
start = step*(1 - num) + stop
elif step is None:
step = (stop - start)/(num - 1)
elif num is None:
num = int(np.ceil((stop - start + step)/step))
stop = step*(num - 1) + start
elif stop is None:
stop = step*(num - 1) + start
else:
raise ValueError("Only three of start, step, num and stop may be set")
except:
raise ValueError("Three of args start, step, num and stop may be set")
if not isinstance(num, int):
raise TypeError("input argument must be of type int")
self.start = start
self.stop = stop
self.step = step
self.num = num
def __str__(self):
return "TimeAxis: start=%g, stop=%g, step=%g, num=%g" % \
(self.start, self.stop, self.step, self.num)
def _rebuild(self):
return TimeAxis(start=self.start, stop=self.stop, num=self.num)
@cached_property
def time_values(self):
return np.linspace(self.start, self.stop, self.num)
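# Illustrative sketch (not from the original module): with start/stop/step given, num is
# inferred as ceil((stop - start + step)/step) and stop is snapped onto the sampling grid.
def _demo_time_axis():
    """Hypothetical demo of TimeAxis construction: a 1000 ms axis sampled every 4 ms."""
    t = TimeAxis(start=0.0, stop=1000.0, step=4.0)
    return t.num, t.stop  # (251, 1000.0)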
class PointSource(SparseTimeFunction):
"""Symbolic data object for a set of sparse point sources
:param name: Name of the symbol representing this source.
:param grid: :class:`Grid` object defining the computational domain.
:param time_range: :class:`TimeAxis` TimeAxis(start, step, num) object.
:param npoint: (Optional) number of sparse points represented by this source.
:param data: (Optional) data values to initialise point data.
:param coordinates: (Optional) point coordinates for this source.
:param space_order: (Optional) space discretization order.
:param time_order: (Optional) time discretization order (defaults to 2).
:param dtype: (Optional) data type of the buffered data.
:param dimension: (Optional) class:`Dimension` object for
representing the number of points in this source.
"""
def __new__(cls, **kwargs):
name = kwargs.pop('name')
grid = kwargs.pop('grid')
time_range = kwargs.pop('time_range')
time_order = kwargs.pop('time_order', 2)
p_dim = kwargs.pop('dimension', Dimension(name='p_%s' % name))
coordinates = kwargs.pop('coordinates', kwargs.pop('coordinates_data', None))
# Either `npoint` or `coordinates` must be provided
npoint = kwargs.pop('npoint', None)
if npoint is None:
if coordinates is None:
raise TypeError("Need either `npoint` or `coordinates`")
npoint = coordinates.shape[0]
# Create the underlying SparseTimeFunction object
obj = SparseTimeFunction.__new__(cls, name=name, grid=grid,
dimensions=(grid.time_dim, p_dim),
npoint=npoint, nt=time_range.num,
time_order=time_order,
coordinates=coordinates, **kwargs)
obj._time_range = time_range._rebuild()
# If provided, copy initial data into the allocated buffer
data = kwargs.get('data')
if data is not None:
obj.data[:] = data
return obj
@cached_property
def time_values(self):
return self._time_range.time_values
@property
def time_range(self):
return self._time_range
def resample(self, dt=None, num=None, rtol=1e-5, order=3):
# Only one of dt or num may be set.
if dt is None:
assert num is not None
else:
assert num is None
start, stop = self._time_range.start, self._time_range.stop
dt0 = self._time_range.step
if dt is None:
new_time_range = TimeAxis(start=start, stop=stop, num=num)
dt = new_time_range.step
else:
new_time_range = TimeAxis(start=start, stop=stop, step=dt)
if np.isclose(dt, dt0):
return
nsamples, ntraces = self.data.shape
new_traces = np.zeros((new_time_range.num, ntraces))
for i in range(ntraces):
tck = interpolate.splrep(self._time_range.time_values,
self.data[:, i], k=order)
new_traces[:, i] = interpolate.splev(new_time_range.time_values, tck)
# Return new object
return PointSource(name=self.name, grid=self.grid, data=new_traces,
time_range=new_time_range, coordinates=self.coordinates.data)
# Pickling support
_pickle_kwargs = SparseTimeFunction._pickle_kwargs + ['time_range']
_pickle_kwargs.remove('nt') # `nt` is inferred from `time_range`
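# Illustrative usage sketch (assumption: a small devito Grid is available; the shape,
# extent and coordinates below are arbitrary demonstration values).
def _demo_point_source():
    """Hypothetical demo: build a single-point source on a 2D grid."""
    from devito import Grid  # part of devito's public API
    grid = Grid(shape=(101, 101), extent=(1000., 1000.))
    time_range = TimeAxis(start=0., stop=1000., step=4.)
    return PointSource(name='src', grid=grid, time_range=time_range,
                       coordinates=np.array([[500., 20.]]))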
Receiver = PointSource
Shot = PointSource
class WaveletSource(PointSource):
"""
Abstract base class for symbolic objects that encapsulate a set of
sources with a pre-defined source signal wavelet.
:param name: Name for the resulting symbol
:param grid: :class:`Grid` object defining the computational domain.
:param f0: Peak frequency for Ricker wavelet in kHz
:param time_values: Discretized values of time in ms
"""
def __new__(cls, *args, **kwargs):
npoint = kwargs.pop('npoint', 1)
obj = PointSource.__new__(cls, npoint=npoint, **kwargs)
obj.f0 = kwargs.get('f0')
for p in range(npoint):
obj.data[:, p] = obj.wavelet(obj.f0, obj.time_values)
return obj
def __init__(self, *args, **kwargs):
if not self._cached():
super(WaveletSource, self).__init__(*args, **kwargs)
def wavelet(self, f0, t):
"""
Defines a wavelet with a peak frequency f0 at time t.
:param f0: Peak frequency in kHz
:param t: Discretized values of time in ms
"""
raise NotImplementedError('Wavelet not defined')
def show(self, idx=0, wavelet=None):
"""
Plot the wavelet of the specified source.
:param idx: Index of the source point for which to plot wavelet
:param wavelet: Prescribed wavelet instead of one from this symbol
"""
wavelet = wavelet or self.data[:, idx]
plt.figure()
plt.plot(self.time_values, wavelet)
plt.xlabel('Time (ms)')
plt.ylabel('Amplitude')
plt.tick_params()
plt.show()
# Pickling support
_pickle_kwargs = PointSource._pickle_kwargs + ['f0']
class RickerSource(WaveletSource):
"""
Symbolic object that encapsulate a set of sources with a
pre-defined Ricker wavelet:
http://subsurfwiki.org/wiki/Ricker_wavelet
:param name: Name for the resulting symbol
:param grid: :class:`Grid` object defining the computational domain.
:param f0: Peak frequency for Ricker wavelet in kHz
:param time: Discretized values of time in ms
"""
def wavelet(self, f0, t):
"""
Defines a Ricker wavelet with a peak frequency f0 at time t.
:param f0: Peak frequency in kHz
:param t: Discretized values of time in ms
"""
r = (np.pi * f0 * (t - 1./f0))
return (1-2.*r**2)*np.exp(-r**2)
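# Illustrative check (assumption, not part of the original module): the Ricker wavelet
# above is centred at t = 1/f0, where r = 0 and the amplitude equals 1.
def _demo_ricker_peak(f0=0.010):
    """Hypothetical check: evaluate the Ricker formula at its centre time 1/f0."""
    t = np.array([1.0 / f0])
    r = np.pi * f0 * (t - 1.0 / f0)
    return float((1 - 2. * r ** 2) * np.exp(-r ** 2))  # 1.0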
class GaborSource(WaveletSource):
"""
Symbolic object that encapsulate a set of sources with a
pre-defined Gabor wavelet:
https://en.wikipedia.org/wiki/Gabor_wavelet
:param name: Name for the resulting symbol
:param grid: :class:`Grid` object defining the computational domain.
:param f0: Peak frequency for Ricker wavelet in kHz
:param time: Discretized values of time in ms
"""
def wavelet(self, f0, t):
"""
Defines a Gabor wavelet with a peak frequency f0 at time t.
:param f0: Peak frequency in kHz
:param t: Discretized values of time in ms
"""
agauss = 0.5 * f0
tcut = 1.5 / agauss
s = (t-tcut) * agauss
return np.exp(-2*s**2) * np.cos(2 * np.pi * s)
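# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal example of wiring the classes above together. It assumes `Grid`
# is importable from devito and that `TimeAxis` is the class defined earlier
# in this module; treat all parameter values as placeholders.
if __name__ == '__main__':
    from devito import Grid  # assumed import
    grid = Grid(shape=(101, 101), extent=(1000., 1000.))
    time_range = TimeAxis(start=0., stop=1000., step=1.)  # 0-1000 ms, 1 ms step
    # One Ricker source with a 10 Hz (0.010 kHz) peak frequency
    src = RickerSource(name='src', grid=grid, f0=0.010,
                       npoint=1, time_range=time_range)
    src.coordinates.data[0, :] = [500., 20.]
    print(src.data[:5, 0])  # first few samples of the Ricker wavelet
    # src.show()  # uncomment to plot the wavelet with matplotlib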
|
492727 | import os
from typing import List
from unittest import TestCase
from message_passing_nn.data.data_preprocessor import DataPreprocessor
from message_passing_nn.model.trainer import Trainer
from message_passing_nn.infrastructure.file_system_repository import FileSystemRepository
from message_passing_nn.usecase.grid_search import GridSearch
from message_passing_nn.utils.saver import Saver
from tests.fixtures.matrices_and_vectors import BASE_GRAPH, BASE_GRAPH_NODE_FEATURES
class TestTraining(TestCase):
def setUp(self) -> None:
self.features = BASE_GRAPH_NODE_FEATURES
self.adjacency_matrix = BASE_GRAPH
self.labels = BASE_GRAPH.view(-1)
self.dataset = 'training-test-data'
self.tests_data_directory = 'tests/test_data/'
tests_model_directory = 'tests/model_checkpoints'
tests_results_directory = 'tests/grid_search_results'
device = "cpu"
self.data_path = self.tests_data_directory + self.dataset + "/"
self.repository = FileSystemRepository(self.tests_data_directory, self.dataset)
self.data_preprocessor = DataPreprocessor()
self.data_preprocessor.enable_test_mode()
self.model_trainer = Trainer(self.data_preprocessor, device)
self.saver = Saver(tests_model_directory, tests_results_directory)
def test_start_for_multiple_batches_of_the_same_size(self):
# Given
dataset_size = 6
grid_search_dictionary = {
"model": ["RNN"],
"epochs": [10],
"batch_size": [3],
"validation_split": [0.2],
"test_split": [0.1],
"loss_function": ["MSE"],
"optimizer": ["SGD"],
"time_steps": [1],
"validation_period": [5]
}
grid_search = GridSearch(self.data_path,
self.data_preprocessor,
self.model_trainer,
grid_search_dictionary,
self.saver,
test_mode=True)
adjacency_matrix_filenames, features_filenames, labels_filenames = self._save_test_data(dataset_size)
# When
losses = grid_search.start()
configuration_id = list(losses["training_loss"].keys())[0]
# Then
self.assertTrue(losses["training_loss"][configuration_id][grid_search_dictionary["epochs"][0]] > 0.0)
self.assertTrue(
losses["validation_loss"][configuration_id][grid_search_dictionary["validation_period"][0]] > 0.0)
self.assertTrue(losses["test_loss"][configuration_id]["final_epoch"] > 0.0)
# Tear down
self._remove_files(dataset_size, features_filenames, adjacency_matrix_filenames, labels_filenames)
def test_start_for_multiple_batches_of_differing_size(self):
# Given
dataset_size = 5
grid_search_dictionary = {
"model": ["RNN"],
"epochs": [10],
"batch_size": [3],
"validation_split": [0.2],
"test_split": [0.1],
"loss_function": ["MSE"],
"optimizer": ["SGD"],
"time_steps": [1],
"validation_period": [5]
}
grid_search = GridSearch(self.data_path,
self.data_preprocessor,
self.model_trainer,
grid_search_dictionary,
self.saver,
test_mode=True)
adjacency_matrix_filenames, features_filenames, labels_filenames = self._save_test_data(dataset_size)
# When
losses = grid_search.start()
configuration_id = list(losses["training_loss"].keys())[0]
# Then
self.assertTrue(losses["training_loss"][configuration_id][grid_search_dictionary["epochs"][0]] > 0.0)
self.assertTrue(
losses["validation_loss"][configuration_id][grid_search_dictionary["validation_period"][0]] > 0.0)
self.assertTrue(losses["test_loss"][configuration_id]["final_epoch"] > 0.0)
# Tear down
self._remove_files(dataset_size, features_filenames, adjacency_matrix_filenames, labels_filenames)
def test_start_a_grid_search(self):
# Given
dataset_size = 6
grid_search_dictionary = {
"model": ["RNN"],
"epochs": [10, 15],
"batch_size": [3, 4],
"validation_split": [0.2],
"test_split": [0.1],
"loss_function": ["MSE"],
"optimizer": ["SGD"],
"time_steps": [1],
"validation_period": [5]
}
grid_search = GridSearch(self.data_path,
self.data_preprocessor,
self.model_trainer,
grid_search_dictionary,
self.saver,
test_mode=True)
adjacency_matrix_filenames, features_filenames, labels_filenames = self._save_test_data(dataset_size)
# When
losses = grid_search.start()
configuration_id = list(losses["training_loss"].keys())[0]
# Then
self.assertTrue(losses["training_loss"][configuration_id][grid_search_dictionary["epochs"][0]] > 0.0)
self.assertTrue(
losses["validation_loss"][configuration_id][grid_search_dictionary["validation_period"][0]] > 0.0)
self.assertTrue(losses["test_loss"][configuration_id]["final_epoch"] > 0.0)
# Tear down
self._remove_files(dataset_size, features_filenames, adjacency_matrix_filenames, labels_filenames)
def _save_test_data(self, dataset_size):
features_filenames = [str(i) + '_training_features' + '.pickle' for i in range(dataset_size)]
        adjacency_matrix_filenames = [str(i) + '_training_adjacency-matrix' + '.pickle' for i in range(dataset_size)]
        labels_filenames = [str(i) + '_training_labels' + '.pickle' for i in range(dataset_size)]
for i in range(dataset_size):
self.repository.save(features_filenames[i], self.features)
self.repository.save(adjacency_matrix_filenames[i], self.adjacency_matrix)
self.repository.save(labels_filenames[i], self.labels)
return adjacency_matrix_filenames, features_filenames, labels_filenames
def _remove_files(self,
dataset_size: int,
features_filenames: List[str],
adjacency_matrix_filenames: List[str],
labels_filenames: List[str]) -> None:
for i in range(dataset_size):
os.remove(self.tests_data_directory + self.dataset + "/" + features_filenames[i])
os.remove(self.tests_data_directory + self.dataset + "/" + adjacency_matrix_filenames[i])
os.remove(self.tests_data_directory + self.dataset + "/" + labels_filenames[i])
|
492754 | import sqlite3
import os
def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()
cursor.executescript("alter table blob add last_announced_time integer;")
cursor.executescript("alter table blob add single_announce integer;")
cursor.execute("update blob set next_announce_time=0")
connection.commit()
connection.close()
|
492783 | import datetime
import logging
import threading
from . import util
class Cache:
def __init__(self):
self._cache = {}
self._lock = threading.Lock()
def get(self, key):
with self._lock:
entry = self._cache.get(key)
if not entry:
return None
timestamp, value = entry
if util.next_update_time(timestamp) <= datetime.datetime.utcnow():
logging.info("cache entry '%s' is expired", key)
del self._cache[key]
return None
return value
def put(self, key, value):
with self._lock:
logging.info("writing '%s' to inproc cache", key)
self._cache[key] = datetime.datetime.utcnow(), value
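# Illustrative usage sketch (added; not part of the original module). Expiry
# is governed by util.next_update_time(), so entry lifetime is an assumption;
# this only shows the intended get/put round trip.
#
#     cache = Cache()
#     if cache.get("forecast") is None:
#         cache.put("forecast", fetch_forecast())  # hypothetical fetch function
#     value = cache.get("forecast")                # None again once expired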
|
492841 | from typing import Union
import numpy as np
from experiments.utils import sample_uniform_weights
from simulation.outcome_generators import (OutcomeGenerator,
generate_outcome_tcga)
class TCGASimulator(OutcomeGenerator):
def __init__(
self,
id_to_graph_dict: dict,
noise_mean: float = 0.0,
noise_std: float = 1.0,
dim_covariates: int = 25,
):
super().__init__(
id_to_graph_dict=id_to_graph_dict,
noise_mean=noise_mean,
noise_std=noise_std,
)
self.covariates_weights = sample_uniform_weights(
num_weights=3, dim_covariates=dim_covariates
)
def set_id_to_graph_dict(self, id_to_graph_dict: dict) -> None:
self.id_to_graph_dict = id_to_graph_dict
def generate_outcomes_for_units(
self, pca_features: list, unit_features: list, treatment_ids: list
) -> np.ndarray:
return self.__generate_outcomes(
pca_features=pca_features,
unit_features=unit_features,
treatment_ids=treatment_ids,
)
def generate_outcomes_for_unit(
self, pca_features, unit_features, treatment_ids
) -> np.ndarray:
pca_features = np.repeat(
np.expand_dims(pca_features, axis=0), len(treatment_ids), axis=0
)
unit_features = np.repeat(
np.expand_dims(unit_features, axis=0), len(treatment_ids), axis=0
)
return self.__generate_outcomes(
pca_features=pca_features,
unit_features=unit_features,
treatment_ids=treatment_ids,
)
def __generate_outcomes(
self,
pca_features: Union[list, np.ndarray],
unit_features: Union[list, np.ndarray],
treatment_ids: list,
) -> np.ndarray:
outcomes = []
        for pca_feat, unit_feat, treatment_id in zip(
            pca_features, unit_features, treatment_ids
        ):
            prop = self.id_to_graph_dict[treatment_id]["prop"]
            outcome = (
                generate_outcome_tcga(
                    unit_features=unit_feat,
                    pca_features=pca_feat,
prop=prop,
random_weights=self.covariates_weights,
)
+ self._sample_noise()
)
outcomes.append(outcome)
return np.array(outcomes).squeeze()
|
492871 | from torch import nn as nn
from torch.nn import functional as F
class ConvLayer(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, conv_type, transpose=False):
super(ConvLayer, self).__init__()
self.transpose = transpose
self.stride = stride
self.kernel_size = kernel_size
self.conv_type = conv_type
# How many channels should be normalised as one group if GroupNorm is activated
# WARNING: Number of channels has to be divisible by this number!
NORM_CHANNELS = 8
if self.transpose:
self.filter = nn.ConvTranspose1d(n_inputs, n_outputs, self.kernel_size, stride, padding=kernel_size-1)
else:
self.filter = nn.Conv1d(n_inputs, n_outputs, self.kernel_size, stride)
if conv_type == "gn":
assert(n_outputs % NORM_CHANNELS == 0)
self.norm = nn.GroupNorm(n_outputs // NORM_CHANNELS, n_outputs)
elif conv_type == "bn":
self.norm = nn.BatchNorm1d(n_outputs, momentum=0.01)
        # Add your own types of variations here!
def forward(self, x):
# Apply the convolution
if self.conv_type == "gn" or self.conv_type == "bn":
out = F.relu(self.norm((self.filter(x))))
else: # Add your own variations here with elifs conditioned on "conv_type" parameter!
assert(self.conv_type == "normal")
out = F.leaky_relu(self.filter(x))
return out
def get_input_size(self, output_size):
# Strided conv/decimation
if not self.transpose:
curr_size = (output_size - 1)*self.stride + 1 # o = (i-1)//s + 1 => i = (o - 1)*s + 1
else:
curr_size = output_size
# Conv
curr_size = curr_size + self.kernel_size - 1 # o = i + p - k + 1
# Transposed
if self.transpose:
assert ((curr_size - 1) % self.stride == 0)# We need to have a value at the beginning and end
curr_size = ((curr_size - 1) // self.stride) + 1
assert(curr_size > 0)
return curr_size
def get_output_size(self, input_size):
# Transposed
if self.transpose:
assert(input_size > 1)
curr_size = (input_size - 1)*self.stride + 1 # o = (i-1)//s + 1 => i = (o - 1)*s + 1
else:
curr_size = input_size
# Conv
curr_size = curr_size - self.kernel_size + 1 # o = i + p - k + 1
assert (curr_size > 0)
# Strided conv/decimation
if not self.transpose:
assert ((curr_size - 1) % self.stride == 0) # We need to have a value at the beginning and end
curr_size = ((curr_size - 1) // self.stride) + 1
return curr_size |
492890 | import operator
import numpy
import numba
class Dependent(object):
def __init__(self, **available):
self.available = available
def __getitem__(self, where):
return self.available[where]
@numba.extending.typeof_impl.register(Dependent)
def _Dependent_typeof(val, c):
return DependentType(list(val.available))
class DependentType(numba.types.Type):
def __init__(self, available):
self.available = available
self.requested = set()
super(DependentType, self).__init__(name="DependentType({0})".format(", ".join(sorted(self.available))))
def request(self, name):
if name not in self.requested:
self.requested.add(name)
@numba.typing.templates.infer
class _DependentType_type_getitem(numba.typing.templates.AbstractTemplate):
key = "static_getitem"
def generic(self, args, kwargs):
if len(args) == 2 and len(kwargs) == 0:
objtype, where = args
print("checking type", objtype, where)
objtype.request(where)
return numba.types.int64
@numba.extending.register_model(DependentType)
class DependentModel(numba.datamodel.models.StructModel):
def __init__(self, dmm, fe_type):
print("making model", fe_type)
print("requested", fe_type.requested)
members = []
super(DependentModel, self).__init__(dmm, fe_type, members)
@numba.extending.unbox(DependentType)
def _DependentType_unbox(typ, obj, c):
print("unboxing", typ)
print("requested", typ.requested)
out = numba.cgutils.create_struct_proxy(typ)(c.context, c.builder)
is_error = numba.cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
return numba.extending.NativeValue(out._getvalue(), is_error)
@numba.extending.lower_builtin("static_getitem", DependentType, numba.types.StringLiteral)
def _DependentType_lower_static_getitem(context, builder, sig, args):
print("lowering", sig.args[0], sig.args[1].literal_value)
return context.get_constant(numba.types.int64, 999)
obj = Dependent(one=999, two=999, three=999)
@numba.njit
def f(x):
return x["one"]
print(f(obj))
print(f(obj))
@numba.njit
def g(x):
return x["two"]
print(g(obj))
print(g(obj))
print(f(obj))
obj = Dependent(one=999, two=999, three=999)
print(f(obj))
print(g(obj))
obj = Dependent(one=999, two=999, three=999)
@numba.njit
def h(x):
return f(x)
print(h(obj))
|
492902 | from gym_kuka_mujoco.utils.kinematics import forwardKin, inverseKin, identity_quat
from gym_kuka_mujoco.utils.quaternion import mat2Quat
import numpy as np
def hole_insertion_samples(sim, nsamples=10, range=(0, 0.05)):
# The points to be transformed.
pos = np.array([0., 0., 0.])
peg_body_id = sim.model.body_name2id('peg')
tip_site_id = sim.model.site_name2id('peg_tip')
tip_body_pos = sim.model.site_pos[tip_site_id]
# The desired world coordinates
hole_id = sim.model.body_name2id('hole')
world_pos_desired, _ = forwardKin(sim, np.zeros(3), identity_quat, hole_id)
world_pos_delta = np.zeros((nsamples, 3))
world_pos_delta[:,2] = np.linspace(range[0], range[1], nsamples)
world_pos_desired = world_pos_delta + world_pos_desired
world_quat = np.array([0., 1., 0., 0.])
    # Compute the inverse kinematics
q_nom = np.zeros(7)
q_init = np.zeros(7)
upper = np.array([1e-6, np.inf, 1e-6, np.inf, 1e-6, np.inf, np.inf])
lower = -upper
q_sol = []
for w_pos in world_pos_desired:
q_opt = inverseKin(sim, q_init, q_nom, tip_body_pos, w_pos, world_quat, peg_body_id, upper=upper, lower=lower)
q_sol.append(q_opt)
return q_sol
def hole_insertion_samples_unrestricted(sim, nsamples=10, insertion_range=(0, 0.05), raise_on_fail=False):
# The points to be transformed.
pos = np.array([0., 0., 0.])
peg_body_id = sim.model.body_name2id('peg')
tip_site_id = sim.model.site_name2id('peg_tip')
tip_body_pos = sim.model.site_pos[tip_site_id]
# The desired world coordinates
hole_id = sim.model.body_name2id('hole')
hole_pos_delta = np.zeros((nsamples, 3))
hole_pos_delta[:,2] = np.linspace(insertion_range[0], insertion_range[1], nsamples)
world_pos_desired = []
world_quat_desired = []
world_quat = np.array([0., 1., 0., 0.])
for i in range(nsamples):
pos_desired, mat_desired = forwardKin(sim, hole_pos_delta[i,:], world_quat, hole_id)
world_pos_desired.append(pos_desired)
world_quat_desired.append(mat2Quat(mat_desired))
    # Compute the inverse kinematics
q_nom = np.zeros(7)
q_init = np.zeros(7)
upper = sim.model.jnt_range[:, 1]
lower = sim.model.jnt_range[:, 0]
q_sol = []
for w_pos, w_quat in zip(world_pos_desired, world_quat_desired):
q_opt = inverseKin(sim, q_init, q_nom, tip_body_pos, w_pos, w_quat, peg_body_id, upper=upper, lower=lower, raise_on_fail=raise_on_fail)
q_sol.append(q_opt)
q_init = q_opt.copy() # warm start the next solution
return q_sol |
492937 | from mypy.nodes import SymbolTableNode
from mypy.plugin import AnalyzeTypeContext
from mypy.types import Instance
from mypy.types import Type as MypyType
from mypy.types import UnionType
from typing_extensions import final
from classes.contrib.mypy.semanal.variadic_generic import (
analize_variadic_generic,
)
from classes.contrib.mypy.validation import validate_supports
@final
class VariadicGeneric(object):
"""
Variadic generic support for ``Supports`` type.
We also need to validate that
all type args of ``Supports`` are subtypes of ``AssociatedType``.
"""
__slots__ = ('_associated_type_node',)
def __init__(self, associated_type_node: SymbolTableNode) -> None:
"""We need ``AssociatedType`` fullname here."""
self._associated_type_node = associated_type_node
def __call__(self, ctx: AnalyzeTypeContext) -> MypyType:
"""Main entry point."""
analyzed_type = analize_variadic_generic(
validate_callback=self._validate,
ctx=ctx,
)
if isinstance(analyzed_type, Instance):
return analyzed_type.copy_modified(
args=[UnionType.make_union(analyzed_type.args)],
)
return analyzed_type
def _validate(self, instance: Instance, ctx: AnalyzeTypeContext) -> bool:
return validate_supports.check_type(
instance,
self._associated_type_node,
ctx,
)
|
492940 | import numpy as np
import cv2
import os
SRC_VIDEOS_DIR = '/home/deepano/workspace/dataset/roadSign/video'
DIST_FRAME_DIR = '/home/deepano/workspace/dataset/roadSign/frame'
def get_frame(src_video_dir, dist_frame_dir, fps):
for _file in os.listdir(src_video_dir):
video_file = os.path.join(src_video_dir, _file)
print(video_file)
cap=cv2.VideoCapture(video_file)
count = 0
while (True):
ret,frame=cap.read()
if ret == True:
                if not os.path.isdir(dist_frame_dir):  # create the output frame directory if it doesn't exist
                    os.makedirs(dist_frame_dir)
                # NOTE: the fps argument is currently unused, so every frame is saved.
frame_name = _file.split('.')[0] + '_' + str(count) + '.jpg'
frame_file = os.path.join(dist_frame_dir, frame_name)
cv2.imwrite(frame_file, frame)
count += 1
else:
break
cap.release()
def main():
get_frame(SRC_VIDEOS_DIR, DIST_FRAME_DIR, 0)
if __name__=="__main__":
main()
|
492948 | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
try:
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
except ImportError:
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
urlpatterns = [
url(r'^django-admin/', admin.site.urls),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'', include(wagtail_urls)),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
492994 | from pathlib import Path
from typing import Union
import cv2
import numpy as np
def load_rgb(file_path: Union[str, Path]) -> np.ndarray:
image = cv2.imread(str(file_path))
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
images = {
"with_faces": {
"image": load_rgb("tests/data/13.jpg"),
"faces": [
{
"bbox": [256.9, 93.64, 336.79, 201.76],
"score": 1.0,
"landmarks": [
[286.17, 134.94],
[323.32, 135.28],
[309.15, 161.34],
[283.74, 168.48],
[320.72, 168.48],
],
},
{
"bbox": [436.62, 118.5, 510.04, 211.13],
"score": 1.0,
"landmarks": [[460.96, 155.7], [494.47, 154.35], [480.52, 175.92], [464.73, 188.05], [491.9, 187.53]],
},
{
"bbox": [657.3, 156.87, 729.81, 245.78],
"score": 1.0,
"landmarks": [[665.64, 187.11], [696.5, 196.97], [670.65, 214.76], [666.92, 220.2], [689.45, 228.91]],
},
],
},
"with_no_faces": {
"image": load_rgb("tests/data/no_face.jpg"),
"faces": [{"bbox": [], "score": -1, "landmarks": []}],
},
}
|
493002 | from pyanp.priority import *
import numpy as np
from scipy.stats.mstats import gmean
a = np.array([
[1, 2, 5],
[1/2, 1, 3],
[1/5, 1/3, 1]
])
print(pri_eigen(a))
vals = [
[2, 5/3],
[2*3, 5],
[3, 5/2]
]
means = [gmean(row) for row in vals]
b = utmrowlist_to_npmatrix(means)
print(b)
print(means)
print(incon_std(b))
means = [np.mean(row) for row in vals]
b = utmrowlist_to_npmatrix(means)
print(b)
print(means)
print(incon_std(b)) |
493022 | import hypothesis as hp
from hypothesis import strategies as st
import time
import pypeln as pl
import cytoolz as cz
MAX_EXAMPLES = 10
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_flat_map_square(nums):
def _generator(x):
yield x
yield x + 1
yield x + 2
nums_py = map(lambda x: x ** 2, nums)
nums_py = cz.mapcat(_generator, nums_py)
nums_py = list(nums_py)
nums_pl = pl.sync.map(lambda x: x ** 2, nums)
nums_pl = pl.sync.flat_map(_generator, nums_pl)
nums_pl = list(nums_pl)
assert nums_pl == nums_py
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_flat_map_square_workers(nums):
def _generator(x):
yield x
yield x + 1
yield x + 2
nums_py = map(lambda x: x ** 2, nums)
nums_py = cz.mapcat(_generator, nums_py)
nums_py = list(nums_py)
nums_pl = pl.sync.map(lambda x: x ** 2, nums)
nums_pl = pl.sync.flat_map(_generator, nums_pl, workers=3)
nums_pl = list(nums_pl)
assert sorted(nums_pl) == sorted(nums_py)
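# Added illustration (not one of the original property-based tests): a direct,
# minimal use of pl.sync.flat_map showing that each input element is expanded
# by the generator into several output elements, in order.
def test_flat_map_simple_example():
    def _pair(x):
        yield x
        yield -x
    assert list(pl.sync.flat_map(_pair, [1, 2])) == [1, -1, 2, -2]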
|
493039 | from django.apps import AppConfig
class DatasetsConfig(AppConfig):
name = 'datasets'
verbose_name = 'Scaleout Datasets'
|
493065 | import requests.packages
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
|
493084 | from cv2 import cv2
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.layers import Activation, Dropout, Convolution2D, GlobalAveragePooling2D
from keras.models import Sequential
import tensorflow as tf
import os
IMG_SAVE_PATH = 'image_data'
CLASS_MAP = {
"none":0,
"one":1,
"two":2,
"three":3,
"four":4,
"five":5,
"six":6
}
NUM_CLASSES = len(CLASS_MAP)
def mapper(val):
return CLASS_MAP[val]
def get_model():
model = Sequential([
SqueezeNet(input_shape=(227, 227, 3), include_top=False),
Dropout(0.5),
Convolution2D(NUM_CLASSES, (1, 1), padding='valid'),
Activation('relu'),
GlobalAveragePooling2D(),
Activation('softmax')
])
return model
# load images from the directory
dataset = []
for directory in os.listdir(IMG_SAVE_PATH):
path = os.path.join(IMG_SAVE_PATH, directory)
if not os.path.isdir(path):
continue
for item in os.listdir(path):
# to make sure no hidden files get in our way
if item.startswith("."):
continue
img = cv2.imread(os.path.join(path, item))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (227, 227))
dataset.append([img, directory])
'''
dataset = [
[[...], 'one'],
[[...], 'two'],
...
]
'''
data, labels = zip(*dataset)
labels = list(map(mapper, labels))
'''
labels: one,two,three...
one hot encoded: [1,0,0,0,0,0,0], [0,1,0,0,0,0,0], [0,0,1,0,0,0,0],...
'''
# one hot encode the labels
labels = np_utils.to_categorical(labels)
# define the model
model = get_model()
model.compile(
optimizer=Adam(lr=0.0001),
loss='categorical_crossentropy',
metrics=['accuracy']
)
# start training
model.fit(np.array(data), np.array(labels), epochs=10)
# save the model for later use
model.save("Hand-cricket2-model.h5")
|
493090 | import tensorflow as tf
from avb.decoders import get_reconstr_err, get_decoder_mean, get_interpolations
from avb.utils import *
from avb.ops import *
from avb.validate import run_tests
from avb.validate.ais import AIS
from avb.iaf import IAFVAE, apply_iaf
from tqdm import tqdm
import time
import os
import ipdb
def test(encoder, decoder, iaf_layers, x_test, config):
log_dir = config['log_dir']
eval_dir = config['eval_dir']
results_dir = os.path.join(eval_dir, "results")
z_dim = config['z_dim']
batch_size = config['batch_size']
ais_nchains = config['test_ais_nchains']
test_nais = config['test_nais']
z_sampled = tf.random_normal([batch_size, z_dim])
iaf_test = IAFVAE(encoder, decoder, iaf_layers, x_test, z_sampled, config, is_training=False)
stats_scalar = {
'loss': iaf_test.loss,
}
stats_dist = {
'ELBO': iaf_test.ELBO,
'KL': iaf_test.KL,
'reconst_err': iaf_test.reconst_err,
'z': iaf_test.z_real,
}
params_posterior = [] #iaf_test.z_mean, iaf_test.log_z_std, iaf_test.a]
def energy0(z, theta):
E = tf.reduce_sum(
0.5 * tf.square(z) + 0.5 * np.log(2*np.pi), [1]
)
return E
# z_mean = theta[0]
# log_z_std = theta[1]
# a = theta[2]
# logq = get_pdf_gauss(z_mean, log_z_std, z)
#
# # IAF layers
# _, logq = apply_iaf(iaf_layers, a, z, logq)
#
# return -logq
def get_z0(theta):
return tf.random_normal([batch_size, z_dim])
run_tests(decoder, stats_scalar, stats_dist,
iaf_test.x_real, params_posterior, energy0, get_z0, config,
)
|
493157 | import milan
import exponential
import hepmass
import estimator
import cvx_estimator
import gauss_estimator
import time
import pandas as pd
import numpy
import math
def main():
ps = numpy.linspace(0, 1, 21)
ps[0] = 0.01
ps[-1] = 0.99
k = 7
datasets = {
"milan": milan.data,
"exponential": exponential.data,
"hepmass": hepmass.data
}
isLog = {
"milan": True,
"exponential": False,
"hepmass": False
}
solvers = {
"lp": cvx_estimator.CvxEstimator(k,1000,solver="lp"),
"maxent": cvx_estimator.CvxEstimator(k,1000,solver="maxent"),
"mindensity": cvx_estimator.CvxEstimator(k,1000,solver="mindensity"),
"gaussian": gauss_estimator.GaussEstimator(k),
}
results = []
for dname in datasets:
print(dname)
data = datasets[dname]
distributions = {}
num_trials = {
"lp": 500,
"maxent": 10,
"mindensity": 50,
"gaussian": 1000,
}
# num_trials = {
# "lp": 10,
# "maxent": 1,
# "mindensity": 1,
# "gaussian": 1,
# }
for sname in solvers:
print(sname)
e = solvers[sname]
if isLog[dname]:
e.set_statistics(
data["ranges"][2],
data["ranges"][3],
data["sLogMoments"][:k]
)
else:
e.set_statistics(
data["ranges"][0],
data["ranges"][1],
data["sMoments"][:k]
)
distributions[sname] = e.solve()
start_time = time.time()
for i in range(num_trials[sname]):
e.solve()
end_time = time.time()
for p in ps:
q_est = e.estimate(p)
if isLog[dname]:
q_est = math.exp(q_est)
results.append({
"dataset": dname,
"size_param": k,
"sketch": sname,
"query_time": ((end_time-start_time)/num_trials[sname]) * 1e9,
"q": "{0:.3g}".format(p),
"quantile_estimate": q_est
})
pd.DataFrame(results).to_csv("lesion_results.csv", index=False)
# import matplotlib.pyplot as plt
# import numpy as np
# plt.figure()
# xs = np.linspace(0, 1, 1000)
# for sname in solvers:
# plt.plot(xs, distributions[sname], label=sname)
# plt.legend()
# plt.show()
if __name__ == "__main__":
main() |
493181 | import numpy as np
from scipy.stats import norm
def black_scholes(t=40, r=4.00, v=32.00, K=60, St=62, type='c'):
"""
Parameters:
    K : Exercise Price
St: Current Stock Price
v : Volatility in percentage
r : Risk free rate in percentage
t : Time to expiration in days
type: Type of option 'c' for call 'p' for put
default: 'c'
"""
    # 'c' or 'C' selects a call option; anything else is treated as a put
try:
type=type.lower()
if(type=='c'):
option_type='call'
else:
option_type='put'
except:
option_type='put'
#Check time
try:
#convert time in days to years
t=t/365
except:
raise TypeError("Enter numerical value for time")
#Check risk free rate
try:
#convert percentage to decimal
r=r/100
except:
raise TypeError("Enter numerical value for risk free rate")
#Check volatility
try:
#convert percentage to decimal
v=v/100
except:
raise TypeError("Enter numerical value for volatility")
#Check Stock Price
try:
St=St+0
except:
raise TypeError("Enter numerical value for stock price")
#Check Exercise Price
try:
K=K+0
except:
raise TypeError("Enter numerical value for Exercise price")
n1=np.log(St/K)
n2=(r+(np.power(v,2)/2))*t
d=v*(np.sqrt(t))
d1=(n1+n2)/d
d2=d1-(v*np.sqrt(t))
if type=='c':
N_d1=norm.cdf(d1)
N_d2=norm.cdf(d2)
else:
N_d1=norm.cdf(-d1)
N_d2=norm.cdf(-d2)
A=(St*N_d1)
B=(K*N_d2*(np.exp(-r*t)))
if type=='c':
val=A-B
val_int=max(0,St-K)
else:
val=B-A
val_int=max(0,K-St)
val_time=val-val_int
# Option values in dictionary
value={'option value':val, 'intrinsic value':val_int, 'time value':val_time}
#CALCULATE OPTION GREEKS
if type=='c':
delta=N_d1
theta=(-((St*v*np.exp(-np.power(d1,2)/2))/(np.sqrt(8*np.pi*t)))-(N_d2*r*K*np.exp(-r*t)))/365
rho=t*K*N_d2*np.exp(-r*t)/100
else:
delta=-N_d1
theta=(-((St*v*np.exp(-np.power(d1,2)/2))/(np.sqrt(8*np.pi*t)))+(N_d2*r*K*np.exp(-r*t)))/365
rho=-t*K*N_d2*np.exp(-r*t)/100
gamma=(np.exp(-np.power(d1,2)/2))/(St*v*np.sqrt(2*np.pi*t))
vega=(St*np.sqrt(t)*np.exp(-np.power(d1,2)/2))/(np.sqrt(2*np.pi)*100)
#Option greeks in Dictionary
greeks={'delta':delta, 'gamma':gamma, 'theta':theta, 'vega':vega, 'rho':rho}
return {'value':value, 'greeks':greeks} |
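# Added usage sketch for black_scholes() above (not part of the original
# module); the inputs are the function's own example defaults.
if __name__ == "__main__":
    result = black_scholes(t=40, r=4.00, v=32.00, K=60, St=62, type='c')
    print("option value:", result['value']['option value'])
    print("delta:", result['greeks']['delta'])
    print("theta (per day):", result['greeks']['theta'])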
493222 | from flask import session, request, render_template
from app import app, LINK, get_url
from requests import post
from json import loads
@app.route('/sys_sign_in', methods=['POST'])
def signin():
x = request.form
if not all([i in x for i in ('login', 'pass')]):
return render_template('message.html', cont='3')
req = post(LINK, json={'method': 'profile.auth', 'login': x['login'], 'pass': x['pass']}).text
if len(req) <= 3:
return render_template('message.html', cont=req)
req = loads(req)
session['token'] = req['token']
session['id'] = req['id']
return get_url(request.args.get('url')) |
493227 | from styx_msgs.msg import TrafficLight
import numpy as np
import rospkg
import os
import rospy
import tensorflow as tf
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
class TLClassifier(object):
def __init__(self):
#TODO load classifier
self.color_th = 15
rospack = rospkg.RosPack()
model_dir = os.path.join(rospack.get_path('tl_detector/light_classification'), 'model')
model_path = os.path.join(model_dir, 'frozen_inference_graph.pb')
self.config = tf.ConfigProto()
# load object detection graph
self.graph = self.load_graph(model_path, self.config)
# tensorflow session
self.sess = tf.Session(graph=self.graph, config=self.config)
# definition of input and output tensors for the detection graph.
self.image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
# each box represents a part of the image in which a specific object has been located.
self.boxes = self.graph.get_tensor_by_name('detection_boxes:0')
# for each detected object the score represents the confidence level.
self.scores = self.graph.get_tensor_by_name('detection_scores:0')
# this is the MS COCO dataset class, we only need class 10 for the traffic light.
self.classes = self.graph.get_tensor_by_name('detection_classes:0')
# minimum required confidence for traffic lights
self.confidence_th = 0.1
# Traffic light publisher.
self.bridge = CvBridge()
self.tl_image = rospy.Publisher('/tl_detector/traffic_light', Image, queue_size=1)
rospy.loginfo('tl_classifier init complete')
# Function to load a graph from a proto buf file.
def load_graph(self, model, config):
with tf.Session(graph = tf.Graph(), config=config) as sess:
assert tf.get_default_session() is sess
gd = tf.GraphDef()
with tf.gfile.Open(model, 'rb') as f:
data = f.read()
gd.ParseFromString(data)
tf.import_graph_def(gd, name='')
graph = tf.get_default_graph()
#print('Graph v' + str(graph.version) + ', nodes: ' + ', '.join([n.name for n in graph.as_graph_def().node]))
return graph
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#TODO implement light color prediction
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return self.predict_detection(image)
def predict_detection(self, image):
h, w, _ = image.shape
        image_exp = cv2.resize(image, (300, 300))
        image_exp = np.expand_dims(image_exp, axis=0)  # batch dimension for the resized detector input
boxes, scores, classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={self.image_tensor: image_exp}
)
# post processing
boxes = np.squeeze(boxes)
classes = np.squeeze(classes).astype(np.int32)
scores = np.squeeze(scores)
# Get the box with the highest score.
conf_max = 0
idx = -1
for i in range(boxes.shape[0]):
if scores[i] > self.confidence_th and classes[i] == 10:
if scores[i] > conf_max:
conf_max = scores[i]
idx = i
prediction = TrafficLight.UNKNOWN
# if there is a traffic light with high confidence (max confidence traffic light is selected)
if idx != -1:
# create a tuple for the box
box = tuple(boxes[idx].tolist())
# corners of the box
top, left, bottom, right = box
# resize and expand a little more
left = int(max(0, (left * w) - 3))
top = int(max(0, (top * h) - 5))
bottom = int(min(h, (bottom * h) + 5))
right = int(min(w, (right * w) + 3))
box = left, top, right, bottom
ROI = image[top:bottom, left:right]
tl_image = cv2.resize(ROI, (32, 32))
self.tl_image.publish(self.bridge.cv2_to_imgmsg(tl_image, "rgb8"))
#rospy.loginfo(ROI.shape)
#rospy.loginfo(box)
prediction = self.predict_color(ROI)
else:
# publish empty image
self.tl_image.publish(self.bridge.cv2_to_imgmsg(cv2.cvtColor(np.zeros((32, 32), np.uint8), cv2.COLOR_GRAY2RGB), "rgb8"))
#rospy.loginfo(prediction)
return prediction
def predict_color(self, image):
"""
image: cv2.Image (RGB)
"""
R = image[:,:,0]
G = image[:,:,1]
R_area = np.sum(R > R.max() - 5)
G_area = np.sum(G > G.max() - 5)
#rospy.loginfo(G_area)
prediction = TrafficLight.UNKNOWN
if R_area >= self.color_th and G_area <= self.color_th:
prediction = TrafficLight.RED
elif R_area >= self.color_th and G_area >= self.color_th:
prediction = TrafficLight.YELLOW
elif G_area >= self.color_th:
prediction = TrafficLight.GREEN
else:
prediction = TrafficLight.UNKNOWN
if prediction == TrafficLight.RED:
print("red")
elif prediction == TrafficLight.YELLOW:
print("yellow")
elif prediction == TrafficLight.GREEN:
print("green")
else:
print("unknown - clear")
return prediction |
493240 | import logging
import os
import signal
import yaml
import time
from flask import Flask, jsonify, request
from gevent.pywsgi import WSGIServer
from triggerflow.service import storage
from triggerflow.service.worker import Worker
from triggerflow import eventsources
import threading
app = Flask(__name__)
app.debug = False
workers = {}
monitors = {}
config_map = None
trigger_storage = None
CONFIG_MAP_PATH = 'config_map.yaml'
def authenticate_request(db, auth):
if not auth or 'username' not in auth or 'password' not in auth:
return False
password = db.get_auth(username=auth['username'])
return password and password == auth['password']
@app.before_request
def before_request_func():
pass
# if not authenticate_request(trigger_storage, request.auth):
# return jsonify('Unauthorized'), 401
@app.route('/workspace/<workspace>', methods=['POST'])
def create_worker(workspace):
"""
This method gets the request parameters and starts a new thread worker
that will act as the event-processor for the the specific trigger workspace.
It returns 400 error if the provided parameters are not correct.
"""
if not trigger_storage.workspace_exists(workspace):
return jsonify('Workspace {} does not exists in the database'.format(workspace)), 400
if workspace in monitors:
return jsonify('Workspace {} is already created'.format(workspace)), 400
logging.info('New request to create workspace {}'.format(workspace))
start_worker_monitor(workspace)
return jsonify('Created workspace {}'.format(workspace)), 201
def start_worker_monitor(workspace):
"""
Auxiliary method to monitor a worker triggers
"""
global monitors
logging.info('Starting {} workspace monitor'.format(workspace))
def monitor():
if len(trigger_storage.get(workspace, 'triggers')) > 1:
start_worker(workspace)
while True:
if trigger_storage.new_trigger(workspace):
start_worker(workspace)
else:
break
monitors[workspace] = threading.Thread(target=monitor, daemon=True)
monitors[workspace].start()
def start_worker(workspace):
"""
Auxiliary method to start a worker
"""
global workers
if workspace not in workers or not workers[workspace].is_alive():
logging.info('Starting {} workspace'.format(workspace))
workers[workspace] = Worker(workspace, config_map)
workers[workspace].start()
@app.route('/workspace/<workspace>', methods=['DELETE'])
def delete_worker(workspace):
logging.info('New request to delete workspace {}'.format(workspace))
global workers, monitors
if workspace not in monitors and workspace not in workers:
return jsonify('Workspace {} is not active'.format(workspace)), 400
else:
if workspace in workers:
if workers[workspace].is_alive():
workers[workspace].stop_worker()
del workers[workspace]
del monitors[workspace]
return jsonify('Workspace {} deleted'.format(workspace)), 200
@app.route('/workspace/<workspace>/timeout', methods=['POST'])
def timeout(workspace):
    logging.info('New request to add timeout to workspace {}'.format(workspace))
timeout_data = request.get_json(force=True, silent=True)
if timeout_data is None:
return jsonify('Parameters error'), 400
def _timeout(timeout_data):
logging.debug('Starting event source instance')
logging.debug(timeout_data)
event_source_class = getattr(eventsources, '{}'.format(timeout_data['event_source']['class']))
event_source = event_source_class(**timeout_data['event_source']['parameters'])
time.sleep(timeout_data['seconds'])
timeout_data['event']['type'] = 'event.triggerflow.timeout'
event_source.publish_cloudevent(timeout_data['event'])
        logging.debug('Event {} sent after {} seconds'.format(timeout_data['event'], timeout_data['seconds']))
timeout_thread = threading.Thread(target=_timeout, args=(timeout_data.copy(),))
timeout_thread.start()
logging.debug('Timeout set for workspace {}'.format(workspace))
    return jsonify('Timeout set for workspace {}'.format(workspace)), 201
def main():
global config_map, trigger_storage, workers
# Create process group
os.setpgrp()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
component = os.getenv('INSTANCE', 'triggerflow-controller')
# Make sure we log to the console
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s.%(msecs)03dZ][%(levelname)s][triggerflow] %(message)s',
datefmt="%Y-%m-%dT%H:%M:%S")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logging.info('Starting Triggerflow Controller')
# also log to file if /logs is present
if os.path.isdir('/logs'):
fh = logging.FileHandler('/logs/{}_logs.log'.format(component))
fh.setFormatter(formatter)
logger.addHandler(fh)
logging.info('Loading private credentials')
with open(CONFIG_MAP_PATH, 'r') as config_file:
config_map = yaml.safe_load(config_file)
# Instantiate trigger storage client
logging.info('Creating trigger storage client')
backend = config_map['trigger_storage']['backend']
trigger_storage_class = getattr(storage, backend.capitalize() + 'TriggerStorage')
trigger_storage = trigger_storage_class(**config_map['trigger_storage']['parameters'])
port = int(os.getenv('PORT', 5000))
server = WSGIServer(('', port), app, log=logging.getLogger())
logging.info('Triggerflow service started on port {}'.format(port))
workspaces = trigger_storage.list_workspaces()
for wsp in workspaces:
start_worker(wsp)
try:
server.serve_forever()
except KeyboardInterrupt:
print('exiting...')
finally:
# Kill all child processes
os.killpg(0, signal.SIGKILL)
if __name__ == '__main__':
main()
|
493301 | from .version import __version__
from .core import *
from .topology import *
from .exterior import *
from .printing import *
|
493323 | import ajenti
from ajenti.api import *
from ajenti.plugins import *
ajenti.edition += '+vh'
info = PluginInfo(
title='Ajenti V Virtual Hosting',
description='Adds easy web hosting management to Ajenti',
icon='globe',
dependencies=[
PluginDependency('main'),
PluginDependency('services'),
],
)
destroyed_configs = []
def init():
import api
import extensions
import main
import ipc
import processes
import gate_static
import gate_proxy
import gate_fcgi |
493333 | from layout import ListInfoData
from layout import FieldMetadata
class ListInfoItemMetadata:
# private final ListInfoData infoData
# private String variableName
#
def __init__(self, infoData, _type, value):
self.infoData = infoData
self.type = _type
self.value = value
self.variableName = ""
def getIsText(self):
return self.type.type == FieldMetadata.TEXT
def getIsLast(self):
return self.infoData.isLast(self) |
493339 | import json
import os
class Config:
def __init__(self):
if self.exists():
print("Loading the config.json")
self.read()
else:
print("config.json file doesn't exist")
self.config = {}
def package_root_path(self, value=None):
if value is not None:
self.config["package_root_path"] = value
else:
return self.config["package_root_path"]
def repo_root_path(self, value=None):
if value is not None:
self.config["repo_root_path"] = value
else:
return self.config["repo_root_path"]
def refresh(self):
self.read()
def read(self):
with open("config.json", "r") as config_file:
self.config = json.load(config_file)
def save(self):
with open("config.json", "w+") as config_file:
config_file.write(json.dumps(self.config, indent=4, sort_keys=True))
def exists(self):
return os.path.exists("config.json")
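# Added usage sketch (not part of the original module): the intended
# read/update/save cycle. The paths below are placeholder examples.
if __name__ == "__main__":
    config = Config()
    config.package_root_path("/tmp/example/packages")  # placeholder path
    config.repo_root_path("/tmp/example/repo")          # placeholder path
    config.save()  # writes config.json in the current working directory
    print(config.package_root_path(), config.repo_root_path())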
|
493412 | from PyQt5.QtCore import QModelIndex, QRect
from PyQt5.QtWidgets import QAbstractItemView
from inselect.lib.inselect_error import InselectError
from inselect.lib.utils import debug_print
from inselect.gui.roles import PixmapRole, RectRole, MetadataValidRole
from inselect.gui.utils import update_selection_model
from .boxes_scene import BoxesScene
class GraphicsItemView(QAbstractItemView):
"""Qt have used 'view' in two different contexts: the model-view
architecture and the graphics/view framework, henceforth MV and GV
respectively.
This class is a MV view that acts as an interface between MV and GV.
A limited number of events are passed between the two systems:
* changes in selection
* changes in boxes' position and size (RectRole)
* addition of boxes
* deletion of boxes
* metadata valid status (MetadataValidRole)
* TODO box verified status
"""
# Based on idea in:
# http://stackoverflow.com/questions/3188584/how-to-use-qt-model-view-framework-with-the-graphics-view-framework
def __init__(self, parent=None):
super(GraphicsItemView, self).__init__(parent)
self.scene = BoxesScene(self, parent)
# List of QGraphicsRectItem
self._rows = []
self.handling_selection_update = False
self.scene.selectionChanged.connect(self.scene_selection_changed)
def reset(self):
"""QAbstractItemView virtual
"""
debug_print('GraphicsItemView.reset')
super(GraphicsItemView, self).reset()
model = self.model()
self.scene.new_document(model.data(QModelIndex(), PixmapRole))
# Build up new mapping
rows = [None] * model.rowCount()
for row in range(model.rowCount()):
index = model.index(row, 0)
rows[row] = self.scene.add_box(index.data(RectRole),
index.data(MetadataValidRole))
self._rows = rows
def show_alternative_pixmap(self, pixmap):
"""Show or clear an alternative pixmap in place of the document's usual
pixmap. pixmaps should either be a QPixmap of the same dimensions as the
documents pixmap (which is shown) or None (which clears any existing
alternative pixmap)
"""
debug_print('show_alternative_pixmap', pixmap)
model = self.model()
pixmap = pixmap if pixmap else model.data(QModelIndex(), PixmapRole)
self.scene.set_pixmap(pixmap)
def rowsInserted(self, parent, start, end):
"""QAbstractItemView slot
"""
debug_print('GraphicsItemView.rowsInserted', start, end)
        # New boxes are coming, but their rects are not yet known.
# Create new items with zero height and zero width rects - actual rects
# will be set in dataChanged()
n = 1 + end - start
new = [None] * n
rect = QRect(0, 0, 0, 0)
for row in range(n):
new[row] = self.scene.add_box(rect, False)
self._rows[start:start] = new
def dataChanged(self, topLeft, bottomRight, roles=[]):
"""QAbstractItemView virtual
"""
debug_print('GraphicsItemView.dataChanged', topLeft.row(), bottomRight.row())
for row in range(topLeft.row(), 1 + bottomRight.row()):
item = self._rows[row]
            # RectRole data is a QRect - integer coordinates
index = self.model().index(row, 0)
item.set_rect(index.data(RectRole))
item.set_isvalid(index.data(MetadataValidRole))
def rowsAboutToBeRemoved(self, parent, start, end):
"""QAbstractItemView slot
"""
debug_print('GraphicsItemView.rowsAboutToBeRemoved', start, end)
if self.handling_selection_update:
debug_print('Unexpected handling_selection_update in '
'GraphicsItemView.rowsAboutToBeRemoved')
# Ignore the selectionChanged() notifications that the scene will send
# for every item that is about to be removed.
self.handling_selection_update = True
try:
# TODO Context for this
for item in self._rows[start:end]:
self.scene.removeItem(item)
finally:
self.handling_selection_update = False
# Remove items
self._rows[start:end] = []
def selectionChanged(self, selected, deselected):
"""QAbstractItemView virtual
"""
# Tell the scene about the new selection
# TODO LH Use a timer to implement a delayed refresh
if not self.handling_selection_update:
# TODO Context for this
debug_print('GraphicsItemView.selectionChanged')
self.handling_selection_update = True
try:
current = set(self.scene.selectedItems())
new = set(self._rows[i.row()] for i in self.selectionModel().selectedIndexes())
for item in new.difference(current):
item.setSelected(True)
item.update()
for item in current.difference(new):
item.setSelected(False)
item.update()
if new:
for view in self.scene.views():
view.zoom_to_items(new)
finally:
self.handling_selection_update = False
def rows_of_items(self, items):
"""Returns a generator of row numbers of the list of QGraphicsItems
"""
# TODO LH This is horrible
# TODO LH Use a view to support changes to self._rows during iteration?
return (self._rows.index(i) for i in items)
def indexes_of_items(self, items):
"""Returns a generator of indexes of the list of QGraphicsItems
"""
# TODO LH Use a view to support changes to self._rows during iteration?
return (self.model().index(row, 0) for row in self.rows_of_items(items))
def items_of_rows(self, rows):
"""Returns an iterable of QGraphicsItems for the given rows
"""
return (self._rows[r] for r in rows)
def items_of_indexes(self, indexes):
"""Returns an iterable of QGraphicsItems for the given indexes
"""
return (self._rows[i.row()] for i in indexes)
def scene_selection_changed(self):
"""scene.selectionChanged slot
"""
# TODO LH Fix dreadful performance when selection changing as a result
# of mouse drag
if not self.handling_selection_update:
debug_print('GraphicsItemView.scene_selection_changed')
# TODO Context for this
self.handling_selection_update = True
try:
new_selection = set(self.rows_of_items(self.scene.selectedItems()))
update_selection_model(self.model(), self.selectionModel(),
new_selection)
finally:
self.handling_selection_update = False
def scene_item_rects_updated(self, items):
"""The user moved or resized items in the scene
"""
debug_print('GraphicsItemView.item_rects_updated')
for index, item in zip(self.indexes_of_items(items), items):
            # item.sceneBoundingRect() is the item's rect in the correct
            # coordinate system
debug_print('Row [{0}] updated'.format(index.row()))
rect = item.sceneBoundingRect()
# Cumbersome conversion to ints
rect = QRect(rect.left(), rect.top(), rect.width(), rect.height())
self.model().setData(index, rect, RectRole)
def scene_box_added(self, rect):
"""The user added a box
"""
m = self.model()
row = len(self._rows)
if not m.insertRow(row):
raise InselectError('Could not insert row')
else:
# Cumbersome conversion to ints
rect = QRect(rect.left(), rect.top(), rect.width(), rect.height())
if not m.setData(m.index(row, 0), rect, RectRole):
raise InselectError('Could not set rect')
else:
# Select the new box
self.scene.clearSelection()
item = next(self.items_of_rows([row]))
item.setSelected(True)
item.update()
|
493414 | import math
from seamless.highlevel import Context, Cell
import json
ctx = Context()
ctx.pi = math.pi
ctx.doubleit = lambda a: 2 * a
ctx.doubleit.hash_pattern = {"*": "#"}
ctx.doubleit.a = ctx.pi
ctx.twopi = ctx.doubleit
ctx.translate()
graph = ctx.get_graph()
print(json.dumps( graph, indent=2, sort_keys=True))
json.dump(graph, open("twopi-deepcell.seamless", "w"), indent=2, sort_keys=True)
ctx.compute()
print(ctx.pi.value)
print(ctx.twopi.value)
ctx.doubleit.code = lambda a: 42
ctx.compute()
print(ctx.pi.value)
print(ctx.twopi.value)
ctx.translate(force=True)
ctx.compute()
print(ctx.pi.value)
print(ctx.twopi.value)
print()
ctx.doubleit.code = lambda a: 2 * a
ctx.compute()
print(ctx.pi.value)
print(ctx.twopi.value)
graph = ctx.get_graph()
json.dump(graph, open("twopi-deepcell-result.seamless", "w"), indent=2, sort_keys=True)
archive = ctx.get_zip()
with open("twopi-deepcell-result.zip", "wb") as f:
f.write(archive)
import os
os.system("md5sum twopi-deepcell.seamless twopi-deepcell-result.seamless twopi-deepcell-result.zip") |
493449 | from builtins import range
from mrq.helpers import ratelimit
import time
def test_helpers_ratelimit(worker):
worker.start_deps()
assert ratelimit("k3", 1, per=1) == 1
assert ratelimit("k3", 1, per=1) == 0
assert ratelimit("k3", 1, per=1) == 0
for i in range(0, 10):
r = ratelimit("k", 10, per=1)
assert r == 10 - i
assert ratelimit("k", 10, per=1) == 0
assert ratelimit("k2", 5, per=1) == 5
# We *could* have failures there if we go over a second but we've not seen
# it much so far.
for i in range(0, 100):
assert ratelimit("k", 10, per=1) == 0
# TODO: test the "per" argument a bit better.
time.sleep(1)
assert ratelimit("k", 10, per=1) == 10
assert ratelimit("k", 10, per=1) == 9
# This is actually another counter.
assert ratelimit("k", 10, per=10) == 10
assert ratelimit("k", 10, per=10) == 9
worker.stop_deps()
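# Added illustration (not an original test): how ratelimit() is typically used
# to throttle an action. ratelimit(key, n, per=seconds) returns the remaining
# quota for the current window (n, n-1, ..., 1) and 0 once it is exhausted.
def throttled_send(key, payload, send):
    """Call send(payload) at most 10 times per second for the given key.
    send is a caller-supplied callable; this helper is only a sketch."""
    if ratelimit(key, 10, per=1) > 0:
        send(payload)
        return True
    return False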
|
493467 | import torch.nn as nn
class PoseDecoder(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.nl = nn.ReLU()
self.squeeze = nn.Conv2d(input_channels, 256, 1)
self.conv_1 = nn.Conv2d(256, 256, 3, 1, 1)
self.conv_2 = nn.Conv2d(256, 256, 3, 1, 1)
self.conv_3 = nn.Conv2d(256, 6, 1)
# The original monodepth2 PoseDecoder
# included a constant multiplication by
# 0.01 in the forward pass, possibly to
# make x_angle and x_translation tiny at
# the beginning of training for stability.
# In my opinion this hurts performance
# with weight_decay enabled.
# Scaling the initial weights should have
# a similar effect.
self.conv_3.weight.data *= 0.01
self.conv_3.bias.data *= 0.01
def forward(self, x):
x = self.squeeze(x)
x = self.nl(x)
x = self.conv_1(x)
x = self.nl(x)
x = self.conv_2(x)
x = self.nl(x)
x = self.conv_3(x)
x = x.mean((3, 2)).view(-1, 1, 1, 6)
x_angle = x[..., :3]
x_translation = x[..., 3:]
return x_angle, x_translation
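# Added usage sketch (not part of the original module). The input is assumed
# to be a bottleneck feature map with `input_channels` channels (e.g. the last
# encoder stage of a monodepth2-style network); the shapes are examples only.
if __name__ == "__main__":
    import torch
    decoder = PoseDecoder(input_channels=512)
    features = torch.randn(4, 512, 6, 20)  # (batch, channels, height, width)
    angle, translation = decoder(features)
    print(angle.shape, translation.shape)  # both torch.Size([4, 1, 1, 3])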
|
493474 | import numpy as np
import os
from PIL import Image
from keras.preprocessing import image
def preprocess_input(x):
x = x.astype(np.float32)
x /= 255.
return x
def decode_output(x):
x = x.astype(np.float32)
x *= 255.
return x
def make_paths_from_directory(root):
input_paths = []
for dirpath, dirnames, filenames in os.walk(root):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
with open(filepath, 'rb') as fp:
magic = fp.read(8)
if magic.startswith(b'GIF89a') or magic.startswith(b'GIF87a'):
filetype = 'gif'
elif magic == b'\xff\xd8\xff\xe0\x00\x10JF':
filetype = 'jpeg'
elif magic.startswith(b'\x89PNG'):
filetype = 'png'
else:
print(' unsupported file type', repr(magic), filepath)
continue
input_paths.append(filepath)
return input_paths
def make_arrays_from_paths(paths, preprocess=None, target_size=None):
rv = []
for path in paths:
img = image.load_img(path, target_size=target_size)
ar = image.img_to_array(img)
if preprocess:
ar = preprocess(ar)
rv.append(ar)
return np.array(rv)
def generate_img(xgenerater):
num_generate_imgs = 144
z_dim = xgenerater.input_shape[-1]
z = np.random.normal(size=(num_generate_imgs, 1, 1, z_dim))
x_gen = xgenerater.predict_on_batch(z)
x_gen = decode_output(x_gen)
x_gen = np.clip(x_gen, 0., 255.).astype(np.uint8)
# Concatenate generated images
grid_size = int(np.sqrt(num_generate_imgs))
rows = []
for i in range(0, num_generate_imgs, grid_size):
row = np.concatenate(x_gen[i:i+grid_size], axis=1)
rows.append(row)
concatenated = np.concatenate(rows, axis=0)
return Image.fromarray(concatenated)
def reconstruct_img(x, xgen, zgen):
"""
x assumes x_train
xgen: trained xgenerater
zgen: trained zgenerater
"""
# original images
ind = np.random.permutation(len(x))
num_generate_imgs = 144
x = (x[ind])[:num_generate_imgs//2]
x = x.astype(np.uint8)
# generated images
x_copy = np.copy(x)
x_copy = x_copy.astype(np.float32)
x_copy = preprocess_input(x_copy)
z_gen = zgen.predict_on_batch(x_copy)
x_gen = xgen.predict_on_batch(z_gen)
x_gen = decode_output(x_gen)
x_gen = np.clip(x_gen, 0., 255.).astype(np.uint8)
grid_size = int(np.sqrt(num_generate_imgs))
cols = []
for i in range(0, num_generate_imgs//2, grid_size):
col_orig = np.concatenate(x[i:i+grid_size], axis=0)
col_gen = np.concatenate(x_gen[i:i+grid_size], axis=0)
col = np.concatenate([col_orig, col_gen], axis=1)
cols.append(col)
concatenated = np.concatenate(cols, axis=1)
return Image.fromarray(concatenated)
|
493491 | import numpy as np
import random
import config
class DataProvider:
def __init__(self, nparray_path):
print('Load data for STN ...')
self.imgs_trans = np.load(nparray_path + 'SE_transformed_imgs_with_W.npy')
img_embd_sz_arr = np.load(nparray_path + 'img_embd_sz.npy')
self.feed_size = self.imgs_trans.shape[0]
self.img_sz = (int(img_embd_sz_arr[0]), int(img_embd_sz_arr[1]), 4)
self.cnt = 0
idx_train = list(range(0, self.feed_size)) #random.sample(range(self.feed_size), self.feed_size)
# self.batch_test = self.imgs_trans[idx_test, ...]
self.batch_train = self.imgs_trans[idx_train, ...]
print('Image size: ', self.img_sz)
print('Finished uploading np array, Train data shape:', self.batch_train.shape, ', feed size: ', self.feed_size)
def next_batch(self, batch_size, data_type):
batch_img = None
if data_type == 'train':
if config.stn['ordered_batch']: # (use ordered batch size)
if self.cnt+batch_size <= self.feed_size:
idx = list(range(self.cnt, self.cnt+batch_size))
self.cnt = self.cnt+(batch_size//7)
else:
idx = list(range(self.feed_size-batch_size, self.feed_size))
self.cnt = 0
# END DEBUG (use ordered batch size)
else: # (use random batch size)
idx = random.sample(range(len(self.batch_train)), batch_size)
# print("idx: ", idx)
batch_img = self.batch_train[idx, ...]
elif data_type == 'test':
batch_img = self.batch_train
return batch_img
|
493519 | from django.contrib import admin
from django.db import models
from django.forms import TextInput
from django.utils.safestring import mark_safe
from ..core.admin import DittoItemModelAdmin
from .models import Account, Photo, Photoset, User
@admin.register(Account)
class AccountAdmin(admin.ModelAdmin):
list_display = (
"user",
"has_credentials",
"is_active",
"time_created",
"time_modified",
)
fieldsets = (
(None, {"fields": ("user", "is_active",)}),
(
"API",
{
"fields": ("api_key", "api_secret",),
"description": (
"Keys and secrets require creation of an app at "
'<a href="https://www.flickr.com/services/apps/create/apply/">'
"flickr.com/...</a>"
),
},
),
(
"Data",
{"classes": ("collapse",), "fields": ("time_created", "time_modified",)},
),
)
formfield_overrides = {
# Make the inputs full-width.
models.CharField: {"widget": TextInput(attrs={"class": "vLargeTextField"})},
}
readonly_fields = (
"time_created",
"time_modified",
)
def has_credentials(self, obj):
return obj.has_credentials()
has_credentials.boolean = True
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = (
"show_avatar",
"realname",
"username",
"fetch_time",
)
list_display_links = (
"show_avatar",
"realname",
)
fieldsets = (
(
None,
{
"fields": (
"realname",
"username",
"nsid",
"is_pro",
"location",
"description",
"photos_url",
"profile_url",
"avatar",
"photos_count",
"photos_views",
"photos_first_date",
"photos_first_date_taken",
"timezone_id",
)
},
),
(
"Data",
{
"classes": ("collapse",),
"fields": (
"iconserver",
"iconfarm",
"raw",
"fetch_time",
"time_created",
"time_modified",
),
},
),
)
readonly_fields = (
"raw",
"fetch_time",
"time_created",
"time_modified",
)
search_fields = (
"realname",
"username",
)
def show_avatar(self, instance):
return mark_safe(
'<img src="%s" width="24" height="24" />' % (instance.avatar_url)
)
show_avatar.short_description = ""
@admin.register(Photoset)
class PhotosetAdmin(admin.ModelAdmin):
list_display = (
"title",
"flickr_created_time",
"fetch_time",
)
list_display_links = ("title",)
fieldsets = (
(
None,
{
"fields": (
"user",
"title",
"description",
"primary_photo",
"flickr_id",
"photo_count",
"video_count",
"view_count",
"comment_count",
"last_update_time",
"flickr_created_time",
"photos",
)
},
),
(
"Data",
{
"classes": ("collapse",),
"fields": ("raw", "fetch_time", "time_created", "time_modified",),
},
),
)
readonly_fields = (
"raw",
"fetch_time",
"time_created",
"time_modified",
)
search_fields = (
"title",
"description",
)
raw_id_fields = (
"primary_photo",
"photos",
)
class TaggedPhotoInline(admin.TabularInline):
model = Photo.tags.through
raw_id_fields = ("tag",)
@admin.register(Photo)
class PhotoAdmin(DittoItemModelAdmin):
list_display = (
"title",
"show_thumb",
"post_time",
"taken_time",
)
list_display_links = (
"title",
"show_thumb",
)
list_filter = (
"post_time",
"fetch_time",
)
date_hierarchy = "post_time"
search_fields = [
"title",
"description",
]
fieldsets = (
(
None,
{
"fields": (
"show_image",
"user",
"title",
"description",
"summary",
"permalink",
"is_private",
"flickr_id",
"media",
"license",
"original_file",
"video_original_file",
)
},
),
(
"Times",
{
"classes": ("collapse",),
"fields": (
"post_time",
"post_year_str",
"last_update_time",
"taken_time",
"taken_year_str",
"taken_granularity",
"taken_unknown",
),
},
),
(
"Counts, secrets, server, etc",
{
"classes": ("collapse",),
"fields": (
"view_count",
"comment_count",
"secret",
"original_secret",
"server",
"farm",
"rotation",
"original_format",
"safety_level",
"has_people",
),
},
),
(
"Sizes",
{
"classes": ("collapse",),
"fields": (
("thumbnail_width", "thumbnail_height",),
("small_width", "small_height",),
("small_320_width", "small_320_height",),
("medium_width", "medium_height",),
("medium_640_width", "medium_640_height",),
("medium_800_width", "medium_800_height",),
("large_width", "large_height",),
("large_1600_width", "large_1600_height",),
("large_2048_width", "large_2048_height",),
("x_large_3k_width", "x_large_3k_height",),
("x_large_4k_width", "x_large_4k_height",),
("x_large_5k_width", "x_large_5k_height",),
("x_large_6k_width", "x_large_6k_height",),
("original_width", "original_height",),
("mobile_mp4_width", "mobile_mp4_height",),
("site_mp4_width", "site_mp4_height",),
("hd_mp4_width", "hd_mp4_height",),
("video_original_width", "video_original_height",),
),
},
),
(
"Location",
{
"classes": ("collapse",),
"fields": (
"geo_is_private",
("latitude", "longitude",),
"location_accuracy",
"location_context",
"location_place_id",
"location_woeid",
"locality_name",
"locality_place_id",
"locality_woeid",
"county_name",
"county_place_id",
"county_woeid",
"region_name",
"region_place_id",
"region_woeid",
"country_name",
"country_place_id",
"country_woeid",
),
},
),
(
"EXIF",
{
"classes": ("collapse",),
"fields": (
"exif_camera",
"exif_lens_model",
"exif_aperture",
"exif_exposure",
"exif_flash",
"exif_focal_length",
"exif_iso",
),
},
),
(
"Data",
{
"classes": ("collapse",),
"fields": (
"raw",
"sizes_raw",
"exif_raw",
"fetch_time",
"time_created",
"time_modified",
),
},
),
)
radio_fields = {"media": admin.HORIZONTAL}
readonly_fields = (
"post_year_str",
"taken_year_str",
"show_image",
"raw",
"fetch_time",
"time_created",
"time_modified",
"sizes_raw",
"exif_raw",
)
formfield_overrides = {
# Make the inputs full-width.
models.CharField: {"widget": TextInput(attrs={"class": "vLargeTextField"})},
}
inlines = [
TaggedPhotoInline,
]
raw_id_fields = ("user",)
def show_thumb(self, instance):
return mark_safe(
'<img src="%s" width="%s" height="%s" />'
% (
instance.thumbnail_url,
instance.thumbnail_width,
instance.thumbnail_height,
)
)
show_thumb.short_description = "Thumbnail"
def show_image(self, instance):
return mark_safe(
'<img src="%s" width="%s" height="%s" />'
% (instance.small_url, instance.small_width, instance.small_height)
)
show_image.short_description = "Small image"
def taken_year_str(self, instance):
"So Admin doesn't add a comma, like '2,016'."
return str(instance.taken_year)
taken_year_str.short_description = "Taken year"
|
493530 | import logging
from collections import defaultdict
from typing import Dict, List, Sequence, Any, Optional, Union, cast
from .encoding import flip_bloom_filter
from .pprlindex import PPRLIndex, ReversedIndexResult
from .signature_generator import generate_signatures
from .stats import reversed_index_per_strategy_stats, reversed_index_stats
from .validation import PSigConfig
class PPRLIndexPSignature(PPRLIndex):
"""Class that implements the PPRL indexing technique:
    Reference: Scalable entity resolution using probability signatures
    on parallel databases.
    This class includes an implementation of the p-sig algorithm.
"""
def __init__(self, config: Union[PSigConfig, Dict]) -> None:
"""Initialize the class and set the required parameters.
Arguments:
        - config: Configuration for the P-Sig reversed index.
"""
if isinstance(config, dict):
config = PSigConfig.parse_obj(config)
config = cast(PSigConfig, config)
super().__init__(config)
self.blocking_features = config.blocking_features
self.filter_config = config.filter
self.blocking_config = config.blocking_filter
self.signature_strategies = config.signatures
self.rec_id_col = config.record_id_column
def build_reversed_index(self, data: Sequence[Sequence], header: Optional[List[str]] = None):
"""Build inverted index given P-Sig method.
"""
feature_to_index = self.get_feature_to_index_map(data, header)
self.set_blocking_features_index(self.blocking_features, feature_to_index)
# Build index of records
if self.rec_id_col is None:
record_ids = list(range(len(data)))
else:
record_ids = [x[self.rec_id_col] for x in data]
reversed_index_per_strategy = \
[defaultdict(list) for _ in range(len(self.signature_strategies))] # type: List[Dict[str, List[Any]]]
# Build inverted index
# {signature -> record ids}
for rec_id, dtuple in zip(record_ids, data):
signatures = generate_signatures(self.signature_strategies, dtuple, feature_to_index)
for i, signature in enumerate(signatures):
reversed_index_per_strategy[i][signature].append(rec_id)
reversed_index_per_strategy = [self.filter_reversed_index(data, reversed_index) for reversed_index in
reversed_index_per_strategy]
# somehow the reversed_index of the first strategy gets overwritten in the next step. Thus, we generate the
# statistics of the different strategies first.
strategy_stats = reversed_index_per_strategy_stats(reversed_index_per_strategy, len(data))
# combine the reversed indices into one
filtered_reversed_index = reversed_index_per_strategy[0]
for rev_idx in reversed_index_per_strategy[1:]:
filtered_reversed_index.update(rev_idx)
# check if final inverted index is empty
if len(filtered_reversed_index) == 0:
raise ValueError('P-Sig: All records are filtered out!')
# compute coverage information
entities = set()
for recids in filtered_reversed_index.values():
for rid in recids:
entities.add(rid)
coverage = len(entities) / len(record_ids)
if coverage < 1:
logging.warning(
f'The P-Sig configuration leads to incomplete coverage ({round(coverage * 100, 2)}%)!\n'
f'This means that not all records are part of at least one block. You can increase coverage by '
f'adjusting the filter to be less aggressive or by finding signatures that produce smaller block sizes.'
)
# map signatures in reversed_index into bloom filter
num_hash_func = self.blocking_config.number_of_hash_functions
bf_len = self.blocking_config.bloom_filter_length
reversed_index = {} # type: Dict[str, List[Any]]
for signature, rec_ids in filtered_reversed_index.items():
bf_set = str(tuple(flip_bloom_filter(signature, bf_len, num_hash_func)))
if bf_set in reversed_index:
reversed_index[bf_set].extend(rec_ids)
else:
reversed_index[bf_set] = rec_ids
# create some statistics around the blocking results
stats = reversed_index_stats(reversed_index)
stats['statistics_per_strategy'] = strategy_stats
stats['coverage'] = coverage
return ReversedIndexResult(reversed_index, stats)
def filter_reversed_index(self, data: Sequence[Sequence], reversed_index: Dict):
# Filter inverted index based on ratio
n = len(data)
# filter blocks based on filter type
filter_type = self.filter_config.type
if filter_type == "ratio":
min_occur_ratio = self.filter_config.min
max_occur_ratio = self.filter_config.max
reversed_index = {k: v for k, v in reversed_index.items() if n * max_occur_ratio > len(v) > n * min_occur_ratio}
elif filter_type == "count":
min_occur_count = self.filter_config.min
max_occur_count = self.filter_config.max
reversed_index = {k: v for k, v in reversed_index.items() if max_occur_count > len(v) > min_occur_count}
else:
            raise NotImplementedError("Filter type '{}' is not supported yet.".format(filter_type))
return reversed_index
|
493569 | import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
import os.path
from os import path
import json
import pickle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.join(BASE_DIR, 'utils'))
sys.path.append(os.path.join(BASE_DIR, '..'))
sys.path.append(os.path.join(BASE_DIR, 'tf_ops/nn_distance'))
import tf_nndistance
import shutil
import time
import mesh_utils
import objloader
SHAPENET_BASEDIR = '/orion/group/ShapeNetManifold_10000/'
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--category', default="chair", help='Which single class to use')
parser.add_argument('--num_point', type=int, default=2048, help='Point Number [default: 2048]')
parser.add_argument('--batch_size', type=int, default=64, help='Batch Size during training [default: 64]')
parser.add_argument('--num_candidates', type=int, default=50, help='Number of \'nearest neighbors\' to take.')
parser.add_argument('--generate_negatives', default = False, help='for generating negatives [default: False]')
parser.add_argument('--data_split', default = "train", help='which data split to use')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
OBJ_CAT = FLAGS.category
NUM_CANDIDATES = FLAGS.num_candidates
GENERATE_NEGS = FLAGS.generate_negatives
DATA_SPLIT = FLAGS.data_split
HOSTNAME = socket.gethostname()
np.random.seed(0)
#### Get point clouds for each object instance
with open('../shapenetcore_v2_split.json') as json_file:
data = json.load(json_file)
train_data = data[DATA_SPLIT]
num_categories = len(list(train_data.keys()))
idx = -1
for i in range(num_categories):
cat_name = train_data[str(i)]["category"]
if (cat_name == OBJ_CAT):
idx = i
break
synsetid = train_data[str(idx)]["synsetid"]
model_names = train_data[str(idx)]["model_names"]
num_samples = train_data[str(idx)]["num_samples"]
print("Category Name: "+cat_name)
print("Synsetid: "+synsetid)
print("Num samples: "+str(num_samples))
#####h5 file handles
def save_dataset(fname, pcs):
cloud = np.stack([pc for pc in pcs])
    fout = h5py.File(fname, 'w')
fout.create_dataset('data', data=cloud, compression='gzip', dtype='float32')
fout.close()
def load_h5(h5_filename):
    f = h5py.File(h5_filename, 'r')
data = f['data'][:]
return data
# ##load obj files and get point clouds
start_time = time.time()
fname = DATA_SPLIT +"_"+OBJ_CAT+".h5"
if not path.exists(fname):
OBJ_POINTCLOUDS = []
for i in range(len(model_names)):
ref_model_name = model_names[i]
ref_filename = os.path.join(SHAPENET_BASEDIR, synsetid, ref_model_name, 'models', 'model_normalized.obj')
V, F, VT, FT, VN, FN, face_mat, kdmap = objloader.LoadSimpleOBJ_manifold(ref_filename)
idx_pts = np.arange(V.shape[0])
np.random.shuffle(idx_pts)
if(V.shape[0]<NUM_POINT):
pass
pc = mesh_utils.mesh_to_pc(V, F, num_points=NUM_POINT)
OBJ_POINTCLOUDS.append(pc)
if (i%50==0):
print("Time elapsed: "+str(time.time()-start_time)+" sec for "+str(i)+" samples.")
OBJ_POINTCLOUDS = np.array(OBJ_POINTCLOUDS)
save_dataset(fname, OBJ_POINTCLOUDS)
OBJ_POINTCLOUDS = load_h5(fname)
print(OBJ_POINTCLOUDS.shape)
print("Loading and sampling time: "+str(time.time()-start_time)+" sec")
print("Done processing h5 files.")
def chamfer_loss(pc1, pc2):
""" pred: BxNx3,
label: BxNx3, """
dists_forward,_,dists_backward,_ = tf_nndistance.nn_distance(pc1, pc2)
# loss = dists_forward+dists_backward
loss = tf.reduce_mean(dists_forward+dists_backward, axis=1)
return loss
def evaluate():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl_1 = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
pointclouds_pl_2 = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
chamfer_distance = chamfer_loss(pointclouds_pl_1, pointclouds_pl_2)
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Init variables
init = tf.global_variables_initializer()
sess.run(init)
ops = {'pointclouds_pl_1': pointclouds_pl_1,
'pointclouds_pl_2': pointclouds_pl_2,
'chamfer_distance': chamfer_distance,
}
epoch_loss = eval_one_epoch(sess, ops)
def eval_one_epoch(sess, ops):
""" ops: dict mapping from string to tf ops """
current_data = OBJ_POINTCLOUDS
num_batches = current_data.shape[0]//BATCH_SIZE
candidates_idx = []
start_time = time.time()
for j in range(current_data.shape[0]):
curr_pc = current_data[j, :, :]
curr_pc = np.expand_dims(curr_pc, axis=0)
curr_pc = np.tile(curr_pc, (BATCH_SIZE, 1, 1))
#To compare
chamfer_distances = []
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data = current_data[start_idx:end_idx, :, :]
feed_dict = {ops['pointclouds_pl_1']: curr_pc,
ops['pointclouds_pl_2']: batch_data,}
chamfer_distance = sess.run([ops['chamfer_distance']], feed_dict=feed_dict)
chamfer_distance = np.array(chamfer_distance)[0]
for i in range(BATCH_SIZE):
chamfer_distances.append(chamfer_distance[i])
for extra in range((num_batches)*BATCH_SIZE, current_data.shape[0]):
extra_pc = current_data[extra, :, :]
extra_pc = np.expand_dims(extra_pc, axis=0)
extra_pc = np.tile(extra_pc, (BATCH_SIZE, 1, 1))
feed_dict = {ops['pointclouds_pl_1']: curr_pc,
ops['pointclouds_pl_2']: extra_pc,}
chamfer_distance = sess.run([ops['chamfer_distance']], feed_dict=feed_dict)
chamfer_distance = np.array(chamfer_distance)[0]
#Only the first pair is not a placeholder
chamfer_distances.append(chamfer_distance[0])
chamfer_distances = np.array(chamfer_distances)
if (GENERATE_NEGS):
#Get random 25 from (1/3, 2/3) * num_samples and 25 from (2/3, 1) * num_samples
num_samples = chamfer_distances.shape[0]
idx = np.argsort(chamfer_distances)
med_cd_idx = idx[int(num_samples/3):2*int(num_samples/3)]
m_idx_select = np.random.choice(med_cd_idx.shape[0], int(NUM_CANDIDATES/2), replace=False)
med_cd_idx_selected = med_cd_idx[m_idx_select]
far_cd_idx = idx[2*int(num_samples/3):]
f_idx_select = np.random.choice(far_cd_idx.shape[0], int(NUM_CANDIDATES/2), replace=False)
far_cd_idx_selected = far_cd_idx[f_idx_select]
selected = np.concatenate((med_cd_idx_selected, far_cd_idx_selected), axis=0)
candidates_idx.append(selected)
###Gets top 50 chamfer distance candidates
else:
idx = np.argsort(chamfer_distances) ## sorted but slower runtime
#Remove itself
idx_candidates = np.delete(idx, np.argwhere(idx==j))
candidates_idx.append(idx_candidates[:NUM_CANDIDATES])
if (j%100==0):
print("Time elapsed: "+str(time.time()-start_time)+" sec for "+str(j)+" samples.")
print(candidates_idx[0])
print(candidates_idx[100])
if (GENERATE_NEGS):
filename = 'candidates_' + DATA_SPLIT + '_'+OBJ_CAT+'_negatives.pickle'
else:
filename = 'candidates_' + DATA_SPLIT + '_'+OBJ_CAT+'_retrieval.pickle'
    with open(filename, 'wb') as handle:
pickle.dump(candidates_idx, handle, protocol=pickle.HIGHEST_PROTOCOL)
print(filename)
print("Done.")
if __name__ == "__main__":
evaluate()
|
493674 | import os, sys
import subprocess
TOOLS_DIR = "tools"
def RunCommand(cmds):
ret = 0
cmds[0] = "{}/{}/{}.py".format(os.getcwd(), TOOLS_DIR, cmds[0])
if os.path.exists(cmds[0]):
cmds.insert(0, "python3")
ret = subprocess.call(cmds)
else:
print("Invalid command: ", cmds[0])
ret = -1
return ret
# ----- Main Entry Point ----- #
argc = len(sys.argv)
i = 1
while i < argc:
cmds = [sys.argv[i]]
while True:
if i < argc - 1 and sys.argv[i+1][0] == "-":
cmds.append(sys.argv[i+1][1:])
i = i + 1
else:
break
print("\n------------------------------")
print("Executing: ", cmds[0])
if len(cmds) > 1:
print("With arguments: {}".format(", ".join(cmds[1:])))
if RunCommand(cmds) != 0:
break
i = i + 1
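# Usage sketch (hypothetical script and tool names, not taken from this repository):
#   python run.py build -release clean
# would execute tools/build.py with the argument "release", then tools/clean.py.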
|
493679 | import numpy as np
from .util import zero_pad, format_json
from .grouping import group_rows
from .constants import log, _log_time
_MIN_BIN_COUNT = 20
_TOL_FREQ = 1e-3
def rotationally_invariant_identifier(mesh, length=6, as_json=False, json_digits=None):
'''
Given an input mesh, return a vector or string that has the following properties:
* invariant to rotation of the mesh
    * robust to different tessellation of the surfaces
* meshes that are similar but not identical return values that are close in euclidean distance
    Does this by computing the area-weighted distribution of the radius (from the center of mass).
Arguments
---------
mesh: Trimesh
length: number of terms to compute of the identifier
as_json: whether to return the identifier as json (vs 1D float array)
Returns
---------
    identifier: if not as_json: (length) float array of unique identifier
else: same as above, but serialized as json
'''
frequency_count = int(length - 2)
# calculate the mass properties of the mesh, which is doing a surface integral to
# find the center of volume of the mesh
mass_properties = mesh.mass_properties(skip_inertia=True)
vertex_radii = np.sum((mesh.vertices.view(np.ndarray) - mesh.center_mass) ** 2,
axis=1) ** .5
# since we will be computing the shape distribution of the radii, we need to make sure there
# are enough values to populate more than one sample per bin.
bin_count = int(np.min([256,
mesh.vertices.shape[0] * 0.2,
mesh.faces.shape[0] * 0.2]))
# if any of the frequency checks fail, we will use this zero length vector as the
# formatted information for the identifier
freq_formatted = np.zeros(frequency_count)
if bin_count > _MIN_BIN_COUNT:
face_area = mesh.area_faces
face_radii = vertex_radii[mesh.faces].reshape(-1)
area_weight = np.tile((face_area.reshape((-1, 1)) * (1.0 / 3.0)), (1, 3)).reshape(-1)
if face_radii.std() > 1e-3:
freq_formatted = fft_freq_histogram(face_radii,
bin_count=bin_count,
frequency_count=frequency_count,
weight=area_weight)
# using the volume (from surface integral), surface area, and top frequencies
identifier = np.hstack((mass_properties['volume'],
mass_properties['surface_area'],
freq_formatted))
if as_json:
# return as a json string rather than an array
return format_json(identifier)
return identifier
def fft_freq_histogram(data, bin_count, frequency_count=4, weight=None):
data = np.reshape(data, -1)
if weight is None:
weight = np.ones(len(data))
hist, bin_edges = np.histogram(data,
weights=weight,
bins=bin_count)
# we calculate the fft of the radius distribution
fft = np.abs(np.fft.fft(hist))
    # the magnitude is dependent on our weighting being good
# frequency should be more solid in more cases
freq = np.fft.fftfreq(data.size, d=(bin_edges[1] - bin_edges[0])) + bin_edges[0]
# now we must select the top FREQ_COUNT frequencies
# if there are a bunch of frequencies whose components are very close in magnitude,
# just picking the top FREQ_COUNT of them is non-deterministic
    # thus we take the top frequencies which have a magnitude that is distinguishable
# and we zero pad if this means fewer values available
fft_top = fft.argsort()[-(frequency_count + 1):]
fft_ok = np.diff(fft[fft_top]) > _TOL_FREQ
if fft_ok.any():
fft_start = np.nonzero(fft_ok)[0][0] + 1
fft_top = fft_top[fft_start:]
freq_final = np.sort(freq[fft_top])
else:
freq_final = []
freq_formatted = zero_pad(freq_final, frequency_count)
return freq_formatted
@_log_time
def merge_duplicates(meshes):
'''
Given a list of meshes, find meshes which are duplicates and merge them.
Arguments
---------
meshes: (n) list of meshes
Returns
---------
merged: (m) list of meshes where (m <= n)
'''
# so we can use advanced indexing
meshes = np.array(meshes)
# by default an identifier is a 1D float array with 6 elements
hashes = [i.identifier for i in meshes]
groups = group_rows(hashes, digits=1)
merged = [None] * len(groups)
for i, group in enumerate(groups):
quantity = 0
metadata = {}
for mesh in meshes[group]:
# if metadata exists don't nuke it
if 'quantity' in mesh.metadata:
quantity += mesh.metadata['quantity']
else:
quantity += 1
metadata.update(mesh.metadata)
metadata['quantity'] = int(quantity)
metadata['original_index'] = group
merged[i] = meshes[group[0]]
merged[i].metadata = metadata
log.info('merge_duplicates reduced part count from %d to %d',
len(meshes),
len(merged))
return np.array(merged)
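# Usage sketch (assumes a list of already-loaded Trimesh objects; names are illustrative):
#   merged = merge_duplicates(meshes)
#   for m in merged:
#       print(m.metadata['quantity'], m.metadata['original_index'])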
|
493691 | Runtime.createAndStart("sweety", "Sweety")
sweety.chatBot.startSession("default", "wikiTestFR")
sweety.chatBot.setPredicate("default","name","unknow")
wdf = Runtime.createAndStart("wikiDataFetcher", "WikiDataFetcher")
wdf.setLanguage("fr")
wdf.setWebSite("frwiki")
# Add route from webKitSpeechRecognition to Program AB
sweety.ear.addTextListener(sweety.chatBot)
# Add route from Program AB to html filter
sweety.chatBot.addTextListener(sweety.htmlFilter)
# Add route from html filter to mouth
sweety.htmlFilter.addListener("publishText", python.name, "talk");
sweety.mouth.setLanguage("FR");
sweety.mouth.setVoice("Antoine");
print cutStart("Le chat")
def talk(data):
sweety.mouth.speak(data)
print "Saying :", data
def askWiki(start,query):
query = unicode(query,'utf-8')
answer = ( start + " " + query + " est " + wdf.getDescription(query))
print " send aswer to the bot : " + answer
sweety.chatBot.getResponse("say " + answer)
def getProperty(query, ID, what):
query = unicode(query,'utf-8')
what = unicode(what,'utf-8')
print " query : " + query
answer = ( query +" " + what + " " + wdf.getSnak(query,ID))
print " send aswer to the bot : " + answer
sweety.chatBot.getResponse("say " + answer) |
493781 | import os
from test.podman_testcase import PodmanTestCase
import podman
from podman import FoldedString
pod = None
class TestPodsCtnrs(PodmanTestCase):
@classmethod
def setUpClass(cls):
# Populate storage
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
self.tmpdir = os.environ['TMPDIR']
self.host = os.environ['PODMAN_HOST']
self.pclient = podman.Client(self.host)
def test_010_populate(self):
global pod
pod = self.pclient.pods.create('pod1')
self.assertEqual('pod1', pod.name)
img = self.pclient.images.get('docker.io/library/alpine:latest')
ctnr = img.container(pod=pod.id)
pod.refresh()
self.assertEqual('1', pod.numberofcontainers)
self.assertEqual(ctnr.id, pod.containersinfo[0]['id'])
def test_015_one_shot(self):
global pod
details = pod.inspect()
state = FoldedString(details.containers[0]['state'])
self.assertEqual(state, 'configured')
pod = pod.start()
status = FoldedString(pod.containersinfo[0]['status'])
# Race on whether container is still running or finished
self.assertIn(status, ('stopped', 'exited', 'running'))
pod = pod.restart()
status = FoldedString(pod.containersinfo[0]['status'])
self.assertIn(status, ('stopped', 'exited', 'running'))
# Pod kill is broken, so use stop for now
killed = pod.stop()
self.assertEqual(pod, killed)
def test_999_remove(self):
global pod
ident = pod.remove(force=True)
self.assertEqual(ident, pod.id)
with self.assertRaises(StopIteration):
next(self.pclient.pods.list())
|
493825 | from .blocks import conv_block
from .transform_block import transformation_block
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow as tf
def get_shape_segmentation_model(num_points: int, num_classes: int) -> keras.Model:
input_points = keras.Input(shape=(num_points, 3))
# PointNet Classification Network.
transformed_inputs = transformation_block(
input_points, num_features=3, name="input_transformation_block"
)
features_64 = conv_block(transformed_inputs, filters=64, name="features_64")
features_128_1 = conv_block(features_64, filters=128, name="features_128_1")
features_128_2 = conv_block(features_128_1, filters=128, name="features_128_2")
transformed_features = transformation_block(
features_128_2, num_features=128, name="transformed_features"
)
features_512 = conv_block(transformed_features, filters=512, name="features_512")
features_2048 = conv_block(features_512, filters=2048, name="pre_maxpool_block")
global_features = layers.MaxPool1D(pool_size=num_points, name="global_features")(
features_2048
)
global_features = tf.tile(global_features, [1, num_points, 1])
# Segmentation Head
segmentation_input = layers.Concatenate(name="segmentation_input")(
[
features_64,
features_128_1,
features_128_2,
transformed_features,
features_512,
global_features,
]
)
segmentation_features = conv_block(
segmentation_input, filters=128, name="segmentation_features"
)
prefinal_outputs = layers.Conv1D(
num_classes, kernel_size=1, name="pre_final_layer"
)(segmentation_features)
outputs = layers.Activation("softmax", dtype="float32", name="segmentation_head")(
prefinal_outputs
)
return keras.Model(input_points, outputs)
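# Usage sketch (illustrative sizes, not part of the original module): build the
# segmentation model for 1024 points and 5 part classes and print its layer summary.
if __name__ == "__main__":
    model = get_shape_segmentation_model(num_points=1024, num_classes=5)
    model.summary()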
|
493865 | import zlib
import sys
import argparse
print('\033[0;32m'+"Zlib file decompressor : " + '1.0' + " Updated: " + 'May 15, 2018' +'\033[0;39m')
parser = argparse.ArgumentParser(description='\033[0;31m'+'Decompress a zlib file'+'\033[0;39m')
parser.add_argument("-input", metavar='file', type=str, default="file.zlib", help='Input zlib file (default: %(default)s)')
parser.add_argument("-output", metavar='file', type=str, default="file.out", help='Output decompressed file (default: %(default)s)')
args = parser.parse_args()
str_object1 = open(args.input, 'rb').read()
str_object2 = zlib.decompress(str_object1)
f = open(args.output, 'wb')
f.write(str_object2)
f.close()
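# Example invocation (hypothetical file names):
#   python decompress_zlib.py -input payload.zlib -output payload.bin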
|
493876 | import nltk
sentences1 = nltk.corpus.treebank.tagged_sents()[17]
print(nltk.ne_chunk(sentences1, binary=True))
sentences2 = nltk.corpus.treebank.tagged_sents()[7]
print(nltk.ne_chunk(sentences2, binary=True))
print(nltk.ne_chunk(sentences2))
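# Note (assumption about the local environment): the calls above need the NLTK
# 'treebank', 'maxent_ne_chunker' and 'words' resources, which can be fetched once with:
#   nltk.download('treebank'); nltk.download('maxent_ne_chunker'); nltk.download('words')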
|
493934 | from tool.runners.python import SubmissionPy
class EvqnaSubmission(SubmissionPy):
def rotate_trig(self, u, v, angle):
if angle == 90:
return -v, u
elif angle == 180:
return -u, -v
elif angle == 270:
return v, -u
def run(self, s):
instructions = s.splitlines()
x, y = 0, 0
u, v = 10, 1
for ins in instructions:
action, val = ins[0], int(ins[1:])
if action == 'L':
u, v = self.rotate_trig(u, v, val)
elif action == 'R':
u, v = self.rotate_trig(u, v, 360 - val)
elif action == 'F':
x += val * u
y += val * v
elif action == 'N':
v += val
elif action == 'S':
v -= val
elif action == 'E':
u += val
elif action == 'W':
u -= val
return abs(x) + abs(y)
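# Sanity check (assumption: this mirrors the Advent of Code 2020 day 12 part 2 example):
#   EvqnaSubmission().run("F10\nN3\nF7\nR90\nF11") should return 286.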
|
493964 | import importlib
import pytest
from zope.interface import Interface
from pyramid.testing import Configurator
from pyramid.httpexceptions import HTTPNotFound
from clld.db.models.common import Contribution, ValueSet, Language, Language_files
from clld.interfaces import IMapMarker, IDataTable
from clld.web.adapters.download import N3Dump
@pytest.fixture
def config():
config_ = Configurator(
root_package=importlib.import_module('clld.web'),
settings={
'sqlalchemy.url': 'sqlite://',
'clld.pacific_centered_maps': True})
config_.include('clld.web.app')
return config_
def test_CLLDRequest(env):
c = env['request'].db.query(Contribution).first()
env['request'].resource_url(c, ext='geojson')
assert env['request'].ctx_for_url('/some/path/to/nowhere') is None
assert env['request'].ctx_for_url('/')
env['request'].file_url(Language_files(id='1', object=Language.first()))
assert env['request'].get_datatable('valuesets', ValueSet)
assert env['request'].contact_email_address.startswith('from.settings')
def test_menu_item(env):
from clld.web.app import menu_item
assert menu_item('contributions', None, env['request'])
def test_ctx_factory(request_factory):
from clld.web.app import ctx_factory
for model, route in [
(Contribution, 'contributions'),
(ValueSet, 'valuesets'),
(Language, 'languages'),
]:
obj = model.first()
with request_factory(matchdict={'id': obj.id}, matched_route=route) as req:
ctx_factory(model, 'index', req)
ctx_factory(model, 'rsc', req)
with request_factory(matchdict={'id': 'xxx'}) as req:
with pytest.raises(HTTPNotFound):
ctx_factory(Contribution, 'rsc', req)
def test_MapMarker(env):
marker = env['request'].registry.getUtility(IMapMarker)
assert marker(None, env['request'])
def test_add_config_from_file(testsdir):
from clld.web.app import add_settings_from_file
config = Configurator()
add_settings_from_file(config, testsdir / 'test.ini')
assert 'app:main.use' in config.registry.settings
def test_config(config):
class IF(Interface):
pass
# should have no effect, because a resource with this name is registered by
# default:
config.register_menu('languages', ('sources', dict(label='References')))
config.register_resource('language', None, None)
config.register_resource('testresource', Language, IF, with_index=True, test=True)
config.register_download(N3Dump(Language, 'clld'))
config.add_301('/301pattern', 'http://example.org')
config.add_410('/410pattern')
def test_no_overwrite_registration(config):
config.register_utility(1, IMapMarker)
assert config.registry.queryUtility(IMapMarker) == 1
config.register_utility(2, IMapMarker)
assert config.registry.queryUtility(IMapMarker) == 2
config.register_utility(3, IMapMarker, overwrite=False)
assert config.registry.queryUtility(IMapMarker) == 2
config.register_datatable('route', 1)
assert config.registry.queryUtility(IDataTable, name='route') == 1
config.register_datatable('route', 2, overwrite=False)
assert config.registry.queryUtility(IDataTable, name='route') == 1
def test_includeme_error(tmp_path, capsys):
import sys
sys.path.append(str(tmp_path))
pkg = tmp_path.joinpath('failingapp')
pkg.mkdir()
pkg.joinpath('__init__.py').write_text('#\n', 'ascii')
pkg.joinpath('util.py').write_text('import xyzxyz', 'ascii')
config = Configurator(
root_package=importlib.import_module('failingapp'),
settings={'sqlalchemy.url': 'sqlite://'})
with pytest.raises(ImportError):
config.include('clld.web.app')
out, err = capsys.readouterr()
assert 'failingapp.util' in out
sys.path.pop()
|
493986 | import requests.api
from requests.auth import HTTPBasicAuth
from amqpstorm.compatibility import urlparse
from amqpstorm.management.exception import ApiConnectionError
from amqpstorm.management.exception import ApiError
class HTTPClient(object):
def __init__(self, api_url, username, password, timeout):
self._auth = HTTPBasicAuth(username, password)
self._timeout = timeout
self._base_url = api_url
def get(self, path, payload=None, headers=None):
"""HTTP GET operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
return self._request('get', path, payload, headers)
def post(self, path, payload=None, headers=None):
"""HTTP POST operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
return self._request('post', path, payload, headers)
def delete(self, path, payload=None, headers=None):
"""HTTP DELETE operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
return self._request('delete', path, payload, headers)
def put(self, path, payload=None, headers=None):
"""HTTP PUT operation.
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
return self._request('put', path, payload, headers)
def _request(self, method, path, payload=None, headers=None):
"""HTTP operation.
:param method: Operation type (e.g. post)
:param path: URI Path
:param payload: HTTP Body
:param headers: HTTP Headers
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:return: Response
"""
url = urlparse.urljoin(self._base_url, 'api/%s' % path)
headers = headers or {}
headers['content-type'] = 'application/json'
try:
response = requests.request(method, url,
auth=self._auth, data=payload,
headers=headers,
timeout=self._timeout)
except requests.RequestException as why:
raise ApiConnectionError(str(why))
json_response = self._get_json_output(response)
self._check_for_errors(response, json_response)
return json_response
@staticmethod
def _get_json_output(response):
"""Get JSON output from the HTTP response.
:param requests.Response response:
:return: Json payload
"""
try:
content = response.json()
except ValueError:
content = None
return content
@staticmethod
def _check_for_errors(response, json_response):
"""Check payload for errors.
:param response: HTTP response
:param json_response: Json response
:raises ApiError: Raises if the remote server encountered an error.
:return:
"""
status_code = response.status_code
try:
response.raise_for_status()
except requests.HTTPError as why:
raise ApiError(str(why), reply_code=status_code)
if isinstance(json_response, dict) and 'error' in json_response:
raise ApiError(json_response['error'], reply_code=status_code)
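# Usage sketch (assumes a locally running RabbitMQ management endpoint; the URL,
# credentials and API path below are placeholders, not part of the original module).
if __name__ == '__main__':
    client = HTTPClient('http://localhost:15672/', 'guest', 'guest', timeout=10)
    print(client.get('overview'))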
|
493999 | import json
import os
from termcolor import cprint
import colorama
from pyvoc import pyvoc
from pyvoc.check_config import check_config_dir, config_dir_path
from pyvoc.settings import USER_GROUP_ENTRIES_LIMIT
colorama.init()
# create an empty vocabulary group file
def create_new_vocab_group(group_path, group_number):
pyvoc.stop_loading_animation()
cprint("creating ", color="cyan", attrs=["bold"], end="")
cprint("vocabulary group number {}...".format(group_number), color="green")
with open(group_path, "w") as f:
json.dump({}, f)
# selects the first user group which has the space for one more word
def select_user_group():
path = os.path.join(config_dir_path(), "usergroups.json")
usergroups = dict()
with open(path, "r") as f:
usergroups = json.load(f)
for user_group_number, group_entries in usergroups.items():
if group_entries < USER_GROUP_ENTRIES_LIMIT:
return user_group_number
cprint("No space left for new entries in user groups.", color="red")
exit()
def check_if_group_full(group_number):
path = os.path.join(config_dir_path(), "usergroups.json")
with open(path, "r") as f:
content = json.load(f)
if content[str(group_number)] >= USER_GROUP_ENTRIES_LIMIT:
cprint(
"cannot add more words to group number {}".format(group_number),
color="yellow",
)
exit()
def counter_increment(group_number):
path = os.path.join(config_dir_path(), "usergroups.json")
with open(path, "r") as f:
content = json.load(f)
content[str(group_number)] += 1
with open(path, "w") as f:
json.dump(content, f)
def add_word_to_vocab(word, parsed_response, group_number=None):
check_config_dir()
config_path = config_dir_path()
definition = {word: parsed_response}
if not group_number:
group_number = select_user_group()
group_path = os.path.join(config_dir_path(), "group" + str(group_number) + ".json")
if not os.path.isfile(group_path):
create_new_vocab_group(group_path, group_number)
check_if_group_full(group_number)
pyvoc.stop_loading_animation()
cprint("\nwriting to vocabulary group...", color="yellow")
with open(group_path, "r") as f:
content = json.load(f)
content.update(definition)
with open(group_path, "w") as f:
json.dump(content, f, ensure_ascii=False)
# increase the count of word entries in the group by 1
counter_increment(group_number)
# add word (not definition) to all_words.json
with open(os.path.join(config_path, "all_words.json"), "r") as f:
all_words = json.load(f)
all_words.update({word: True})
with open(os.path.join(config_path, "all_words.json"), "w") as f:
json.dump(all_words, f)
cprint("word added to ", color="green", end="")
cprint("group number {}".format(group_number), color="cyan", attrs=["bold"])
def list_all_groups():
# reading user groups name and size
usergroups_path = os.path.join(config_dir_path(), "usergroups.json")
with open(usergroups_path, "r") as f:
user_group_numbers = json.load(f)
default_group_numbers = {101: 800, 102: 800, 103: 800}
pyvoc.stop_loading_animation()
# print user groups
cprint("\nUSER GROUPS", color="cyan", on_color="on_grey")
cprint("Group no.", color="green", end=" " * (14 - len("Group no")))
cprint("No. of words")
for group in user_group_numbers:
cprint(group, color="green", end=" " * (15 - len(str(group))))
cprint(str(user_group_numbers[group]))
# print default groups
cprint("\nDEFAULT GROUP", color="cyan", on_color="on_grey")
cprint("Group no.", color="green", end=" " * (14 - len("Group no")))
cprint("No. of words")
for group in default_group_numbers:
cprint(group, color="green", end=" " * (15 - len(str(group))))
cprint(str(default_group_numbers[group]))
exit()
|
494015 | import time
# time.time() returns the time in seconds since the epoch as a floating point number
def chour(t):
hour = t / 3600
print("The number of hours has passed since epoch is %f" % hour)
def cminute(t):
minute = t / 60
print("The number of minutes has passed since epoch is %f" % minute)
def cseconds(t):
seconds = t
print("The number of seconds has passed since epoch is %f" % seconds)
def num_day():
sec = time.time()
num_day = sec / (60*60*24)
print("The number of days has passed since epoch is %f" % num_day)
num_day()
chour(time.time())
cminute(time.time())
cseconds(time.time()) |
494032 | import cvcomm
class ControlVault2:
NAME = 'Broadcom ControlVault 2'
turn_on_seq1 = [
"10 2f 04 00",
"10 2f 1d 03 05 90 65",
"10 2f 2d 00",
"10 2f 11 01 f7",
"01 27 fc 0c 08 00 01 00 01 00 00 00 00 00 00 00",
]
turn_on_seq2 = [
"10 20 00 01 01",
"10 20 01 02 01 00",
"10 20 02 67 01 b9 64 01 00 ff ff 50 00 8b 13 00 10 00 06 00 00 00 00 00 ff 00 00 00 ff 00 00 04 00 00 00 00 03 00 00 00 03 00 0c 00 00 0d 00 00 00 00 00 00 00 00 00 00 33 23 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03 00 02 53 3b 0f 00 00 00 00 00 00 00 00 00 00 00 00 04 00 00 00 00 00 00 00",
"10 20 02 90 0a ca 05 00 00 00 00 2c 80 01 01 b0 05 01 03 03 03 08 b5 03 01 03 ff c9 0d 24 00 00 00 01 00 bb 00 e4 00 0a 01 02 d6 0d 01 02 00 00 00 00 00 01 00 01 5a 00 8a b2 02 e8 03 c8 1e 06 1f 00 0a 00 30 00 04 24 00 1c 00 75 00 77 00 76 00 1c 00 03 00 0a 00 56 01 00 00 40 04 d7 01 07 dd 32 00 00 00 29 16 08 08 06 04 00 00 00 1f 27 0a 6d 20 00 52 20 00 00 00 01 85 00 00 32 1f 00 00 02 0a 16 00 02 55 55 55 55 55 55 55 55 55 55 55 55 55 1e",
"10 20 02 06 01 b7 03 02 00 01",
"10 2f 06 01 01",
"10 20 02 0e 02 51 08 20 79 ff ff ff ff ff ff 58 01 07",
"10 21 00 07 02 04 03 02 05 03 03",
"10 20 02 17 01 29 14 46 66 6d 01 01 11 02 02 07 ff 03 02 00 13 04 01 64 07 01 03",
"10 20 02 1a 02 61 14 46 66 6d 01 01 11 02 02 07 ff 03 02 00 13 04 01 64 07 01 03 60 01 07",
"10 20 02 10 05 30 01 04 31 01 00 32 01 40 38 01 00 50 01 02",
"10 20 02 05 01 00 02 fa 00",
"10 20 02 0b 01 c2 08 01 08 00 04 80 c3 c9 01",
"10 21 03 0d 06 00 01 01 01 02 01 80 01 82 01 06 01",
]
def __init__(self, device):
self.device = device
self.communicator = cvcomm.ControlVaultCommunicator(device)
def turn_on(self):
self.communicator.ctrl_transfer(0x41, 0, 1, 3)
self.communicator.talk(self.turn_on_seq1)
self.communicator.ctrl_transfer(0x41, 1, 0, 3)
self.communicator.talk(self.turn_on_seq2)
self.communicator.ctrl_transfer(0x41, 1, 1, 3)
def turn_off(self):
self.communicator.ctrl_transfer(0x41, 1, 0, 3)
self.communicator.ctrl_transfer(0x41, 0, 0, 3)
def reset(self):
self.device.reset()
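# Usage sketch (assumes a pyusb-style device handle obtained elsewhere; purely illustrative):
#   cv = ControlVault2(device)
#   cv.turn_on()
#   ...  # use the NFC controller
#   cv.turn_off()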
|
494044 | from __future__ import absolute_import
from django.conf import settings
from appconf import AppConf
class AjaxAppConf(AppConf):
AJAX_AUTHENTICATION = 'ajax.authentication.BaseAuthentication'
|
494046 | from typing import Dict, Any
from .module import Module
from .parameter import Parameter
_dict_methods = ['__setitem__', '__getitem__', '__delitem__', '__len__', '__iter__', '__contains__',
'update', 'keys', 'values', 'items', 'clear', 'pop']
class ModuleDict(Module, dict): # use dict for auto-complete
"""
Essentially this exposes some methods of `Module._modules`.
"""
def __init__(self, modules: Dict[Any, Module] = None):
super().__init__()
for method in _dict_methods:
setattr(self, method, getattr(self._modules, method))
if modules is not None:
self.update(modules)
def forward(self):
raise RuntimeError("ModuleDict is not callable")
# Do we need a factory for it?
class ParameterDict(Module, dict):
def __init__(self, parameters: Dict[Any, Parameter] = None):
super().__init__()
for method in _dict_methods:
setattr(self, method, getattr(self._modules, method))
if parameters is not None:
self.update(parameters)
def forward(self):
raise RuntimeError("ParameterDict is not callable")
|
494063 | import tensorflow as tf
class SmoothL1Loss(tf.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.loss_weight = loss_weight
def __call__(self, pred, target, weight=None, avg_factor=None, **kwargs):
"""Forward function.
Args:
pred (tf.Tensor): The prediction.
target (tf.Tensor): The learning target of the prediction.
weight (tf.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert pred.shape == target.shape and tf.size(target) > 0
diff = tf.abs(pred - target)
loss = tf.where(diff < self.beta, 0.5 * diff * diff / self.beta,
diff - 0.5 * self.beta)
if weight is not None:
loss = loss * weight
loss = loss * self.loss_weight
if avg_factor:
return tf.reduce_sum(loss) / avg_factor
else:
return tf.reduce_mean(loss)
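# Usage sketch (synthetic tensors, purely illustrative): computes the mean smooth-L1
# loss between a prediction and a target tensor of the same shape.
if __name__ == "__main__":
    loss_fn = SmoothL1Loss(beta=1.0, loss_weight=1.0)
    pred = tf.constant([[0.5, 1.2], [2.0, -0.3]])
    target = tf.constant([[0.0, 1.0], [2.5, 0.0]])
    print(loss_fn(pred, target).numpy())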
|
494075 | import os
import sys
import numpy as np
from torch.utils.data import Dataset
Cross_Subject = [1, 2, 4, 5, 8, 9, 13, 14, 15, 16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38]
class NTU60Subject(Dataset):
def __init__(self, root, meta, frames_per_clip=23, step_between_clips=2, num_points=2048, train=True):
super(NTU60Subject, self).__init__()
self.videos = []
self.labels = []
self.index_map = []
index = 0
with open(meta, 'r') as f:
for line in f:
name, nframes = line.split()
subject = int(name[9:12])
if train:
if subject in Cross_Subject:
label = int(name[-3:]) - 1
nframes = int(nframes)
for t in range(0, nframes-step_between_clips*(frames_per_clip-1), step_between_clips):
self.index_map.append((index, t))
index += 1
self.labels.append(label)
self.videos.append(os.path.join(root, name+'.npz'))
else:
if subject not in Cross_Subject:
label = int(name[-3:]) - 1
nframes = int(nframes)
for t in range(0, nframes-step_between_clips*(frames_per_clip-1), step_between_clips):
self.index_map.append((index, t))
index += 1
self.labels.append(label)
self.videos.append(os.path.join(root, name+'.npz'))
self.frames_per_clip = frames_per_clip
self.step_between_clips = step_between_clips
self.num_points = num_points
self.train = train
self.num_classes = max(self.labels) + 1
def __len__(self):
return len(self.index_map)
def __getitem__(self, idx):
index, t = self.index_map[idx]
video = self.videos[index]
video = np.load(video, allow_pickle=True)['data'] * 100
label = self.labels[index]
clip = [video[t+i*self.step_between_clips] for i in range(self.frames_per_clip)]
for i, p in enumerate(clip):
if p.shape[0] > self.num_points:
r = np.random.choice(p.shape[0], size=self.num_points, replace=False)
else:
repeat, residue = self.num_points // p.shape[0], self.num_points % p.shape[0]
r = np.random.choice(p.shape[0], size=residue, replace=False)
r = np.concatenate([np.arange(p.shape[0]) for _ in range(repeat)] + [r], axis=0)
clip[i] = p[r, :]
clip = np.array(clip)
if self.train:
# scale the points
scales = np.random.uniform(0.9, 1.1, size=3)
clip = clip * scales
return clip.astype(np.float32), label, index
if __name__ == '__main__':
dataset = NTU60Subject(root='/home/yuhading/Data/HeheFan/pami/data/ntu/video', meta='/home/yuhading/Data/HeheFan/pami/data/ntu/ntu60.list', frames_per_clip=16)
clip, label, video_idx = dataset[0]
data = clip[0]
print(data[:,0].max()-data[:,0].min())
print(data[:,1].max()-data[:,1].min())
print(data[:,2].max()-data[:,2].min())
#print(clip)
print(label)
print(video_idx)
print(dataset.num_classes)
|
494089 | import random
from CellModeller.Regulation.ModuleRegulator import ModuleRegulator
from CellModeller.Biophysics.BacterialModels.CLBacterium import CLBacterium
from CellModeller.GUI import Renderers
import numpy
import math
N0 = 10
def setup(sim):
# Set biophysics, signalling, and regulation models
biophys = CLBacterium(sim, jitter_z=False, gamma = 100, max_cells=100000, max_planes=1)
regul = ModuleRegulator(sim, sim.moduleName) # use this file for reg too
# Only biophys and regulation
sim.init(biophys, regul, None, None)
#biophys.addPlane((0,0,0),(0,0,1),1.0) #Base plane
#biophys.addPlane((10,0,0),(-1,0,0),1.0)
#biophys.addPlane((-10,0,0),(1,0,0),1.0)
#biophys.addPlane((0,10,0),(0,-1,0),1.0)
#biophys.addPlane((0,-10,0),(0,1,0),1.0)
sim.addCell(cellType=0, pos=(0,0,0))
# Add some objects to draw the models
therenderer = Renderers.GLBacteriumRenderer(sim)
sim.addRenderer(therenderer)
sim.pickleSteps = 1
def init(cell):
cell.targetVol = 3.5 + random.uniform(0.0,0.5)
cell.growthRate = 1.0
cell.n_a = N0//2
cell.n_b = N0 - cell.n_a
def update(cells):
for (id, cell) in cells.items():
cell.color = [0.1, cell.n_a/3.0, cell.n_b/3.0]
if cell.volume > cell.targetVol:
cell.divideFlag = True
def divide(parent, d1, d2):
d1.targetVol = 3.5 + random.uniform(0.0,0.5)
d2.targetVol = 3.5 + random.uniform(0.0,0.5)
plasmids = [0]*parent.n_a*2 + [1]*parent.n_b*2
random.shuffle(plasmids)
d1.n_a = 0
d1.n_b = 0
d2.n_a = 0
d2.n_b = 0
for p in plasmids[:N0]:
if p == 0: d1.n_a +=1
else: d1.n_b +=1
for p in plasmids[N0:2*N0]:
if p == 0: d2.n_a +=1
else: d2.n_b +=1
assert parent.n_a + parent.n_b == N0
assert d1.n_a + d1.n_b == N0
assert d2.n_a + d2.n_b == N0
assert parent.n_a*2 == d1.n_a+d2.n_a
assert parent.n_b*2 == d1.n_b+d2.n_b
assert parent.n_a > 0 or (d1.n_a == 0 and d2.n_a == 0)
assert parent.n_b > 0 or (d1.n_b == 0 and d2.n_b == 0)
|
494093 | from collections import deque
import gym
import numpy as np
from gym import spaces, logger
from gym.utils import seeding
class SnakeAction(object):
LEFT = 0
RIGHT = 1
UP = 2
DOWN = 3
class BoardColor(object):
BODY_COLOR = np.array([0, 0, 0], dtype=np.uint8)
FOOD_COLOR = np.array([0, 255, 0], dtype=np.uint8)
SPACE_COLOR = np.array([255, 255, 255], dtype=np.uint8)
class SnakeEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
}
def __init__(self, observation_mode='rgb', energy_consum=False):
self.observation_mode = observation_mode
self.energy_consum = energy_consum
self.width = 42
self.height = 42
self.action_space = spaces.Discrete(4)
if observation_mode == 'rgb':
self.observation_space = spaces.Box(low=0, high=256, shape=(self.width * 2, self.height * 2, 3),
dtype=np.float32)
else:
self.observation_space = spaces.Box(low=0, high=255, shape=(self.width, self.height, 1), dtype=np.uint8)
self.snake = Snake()
self.foods = []
self.n_foods = 5
self.viewer = None
self.np_random = np.random
def set_foods(self, n):
self.n_foods = n
def reset(self):
self.snake.body.clear()
self.foods.clear()
empty_cells = self.get_empty_cells()
empty_cells = self.snake.init(empty_cells, self.np_random)
self.foods = [empty_cells[i] for i in self.np_random.choice(len(empty_cells), self.n_foods, replace=False)]
return self.get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
snake_tail = self.snake.step(action)
self.snake.reward = 0.
if self.energy_consum:
self.snake.reward -= 0.01
if self.snake.head in self.foods:
self.snake.reward += 1.
self.snake.body.append(snake_tail)
self.foods.remove(self.snake.head)
empty_cells = self.get_empty_cells()
food = empty_cells[self.np_random.choice(len(empty_cells))]
self.foods.append(food)
# snake collided wall
if self.is_collided_wall(self.snake.head):
self.snake.reward -= 1.
self.snake.done = True
# snake bite itself
if self.snake.head in list(self.snake.body)[1:]:
self.snake.reward -= 1.
self.snake.done = True
self.snake.reward = np.clip(self.snake.reward, -1., 1.)
return self.get_observation(), self.snake.reward, self.snake.done, {}
def get_observation(self):
if self.observation_mode == 'rgb':
return self.get_image()
else:
observation = np.zeros((self.width, self.height), dtype=np.uint8)
for x, y in self.snake.body:
try:
observation[x][y] = 100
except:
pass
for food in self.foods:
x, y = food
observation[x][y] = 200
return observation[:, :, None]
def get_image(self):
board_width = 2 * self.width
board_height = 2 * self.height
cell_size = 2
board = Board(board_height, board_width)
for x, y in self.snake.body:
board.fill_cell((x * cell_size, y * cell_size), cell_size, BoardColor.BODY_COLOR)
for food in self.foods:
x, y = food
board.fill_cell((x * cell_size, y * cell_size), cell_size, BoardColor.FOOD_COLOR)
return board.board
def get_empty_cells(self):
empty_cells = [(x, y) for x in range(self.width) for y in range(self.height)]
for cell in self.snake.body:
if cell in empty_cells:
empty_cells.remove(cell)
for food in self.foods:
if food in empty_cells:
empty_cells.remove(food)
return empty_cells
def is_collided_wall(self, head):
x, y = head
if x < 0 or x > (self.width - 1) or y < 0 or y > (self.height - 1):
return True
return False
def render(self, mode='human'):
img = self.get_image()
if mode == 'rgb_array':
return img
elif mode == 'human':
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(img)
return self.viewer.isopen
class Snake(object):
def __init__(self):
self.body = deque()
self.prev_act = None
self.done = False
self.reward = 0.
def step(self, action):
if not self.done:
if not self.is_valid_action(action):
action = self.prev_act
self.prev_act = action
x, y = self.head
if action == SnakeAction.LEFT:
self.body.appendleft((x, y - 1))
if action == SnakeAction.RIGHT:
self.body.appendleft((x, y + 1))
if action == SnakeAction.UP:
self.body.appendleft((x - 1, y))
if action == SnakeAction.DOWN:
self.body.appendleft((x + 1, y))
return self.body.pop()
@property
def head(self):
return self.body[0]
def is_valid_action(self, action):
if len(self.body) == 1:
return True
horizontal_actions = [SnakeAction.LEFT, SnakeAction.RIGHT]
vertical_actions = [SnakeAction.UP, SnakeAction.DOWN]
if self.prev_act in horizontal_actions:
return action in vertical_actions
return action in horizontal_actions
def init(self, empty_cells, np_random):
self.body.clear()
self.done = False
self.reward = 0.
self.prev_act = None
start_head = empty_cells[np_random.choice(len(empty_cells))]
self.body.appendleft(start_head)
empty_cells.remove(start_head)
return empty_cells
class Board(object):
def __init__(self, height, weight):
self.board = np.empty((height, weight, 3), dtype=np.uint8)
self.board[:, :, :] = BoardColor.SPACE_COLOR
def fill_cell(self, vertex, cell_size, color):
x, y = vertex
self.board[x:x + cell_size, y:y + cell_size, :] = color
class SnakeEnvMC(SnakeEnv):
def __init__(self):
super().__init__(observation_mode='rgb')
snake_env = SnakeEnvMC()
if __name__ == '__main__':
ss = SnakeEnv()
ss.reset()
|